]> Git Repo - linux.git/blob - net/rxrpc/proc.c
dma-mapping: don't return errors from dma_set_max_seg_size
[linux.git] / net / rxrpc / proc.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /* /proc/net/ support for AF_RXRPC
3  *
4  * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5  * Written by David Howells ([email protected])
6  */
7
8 #include <linux/module.h>
9 #include <net/sock.h>
10 #include <net/af_rxrpc.h>
11 #include "ar-internal.h"
12
/*
 * Fixed-width (8-char) state names for connection display in
 * /proc/net/rxrpc_conns; indexed by enum value, padded so columns line up.
 */
static const char *const rxrpc_conn_states[RXRPC_CONN__NR_STATES] = {
	[RXRPC_CONN_UNUSED]			= "Unused  ",
	[RXRPC_CONN_CLIENT_UNSECURED]		= "ClUnsec ",
	[RXRPC_CONN_CLIENT]			= "Client  ",
	[RXRPC_CONN_SERVICE_PREALLOC]		= "SvPrealc",
	[RXRPC_CONN_SERVICE_UNSECURED]		= "SvUnsec ",
	[RXRPC_CONN_SERVICE_CHALLENGING]	= "SvChall ",
	[RXRPC_CONN_SERVICE]			= "SvSecure",
	[RXRPC_CONN_ABORTED]			= "Aborted ",
};
23
24 /*
25  * generate a list of extant and dead calls in /proc/net/rxrpc_calls
26  */
27 static void *rxrpc_call_seq_start(struct seq_file *seq, loff_t *_pos)
28         __acquires(rcu)
29 {
30         struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
31
32         rcu_read_lock();
33         return seq_list_start_head_rcu(&rxnet->calls, *_pos);
34 }
35
36 static void *rxrpc_call_seq_next(struct seq_file *seq, void *v, loff_t *pos)
37 {
38         struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
39
40         return seq_list_next_rcu(v, &rxnet->calls, pos);
41 }
42
/*
 * End iteration: drop the RCU read lock taken in rxrpc_call_seq_start().
 */
static void rxrpc_call_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}
48
/*
 * Emit one line of /proc/net/rxrpc_calls for the call at cursor position v,
 * or the column-header line when v is the list head sentinel.
 */
static int rxrpc_call_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	struct rxrpc_call *call;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	enum rxrpc_call_state state;
	rxrpc_seq_t acks_hard_ack;
	char lbuff[50], rbuff[50];	/* rendered local/remote addresses */
	long timeout = 0;

	/* The list head itself is returned first; print the header for it. */
	if (v == &rxnet->calls) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   CallID   End Use State    Abort   "
			 " DebugId  TxSeq    TW RxSeq    RW RxSerial CW RxTimo\n");
		return 0;
	}

	call = list_entry(v, struct rxrpc_call, link);

	/* Preallocated service calls may not have a local endpoint yet. */
	local = call->local;
	if (local)
		sprintf(lbuff, "%pISpc", &local->srx.transport);
	else
		strcpy(lbuff, "no_local");

	sprintf(rbuff, "%pISpc", &call->dest_srx.transport);

	state = rxrpc_call_state(call);
	/* Prealloc'd calls have no live timer; leave timeout at 0 for them. */
	if (state != RXRPC_CALL_SERVER_PREALLOC)
		timeout = ktime_ms_delta(READ_ONCE(call->expect_rx_by), ktime_get_real());

	/* Snapshot once so the Tx window (tx_top - hard_ack) is consistent. */
	acks_hard_ack = READ_ONCE(call->acks_hard_ack);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %08x %s %3u"
		   " %-8.8s %08x %08x %08x %02x %08x %02x %08x %02x %06lx\n",
		   lbuff,
		   rbuff,
		   call->dest_srx.srx_service,
		   call->cid,
		   call->call_id,
		   rxrpc_is_service_call(call) ? "Svc" : "Clt",
		   refcount_read(&call->ref),
		   rxrpc_call_states[state],
		   call->abort_code,
		   call->debug_id,
		   acks_hard_ack, READ_ONCE(call->tx_top) - acks_hard_ack,
		   call->ackr_window, call->ackr_wtop - call->ackr_window,
		   call->rx_serial,
		   call->cong_cwnd,
		   timeout);

	return 0;
}
104
/* seq_file iterator operations for /proc/net/rxrpc_calls. */
const struct seq_operations rxrpc_call_seq_ops = {
	.start  = rxrpc_call_seq_start,
	.next   = rxrpc_call_seq_next,
	.stop   = rxrpc_call_seq_stop,
	.show   = rxrpc_call_seq_show,
};
111
112 /*
113  * generate a list of extant virtual connections in /proc/net/rxrpc_conns
114  */
115 static void *rxrpc_connection_seq_start(struct seq_file *seq, loff_t *_pos)
116         __acquires(rxnet->conn_lock)
117 {
118         struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
119
120         read_lock(&rxnet->conn_lock);
121         return seq_list_start_head(&rxnet->conn_proc_list, *_pos);
122 }
123
124 static void *rxrpc_connection_seq_next(struct seq_file *seq, void *v,
125                                        loff_t *pos)
126 {
127         struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
128
129         return seq_list_next(v, &rxnet->conn_proc_list, pos);
130 }
131
/*
 * End iteration: release conn_lock taken in rxrpc_connection_seq_start().
 */
static void rxrpc_connection_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}
139
/*
 * Emit one line of /proc/net/rxrpc_conns for the connection at cursor v,
 * or the column-header line when v is the list head sentinel.
 */
static int rxrpc_connection_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_connection *conn;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	const char *state;
	char lbuff[50], rbuff[50];	/* rendered local/remote addresses */

	if (v == &rxnet->conn_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID ConnID   End Ref Act State    Key     "
			 " Serial   ISerial  CallId0  CallId1  CallId2  CallId3\n"
			 );
		return 0;
	}

	conn = list_entry(v, struct rxrpc_connection, proc_link);
	/* Prealloc'd service conns have no local/peer addresses to print. */
	if (conn->state == RXRPC_CONN_SERVICE_PREALLOC) {
		strcpy(lbuff, "no_local");
		strcpy(rbuff, "no_connection");
		goto print;
	}

	sprintf(lbuff, "%pISpc", &conn->local->srx.transport);
	sprintf(rbuff, "%pISpc", &conn->peer->srx.transport);
print:
	/* Aborted conns show the completion reason instead of a state name. */
	state = rxrpc_is_conn_aborted(conn) ?
		rxrpc_call_completions[conn->completion] :
		rxrpc_conn_states[conn->state];
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %08x %s %3u %3d"
		   " %s %08x %08x %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   conn->service_id,
		   conn->proto.cid,
		   rxrpc_conn_is_service(conn) ? "Svc" : "Clt",
		   refcount_read(&conn->ref),
		   atomic_read(&conn->active),
		   state,
		   key_serial(conn->key),
		   conn->tx_serial,
		   conn->hi_serial,
		   conn->channels[0].call_id,
		   conn->channels[1].call_id,
		   conn->channels[2].call_id,
		   conn->channels[3].call_id);

	return 0;
}
191
/* seq_file iterator operations for /proc/net/rxrpc_conns. */
const struct seq_operations rxrpc_connection_seq_ops = {
	.start  = rxrpc_connection_seq_start,
	.next   = rxrpc_connection_seq_next,
	.stop   = rxrpc_connection_seq_stop,
	.show   = rxrpc_connection_seq_show,
};
198
199 /*
200  * generate a list of extant virtual bundles in /proc/net/rxrpc/bundles
201  */
202 static void *rxrpc_bundle_seq_start(struct seq_file *seq, loff_t *_pos)
203         __acquires(rxnet->conn_lock)
204 {
205         struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
206
207         read_lock(&rxnet->conn_lock);
208         return seq_list_start_head(&rxnet->bundle_proc_list, *_pos);
209 }
210
211 static void *rxrpc_bundle_seq_next(struct seq_file *seq, void *v,
212                                        loff_t *pos)
213 {
214         struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
215
216         return seq_list_next(v, &rxnet->bundle_proc_list, pos);
217 }
218
/*
 * End iteration: release conn_lock taken in rxrpc_bundle_seq_start().
 */
static void rxrpc_bundle_seq_stop(struct seq_file *seq, void *v)
	__releases(rxnet->conn_lock)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));

	read_unlock(&rxnet->conn_lock);
}
226
/*
 * Emit one line of /proc/net/rxrpc/bundles for the bundle at cursor v,
 * or the column-header line when v is the list head sentinel.
 */
static int rxrpc_bundle_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_bundle *bundle;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	char lbuff[50], rbuff[50];	/* rendered local/remote addresses */

	if (v == &rxnet->bundle_proc_list) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " SvID Ref Act Flg Key      |"
			 " Bundle   Conn_0   Conn_1   Conn_2   Conn_3\n"
			 );
		return 0;
	}

	bundle = list_entry(v, struct rxrpc_bundle, proc_link);

	sprintf(lbuff, "%pISpc", &bundle->local->srx.transport);
	sprintf(rbuff, "%pISpc", &bundle->peer->srx.transport);
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %4x %3u %3d"
		   " %c%c%c %08x | %08x %08x %08x %08x %08x\n",
		   lbuff,
		   rbuff,
		   bundle->service_id,
		   refcount_read(&bundle->ref),
		   atomic_read(&bundle->active),
		   /* Flg column: Upgrade-try / exclusive / upgrade flags */
		   bundle->try_upgrade ? 'U' : '-',
		   bundle->exclusive ? 'e' : '-',
		   bundle->upgrade ? 'u' : '-',
		   key_serial(bundle->key),
		   bundle->debug_id,
		   bundle->conn_ids[0],
		   bundle->conn_ids[1],
		   bundle->conn_ids[2],
		   bundle->conn_ids[3]);

	return 0;
}
267
/* seq_file iterator operations for /proc/net/rxrpc/bundles. */
const struct seq_operations rxrpc_bundle_seq_ops = {
	.start  = rxrpc_bundle_seq_start,
	.next   = rxrpc_bundle_seq_next,
	.stop   = rxrpc_bundle_seq_stop,
	.show   = rxrpc_bundle_seq_show,
};
274
275 /*
276  * generate a list of extant virtual peers in /proc/net/rxrpc/peers
277  */
/*
 * Emit one line of /proc/net/rxrpc/peers for the peer at cursor v, or the
 * column-header line when v is SEQ_START_TOKEN.
 */
static int rxrpc_peer_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_peer *peer;
	time64_t now;
	char lbuff[50], rbuff[50];	/* rendered local/remote addresses */

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Remote                                         "
			 " Use SST   MTU LastUse      RTT      RTO\n"
			 );
		return 0;
	}

	peer = list_entry(v, struct rxrpc_peer, hash_link);

	sprintf(lbuff, "%pISpc", &peer->local->srx.transport);

	sprintf(rbuff, "%pISpc", &peer->srx.transport);

	now = ktime_get_seconds();
	seq_printf(seq,
		   "UDP   %-47.47s %-47.47s %3u"
		   " %3u %5u %6llus %8u %8u\n",
		   lbuff,
		   rbuff,
		   refcount_read(&peer->ref),
		   peer->cong_ssthresh,
		   peer->mtu,
		   now - peer->last_tx_at,	/* seconds since last transmit */
		   peer->srtt_us >> 3,		/* srtt_us is scaled by 8 */
		   peer->rto_us);

	return 0;
}
314
/*
 * Begin iteration of the peer hash table for /proc/net/rxrpc/peers.
 *
 * The seq_file position *_pos encodes (bucket << shift) | (index + 1) so
 * the cursor can be re-derived across reads; UINT_MAX marks end-of-table.
 * Takes the RCU read lock; released in rxrpc_peer_seq_stop().
 */
static void *rxrpc_peer_seq_start(struct seq_file *seq, loff_t *_pos)
	__acquires(rcu)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
	unsigned int bucket, n;
	unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
	void *p;

	rcu_read_lock();

	if (*_pos >= UINT_MAX)
		return NULL;

	/* Decode bucket and in-bucket index from the saved position. */
	n = *_pos & ((1U << shift) - 1);
	bucket = *_pos >> shift;
	for (;;) {
		if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
			*_pos = UINT_MAX;
			return NULL;
		}
		if (n == 0) {
			/* Index 0 of bucket 0 is reserved for the header. */
			if (bucket == 0)
				return SEQ_START_TOKEN;
			*_pos += 1;
			n++;
		}

		p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
		if (p)
			return p;
		/* Bucket exhausted - move to the start of the next one. */
		bucket++;
		n = 1;
		*_pos = (bucket << shift) | n;
	}
}
350
351 static void *rxrpc_peer_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
352 {
353         struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
354         unsigned int bucket, n;
355         unsigned int shift = 32 - HASH_BITS(rxnet->peer_hash);
356         void *p;
357
358         if (*_pos >= UINT_MAX)
359                 return NULL;
360
361         bucket = *_pos >> shift;
362
363         p = seq_hlist_next_rcu(v, &rxnet->peer_hash[bucket], _pos);
364         if (p)
365                 return p;
366
367         for (;;) {
368                 bucket++;
369                 n = 1;
370                 *_pos = (bucket << shift) | n;
371
372                 if (bucket >= HASH_SIZE(rxnet->peer_hash)) {
373                         *_pos = UINT_MAX;
374                         return NULL;
375                 }
376                 if (n == 0) {
377                         *_pos += 1;
378                         n++;
379                 }
380
381                 p = seq_hlist_start_rcu(&rxnet->peer_hash[bucket], n - 1);
382                 if (p)
383                         return p;
384         }
385 }
386
/*
 * End iteration: drop the RCU read lock taken in rxrpc_peer_seq_start().
 */
static void rxrpc_peer_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}
392
393
/* seq_file iterator operations for /proc/net/rxrpc/peers. */
const struct seq_operations rxrpc_peer_seq_ops = {
	.start  = rxrpc_peer_seq_start,
	.next   = rxrpc_peer_seq_next,
	.stop   = rxrpc_peer_seq_stop,
	.show   = rxrpc_peer_seq_show,
};
400
401 /*
402  * Generate a list of extant virtual local endpoints in /proc/net/rxrpc/locals
403  */
/*
 * Emit one line of /proc/net/rxrpc/locals for the local endpoint at cursor
 * v, or the column-header line when v is SEQ_START_TOKEN.
 */
static int rxrpc_local_seq_show(struct seq_file *seq, void *v)
{
	struct rxrpc_local *local;
	char lbuff[50];		/* rendered local address */

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Proto Local                                          "
			 " Use Act RxQ\n");
		return 0;
	}

	local = hlist_entry(v, struct rxrpc_local, link);

	sprintf(lbuff, "%pISpc", &local->srx.transport);

	seq_printf(seq,
		   "UDP   %-47.47s %3u %3u %3u\n",
		   lbuff,
		   refcount_read(&local->ref),
		   atomic_read(&local->active_users),
		   local->rx_queue.qlen);

	return 0;
}
429
430 static void *rxrpc_local_seq_start(struct seq_file *seq, loff_t *_pos)
431         __acquires(rcu)
432 {
433         struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
434         unsigned int n;
435
436         rcu_read_lock();
437
438         if (*_pos >= UINT_MAX)
439                 return NULL;
440
441         n = *_pos;
442         if (n == 0)
443                 return SEQ_START_TOKEN;
444
445         return seq_hlist_start_rcu(&rxnet->local_endpoints, n - 1);
446 }
447
448 static void *rxrpc_local_seq_next(struct seq_file *seq, void *v, loff_t *_pos)
449 {
450         struct rxrpc_net *rxnet = rxrpc_net(seq_file_net(seq));
451
452         if (*_pos >= UINT_MAX)
453                 return NULL;
454
455         return seq_hlist_next_rcu(v, &rxnet->local_endpoints, _pos);
456 }
457
/*
 * End iteration: drop the RCU read lock taken in rxrpc_local_seq_start().
 */
static void rxrpc_local_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	rcu_read_unlock();
}
463
/* seq_file iterator operations for /proc/net/rxrpc/locals. */
const struct seq_operations rxrpc_local_seq_ops = {
	.start  = rxrpc_local_seq_start,
	.next   = rxrpc_local_seq_next,
	.stop   = rxrpc_local_seq_stop,
	.show   = rxrpc_local_seq_show,
};
470
471 /*
472  * Display stats in /proc/net/rxrpc/stats
473  */
/*
 * Render the per-netns rxrpc statistics into /proc/net/rxrpc/stats.
 * Single-shot show routine (no iterator); always returns 0.
 */
int rxrpc_stats_show(struct seq_file *seq, void *v)
{
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(seq));

	seq_printf(seq,
		   "Data     : send=%u sendf=%u fail=%u\n",
		   atomic_read(&rxnet->stat_tx_data_send),
		   atomic_read(&rxnet->stat_tx_data_send_frag),
		   atomic_read(&rxnet->stat_tx_data_send_fail));
	seq_printf(seq,
		   "Data-Tx  : nr=%u retrans=%u uf=%u cwr=%u\n",
		   atomic_read(&rxnet->stat_tx_data),
		   atomic_read(&rxnet->stat_tx_data_retrans),
		   atomic_read(&rxnet->stat_tx_data_underflow),
		   atomic_read(&rxnet->stat_tx_data_cwnd_reset));
	seq_printf(seq,
		   "Data-Rx  : nr=%u reqack=%u jumbo=%u\n",
		   atomic_read(&rxnet->stat_rx_data),
		   atomic_read(&rxnet->stat_rx_data_reqack),
		   atomic_read(&rxnet->stat_rx_data_jumbo));
	seq_printf(seq,
		   "Ack      : fill=%u send=%u skip=%u\n",
		   atomic_read(&rxnet->stat_tx_ack_fill),
		   atomic_read(&rxnet->stat_tx_ack_send),
		   atomic_read(&rxnet->stat_tx_ack_skip));
	/* Per-ACK-type counters, transmitted then received. */
	seq_printf(seq,
		   "Ack-Tx   : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_REQUESTED]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DUPLICATE]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_EXCEEDS_WINDOW]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_NOSPACE]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_PING_RESPONSE]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_DELAY]),
		   atomic_read(&rxnet->stat_tx_acks[RXRPC_ACK_IDLE]));
	seq_printf(seq,
		   "Ack-Rx   : req=%u dup=%u oos=%u exw=%u nos=%u png=%u prs=%u dly=%u idl=%u\n",
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_REQUESTED]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DUPLICATE]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_OUT_OF_SEQUENCE]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_EXCEEDS_WINDOW]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_NOSPACE]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_PING_RESPONSE]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_DELAY]),
		   atomic_read(&rxnet->stat_rx_acks[RXRPC_ACK_IDLE]));
	/* Reasons we set RXRPC_REQUEST_ACK on outgoing DATA packets. */
	seq_printf(seq,
		   "Why-Req-A: acklost=%u already=%u mrtt=%u ortt=%u\n",
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_ack_lost]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_already_on]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_more_rtt]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_old_rtt]));
	seq_printf(seq,
		   "Why-Req-A: nolast=%u retx=%u slows=%u smtxw=%u\n",
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_no_srv_last]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_retrans]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_slow_start]),
		   atomic_read(&rxnet->stat_why_req_ack[rxrpc_reqack_small_txwin]));
	/* Global (not per-netns) buffer counters. */
	seq_printf(seq,
		   "Buffers  : txb=%u rxb=%u\n",
		   atomic_read(&rxrpc_nr_txbuf),
		   atomic_read(&rxrpc_n_rx_skbs));
	seq_printf(seq,
		   "IO-thread: loops=%u\n",
		   atomic_read(&rxnet->stat_io_loop));
	return 0;
}
542
543 /*
544  * Clear stats if /proc/net/rxrpc/stats is written to.
545  */
/*
 * Clear the per-netns statistics when /proc/net/rxrpc/stats is written to.
 *
 * Only an empty write or a single newline is accepted; anything else gets
 * -EINVAL.  Returns the consumed size on success.  The individual counters
 * are reset non-atomically with respect to each other, so a concurrent
 * reader may see a mix of old and new values.
 */
int rxrpc_stats_clear(struct file *file, char *buf, size_t size)
{
	struct seq_file *m = file->private_data;
	struct rxrpc_net *rxnet = rxrpc_net(seq_file_single_net(m));

	if (size > 1 || (size == 1 && buf[0] != '\n'))
		return -EINVAL;

	atomic_set(&rxnet->stat_tx_data, 0);
	atomic_set(&rxnet->stat_tx_data_retrans, 0);
	atomic_set(&rxnet->stat_tx_data_underflow, 0);
	atomic_set(&rxnet->stat_tx_data_cwnd_reset, 0);
	atomic_set(&rxnet->stat_tx_data_send, 0);
	atomic_set(&rxnet->stat_tx_data_send_frag, 0);
	atomic_set(&rxnet->stat_tx_data_send_fail, 0);
	atomic_set(&rxnet->stat_rx_data, 0);
	atomic_set(&rxnet->stat_rx_data_reqack, 0);
	atomic_set(&rxnet->stat_rx_data_jumbo, 0);

	atomic_set(&rxnet->stat_tx_ack_fill, 0);
	atomic_set(&rxnet->stat_tx_ack_send, 0);
	atomic_set(&rxnet->stat_tx_ack_skip, 0);
	memset(&rxnet->stat_tx_acks, 0, sizeof(rxnet->stat_tx_acks));
	memset(&rxnet->stat_rx_acks, 0, sizeof(rxnet->stat_rx_acks));

	memset(&rxnet->stat_why_req_ack, 0, sizeof(rxnet->stat_why_req_ack));

	atomic_set(&rxnet->stat_io_loop, 0);
	return size;
}
This page took 0.065472 seconds and 4 git commands to generate.