/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/crypto.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);

/*
 * allocate a new client connection bundle
 */
static struct rxrpc_conn_bundle *rxrpc_alloc_bundle(gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle;

	_enter("");

	bundle = kzalloc(sizeof(struct rxrpc_conn_bundle), gfp);
	if (bundle) {
		INIT_LIST_HEAD(&bundle->unused_conns);
		INIT_LIST_HEAD(&bundle->avail_conns);
		INIT_LIST_HEAD(&bundle->busy_conns);
		init_waitqueue_head(&bundle->chanwait);
		atomic_set(&bundle->usage, 1);
	}

	_leave(" = %p", bundle);
	return bundle;
}

/*
 * compare bundle parameters with what we're looking for
 * - return -ve, 0 or +ve
 */
static inline
int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle *bundle,
		     struct key *key, __be16 service_id)
{
	return (bundle->service_id - service_id) ?:
		((unsigned long) bundle->key - (unsigned long) key);
}

/*
 * get bundle of client connections that a client socket can make use of
 */
struct rxrpc_conn_bundle *rxrpc_get_bundle(struct rxrpc_sock *rx,
					   struct rxrpc_transport *trans,
					   struct key *key,
					   __be16 service_id,
					   gfp_t gfp)
{
	struct rxrpc_conn_bundle *bundle, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p{%x},%x,%hx,",
	       rx, key_serial(key), trans->debug_id, ntohs(service_id));

	if (rx->trans == trans && rx->bundle) {
		atomic_inc(&rx->bundle->usage);
		return rx->bundle;
	}

	/* search the extant bundles first for one that matches the specified
	 * user ID */
	spin_lock(&trans->client_lock);

	p = trans->bundles.rb_node;
	while (p) {
		bundle = rb_entry(p, struct rxrpc_conn_bundle, node);

		if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
			p = p->rb_left;
		else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
			p = p->rb_right;
		else
			goto found_extant_bundle;
	}

	spin_unlock(&trans->client_lock);

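	/* the candidate is allocated outside the lock, so another thread may
	 * insert a matching bundle in the meantime; that is why the tree is
	 * searched a second time below before the candidate is linked in */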
	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_bundle(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->key = key_get(key);
	candidate->service_id = service_id;

	spin_lock(&trans->client_lock);

	pp = &trans->bundles.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		bundle = rb_entry(parent, struct rxrpc_conn_bundle, node);

		if (rxrpc_cmp_bundle(bundle, key, service_id) < 0)
			pp = &(*pp)->rb_left;
		else if (rxrpc_cmp_bundle(bundle, key, service_id) > 0)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new bundle */
	bundle = candidate;
	candidate = NULL;

	rb_link_node(&bundle->node, parent, pp);
	rb_insert_color(&bundle->node, &trans->bundles);
	spin_unlock(&trans->client_lock);
	_net("BUNDLE new on trans %d", trans->debug_id);
	if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
		atomic_inc(&bundle->usage);
		rx->bundle = bundle;
	}
	_leave(" = %p [new]", bundle);
	return bundle;

	/* we found the bundle in the list immediately */
found_extant_bundle:
	atomic_inc(&bundle->usage);
	spin_unlock(&trans->client_lock);
	_net("BUNDLE old on trans %d", trans->debug_id);
	if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
		atomic_inc(&bundle->usage);
		rx->bundle = bundle;
	}
	_leave(" = %p [extant %d]", bundle, atomic_read(&bundle->usage));
	return bundle;

	/* we found the bundle on the second time through the list */
found_extant_second:
	atomic_inc(&bundle->usage);
	spin_unlock(&trans->client_lock);
	rxrpc_put_bundle(trans, candidate);
	_net("BUNDLE old2 on trans %d", trans->debug_id);
	if (!rx->bundle && rx->sk.sk_state == RXRPC_CLIENT_CONNECTED) {
		atomic_inc(&bundle->usage);
		rx->bundle = bundle;
	}
	_leave(" = %p [second %d]", bundle, atomic_read(&bundle->usage));
	return bundle;
}

/*
 * release a bundle
 */
void rxrpc_put_bundle(struct rxrpc_transport *trans,
		      struct rxrpc_conn_bundle *bundle)
{
	_enter("%p,%p{%d}", trans, bundle, atomic_read(&bundle->usage));

	if (atomic_dec_and_lock(&bundle->usage, &trans->client_lock)) {
		_debug("Destroy bundle");
		rb_erase(&bundle->node, &trans->bundles);
		spin_unlock(&trans->client_lock);
		ASSERT(list_empty(&bundle->unused_conns));
		ASSERT(list_empty(&bundle->avail_conns));
		ASSERT(list_empty(&bundle->busy_conns));
		ASSERTCMP(bundle->num_conns, ==, 0);
		key_put(bundle->key);
		kfree(bundle);
	}

	_leave("");
}

/*
 * allocate a new connection
 */
static struct rxrpc_connection *rxrpc_alloc_connection(gfp_t gfp)
{
	struct rxrpc_connection *conn;

	_enter("");

	conn = kzalloc(sizeof(struct rxrpc_connection), gfp);
	if (conn) {
		INIT_WORK(&conn->processor, &rxrpc_process_connection);
		INIT_LIST_HEAD(&conn->bundle_link);
		conn->calls = RB_ROOT;
		skb_queue_head_init(&conn->rx_queue);
		rwlock_init(&conn->lock);
		spin_lock_init(&conn->state_lock);
		atomic_set(&conn->usage, 1);
		conn->debug_id = atomic_inc_return(&rxrpc_debug_id);
		conn->avail_calls = RXRPC_MAXCALLS;
		conn->size_align = 4;
		conn->header_size = sizeof(struct rxrpc_header);
	}

	_leave(" = %p{%d}", conn, conn ? conn->debug_id : 0);
	return conn;
}

/*
 * assign a connection ID to a connection and add it to the transport's
 * connection lookup tree
 * - called with transport client lock held
 */
static void rxrpc_assign_connection_id(struct rxrpc_connection *conn)
{
	struct rxrpc_connection *xconn;
	struct rb_node *parent, **p;
	__be32 epoch;
	u32 real_conn_id;

	_enter("");

	epoch = conn->epoch;

	write_lock_bh(&conn->trans->conn_lock);

	conn->trans->conn_idcounter += RXRPC_CID_INC;
	if (conn->trans->conn_idcounter < RXRPC_CID_INC)
		conn->trans->conn_idcounter = RXRPC_CID_INC;
	real_conn_id = conn->trans->conn_idcounter;
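	/* the counter steps by RXRPC_CID_INC so that the low-order bits of the
	 * on-the-wire CID stay free to carry the channel number within the
	 * connection */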

attempt_insertion:
	parent = NULL;
	p = &conn->trans->client_conns.rb_node;

	while (*p) {
		parent = *p;
		xconn = rb_entry(parent, struct rxrpc_connection, node);

		if (epoch < xconn->epoch)
			p = &(*p)->rb_left;
		else if (epoch > xconn->epoch)
			p = &(*p)->rb_right;
		else if (real_conn_id < xconn->real_conn_id)
			p = &(*p)->rb_left;
		else if (real_conn_id > xconn->real_conn_id)
			p = &(*p)->rb_right;
		else
			goto id_exists;
	}

	/* we've found a suitable hole - arrange for this connection to occupy
	 * it */
	rb_link_node(&conn->node, parent, p);
	rb_insert_color(&conn->node, &conn->trans->client_conns);

	conn->real_conn_id = real_conn_id;
	conn->cid = htonl(real_conn_id);
	write_unlock_bh(&conn->trans->conn_lock);
	_leave(" [CONNID %x CID %x]", real_conn_id, ntohl(conn->cid));
	return;

	/* we found a connection with the proposed ID - walk the tree from that
	 * point looking for the next unused ID */
id_exists:
	for (;;) {
		real_conn_id += RXRPC_CID_INC;
		if (real_conn_id < RXRPC_CID_INC) {
			real_conn_id = RXRPC_CID_INC;
			conn->trans->conn_idcounter = real_conn_id;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xconn = rb_entry(parent, struct rxrpc_connection, node);
		if (epoch < xconn->epoch ||
		    real_conn_id < xconn->real_conn_id)
			goto attempt_insertion;
	}
}

/*
 * add a call to a connection's call-by-ID tree
 */
static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection *conn,
				      struct rxrpc_call *call)
{
	struct rxrpc_call *xcall;
	struct rb_node *parent, **p;
	__be32 call_id;

	write_lock_bh(&conn->lock);

	call_id = call->call_id;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		xcall = rb_entry(parent, struct rxrpc_call, conn_node);

		if (call_id < xcall->call_id)
			p = &(*p)->rb_left;
		else if (call_id > xcall->call_id)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);

	write_unlock_bh(&conn->lock);
}

/*
 * connect a call on an exclusive connection
 */
static int rxrpc_connect_exclusive(struct rxrpc_sock *rx,
				   struct rxrpc_transport *trans,
				   __be16 service_id,
				   struct rxrpc_call *call,
				   gfp_t gfp)
{
	struct rxrpc_connection *conn;
	int chan, ret;

	_enter("");

	conn = rx->conn;
	if (!conn) {
		/* not yet present - create a candidate for a new connection
		 * and then redo the check */
		conn = rxrpc_alloc_connection(gfp);
		if (!conn) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		conn->trans = trans;
		conn->bundle = NULL;
		conn->service_id = service_id;
		conn->epoch = rxrpc_epoch;
		conn->in_clientflag = 0;
		conn->out_clientflag = RXRPC_CLIENT_INITIATED;
		conn->cid = 0;
		conn->state = RXRPC_CONN_CLIENT;
		conn->avail_calls = RXRPC_MAXCALLS - 1;
		conn->security_level = rx->min_sec_level;
		conn->key = key_get(rx->key);

		ret = rxrpc_init_client_conn_security(conn);
		if (ret < 0) {
			key_put(conn->key);
			kfree(conn);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&conn->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);
		atomic_inc(&trans->usage);

		_net("CONNECT EXCL new %d on TRANS %d",
		     conn->debug_id, conn->trans->debug_id);

		rxrpc_assign_connection_id(conn);
		rx->conn = conn;
	} else {
		spin_lock(&trans->client_lock);
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	goto no_free_channels;

found_channel:
	atomic_inc(&conn->usage);
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
	call->cid = conn->cid | htonl(chan);
	call->call_id = htonl(++conn->call_counter);

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, ntohl(call->call_id));

	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);

	_leave(" = 0");
	return 0;

no_free_channels:
	spin_unlock(&trans->client_lock);
	_leave(" = -ENOSR");
	return -ENOSR;
}

/*
 * find a connection for a call
 * - called in process context with IRQs enabled
 */
int rxrpc_connect_call(struct rxrpc_sock *rx,
		       struct rxrpc_transport *trans,
		       struct rxrpc_conn_bundle *bundle,
		       struct rxrpc_call *call,
		       gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate;
	int chan, ret;

	DECLARE_WAITQUEUE(myself, current);

	_enter("%p,%lx,", rx, call->user_call_ID);

	if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN, &rx->flags))
		return rxrpc_connect_exclusive(rx, trans, bundle->service_id,
					       call, gfp);

	spin_lock(&trans->client_lock);
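	/* a bundle keeps its connections on three lists: unused_conns (no call
	 * attached yet), avail_conns (at least one free channel) and
	 * busy_conns (all four channels occupied); connections with spare
	 * channels are preferred, then unused ones, and failing both a new
	 * connection is created */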
	for (;;) {
		/* see if the bundle has a call slot available */
		if (!list_empty(&bundle->avail_conns)) {
			_debug("avail");
			conn = list_entry(bundle->avail_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			if (--conn->avail_calls == 0)
				list_move(&conn->bundle_link,
					  &bundle->busy_conns);
			ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			break;
		}

		if (!list_empty(&bundle->unused_conns)) {
			_debug("unused");
			conn = list_entry(bundle->unused_conns.next,
					  struct rxrpc_connection,
					  bundle_link);
			if (conn->state >= RXRPC_CONN_REMOTELY_ABORTED) {
				list_del_init(&conn->bundle_link);
				bundle->num_conns--;
				continue;
			}
			ASSERTCMP(conn->avail_calls, ==, RXRPC_MAXCALLS);
			conn->avail_calls = RXRPC_MAXCALLS - 1;
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			atomic_inc(&conn->usage);
			list_move(&conn->bundle_link, &bundle->avail_conns);
			break;
		}

		/* need to allocate a new connection */
		_debug("get new conn [%d]", bundle->num_conns);

		spin_unlock(&trans->client_lock);

		if (signal_pending(current))
			goto interrupted;

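		/* cap each bundle at 20 connections (80 concurrent calls);
		 * beyond that, wait for a channel to come free rather than
		 * allocating yet another connection */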
		if (bundle->num_conns >= 20) {
			_debug("too many conns");

			if (!(gfp & __GFP_WAIT)) {
				_leave(" = -EAGAIN");
				return -EAGAIN;
			}

			add_wait_queue(&bundle->chanwait, &myself);
			for (;;) {
				set_current_state(TASK_INTERRUPTIBLE);
				if (bundle->num_conns < 20 ||
				    !list_empty(&bundle->unused_conns) ||
				    !list_empty(&bundle->avail_conns))
					break;
				if (signal_pending(current))
					goto interrupted_dequeue;
				schedule();
			}
			remove_wait_queue(&bundle->chanwait, &myself);
			__set_current_state(TASK_RUNNING);
			spin_lock(&trans->client_lock);
			continue;
		}

		/* not yet present - create a candidate for a new connection
		 * and then redo the check */
		candidate = rxrpc_alloc_connection(gfp);
		if (!candidate) {
			_leave(" = -ENOMEM");
			return -ENOMEM;
		}

		candidate->trans = trans;
		candidate->bundle = bundle;
		candidate->service_id = bundle->service_id;
		candidate->epoch = rxrpc_epoch;
		candidate->in_clientflag = 0;
		candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->cid = 0;
		candidate->state = RXRPC_CONN_CLIENT;
		candidate->avail_calls = RXRPC_MAXCALLS;
		candidate->security_level = rx->min_sec_level;
		candidate->key = key_get(bundle->key);

		ret = rxrpc_init_client_conn_security(candidate);
		if (ret < 0) {
			key_put(candidate->key);
			kfree(candidate);
			_leave(" = %d [key]", ret);
			return ret;
		}

		write_lock_bh(&rxrpc_connection_lock);
		list_add_tail(&candidate->link, &rxrpc_connections);
		write_unlock_bh(&rxrpc_connection_lock);

		spin_lock(&trans->client_lock);

		list_add(&candidate->bundle_link, &bundle->unused_conns);
		bundle->num_conns++;
		atomic_inc(&bundle->usage);
		atomic_inc(&trans->usage);

		_net("CONNECT new %d on TRANS %d",
		     candidate->debug_id, candidate->trans->debug_id);

		rxrpc_assign_connection_id(candidate);
		if (candidate->security)
			candidate->security->prime_packet_security(candidate);

		/* leave the candidate lurking in zombie mode attached to the
		 * bundle until we're ready for it */
		rxrpc_put_connection(candidate);
		candidate = NULL;
	}

	/* we've got a connection with a free channel and we can now attach the
	 * call to it
	 * - we're holding the transport's client lock
	 * - we're holding a reference on the connection
	 * - we're holding a reference on the bundle
	 */
	for (chan = 0; chan < RXRPC_MAXCALLS; chan++)
		if (!conn->channels[chan])
			goto found_channel;
	ASSERT(conn->channels[0] == NULL ||
	       conn->channels[1] == NULL ||
	       conn->channels[2] == NULL ||
	       conn->channels[3] == NULL);
	BUG();

found_channel:
	conn->channels[chan] = call;
	call->conn = conn;
	call->channel = chan;
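	/* the channel number is carried in the bottom bits of the wire CID;
	 * the call ID comes from the connection's call counter */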
	call->cid = conn->cid | htonl(chan);
	call->call_id = htonl(++conn->call_counter);

	_net("CONNECT client on conn %d chan %d as call %x",
	     conn->debug_id, chan, ntohl(call->call_id));

	ASSERTCMP(conn->avail_calls, <, RXRPC_MAXCALLS);
	spin_unlock(&trans->client_lock);

	rxrpc_add_call_ID_to_conn(conn, call);

	_leave(" = 0");
	return 0;

interrupted_dequeue:
	remove_wait_queue(&bundle->chanwait, &myself);
	__set_current_state(TASK_RUNNING);
interrupted:
	_leave(" = -ERESTARTSYS");
	return -ERESTARTSYS;
}

/*
 * get a record of an incoming connection
 */
struct rxrpc_connection *
rxrpc_incoming_connection(struct rxrpc_transport *trans,
			  struct rxrpc_header *hdr,
			  gfp_t gfp)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct rb_node *p, **pp;
	const char *new = "old";
	__be32 epoch;
	u32 conn_id;

	_enter("");

	ASSERT(hdr->flags & RXRPC_CLIENT_INITIATED);

	epoch = hdr->epoch;
	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;

	/* search the connection list first */
	read_lock_bh(&trans->conn_lock);

	p = trans->server_conns.rb_node;
	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found_extant_connection;
	}
	read_unlock_bh(&trans->conn_lock);

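	/* the connection isn't yet known; the candidate is allocated outside
	 * the lock, so the tree has to be walked again under the write lock
	 * in case a competing CPU has added the same connection meanwhile */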
	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_connection(gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	candidate->trans = trans;
	candidate->epoch = hdr->epoch;
	candidate->cid = hdr->cid & cpu_to_be32(RXRPC_CIDMASK);
	candidate->service_id = hdr->serviceId;
	candidate->security_ix = hdr->securityIndex;
	candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->out_clientflag = 0;
	candidate->real_conn_id = conn_id;
	candidate->state = RXRPC_CONN_SERVER;
	if (candidate->service_id)
		candidate->state = RXRPC_CONN_SERVER_UNSECURED;

	write_lock_bh(&trans->conn_lock);

	pp = &trans->server_conns.rb_node;
	p = NULL;
	while (*pp) {
		p = *pp;
		conn = rb_entry(p, struct rxrpc_connection, node);

		if (epoch < conn->epoch)
			pp = &(*pp)->rb_left;
		else if (epoch > conn->epoch)
			pp = &(*pp)->rb_right;
		else if (conn_id < conn->real_conn_id)
			pp = &(*pp)->rb_left;
		else if (conn_id > conn->real_conn_id)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	conn = candidate;
	candidate = NULL;
	rb_link_node(&conn->node, p, pp);
	rb_insert_color(&conn->node, &trans->server_conns);
	atomic_inc(&conn->trans->usage);

	write_unlock_bh(&trans->conn_lock);

	write_lock_bh(&rxrpc_connection_lock);
	list_add_tail(&conn->link, &rxrpc_connections);
	write_unlock_bh(&rxrpc_connection_lock);

	new = "new";

success:
	_net("CONNECTION %s %d {%x}", new, conn->debug_id, conn->real_conn_id);

	_leave(" = %p {u=%d}", conn, atomic_read(&conn->usage));
	return conn;

	/* we found the connection in the list immediately */
found_extant_connection:
	if (hdr->securityIndex != conn->security_ix) {
		read_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	goto success;

	/* we found the connection on the second time through the list */
found_extant_second:
	if (hdr->securityIndex != conn->security_ix) {
		write_unlock_bh(&trans->conn_lock);
		goto security_mismatch;
	}
	atomic_inc(&conn->usage);
	write_unlock_bh(&trans->conn_lock);
	kfree(candidate);
	goto success;

security_mismatch:
	kfree(candidate);
	_leave(" = -EKEYREJECTED");
	return ERR_PTR(-EKEYREJECTED);
}

/*
 * find a connection based on transport and RxRPC connection ID for an incoming
 * packet
 */
struct rxrpc_connection *rxrpc_find_connection(struct rxrpc_transport *trans,
					       struct rxrpc_header *hdr)
{
	struct rxrpc_connection *conn;
	struct rb_node *p;
	__be32 epoch;
	u32 conn_id;

	_enter(",{%x,%x}", ntohl(hdr->cid), hdr->flags);

	read_lock_bh(&trans->conn_lock);

	conn_id = ntohl(hdr->cid) & RXRPC_CIDMASK;
	epoch = hdr->epoch;

	if (hdr->flags & RXRPC_CLIENT_INITIATED)
		p = trans->server_conns.rb_node;
	else
		p = trans->client_conns.rb_node;

	while (p) {
		conn = rb_entry(p, struct rxrpc_connection, node);

		_debug("maybe %x", conn->real_conn_id);

		if (epoch < conn->epoch)
			p = p->rb_left;
		else if (epoch > conn->epoch)
			p = p->rb_right;
		else if (conn_id < conn->real_conn_id)
			p = p->rb_left;
		else if (conn_id > conn->real_conn_id)
			p = p->rb_right;
		else
			goto found;
	}

	read_unlock_bh(&trans->conn_lock);
	_leave(" = NULL");
	return NULL;

found:
	atomic_inc(&conn->usage);
	read_unlock_bh(&trans->conn_lock);
	_leave(" = %p", conn);
	return conn;
}

/*
 * release a virtual connection
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	_enter("%p{u=%d,d=%d}",
	       conn, atomic_read(&conn->usage), conn->debug_id);

	ASSERTCMP(atomic_read(&conn->usage), >, 0);

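	/* note the time of last use: an unreferenced connection lingers for
	 * rxrpc_connection_expiry seconds so it can be reused before the
	 * reaper discards it */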
	conn->put_time = get_seconds();
	if (atomic_dec_and_test(&conn->usage)) {
		_debug("zombie");
		rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);
	}

	_leave("");
}

/*
 * destroy a virtual connection
 */
static void rxrpc_destroy_connection(struct rxrpc_connection *conn)
{
	_enter("%p{%d}", conn, atomic_read(&conn->usage));

	ASSERTCMP(atomic_read(&conn->usage), ==, 0);

	_net("DESTROY CONN %d", conn->debug_id);

	if (conn->bundle)
		rxrpc_put_bundle(conn->trans, conn->bundle);

	ASSERT(RB_EMPTY_ROOT(&conn->calls));
	rxrpc_purge_queue(&conn->rx_queue);

	rxrpc_clear_conn_security(conn);
	rxrpc_put_transport(conn->trans);
	kfree(conn);
	_leave("");
}

/*
 * reap dead connections
 */
static void rxrpc_connection_reaper(struct work_struct *work)
{
	struct rxrpc_connection *conn, *_p;
	unsigned long now, earliest, reap_time;

	LIST_HEAD(graveyard);

	_enter("");

	now = get_seconds();
	earliest = ULONG_MAX;

	write_lock_bh(&rxrpc_connection_lock);
	list_for_each_entry_safe(conn, _p, &rxrpc_connections, link) {
		_debug("reap CONN %d { u=%d,t=%ld }",
		       conn->debug_id, atomic_read(&conn->usage),
		       (long) now - (long) conn->put_time);

		if (likely(atomic_read(&conn->usage) > 0))
			continue;

		spin_lock(&conn->trans->client_lock);
		write_lock(&conn->trans->conn_lock);
		reap_time = conn->put_time + rxrpc_connection_expiry;

		if (atomic_read(&conn->usage) > 0) {
			;
		} else if (reap_time <= now) {
			list_move_tail(&conn->link, &graveyard);
			if (conn->out_clientflag)
				rb_erase(&conn->node,
					 &conn->trans->client_conns);
			else
				rb_erase(&conn->node,
					 &conn->trans->server_conns);
			if (conn->bundle) {
				list_del_init(&conn->bundle_link);
				conn->bundle->num_conns--;
			}

		} else if (reap_time < earliest) {
			earliest = reap_time;
		}

		write_unlock(&conn->trans->conn_lock);
		spin_unlock(&conn->trans->client_lock);
	}
	write_unlock_bh(&rxrpc_connection_lock);

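	/* if everything outstanding is still within its grace period, rearm
	 * the reaper for the moment the earliest connection expires */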
	if (earliest != ULONG_MAX) {
		_debug("reschedule reaper %ld", (long) earliest - now);
		ASSERTCMP(earliest, >, now);
		rxrpc_queue_delayed_work(&rxrpc_connection_reap,
					 (earliest - now) * HZ);
	}

	/* then destroy all those pulled out */
	while (!list_empty(&graveyard)) {
		conn = list_entry(graveyard.next, struct rxrpc_connection,
				  link);
		list_del_init(&conn->link);

		ASSERTCMP(atomic_read(&conn->usage), ==, 0);
		rxrpc_destroy_connection(conn);
	}

	_leave("");
}

/*
 * preemptively destroy all the connection records rather than waiting for them
 * to time out
 */
void __exit rxrpc_destroy_all_connections(void)
{
	_enter("");

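	/* force the expiry time to zero so that the final reaper pass treats
	 * every unused connection as already expired */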
	rxrpc_connection_expiry = 0;
	cancel_delayed_work(&rxrpc_connection_reap);
	rxrpc_queue_delayed_work(&rxrpc_connection_reap, 0);

	_leave("");
}