/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/net.h>
15 #include <linux/skbuff.h>
16 #include <linux/crypto.h>
18 #include <net/af_rxrpc.h>
19 #include "ar-internal.h"
/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

/* List of all extant connections and the lock that guards it. */
LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);

/* Deferred work item that reaps connections whose usage count has dropped to
 * zero and whose expiry time has passed. */
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
33 * allocate a new client connection bundle
35 static struct rxrpc_conn_bundle
*rxrpc_alloc_bundle(gfp_t gfp
)
37 struct rxrpc_conn_bundle
*bundle
;
41 bundle
= kzalloc(sizeof(struct rxrpc_conn_bundle
), gfp
);
43 INIT_LIST_HEAD(&bundle
->unused_conns
);
44 INIT_LIST_HEAD(&bundle
->avail_conns
);
45 INIT_LIST_HEAD(&bundle
->busy_conns
);
46 init_waitqueue_head(&bundle
->chanwait
);
47 atomic_set(&bundle
->usage
, 1);
50 _leave(" = %p", bundle
);
55 * compare bundle parameters with what we're looking for
56 * - return -ve, 0 or +ve
59 int rxrpc_cmp_bundle(const struct rxrpc_conn_bundle
*bundle
,
60 struct key
*key
, u16 service_id
)
62 return (bundle
->service_id
- service_id
) ?:
63 ((unsigned long)bundle
->key
- (unsigned long)key
);
67 * get bundle of client connections that a client socket can make use of
69 struct rxrpc_conn_bundle
*rxrpc_get_bundle(struct rxrpc_sock
*rx
,
70 struct rxrpc_transport
*trans
,
75 struct rxrpc_conn_bundle
*bundle
, *candidate
;
76 struct rb_node
*p
, *parent
, **pp
;
78 _enter("%p{%x},%x,%hx,",
79 rx
, key_serial(key
), trans
->debug_id
, service_id
);
81 if (rx
->trans
== trans
&& rx
->bundle
) {
82 atomic_inc(&rx
->bundle
->usage
);
86 /* search the extant bundles first for one that matches the specified
88 spin_lock(&trans
->client_lock
);
90 p
= trans
->bundles
.rb_node
;
92 bundle
= rb_entry(p
, struct rxrpc_conn_bundle
, node
);
94 if (rxrpc_cmp_bundle(bundle
, key
, service_id
) < 0)
96 else if (rxrpc_cmp_bundle(bundle
, key
, service_id
) > 0)
99 goto found_extant_bundle
;
102 spin_unlock(&trans
->client_lock
);
104 /* not yet present - create a candidate for a new record and then
106 candidate
= rxrpc_alloc_bundle(gfp
);
108 _leave(" = -ENOMEM");
109 return ERR_PTR(-ENOMEM
);
112 candidate
->key
= key_get(key
);
113 candidate
->service_id
= service_id
;
115 spin_lock(&trans
->client_lock
);
117 pp
= &trans
->bundles
.rb_node
;
121 bundle
= rb_entry(parent
, struct rxrpc_conn_bundle
, node
);
123 if (rxrpc_cmp_bundle(bundle
, key
, service_id
) < 0)
124 pp
= &(*pp
)->rb_left
;
125 else if (rxrpc_cmp_bundle(bundle
, key
, service_id
) > 0)
126 pp
= &(*pp
)->rb_right
;
128 goto found_extant_second
;
131 /* second search also failed; add the new bundle */
135 rb_link_node(&bundle
->node
, parent
, pp
);
136 rb_insert_color(&bundle
->node
, &trans
->bundles
);
137 spin_unlock(&trans
->client_lock
);
138 _net("BUNDLE new on trans %d", trans
->debug_id
);
139 if (!rx
->bundle
&& rx
->sk
.sk_state
== RXRPC_CLIENT_CONNECTED
) {
140 atomic_inc(&bundle
->usage
);
143 _leave(" = %p [new]", bundle
);
146 /* we found the bundle in the list immediately */
148 atomic_inc(&bundle
->usage
);
149 spin_unlock(&trans
->client_lock
);
150 _net("BUNDLE old on trans %d", trans
->debug_id
);
151 if (!rx
->bundle
&& rx
->sk
.sk_state
== RXRPC_CLIENT_CONNECTED
) {
152 atomic_inc(&bundle
->usage
);
155 _leave(" = %p [extant %d]", bundle
, atomic_read(&bundle
->usage
));
158 /* we found the bundle on the second time through the list */
160 atomic_inc(&bundle
->usage
);
161 spin_unlock(&trans
->client_lock
);
163 _net("BUNDLE old2 on trans %d", trans
->debug_id
);
164 if (!rx
->bundle
&& rx
->sk
.sk_state
== RXRPC_CLIENT_CONNECTED
) {
165 atomic_inc(&bundle
->usage
);
168 _leave(" = %p [second %d]", bundle
, atomic_read(&bundle
->usage
));
175 void rxrpc_put_bundle(struct rxrpc_transport
*trans
,
176 struct rxrpc_conn_bundle
*bundle
)
178 _enter("%p,%p{%d}",trans
, bundle
, atomic_read(&bundle
->usage
));
180 if (atomic_dec_and_lock(&bundle
->usage
, &trans
->client_lock
)) {
181 _debug("Destroy bundle");
182 rb_erase(&bundle
->node
, &trans
->bundles
);
183 spin_unlock(&trans
->client_lock
);
184 ASSERT(list_empty(&bundle
->unused_conns
));
185 ASSERT(list_empty(&bundle
->avail_conns
));
186 ASSERT(list_empty(&bundle
->busy_conns
));
187 ASSERTCMP(bundle
->num_conns
, ==, 0);
188 key_put(bundle
->key
);
196 * allocate a new connection
198 static struct rxrpc_connection
*rxrpc_alloc_connection(gfp_t gfp
)
200 struct rxrpc_connection
*conn
;
204 conn
= kzalloc(sizeof(struct rxrpc_connection
), gfp
);
206 INIT_WORK(&conn
->processor
, &rxrpc_process_connection
);
207 INIT_LIST_HEAD(&conn
->bundle_link
);
208 conn
->calls
= RB_ROOT
;
209 skb_queue_head_init(&conn
->rx_queue
);
210 conn
->security
= &rxrpc_no_security
;
211 rwlock_init(&conn
->lock
);
212 spin_lock_init(&conn
->state_lock
);
213 atomic_set(&conn
->usage
, 1);
214 conn
->debug_id
= atomic_inc_return(&rxrpc_debug_id
);
215 conn
->avail_calls
= RXRPC_MAXCALLS
;
216 conn
->size_align
= 4;
217 conn
->header_size
= sizeof(struct rxrpc_wire_header
);
220 _leave(" = %p{%d}", conn
, conn
? conn
->debug_id
: 0);
225 * assign a connection ID to a connection and add it to the transport's
226 * connection lookup tree
227 * - called with transport client lock held
229 static void rxrpc_assign_connection_id(struct rxrpc_connection
*conn
)
231 struct rxrpc_connection
*xconn
;
232 struct rb_node
*parent
, **p
;
240 write_lock_bh(&conn
->trans
->conn_lock
);
242 conn
->trans
->conn_idcounter
+= RXRPC_CID_INC
;
243 if (conn
->trans
->conn_idcounter
< RXRPC_CID_INC
)
244 conn
->trans
->conn_idcounter
= RXRPC_CID_INC
;
245 cid
= conn
->trans
->conn_idcounter
;
249 p
= &conn
->trans
->client_conns
.rb_node
;
253 xconn
= rb_entry(parent
, struct rxrpc_connection
, node
);
255 if (epoch
< xconn
->epoch
)
257 else if (epoch
> xconn
->epoch
)
259 else if (cid
< xconn
->cid
)
261 else if (cid
> xconn
->cid
)
267 /* we've found a suitable hole - arrange for this connection to occupy
269 rb_link_node(&conn
->node
, parent
, p
);
270 rb_insert_color(&conn
->node
, &conn
->trans
->client_conns
);
273 write_unlock_bh(&conn
->trans
->conn_lock
);
274 _leave(" [CID %x]", cid
);
277 /* we found a connection with the proposed ID - walk the tree from that
278 * point looking for the next unused ID */
281 cid
+= RXRPC_CID_INC
;
282 if (cid
< RXRPC_CID_INC
) {
284 conn
->trans
->conn_idcounter
= cid
;
285 goto attempt_insertion
;
288 parent
= rb_next(parent
);
290 goto attempt_insertion
;
292 xconn
= rb_entry(parent
, struct rxrpc_connection
, node
);
293 if (epoch
< xconn
->epoch
||
295 goto attempt_insertion
;
300 * add a call to a connection's call-by-ID tree
302 static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection
*conn
,
303 struct rxrpc_call
*call
)
305 struct rxrpc_call
*xcall
;
306 struct rb_node
*parent
, **p
;
309 write_lock_bh(&conn
->lock
);
311 call_id
= call
->call_id
;
312 p
= &conn
->calls
.rb_node
;
316 xcall
= rb_entry(parent
, struct rxrpc_call
, conn_node
);
318 if (call_id
< xcall
->call_id
)
320 else if (call_id
> xcall
->call_id
)
326 rb_link_node(&call
->conn_node
, parent
, p
);
327 rb_insert_color(&call
->conn_node
, &conn
->calls
);
329 write_unlock_bh(&conn
->lock
);
333 * connect a call on an exclusive connection
335 static int rxrpc_connect_exclusive(struct rxrpc_sock
*rx
,
336 struct rxrpc_transport
*trans
,
338 struct rxrpc_call
*call
,
341 struct rxrpc_connection
*conn
;
348 /* not yet present - create a candidate for a new connection
349 * and then redo the check */
350 conn
= rxrpc_alloc_connection(gfp
);
352 _leave(" = -ENOMEM");
358 conn
->service_id
= service_id
;
359 conn
->epoch
= rxrpc_epoch
;
360 conn
->in_clientflag
= 0;
361 conn
->out_clientflag
= RXRPC_CLIENT_INITIATED
;
363 conn
->state
= RXRPC_CONN_CLIENT
;
364 conn
->avail_calls
= RXRPC_MAXCALLS
- 1;
365 conn
->security_level
= rx
->min_sec_level
;
366 conn
->key
= key_get(rx
->key
);
368 ret
= rxrpc_init_client_conn_security(conn
);
372 _leave(" = %d [key]", ret
);
376 write_lock_bh(&rxrpc_connection_lock
);
377 list_add_tail(&conn
->link
, &rxrpc_connections
);
378 write_unlock_bh(&rxrpc_connection_lock
);
380 spin_lock(&trans
->client_lock
);
381 atomic_inc(&trans
->usage
);
383 _net("CONNECT EXCL new %d on TRANS %d",
384 conn
->debug_id
, conn
->trans
->debug_id
);
386 rxrpc_assign_connection_id(conn
);
389 spin_lock(&trans
->client_lock
);
392 /* we've got a connection with a free channel and we can now attach the
394 * - we're holding the transport's client lock
395 * - we're holding a reference on the connection
397 for (chan
= 0; chan
< RXRPC_MAXCALLS
; chan
++)
398 if (!conn
->channels
[chan
])
400 goto no_free_channels
;
403 atomic_inc(&conn
->usage
);
404 conn
->channels
[chan
] = call
;
406 call
->channel
= chan
;
407 call
->cid
= conn
->cid
| chan
;
408 call
->call_id
= ++conn
->call_counter
;
410 _net("CONNECT client on conn %d chan %d as call %x",
411 conn
->debug_id
, chan
, call
->call_id
);
413 spin_unlock(&trans
->client_lock
);
415 rxrpc_add_call_ID_to_conn(conn
, call
);
420 spin_unlock(&trans
->client_lock
);
426 * find a connection for a call
427 * - called in process context with IRQs enabled
429 int rxrpc_connect_call(struct rxrpc_sock
*rx
,
430 struct rxrpc_transport
*trans
,
431 struct rxrpc_conn_bundle
*bundle
,
432 struct rxrpc_call
*call
,
435 struct rxrpc_connection
*conn
, *candidate
;
438 DECLARE_WAITQUEUE(myself
, current
);
440 _enter("%p,%lx,", rx
, call
->user_call_ID
);
442 if (test_bit(RXRPC_SOCK_EXCLUSIVE_CONN
, &rx
->flags
))
443 return rxrpc_connect_exclusive(rx
, trans
, bundle
->service_id
,
446 spin_lock(&trans
->client_lock
);
448 /* see if the bundle has a call slot available */
449 if (!list_empty(&bundle
->avail_conns
)) {
451 conn
= list_entry(bundle
->avail_conns
.next
,
452 struct rxrpc_connection
,
454 if (conn
->state
>= RXRPC_CONN_REMOTELY_ABORTED
) {
455 list_del_init(&conn
->bundle_link
);
459 if (--conn
->avail_calls
== 0)
460 list_move(&conn
->bundle_link
,
461 &bundle
->busy_conns
);
462 ASSERTCMP(conn
->avail_calls
, <, RXRPC_MAXCALLS
);
463 ASSERT(conn
->channels
[0] == NULL
||
464 conn
->channels
[1] == NULL
||
465 conn
->channels
[2] == NULL
||
466 conn
->channels
[3] == NULL
);
467 atomic_inc(&conn
->usage
);
471 if (!list_empty(&bundle
->unused_conns
)) {
473 conn
= list_entry(bundle
->unused_conns
.next
,
474 struct rxrpc_connection
,
476 if (conn
->state
>= RXRPC_CONN_REMOTELY_ABORTED
) {
477 list_del_init(&conn
->bundle_link
);
481 ASSERTCMP(conn
->avail_calls
, ==, RXRPC_MAXCALLS
);
482 conn
->avail_calls
= RXRPC_MAXCALLS
- 1;
483 ASSERT(conn
->channels
[0] == NULL
&&
484 conn
->channels
[1] == NULL
&&
485 conn
->channels
[2] == NULL
&&
486 conn
->channels
[3] == NULL
);
487 atomic_inc(&conn
->usage
);
488 list_move(&conn
->bundle_link
, &bundle
->avail_conns
);
492 /* need to allocate a new connection */
493 _debug("get new conn [%d]", bundle
->num_conns
);
495 spin_unlock(&trans
->client_lock
);
497 if (signal_pending(current
))
500 if (bundle
->num_conns
>= 20) {
501 _debug("too many conns");
503 if (!gfpflags_allow_blocking(gfp
)) {
504 _leave(" = -EAGAIN");
508 add_wait_queue(&bundle
->chanwait
, &myself
);
510 set_current_state(TASK_INTERRUPTIBLE
);
511 if (bundle
->num_conns
< 20 ||
512 !list_empty(&bundle
->unused_conns
) ||
513 !list_empty(&bundle
->avail_conns
))
515 if (signal_pending(current
))
516 goto interrupted_dequeue
;
519 remove_wait_queue(&bundle
->chanwait
, &myself
);
520 __set_current_state(TASK_RUNNING
);
521 spin_lock(&trans
->client_lock
);
525 /* not yet present - create a candidate for a new connection and then
527 candidate
= rxrpc_alloc_connection(gfp
);
529 _leave(" = -ENOMEM");
533 candidate
->trans
= trans
;
534 candidate
->bundle
= bundle
;
535 candidate
->service_id
= bundle
->service_id
;
536 candidate
->epoch
= rxrpc_epoch
;
537 candidate
->in_clientflag
= 0;
538 candidate
->out_clientflag
= RXRPC_CLIENT_INITIATED
;
540 candidate
->state
= RXRPC_CONN_CLIENT
;
541 candidate
->avail_calls
= RXRPC_MAXCALLS
;
542 candidate
->security_level
= rx
->min_sec_level
;
543 candidate
->key
= key_get(bundle
->key
);
545 ret
= rxrpc_init_client_conn_security(candidate
);
547 key_put(candidate
->key
);
549 _leave(" = %d [key]", ret
);
553 write_lock_bh(&rxrpc_connection_lock
);
554 list_add_tail(&candidate
->link
, &rxrpc_connections
);
555 write_unlock_bh(&rxrpc_connection_lock
);
557 spin_lock(&trans
->client_lock
);
559 list_add(&candidate
->bundle_link
, &bundle
->unused_conns
);
561 atomic_inc(&bundle
->usage
);
562 atomic_inc(&trans
->usage
);
564 _net("CONNECT new %d on TRANS %d",
565 candidate
->debug_id
, candidate
->trans
->debug_id
);
567 rxrpc_assign_connection_id(candidate
);
568 candidate
->security
->prime_packet_security(candidate
);
570 /* leave the candidate lurking in zombie mode attached to the
571 * bundle until we're ready for it */
572 rxrpc_put_connection(candidate
);
576 /* we've got a connection with a free channel and we can now attach the
578 * - we're holding the transport's client lock
579 * - we're holding a reference on the connection
580 * - we're holding a reference on the bundle
582 for (chan
= 0; chan
< RXRPC_MAXCALLS
; chan
++)
583 if (!conn
->channels
[chan
])
585 ASSERT(conn
->channels
[0] == NULL
||
586 conn
->channels
[1] == NULL
||
587 conn
->channels
[2] == NULL
||
588 conn
->channels
[3] == NULL
);
592 conn
->channels
[chan
] = call
;
594 call
->channel
= chan
;
595 call
->cid
= conn
->cid
| chan
;
596 call
->call_id
= ++conn
->call_counter
;
598 _net("CONNECT client on conn %d chan %d as call %x",
599 conn
->debug_id
, chan
, call
->call_id
);
601 ASSERTCMP(conn
->avail_calls
, <, RXRPC_MAXCALLS
);
602 spin_unlock(&trans
->client_lock
);
604 rxrpc_add_call_ID_to_conn(conn
, call
);
610 remove_wait_queue(&bundle
->chanwait
, &myself
);
611 __set_current_state(TASK_RUNNING
);
613 _leave(" = -ERESTARTSYS");
618 * get a record of an incoming connection
620 struct rxrpc_connection
*
621 rxrpc_incoming_connection(struct rxrpc_transport
*trans
,
622 struct rxrpc_host_header
*hdr
)
624 struct rxrpc_connection
*conn
, *candidate
= NULL
;
625 struct rb_node
*p
, **pp
;
626 const char *new = "old";
632 ASSERT(hdr
->flags
& RXRPC_CLIENT_INITIATED
);
635 cid
= hdr
->cid
& RXRPC_CIDMASK
;
637 /* search the connection list first */
638 read_lock_bh(&trans
->conn_lock
);
640 p
= trans
->server_conns
.rb_node
;
642 conn
= rb_entry(p
, struct rxrpc_connection
, node
);
644 _debug("maybe %x", conn
->cid
);
646 if (epoch
< conn
->epoch
)
648 else if (epoch
> conn
->epoch
)
650 else if (cid
< conn
->cid
)
652 else if (cid
> conn
->cid
)
655 goto found_extant_connection
;
657 read_unlock_bh(&trans
->conn_lock
);
659 /* not yet present - create a candidate for a new record and then
661 candidate
= rxrpc_alloc_connection(GFP_NOIO
);
663 _leave(" = -ENOMEM");
664 return ERR_PTR(-ENOMEM
);
667 candidate
->trans
= trans
;
668 candidate
->epoch
= hdr
->epoch
;
669 candidate
->cid
= hdr
->cid
& RXRPC_CIDMASK
;
670 candidate
->service_id
= hdr
->serviceId
;
671 candidate
->security_ix
= hdr
->securityIndex
;
672 candidate
->in_clientflag
= RXRPC_CLIENT_INITIATED
;
673 candidate
->out_clientflag
= 0;
674 candidate
->state
= RXRPC_CONN_SERVER
;
675 if (candidate
->service_id
)
676 candidate
->state
= RXRPC_CONN_SERVER_UNSECURED
;
678 write_lock_bh(&trans
->conn_lock
);
680 pp
= &trans
->server_conns
.rb_node
;
684 conn
= rb_entry(p
, struct rxrpc_connection
, node
);
686 if (epoch
< conn
->epoch
)
687 pp
= &(*pp
)->rb_left
;
688 else if (epoch
> conn
->epoch
)
689 pp
= &(*pp
)->rb_right
;
690 else if (cid
< conn
->cid
)
691 pp
= &(*pp
)->rb_left
;
692 else if (cid
> conn
->cid
)
693 pp
= &(*pp
)->rb_right
;
695 goto found_extant_second
;
698 /* we can now add the new candidate to the list */
701 rb_link_node(&conn
->node
, p
, pp
);
702 rb_insert_color(&conn
->node
, &trans
->server_conns
);
703 atomic_inc(&conn
->trans
->usage
);
705 write_unlock_bh(&trans
->conn_lock
);
707 write_lock_bh(&rxrpc_connection_lock
);
708 list_add_tail(&conn
->link
, &rxrpc_connections
);
709 write_unlock_bh(&rxrpc_connection_lock
);
714 _net("CONNECTION %s %d {%x}", new, conn
->debug_id
, conn
->cid
);
716 _leave(" = %p {u=%d}", conn
, atomic_read(&conn
->usage
));
719 /* we found the connection in the list immediately */
720 found_extant_connection
:
721 if (hdr
->securityIndex
!= conn
->security_ix
) {
722 read_unlock_bh(&trans
->conn_lock
);
723 goto security_mismatch
;
725 atomic_inc(&conn
->usage
);
726 read_unlock_bh(&trans
->conn_lock
);
729 /* we found the connection on the second time through the list */
731 if (hdr
->securityIndex
!= conn
->security_ix
) {
732 write_unlock_bh(&trans
->conn_lock
);
733 goto security_mismatch
;
735 atomic_inc(&conn
->usage
);
736 write_unlock_bh(&trans
->conn_lock
);
742 _leave(" = -EKEYREJECTED");
743 return ERR_PTR(-EKEYREJECTED
);
747 * find a connection based on transport and RxRPC connection ID for an incoming
750 struct rxrpc_connection
*rxrpc_find_connection(struct rxrpc_transport
*trans
,
751 struct rxrpc_host_header
*hdr
)
753 struct rxrpc_connection
*conn
;
757 _enter(",{%x,%x}", hdr
->cid
, hdr
->flags
);
759 read_lock_bh(&trans
->conn_lock
);
761 cid
= hdr
->cid
& RXRPC_CIDMASK
;
764 if (hdr
->flags
& RXRPC_CLIENT_INITIATED
)
765 p
= trans
->server_conns
.rb_node
;
767 p
= trans
->client_conns
.rb_node
;
770 conn
= rb_entry(p
, struct rxrpc_connection
, node
);
772 _debug("maybe %x", conn
->cid
);
774 if (epoch
< conn
->epoch
)
776 else if (epoch
> conn
->epoch
)
778 else if (cid
< conn
->cid
)
780 else if (cid
> conn
->cid
)
786 read_unlock_bh(&trans
->conn_lock
);
791 atomic_inc(&conn
->usage
);
792 read_unlock_bh(&trans
->conn_lock
);
793 _leave(" = %p", conn
);
798 * release a virtual connection
800 void rxrpc_put_connection(struct rxrpc_connection
*conn
)
802 _enter("%p{u=%d,d=%d}",
803 conn
, atomic_read(&conn
->usage
), conn
->debug_id
);
805 ASSERTCMP(atomic_read(&conn
->usage
), >, 0);
807 conn
->put_time
= ktime_get_seconds();
808 if (atomic_dec_and_test(&conn
->usage
)) {
810 rxrpc_queue_delayed_work(&rxrpc_connection_reap
, 0);
817 * destroy a virtual connection
819 static void rxrpc_destroy_connection(struct rxrpc_connection
*conn
)
821 _enter("%p{%d}", conn
, atomic_read(&conn
->usage
));
823 ASSERTCMP(atomic_read(&conn
->usage
), ==, 0);
825 _net("DESTROY CONN %d", conn
->debug_id
);
828 rxrpc_put_bundle(conn
->trans
, conn
->bundle
);
830 ASSERT(RB_EMPTY_ROOT(&conn
->calls
));
831 rxrpc_purge_queue(&conn
->rx_queue
);
833 conn
->security
->clear(conn
);
835 key_put(conn
->server_key
);
837 rxrpc_put_transport(conn
->trans
);
843 * reap dead connections
845 static void rxrpc_connection_reaper(struct work_struct
*work
)
847 struct rxrpc_connection
*conn
, *_p
;
848 unsigned long now
, earliest
, reap_time
;
850 LIST_HEAD(graveyard
);
854 now
= ktime_get_seconds();
855 earliest
= ULONG_MAX
;
857 write_lock_bh(&rxrpc_connection_lock
);
858 list_for_each_entry_safe(conn
, _p
, &rxrpc_connections
, link
) {
859 _debug("reap CONN %d { u=%d,t=%ld }",
860 conn
->debug_id
, atomic_read(&conn
->usage
),
861 (long) now
- (long) conn
->put_time
);
863 if (likely(atomic_read(&conn
->usage
) > 0))
866 spin_lock(&conn
->trans
->client_lock
);
867 write_lock(&conn
->trans
->conn_lock
);
868 reap_time
= conn
->put_time
+ rxrpc_connection_expiry
;
870 if (atomic_read(&conn
->usage
) > 0) {
872 } else if (reap_time
<= now
) {
873 list_move_tail(&conn
->link
, &graveyard
);
874 if (conn
->out_clientflag
)
875 rb_erase(&conn
->node
,
876 &conn
->trans
->client_conns
);
878 rb_erase(&conn
->node
,
879 &conn
->trans
->server_conns
);
881 list_del_init(&conn
->bundle_link
);
882 conn
->bundle
->num_conns
--;
885 } else if (reap_time
< earliest
) {
886 earliest
= reap_time
;
889 write_unlock(&conn
->trans
->conn_lock
);
890 spin_unlock(&conn
->trans
->client_lock
);
892 write_unlock_bh(&rxrpc_connection_lock
);
894 if (earliest
!= ULONG_MAX
) {
895 _debug("reschedule reaper %ld", (long) earliest
- now
);
896 ASSERTCMP(earliest
, >, now
);
897 rxrpc_queue_delayed_work(&rxrpc_connection_reap
,
898 (earliest
- now
) * HZ
);
901 /* then destroy all those pulled out */
902 while (!list_empty(&graveyard
)) {
903 conn
= list_entry(graveyard
.next
, struct rxrpc_connection
,
905 list_del_init(&conn
->link
);
907 ASSERTCMP(atomic_read(&conn
->usage
), ==, 0);
908 rxrpc_destroy_connection(conn
);
915 * preemptively destroy all the connection records rather than waiting for them
918 void __exit
rxrpc_destroy_all_connections(void)
922 rxrpc_connection_expiry
= 0;
923 cancel_delayed_work(&rxrpc_connection_reap
);
924 rxrpc_queue_delayed_work(&rxrpc_connection_reap
, 0);