/* RxRPC virtual connection handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/slab.h>
16 #include <linux/net.h>
17 #include <linux/skbuff.h>
18 #include <linux/crypto.h>
20 #include <net/af_rxrpc.h>
21 #include "ar-internal.h"
/*
 * Time till a connection expires after last use (in seconds).
 */
unsigned int rxrpc_connection_expiry = 10 * 60;

static void rxrpc_connection_reaper(struct work_struct *work);

/* All live connections, protected by rxrpc_connection_lock; reaped by
 * rxrpc_connection_reap once their refcount has been zero for
 * rxrpc_connection_expiry seconds.
 */
LIST_HEAD(rxrpc_connections);
DEFINE_RWLOCK(rxrpc_connection_lock);
static DECLARE_DELAYED_WORK(rxrpc_connection_reap, rxrpc_connection_reaper);
35 * allocate a new connection
37 static struct rxrpc_connection
*rxrpc_alloc_connection(gfp_t gfp
)
39 struct rxrpc_connection
*conn
;
43 conn
= kzalloc(sizeof(struct rxrpc_connection
), gfp
);
45 spin_lock_init(&conn
->channel_lock
);
46 init_waitqueue_head(&conn
->channel_wq
);
47 INIT_WORK(&conn
->processor
, &rxrpc_process_connection
);
48 INIT_LIST_HEAD(&conn
->link
);
49 conn
->calls
= RB_ROOT
;
50 skb_queue_head_init(&conn
->rx_queue
);
51 conn
->security
= &rxrpc_no_security
;
52 rwlock_init(&conn
->lock
);
53 spin_lock_init(&conn
->state_lock
);
54 atomic_set(&conn
->usage
, 1);
55 conn
->debug_id
= atomic_inc_return(&rxrpc_debug_id
);
56 atomic_set(&conn
->avail_chans
, RXRPC_MAXCALLS
);
58 conn
->header_size
= sizeof(struct rxrpc_wire_header
);
61 _leave(" = %p{%d}", conn
, conn
? conn
->debug_id
: 0);
66 * add a call to a connection's call-by-ID tree
68 static void rxrpc_add_call_ID_to_conn(struct rxrpc_connection
*conn
,
69 struct rxrpc_call
*call
)
71 struct rxrpc_call
*xcall
;
72 struct rb_node
*parent
, **p
;
75 write_lock_bh(&conn
->lock
);
77 call_id
= call
->call_id
;
78 p
= &conn
->calls
.rb_node
;
82 xcall
= rb_entry(parent
, struct rxrpc_call
, conn_node
);
84 if (call_id
< xcall
->call_id
)
86 else if (call_id
> xcall
->call_id
)
92 rb_link_node(&call
->conn_node
, parent
, p
);
93 rb_insert_color(&call
->conn_node
, &conn
->calls
);
95 write_unlock_bh(&conn
->lock
);
99 * Allocate a client connection. The caller must take care to clear any
100 * padding bytes in *cp.
102 static struct rxrpc_connection
*
103 rxrpc_alloc_client_connection(struct rxrpc_conn_parameters
*cp
,
104 struct rxrpc_transport
*trans
,
107 struct rxrpc_connection
*conn
;
112 conn
= rxrpc_alloc_connection(gfp
);
114 _leave(" = -ENOMEM");
115 return ERR_PTR(-ENOMEM
);
119 conn
->proto
.local
= cp
->local
;
120 conn
->proto
.epoch
= rxrpc_epoch
;
122 conn
->proto
.in_clientflag
= 0;
123 conn
->proto
.family
= cp
->peer
->srx
.transport
.family
;
124 conn
->out_clientflag
= RXRPC_CLIENT_INITIATED
;
125 conn
->state
= RXRPC_CONN_CLIENT
;
127 switch (conn
->proto
.family
) {
129 conn
->proto
.addr_size
= sizeof(conn
->proto
.ipv4_addr
);
130 conn
->proto
.ipv4_addr
= cp
->peer
->srx
.transport
.sin
.sin_addr
;
131 conn
->proto
.port
= cp
->peer
->srx
.transport
.sin
.sin_port
;
135 ret
= rxrpc_get_client_connection_id(conn
, gfp
);
139 ret
= rxrpc_init_client_conn_security(conn
);
143 conn
->security
->prime_packet_security(conn
);
145 write_lock(&rxrpc_connection_lock
);
146 list_add_tail(&conn
->link
, &rxrpc_connections
);
147 write_unlock(&rxrpc_connection_lock
);
149 key_get(conn
->params
.key
);
151 atomic_inc(&trans
->usage
);
153 _leave(" = %p", conn
);
157 rxrpc_put_client_connection_id(conn
);
160 _leave(" = %d", ret
);
165 * find a connection for a call
166 * - called in process context with IRQs enabled
168 int rxrpc_connect_call(struct rxrpc_call
*call
,
169 struct rxrpc_conn_parameters
*cp
,
170 struct rxrpc_transport
*trans
,
171 struct sockaddr_rxrpc
*srx
,
174 struct rxrpc_connection
*conn
, *candidate
= NULL
;
175 struct rxrpc_local
*local
= cp
->local
;
176 struct rb_node
*p
, **pp
, *parent
;
180 DECLARE_WAITQUEUE(myself
, current
);
182 _enter("{%d,%lx},", call
->debug_id
, call
->user_call_ID
);
184 cp
->peer
= trans
->peer
;
185 rxrpc_get_peer(cp
->peer
);
187 if (!cp
->exclusive
) {
188 /* Search for a existing client connection unless this is going
189 * to be a connection that's used exclusively for a single call.
192 spin_lock(&local
->client_conns_lock
);
193 p
= local
->client_conns
.rb_node
;
195 conn
= rb_entry(p
, struct rxrpc_connection
, client_node
);
197 #define cmp(X) ((long)conn->params.X - (long)cp->X)
200 cmp(security_level
));
206 goto found_extant_conn
;
208 spin_unlock(&local
->client_conns_lock
);
211 /* We didn't find a connection or we want an exclusive one. */
212 _debug("get new conn");
213 candidate
= rxrpc_alloc_client_connection(cp
, trans
, gfp
);
215 _leave(" = -ENOMEM");
220 /* Assign the call on an exclusive connection to channel 0 and
221 * don't add the connection to the endpoint's shareable conn
224 _debug("exclusive chan 0");
226 atomic_set(&conn
->avail_chans
, RXRPC_MAXCALLS
- 1);
227 spin_lock(&conn
->channel_lock
);
232 /* We need to redo the search before attempting to add a new connection
233 * lest we race with someone else adding a conflicting instance.
236 spin_lock(&local
->client_conns_lock
);
238 pp
= &local
->client_conns
.rb_node
;
242 conn
= rb_entry(parent
, struct rxrpc_connection
, client_node
);
246 cmp(security_level
));
248 pp
= &(*pp
)->rb_left
;
250 pp
= &(*pp
)->rb_right
;
252 goto found_extant_conn
;
255 /* The second search also failed; simply add the new connection with
256 * the new call in channel 0. Note that we need to take the channel
257 * lock before dropping the client conn lock.
263 rb_link_node(&conn
->client_node
, parent
, pp
);
264 rb_insert_color(&conn
->client_node
, &local
->client_conns
);
266 atomic_set(&conn
->avail_chans
, RXRPC_MAXCALLS
- 1);
267 spin_lock(&conn
->channel_lock
);
268 spin_unlock(&local
->client_conns_lock
);
272 _debug("found chan");
274 call
->channel
= chan
;
275 call
->epoch
= conn
->proto
.epoch
;
276 call
->cid
= conn
->proto
.cid
| chan
;
277 call
->call_id
= ++conn
->call_counter
;
278 rcu_assign_pointer(conn
->channels
[chan
], call
);
280 _net("CONNECT call %d on conn %d", call
->debug_id
, conn
->debug_id
);
282 rxrpc_add_call_ID_to_conn(conn
, call
);
283 spin_unlock(&conn
->channel_lock
);
284 _leave(" = %p {u=%d}", conn
, atomic_read(&conn
->usage
));
287 /* We found a suitable connection already in existence. Discard any
288 * candidate we may have allocated, and try to get a channel on this
292 _debug("found conn");
293 rxrpc_get_connection(conn
);
294 spin_unlock(&local
->client_conns_lock
);
296 rxrpc_put_connection(candidate
);
298 if (!atomic_add_unless(&conn
->avail_chans
, -1, 0)) {
299 if (!gfpflags_allow_blocking(gfp
)) {
300 rxrpc_put_connection(conn
);
301 _leave(" = -EAGAIN");
305 add_wait_queue(&conn
->channel_wq
, &myself
);
307 set_current_state(TASK_INTERRUPTIBLE
);
308 if (atomic_add_unless(&conn
->avail_chans
, -1, 0))
310 if (signal_pending(current
))
314 remove_wait_queue(&conn
->channel_wq
, &myself
);
315 __set_current_state(TASK_RUNNING
);
318 /* The connection allegedly now has a free channel and we can now
319 * attach the call to it.
321 spin_lock(&conn
->channel_lock
);
323 for (chan
= 0; chan
< RXRPC_MAXCALLS
; chan
++)
324 if (!conn
->channels
[chan
])
329 remove_wait_queue(&conn
->channel_wq
, &myself
);
330 __set_current_state(TASK_RUNNING
);
331 rxrpc_put_connection(conn
);
332 _leave(" = -ERESTARTSYS");
337 * get a record of an incoming connection
339 struct rxrpc_connection
*rxrpc_incoming_connection(struct rxrpc_transport
*trans
,
342 struct rxrpc_connection
*conn
, *candidate
= NULL
;
343 struct rxrpc_skb_priv
*sp
= rxrpc_skb(skb
);
344 struct rb_node
*p
, **pp
;
345 const char *new = "old";
351 ASSERT(sp
->hdr
.flags
& RXRPC_CLIENT_INITIATED
);
353 epoch
= sp
->hdr
.epoch
;
354 cid
= sp
->hdr
.cid
& RXRPC_CIDMASK
;
356 /* search the connection list first */
357 read_lock_bh(&trans
->conn_lock
);
359 p
= trans
->server_conns
.rb_node
;
361 conn
= rb_entry(p
, struct rxrpc_connection
, service_node
);
363 _debug("maybe %x", conn
->proto
.cid
);
365 if (epoch
< conn
->proto
.epoch
)
367 else if (epoch
> conn
->proto
.epoch
)
369 else if (cid
< conn
->proto
.cid
)
371 else if (cid
> conn
->proto
.cid
)
374 goto found_extant_connection
;
376 read_unlock_bh(&trans
->conn_lock
);
378 /* not yet present - create a candidate for a new record and then
380 candidate
= rxrpc_alloc_connection(GFP_NOIO
);
382 _leave(" = -ENOMEM");
383 return ERR_PTR(-ENOMEM
);
386 candidate
->trans
= trans
;
387 candidate
->proto
.local
= trans
->local
;
388 candidate
->proto
.epoch
= sp
->hdr
.epoch
;
389 candidate
->proto
.cid
= sp
->hdr
.cid
& RXRPC_CIDMASK
;
390 candidate
->proto
.in_clientflag
= RXRPC_CLIENT_INITIATED
;
391 candidate
->params
.local
= trans
->local
;
392 candidate
->params
.peer
= trans
->peer
;
393 candidate
->params
.service_id
= sp
->hdr
.serviceId
;
394 candidate
->security_ix
= sp
->hdr
.securityIndex
;
395 candidate
->out_clientflag
= 0;
396 candidate
->state
= RXRPC_CONN_SERVER
;
397 if (candidate
->params
.service_id
)
398 candidate
->state
= RXRPC_CONN_SERVER_UNSECURED
;
400 write_lock_bh(&trans
->conn_lock
);
402 pp
= &trans
->server_conns
.rb_node
;
406 conn
= rb_entry(p
, struct rxrpc_connection
, service_node
);
408 if (epoch
< conn
->proto
.epoch
)
409 pp
= &(*pp
)->rb_left
;
410 else if (epoch
> conn
->proto
.epoch
)
411 pp
= &(*pp
)->rb_right
;
412 else if (cid
< conn
->proto
.cid
)
413 pp
= &(*pp
)->rb_left
;
414 else if (cid
> conn
->proto
.cid
)
415 pp
= &(*pp
)->rb_right
;
417 goto found_extant_second
;
420 /* we can now add the new candidate to the list */
423 rb_link_node(&conn
->service_node
, p
, pp
);
424 rb_insert_color(&conn
->service_node
, &trans
->server_conns
);
425 atomic_inc(&conn
->trans
->usage
);
427 write_unlock_bh(&trans
->conn_lock
);
429 write_lock(&rxrpc_connection_lock
);
430 list_add_tail(&conn
->link
, &rxrpc_connections
);
431 write_unlock(&rxrpc_connection_lock
);
436 _net("CONNECTION %s %d {%x}", new, conn
->debug_id
, conn
->proto
.cid
);
438 _leave(" = %p {u=%d}", conn
, atomic_read(&conn
->usage
));
441 /* we found the connection in the list immediately */
442 found_extant_connection
:
443 if (sp
->hdr
.securityIndex
!= conn
->security_ix
) {
444 read_unlock_bh(&trans
->conn_lock
);
445 goto security_mismatch
;
447 rxrpc_get_connection(conn
);
448 read_unlock_bh(&trans
->conn_lock
);
451 /* we found the connection on the second time through the list */
453 if (sp
->hdr
.securityIndex
!= conn
->security_ix
) {
454 write_unlock_bh(&trans
->conn_lock
);
455 goto security_mismatch
;
457 rxrpc_get_connection(conn
);
458 write_unlock_bh(&trans
->conn_lock
);
464 _leave(" = -EKEYREJECTED");
465 return ERR_PTR(-EKEYREJECTED
);
469 * find a connection based on transport and RxRPC connection ID for an incoming
472 struct rxrpc_connection
*rxrpc_find_connection(struct rxrpc_transport
*trans
,
475 struct rxrpc_connection
*conn
;
476 struct rxrpc_skb_priv
*sp
= rxrpc_skb(skb
);
480 _enter(",{%x,%x}", sp
->hdr
.cid
, sp
->hdr
.flags
);
482 read_lock_bh(&trans
->conn_lock
);
484 cid
= sp
->hdr
.cid
& RXRPC_CIDMASK
;
485 epoch
= sp
->hdr
.epoch
;
487 if (sp
->hdr
.flags
& RXRPC_CLIENT_INITIATED
) {
488 p
= trans
->server_conns
.rb_node
;
490 conn
= rb_entry(p
, struct rxrpc_connection
, service_node
);
492 _debug("maybe %x", conn
->proto
.cid
);
494 if (epoch
< conn
->proto
.epoch
)
496 else if (epoch
> conn
->proto
.epoch
)
498 else if (cid
< conn
->proto
.cid
)
500 else if (cid
> conn
->proto
.cid
)
506 conn
= idr_find(&rxrpc_client_conn_ids
, cid
>> RXRPC_CIDSHIFT
);
507 if (conn
&& conn
->proto
.epoch
== epoch
)
511 read_unlock_bh(&trans
->conn_lock
);
516 rxrpc_get_connection(conn
);
517 read_unlock_bh(&trans
->conn_lock
);
518 _leave(" = %p", conn
);
523 * Disconnect a call and clear any channel it occupies when that call
526 void rxrpc_disconnect_call(struct rxrpc_call
*call
)
528 struct rxrpc_connection
*conn
= call
->conn
;
529 unsigned chan
= call
->channel
;
531 _enter("%d,%d", conn
->debug_id
, call
->channel
);
533 if (conn
->channels
[chan
] == call
) {
534 rcu_assign_pointer(conn
->channels
[chan
], NULL
);
535 atomic_inc(&conn
->avail_chans
);
536 wake_up(&conn
->channel_wq
);
541 * release a virtual connection
543 void rxrpc_put_connection(struct rxrpc_connection
*conn
)
548 _enter("%p{u=%d,d=%d}",
549 conn
, atomic_read(&conn
->usage
), conn
->debug_id
);
551 ASSERTCMP(atomic_read(&conn
->usage
), >, 0);
553 conn
->put_time
= ktime_get_seconds();
554 if (atomic_dec_and_test(&conn
->usage
)) {
556 rxrpc_queue_delayed_work(&rxrpc_connection_reap
, 0);
563 * destroy a virtual connection
565 static void rxrpc_destroy_connection(struct rxrpc_connection
*conn
)
567 _enter("%p{%d}", conn
, atomic_read(&conn
->usage
));
569 ASSERTCMP(atomic_read(&conn
->usage
), ==, 0);
571 _net("DESTROY CONN %d", conn
->debug_id
);
573 ASSERT(RB_EMPTY_ROOT(&conn
->calls
));
574 rxrpc_purge_queue(&conn
->rx_queue
);
576 conn
->security
->clear(conn
);
577 key_put(conn
->params
.key
);
578 key_put(conn
->server_key
);
580 rxrpc_put_transport(conn
->trans
);
586 * reap dead connections
588 static void rxrpc_connection_reaper(struct work_struct
*work
)
590 struct rxrpc_connection
*conn
, *_p
;
591 unsigned long now
, earliest
, reap_time
;
593 LIST_HEAD(graveyard
);
597 now
= ktime_get_seconds();
598 earliest
= ULONG_MAX
;
600 write_lock(&rxrpc_connection_lock
);
601 list_for_each_entry_safe(conn
, _p
, &rxrpc_connections
, link
) {
602 _debug("reap CONN %d { u=%d,t=%ld }",
603 conn
->debug_id
, atomic_read(&conn
->usage
),
604 (long) now
- (long) conn
->put_time
);
606 if (likely(atomic_read(&conn
->usage
) > 0))
609 if (rxrpc_conn_is_client(conn
)) {
610 struct rxrpc_local
*local
= conn
->params
.local
;
611 spin_lock(&local
->client_conns_lock
);
612 reap_time
= conn
->put_time
+ rxrpc_connection_expiry
;
614 if (atomic_read(&conn
->usage
) > 0) {
616 } else if (reap_time
<= now
) {
617 list_move_tail(&conn
->link
, &graveyard
);
618 rxrpc_put_client_connection_id(conn
);
619 rb_erase(&conn
->client_node
,
620 &local
->client_conns
);
621 } else if (reap_time
< earliest
) {
622 earliest
= reap_time
;
625 spin_unlock(&local
->client_conns_lock
);
627 write_lock_bh(&conn
->trans
->conn_lock
);
628 reap_time
= conn
->put_time
+ rxrpc_connection_expiry
;
630 if (atomic_read(&conn
->usage
) > 0) {
632 } else if (reap_time
<= now
) {
633 list_move_tail(&conn
->link
, &graveyard
);
634 rb_erase(&conn
->service_node
,
635 &conn
->trans
->server_conns
);
636 } else if (reap_time
< earliest
) {
637 earliest
= reap_time
;
640 write_unlock_bh(&conn
->trans
->conn_lock
);
643 write_unlock(&rxrpc_connection_lock
);
645 if (earliest
!= ULONG_MAX
) {
646 _debug("reschedule reaper %ld", (long) earliest
- now
);
647 ASSERTCMP(earliest
, >, now
);
648 rxrpc_queue_delayed_work(&rxrpc_connection_reap
,
649 (earliest
- now
) * HZ
);
652 /* then destroy all those pulled out */
653 while (!list_empty(&graveyard
)) {
654 conn
= list_entry(graveyard
.next
, struct rxrpc_connection
,
656 list_del_init(&conn
->link
);
658 ASSERTCMP(atomic_read(&conn
->usage
), ==, 0);
659 rxrpc_destroy_connection(conn
);
666 * preemptively destroy all the connection records rather than waiting for them
669 void __exit
rxrpc_destroy_all_connections(void)
673 rxrpc_connection_expiry
= 0;
674 cancel_delayed_work(&rxrpc_connection_reap
);
675 rxrpc_queue_delayed_work(&rxrpc_connection_reap
, 0);