/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit  ",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

const char *const rxrpc_call_completions[NR__RXRPC_CALL_COMPLETIONS] = {
	[RXRPC_CALL_SUCCEEDED]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_LOCAL_ERROR]		= "LocError",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);
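
/* Each call carries four timers and two work items, all initialised in
 * rxrpc_alloc_call() below: lifetimer bounds the overall lifetime of the
 * call, deadspan reaps a released call after a grace period, and the ACK and
 * resend timers drive protocol events.  The processor work item runs
 * rxrpc_process_call() and the destroyer runs rxrpc_destroy_call().
 */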

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->chan_wait_link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	skb_queue_head_init(&call->knlrecv_queue);
	init_waitqueue_head(&call->waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
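
	/* The sock_node is not linked into any tree yet; the 0xed fill below
	 * just poisons it so that premature use shows up clearly in a dump.
	 */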
	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;
	call->rx_data_post = 1;
	call->service_id = srx->srx_service;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link,
		       &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	const void *here = __builtin_return_address(0);
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	trace_rxrpc_call(call, 0, atomic_read(&call->usage), 0, here,
			 (const void *)user_call_ID);

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}
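
	/* The socket's call tree holds its own ref on the call, so take an
	 * extra ref before linking the node in (the error path below drops it
	 * again when the call is removed from the tree).
	 */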
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	set_bit(RXRPC_CALL_RELEASED, &call->flags);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	const void *here = __builtin_return_address(0);
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	trace_rxrpc_call(candidate, 1, atomic_read(&candidate->usage),
			 0, here, NULL);

	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket	= rx;
	candidate->conn		= conn;
	candidate->peer		= conn->params.peer;
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
	candidate->flags	|= (1 << RXRPC_CALL_IS_SERVICE);
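
	/* If the connection carries a security class (security_ix > 0), the
	 * call is held in the SERVER_SECURING state until the connection's
	 * security exchange completes, rather than going straight to
	 * SERVER_ACCEPTING.
	 */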
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->cid & RXRPC_CHANNELMASK, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
			/* fall through */
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state == RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(conn, call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	rxrpc_get_connection(conn);
	rxrpc_get_peer(call->peer);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->service_id = conn->params.service_id;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * Note the re-emergence of a call.
 */
void rxrpc_see_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);

	if (call) {
		int n = atomic_read(&call->usage);
		int m = atomic_read(&call->skb_count);

		trace_rxrpc_call(call, 2, n, m, here, 0);
	}
}

/*
 * Note the addition of a ref on a call.
 */
void rxrpc_get_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_read(&call->skb_count);

	trace_rxrpc_call(call, 3, n, m, here, 0);
}

/*
 * Note the addition of a ref on a call for a socket buffer.
 */
void rxrpc_get_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n = atomic_inc_return(&call->usage);
	int m = atomic_inc_return(&call->skb_count);

	trace_rxrpc_call(call, 4, n, m, here, skb);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	rxrpc_see_call(call);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);
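
	/* Unlink the call from the socket: it is either still on a
	 * to-be-accepted queue or, if userspace has claimed it, in the
	 * socket's user_call_ID tree.
	 */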
	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		__rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			spin_unlock_bh(&call->lock);

			sp = rxrpc_skb(skb);
			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);
	}
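
	/* Stop the protocol timers and arm the deadspan timer instead: the
	 * socket's ref on the call is handed to it, and when it fires the
	 * call is marked DEAD and the ref dropped, which queues destruction
	 * once no other refs remain.
	 */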
	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched = false;

	rxrpc_see_call(call);
	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = __rxrpc_abort_call(call, RX_CALL_DEAD, ECONNRESET);
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
	}
	write_unlock(&call->state_lock);
	if (sched)
		rxrpc_queue_call(call);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	read_lock_bh(&rx->call_lock);

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
}
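
/*
 * Release a ref on a call.  When the last ref is dropped the call must
 * already be dead, and destruction is queued.
 */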
void rxrpc_put_call(struct rxrpc_call *call)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	ASSERT(call != NULL);

	n = atomic_dec_return(&call->usage);
	m = atomic_read(&call->skb_count);
	trace_rxrpc_call(call, 5, n, m, here, NULL);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

/*
 * Release a call ref held by a socket buffer.
 */
void rxrpc_put_call_for_skb(struct rxrpc_call *call, struct sk_buff *skb)
{
	const void *here = __builtin_return_address(0);
	int n, m;

	n = atomic_dec_return(&call->usage);
	m = atomic_dec_return(&call->skb_count);
	trace_rxrpc_call(call, 6, n, m, here, skb);
	if (n == 0) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	rxrpc_purge_queue(&call->knlrecv_queue);
	rxrpc_put_peer(call->peer);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);
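
	/* The Tx (ACK) window is a circular buffer of skb pointers sized as a
	 * power of two; bit 0 of each entry is reserved as a flag, so it is
	 * masked off before the entry is treated as an skb pointer, and the
	 * tail is advanced with a mask of (acks_winsz - 1).
	 */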
	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	rxrpc_purge_queue(&call->knlrecv_queue);
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}
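
/*
 * Destroy a call: runs from the work queue once the final ref has gone, takes
 * the call off the global rxrpc_calls list and finishes the cleanup.
 */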
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%x,%p}",
	       call, atomic_read(&call->usage), call->cid, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		rxrpc_see_call(call);
		list_del_init(&call->link);
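
		/* Dispose of the call according to its remaining usage count:
		 * zero means it is already dead; one should just be the
		 * deadspan timer's ref, so force that to expire now; anything
		 * else means the call is still in use and is reported.
		 */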
		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
	rxrpc_queue_call(call);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	rxrpc_see_call(call);
	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	if (!test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
}