/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"
/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;
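
/*
 * Human-readable names for the call states, indexed by call->state.  These
 * are used when logging call state, e.g. by the _debug() traces and the
 * pr_err() in rxrpc_destroy_all_calls() below.
 */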
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_UNINITIALISED]		= "Uninit",
	[RXRPC_CALL_CLIENT_AWAIT_CONN]		= "ClWtConn",
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_call_by_user_ID(struct rxrpc_sock *rx,
					      unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}
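
	/* All four timers carry the call pointer as their argument; their
	 * expiry handlers are defined later in this file.
	 */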
	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->link);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}

/*
 * Allocate a new client call.
 */
static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
						  struct sockaddr_rxrpc *srx,
						  gfp_t gfp)
{
	struct rxrpc_call *call;

	_enter("");

	ASSERT(rx->local != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);
	call->state = RXRPC_CALL_CLIENT_AWAIT_CONN;

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	call->local = rx->local;
	call->service_id = srx->srx_service;
	call->in_clientflag = 0;

	_leave(" = %p", call);
	return call;
}

/*
 * Begin client call.
 */
static int rxrpc_begin_client_call(struct rxrpc_call *call,
				   struct rxrpc_conn_parameters *cp,
				   struct sockaddr_rxrpc *srx,
				   gfp_t gfp)
{
	int ret;

	/* Set up or get a connection record and set the protocol parameters,
	 * including channel number and call ID.
	 */
	ret = rxrpc_connect_call(call, cp, srx, gfp);
	if (ret < 0)
		return ret;

	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	spin_lock(&call->conn->params.peer->lock);
	hlist_add_head(&call->error_link, &call->conn->params.peer->error_targets);
	spin_unlock(&call->conn->params.peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	return 0;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_new_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_conn_parameters *cp,
					 struct sockaddr_rxrpc *srx,
					 unsigned long user_call_ID,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *xcall;
	struct rb_node *parent, **pp;
	int ret;

	_enter("%p,%lx", rx, user_call_ID);

	call = rxrpc_alloc_client_call(rx, srx, gfp);
	if (IS_ERR(call)) {
		_leave(" = %ld", PTR_ERR(call));
		return call;
	}

	/* Publish the call, even though it is incompletely set up as yet */
	call->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &call->flags);

	write_lock(&rx->call_lock);
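
	/* Calls are indexed on the socket by user_call_ID so that they can be
	 * found again by rxrpc_find_call_by_user_ID().
	 */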
	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		xcall = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < xcall->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > xcall->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_user_ID_now_present;
	}

	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	ret = rxrpc_begin_client_call(call, cp, srx, gfp);
	if (ret < 0)
		goto error;

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

error:
	write_lock(&rx->call_lock);
	rb_erase(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(call);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = %d", ret);
	return ERR_PTR(ret);

	/* We unexpectedly found the user ID in the list after taking
	 * the call_lock.  This shouldn't happen unless the user races
	 * with itself and tries to add the same user ID twice at the
	 * same time in different threads.
	 */
found_user_ID_now_present:
	write_unlock(&rx->call_lock);
	call->state = RXRPC_CALL_DEAD;
	rxrpc_put_call(call);
	_leave(" = -EEXIST [%p]", call);
	return ERR_PTR(-EEXIST);
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct sk_buff *skb)
{
	struct rxrpc_skb_priv *sp = rxrpc_skb(skb);
	struct rxrpc_call *call, *candidate;
	u32 call_id, chan;

	_enter(",%d", conn->debug_id);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(GFP_NOIO);
	if (!candidate)
		return ERR_PTR(-EBUSY);
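
	/* Fill in the candidate call; it only becomes visible to anyone else
	 * once it is published on the connection's channel array below.
	 */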
	chan = sp->hdr.cid & RXRPC_CHANNELMASK;
	candidate->socket	= rx;
	candidate->conn		= conn;
	candidate->cid		= sp->hdr.cid;
	candidate->call_id	= sp->hdr.callNumber;
	candidate->channel	= chan;
	candidate->rx_data_post	= 0;
	candidate->state	= RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;

	spin_lock(&conn->channel_lock);

	/* set the channel for this call */
	call = rcu_dereference_protected(conn->channels[chan].call,
					 lockdep_is_held(&conn->channel_lock));

	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == sp->hdr.callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			__rxrpc_disconnect_call(call);
		} else {
			spin_unlock(&conn->channel_lock);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = sp->hdr.callNumber;

	/* We just ignore calls prior to the current call ID.  Terminated calls
	 * are handled via the connection.
	 */
	if (call_id <= conn->channels[chan].call_counter)
		goto old_call; /* TODO: Just drop packet */

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	conn->channels[chan].call_counter = call_id;
	rcu_assign_pointer(conn->channels[chan].call, call);
	sock_hold(&rx->sk);
	rxrpc_get_connection(conn);
	spin_unlock(&conn->channel_lock);

	spin_lock(&conn->params.peer->lock);
	hlist_add_head(&call->error_link, &conn->params.peer->error_targets);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	call->local = conn->params.local;
	call->epoch = conn->proto.epoch;
	call->service_id = conn->params.service_id;
	call->in_clientflag = RXRPC_CLIENT_INITIATED;

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	spin_unlock(&conn->channel_lock);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	spin_lock(&conn->params.peer->lock);
	hlist_del_init(&call->error_link);
	spin_unlock(&conn->params.peer->lock);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	write_lock_bh(&call->state_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->local_abort = RX_CALL_DEAD;
	}
	write_unlock_bh(&call->state_lock);

	rxrpc_disconnect_call(call);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}
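
	/* When the deadspan timer expires, rxrpc_dead_call_expired() marks the
	 * call RXRPC_CALL_DEAD and drops the ref that was passed to it.
	 */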
	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->local_abort = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);
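
	/* Actual destruction is deferred to the destroyer work item, as the
	 * final put may happen in a context that can't sleep.
	 */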
	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * Final call destruction under RCU.
 */
static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
{
	struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);

	rxrpc_purge_queue(&call->rx_queue);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	ASSERTCMP(call->conn, ==, NULL);
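
	/* Each Tx window slot holds an sk_buff pointer with the bottom bit
	 * used as a flag, so mask it off before freeing the skb.
	 */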
	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	call_rcu(&call->rcu, rxrpc_rcu_destroy_call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
		default:
			pr_err("Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				pr_err("Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				pr_err("OOS queue occupied\n");
			break;
		}
		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}
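
/*
 * The timer expiry handlers below run in softirq context; they don't do the
 * work themselves, but rather set an event bit on the call and queue the call
 * processor.
 */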

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}