/* RxRPC individual remote procedure call handling
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/circ_buf.h>
#include <linux/hashtable.h>
#include <linux/spinlock_types.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include "ar-internal.h"

/*
 * Maximum lifetime of a call (in jiffies).
 */
unsigned int rxrpc_max_call_lifetime = 60 * HZ;

/*
 * Time till dead call expires after last use (in jiffies).
 */
unsigned int rxrpc_dead_call_expiry = 2 * HZ;

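/* Both of the above are expressed in jiffies; in trees of this vintage they
 * are also expected to be exported as sysctls (by net/rxrpc/sysctl.c) so
 * they can be tuned at runtime - an assumption about the rest of the
 * driver, not something visible from this file alone.
 */

/* Displayable names for the call states, space-padded to a constant eight
 * characters ("SvBusy  ", "Dead    ") so that they tabulate cleanly when
 * printed, e.g. by the driver's procfs interface (again an assumption about
 * the caller; only the padding is visible here).
 */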
const char *const rxrpc_call_states[NR__RXRPC_CALL_STATES] = {
	[RXRPC_CALL_CLIENT_SEND_REQUEST]	= "ClSndReq",
	[RXRPC_CALL_CLIENT_AWAIT_REPLY]		= "ClAwtRpl",
	[RXRPC_CALL_CLIENT_RECV_REPLY]		= "ClRcvRpl",
	[RXRPC_CALL_CLIENT_FINAL_ACK]		= "ClFnlACK",
	[RXRPC_CALL_SERVER_SECURING]		= "SvSecure",
	[RXRPC_CALL_SERVER_ACCEPTING]		= "SvAccept",
	[RXRPC_CALL_SERVER_RECV_REQUEST]	= "SvRcvReq",
	[RXRPC_CALL_SERVER_ACK_REQUEST]		= "SvAckReq",
	[RXRPC_CALL_SERVER_SEND_REPLY]		= "SvSndRpl",
	[RXRPC_CALL_SERVER_AWAIT_ACK]		= "SvAwtACK",
	[RXRPC_CALL_COMPLETE]			= "Complete",
	[RXRPC_CALL_SERVER_BUSY]		= "SvBusy  ",
	[RXRPC_CALL_REMOTELY_ABORTED]		= "RmtAbort",
	[RXRPC_CALL_LOCALLY_ABORTED]		= "LocAbort",
	[RXRPC_CALL_NETWORK_ERROR]		= "NetError",
	[RXRPC_CALL_DEAD]			= "Dead    ",
};

struct kmem_cache *rxrpc_call_jar;
LIST_HEAD(rxrpc_calls);
DEFINE_RWLOCK(rxrpc_call_lock);

static void rxrpc_destroy_call(struct work_struct *work);
static void rxrpc_call_life_expired(unsigned long _call);
static void rxrpc_dead_call_expired(unsigned long _call);
static void rxrpc_ack_time_expired(unsigned long _call);
static void rxrpc_resend_time_expired(unsigned long _call);

static DEFINE_SPINLOCK(rxrpc_call_hash_lock);
static DEFINE_HASHTABLE(rxrpc_call_hash, 10);

/*
 * Hash function for rxrpc_call_hash
 */
static unsigned long rxrpc_call_hashfunc(
	u8		in_clientflag,
	u32		cid,
	u32		call_id,
	u32		epoch,
	u16		service_id,
	sa_family_t	proto,
	void		*localptr,
	unsigned int	addr_size,
	const u8	*peer_addr)
{
	const u16 *p;
	unsigned int i;
	unsigned long key;

	_enter("");

	key = (unsigned long)localptr;
	/* We just want to add up the header field values (which are held
	 * here in host byte order), so simple unsigned addition is fine.
	 */
	key += epoch;
	key += service_id;
	key += call_id;
	key += (cid & RXRPC_CIDMASK) >> RXRPC_CIDSHIFT;
	key += cid & RXRPC_CHANNELMASK;
	key += in_clientflag;
	key += proto;
	/* Step through the peer address in 16-bit portions for speed */
	for (i = 0, p = (const u16 *)peer_addr; i < addr_size >> 1; i++, p++)
		key += *p;
	_leave(" key = 0x%lx", key);
	return key;
}
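
/* The key is a cheap additive fold of the fields that identify a call, not
 * a strong hash: hash_add_rcu() reduces it to the table's bucket index with
 * hash_min(), and rxrpc_find_call_hash() below re-checks every field, so
 * colliding keys cost a little time but never cause a misroute.
 */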

/*
 * Add a call to the hashtable
 */
static void rxrpc_call_hash_add(struct rxrpc_call *call)
{
	unsigned long key;
	unsigned int addr_size = 0;

	_enter("");
	switch (call->proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}
	key = rxrpc_call_hashfunc(call->in_clientflag, call->cid,
				  call->call_id, call->epoch,
				  call->service_id, call->proto,
				  call->conn->trans->local, addr_size,
				  call->peer_ip.ipv6_addr);
	/* Store the full key in the call */
	call->hash_key = key;
	spin_lock(&rxrpc_call_hash_lock);
	hash_add_rcu(rxrpc_call_hash, &call->hash_node, key);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}

/*
 * Remove a call from the hashtable
 */
static void rxrpc_call_hash_del(struct rxrpc_call *call)
{
	_enter("");
	spin_lock(&rxrpc_call_hash_lock);
	hash_del_rcu(&call->hash_node);
	spin_unlock(&rxrpc_call_hash_lock);
	_leave("");
}
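
/* rxrpc_call_hash_lock serializes writers only; lookups walk the buckets
 * with hash_for_each_possible_rcu(), so readers are expected to be inside
 * an RCU read-side critical section (an assumption about the callers, which
 * live outside this file).
 */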

/*
 * Find a call in the hashtable and return it, or NULL if it
 * isn't there.
 */
struct rxrpc_call *rxrpc_find_call_hash(
	struct rxrpc_host_header *hdr,
	void *localptr,
	sa_family_t proto,
	const void *peer_addr)
{
	unsigned long key;
	unsigned int addr_size = 0;
	struct rxrpc_call *call = NULL;
	struct rxrpc_call *ret = NULL;
	u8 in_clientflag = hdr->flags & RXRPC_CLIENT_INITIATED;

	_enter("");
	switch (proto) {
	case AF_INET:
		addr_size = sizeof(call->peer_ip.ipv4_addr);
		break;
	case AF_INET6:
		addr_size = sizeof(call->peer_ip.ipv6_addr);
		break;
	default:
		break;
	}

	key = rxrpc_call_hashfunc(in_clientflag, hdr->cid, hdr->callNumber,
				  hdr->epoch, hdr->serviceId,
				  proto, localptr, addr_size,
				  peer_addr);
	hash_for_each_possible_rcu(rxrpc_call_hash, call, hash_node, key) {
		if (call->hash_key == key &&
		    call->call_id == hdr->callNumber &&
		    call->cid == hdr->cid &&
		    call->in_clientflag == in_clientflag &&
		    call->service_id == hdr->serviceId &&
		    call->proto == proto &&
		    call->local == localptr &&
		    memcmp(call->peer_ip.ipv6_addr, peer_addr,
			   addr_size) == 0 &&
		    call->epoch == hdr->epoch) {
			ret = call;
			break;
		}
	}
	_leave(" = %p", ret);
	return ret;
}
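
/* Illustrative call from a packet-receive path (a sketch only - the real
 * caller lives elsewhere in the driver, and "local", "sp" and "skb" stand
 * in for whatever that caller has to hand):
 *
 *	call = rxrpc_find_call_hash(&sp->hdr, local, AF_INET,
 *				    &ip_hdr(skb)->saddr);
 *	if (!call)
 *		goto cant_route_call;
 *
 * Note that no reference is taken by the lookup; the caller is responsible
 * for pinning the call before using it outside the RCU-protected walk.
 */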

/*
 * allocate a new call
 */
static struct rxrpc_call *rxrpc_alloc_call(gfp_t gfp)
{
	struct rxrpc_call *call;

	call = kmem_cache_zalloc(rxrpc_call_jar, gfp);
	if (!call)
		return NULL;

	call->acks_winsz = 16;
	call->acks_window = kmalloc(call->acks_winsz * sizeof(unsigned long),
				    gfp);
	if (!call->acks_window) {
		kmem_cache_free(rxrpc_call_jar, call);
		return NULL;
	}

	setup_timer(&call->lifetimer, &rxrpc_call_life_expired,
		    (unsigned long) call);
	setup_timer(&call->deadspan, &rxrpc_dead_call_expired,
		    (unsigned long) call);
	setup_timer(&call->ack_timer, &rxrpc_ack_time_expired,
		    (unsigned long) call);
	setup_timer(&call->resend_timer, &rxrpc_resend_time_expired,
		    (unsigned long) call);
	INIT_WORK(&call->destroyer, &rxrpc_destroy_call);
	INIT_WORK(&call->processor, &rxrpc_process_call);
	INIT_LIST_HEAD(&call->accept_link);
	skb_queue_head_init(&call->rx_queue);
	skb_queue_head_init(&call->rx_oos_queue);
	init_waitqueue_head(&call->tx_waitq);
	spin_lock_init(&call->lock);
	rwlock_init(&call->state_lock);
	atomic_set(&call->usage, 1);
	call->debug_id = atomic_inc_return(&rxrpc_debug_id);
	call->state = RXRPC_CALL_CLIENT_SEND_REQUEST;

	memset(&call->sock_node, 0xed, sizeof(call->sock_node));

	call->rx_data_expect = 1;
	call->rx_data_eaten = 0;
	call->rx_first_oos = 0;
	call->ackr_win_top = call->rx_data_eaten + 1 + rxrpc_rx_window_size;
	call->creation_jif = jiffies;
	return call;
}
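
/* Of the four timers set up above, three (life, ACK and resend) expire into
 * handlers at the bottom of this file that merely set an event bit and queue
 * the call's processor work item; only the deadspan timer does its work (the
 * transition to RXRPC_CALL_DEAD) directly in timer context.
 */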

/*
 * allocate a new client call and attempt to get a connection slot for it
 */
static struct rxrpc_call *rxrpc_alloc_client_call(
	struct rxrpc_sock *rx,
	struct rxrpc_transport *trans,
	struct rxrpc_conn_bundle *bundle,
	gfp_t gfp)
{
	struct rxrpc_call *call;
	int ret;

	_enter("");

	ASSERT(rx != NULL);
	ASSERT(trans != NULL);
	ASSERT(bundle != NULL);

	call = rxrpc_alloc_call(gfp);
	if (!call)
		return ERR_PTR(-ENOMEM);

	sock_hold(&rx->sk);
	call->socket = rx;
	call->rx_data_post = 1;

	ret = rxrpc_connect_call(rx, trans, bundle, call, gfp);
	if (ret < 0) {
		/* Unwind what rxrpc_alloc_call() and the sock_hold() above
		 * set up; freeing just the jar entry would leak both the
		 * socket reference and the ACK window buffer.
		 */
		sock_put(&rx->sk);
		kfree(call->acks_window);
		kmem_cache_free(rxrpc_call_jar, call);
		return ERR_PTR(ret);
	}

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	}
	call->epoch = call->conn->epoch;
	call->service_id = call->conn->service_id;
	call->in_clientflag = call->conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	spin_lock(&call->conn->trans->peer->lock);
	list_add(&call->error_link, &call->conn->trans->peer->error_targets);
	spin_unlock(&call->conn->trans->peer->lock);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);

	_leave(" = %p", call);
	return call;
}

/*
 * set up a call for the given data
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_get_client_call(struct rxrpc_sock *rx,
					 struct rxrpc_transport *trans,
					 struct rxrpc_conn_bundle *bundle,
					 unsigned long user_call_ID,
					 int create,
					 gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node *p, *parent, **pp;

	_enter("%p,%d,%d,%lx,%d",
	       rx, trans ? trans->debug_id : -1, bundle ? bundle->debug_id : -1,
	       user_call_ID, create);

	/* search the extant calls first for one that matches the specified
	 * user ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);

	if (!create || !trans)
		return ERR_PTR(-EBADSLT);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
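	/* (The lock is dropped across the allocation, so another thread may
	 * install a call with the same user ID in the meantime; the second
	 * search below catches that and discards our candidate.)
	 */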
	candidate = rxrpc_alloc_client_call(rx, trans, bundle, gfp);
	if (IS_ERR(candidate)) {
		_leave(" = %ld", PTR_ERR(candidate));
		return candidate;
	}

	candidate->user_call_ID = user_call_ID;
	__set_bit(RXRPC_CALL_HAS_USERID, &candidate->flags);

	write_lock(&rx->call_lock);

	pp = &rx->calls.rb_node;
	parent = NULL;
	while (*pp) {
		parent = *pp;
		call = rb_entry(parent, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			pp = &(*pp)->rb_left;
		else if (user_call_ID > call->user_call_ID)
			pp = &(*pp)->rb_right;
		else
			goto found_extant_second;
	}

	/* second search also failed; add the new call */
	call = candidate;
	candidate = NULL;
	rxrpc_get_call(call);

	rb_link_node(&call->sock_node, parent, pp);
	rb_insert_color(&call->sock_node, &rx->calls);
	write_unlock(&rx->call_lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	_net("CALL new %d on CONN %d", call->debug_id, call->conn->debug_id);

	_leave(" = %p [new]", call);
	return call;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [extant %d]", call, atomic_read(&call->usage));
	return call;

	/* we found the call on the second time through the list */
found_extant_second:
	rxrpc_get_call(call);
	write_unlock(&rx->call_lock);
	rxrpc_put_call(candidate);
	_leave(" = %p [second %d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * set up an incoming call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_incoming_call(struct rxrpc_sock *rx,
				       struct rxrpc_connection *conn,
				       struct rxrpc_host_header *hdr,
				       gfp_t gfp)
{
	struct rxrpc_call *call, *candidate;
	struct rb_node **p, *parent;
	u32 call_id;

	_enter(",%d,,%x", conn->debug_id, gfp);

	ASSERT(rx != NULL);

	candidate = rxrpc_alloc_call(gfp);
	if (!candidate)
		return ERR_PTR(-EBUSY);

	candidate->socket = rx;
	candidate->conn = conn;
	candidate->cid = hdr->cid;
	candidate->call_id = hdr->callNumber;
	candidate->channel = hdr->cid & RXRPC_CHANNELMASK;
	candidate->rx_data_post = 0;
	candidate->state = RXRPC_CALL_SERVER_ACCEPTING;
	if (conn->security_ix > 0)
		candidate->state = RXRPC_CALL_SERVER_SECURING;
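	/* (A nonzero security index means the connection is secured, and the
	 * call must complete security negotiation before it can be
	 * accepted.)
	 */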

	write_lock_bh(&conn->lock);

	/* set the channel for this call */
	call = conn->channels[candidate->channel];
	_debug("channel[%u] is %p", candidate->channel, call);
	if (call && call->call_id == hdr->callNumber) {
		/* already set; must've been a duplicate packet */
		_debug("extant call [%d]", call->state);
		ASSERTCMP(call->conn, ==, conn);

		read_lock(&call->state_lock);
		switch (call->state) {
		case RXRPC_CALL_LOCALLY_ABORTED:
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				rxrpc_queue_call(call);
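			/* fall through */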
		case RXRPC_CALL_REMOTELY_ABORTED:
			read_unlock(&call->state_lock);
			goto aborted_call;
		default:
			rxrpc_get_call(call);
			read_unlock(&call->state_lock);
			goto extant_call;
		}
	}

	if (call) {
		/* it seems the channel is still in use from the previous call
		 * - ditch the old binding if its call is now complete */
		_debug("CALL: %u { %s }",
		       call->debug_id, rxrpc_call_states[call->state]);

		if (call->state >= RXRPC_CALL_COMPLETE) {
			conn->channels[call->channel] = NULL;
		} else {
			write_unlock_bh(&conn->lock);
			/* The candidate was never exposed, so unwind its
			 * allocations by hand (the ACK window would otherwise
			 * be leaked).
			 */
			kfree(candidate->acks_window);
			kmem_cache_free(rxrpc_call_jar, candidate);
			_leave(" = -EBUSY");
			return ERR_PTR(-EBUSY);
		}
	}

	/* check the call number isn't duplicate */
	_debug("check dup");
	call_id = hdr->callNumber;
	p = &conn->calls.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;
		call = rb_entry(parent, struct rxrpc_call, conn_node);

		/* The tree is sorted in order of the call ID (which, after
		 * conversion from the wire header, is in host byte order
		 * here).
		 */
		if (call_id < call->call_id)
			p = &(*p)->rb_left;
		else if (call_id > call->call_id)
			p = &(*p)->rb_right;
		else
			goto old_call;
	}

	/* make the call available */
	_debug("new call");
	call = candidate;
	candidate = NULL;
	rb_link_node(&call->conn_node, parent, p);
	rb_insert_color(&call->conn_node, &conn->calls);
	conn->channels[call->channel] = call;
	sock_hold(&rx->sk);
	atomic_inc(&conn->usage);
	write_unlock_bh(&conn->lock);

	spin_lock(&conn->trans->peer->lock);
	list_add(&call->error_link, &conn->trans->peer->error_targets);
	spin_unlock(&conn->trans->peer->lock);

	write_lock_bh(&rxrpc_call_lock);
	list_add_tail(&call->link, &rxrpc_calls);
	write_unlock_bh(&rxrpc_call_lock);

	/* Record copies of information for hashtable lookup */
	call->proto = rx->proto;
	call->local = conn->trans->local;
	switch (call->proto) {
	case AF_INET:
		call->peer_ip.ipv4_addr =
			conn->trans->peer->srx.transport.sin.sin_addr.s_addr;
		break;
	case AF_INET6:
		memcpy(call->peer_ip.ipv6_addr,
		       conn->trans->peer->srx.transport.sin6.sin6_addr.in6_u.u6_addr8,
		       sizeof(call->peer_ip.ipv6_addr));
		break;
	default:
		break;
	}
	call->epoch = conn->epoch;
	call->service_id = conn->service_id;
	call->in_clientflag = conn->in_clientflag;
	/* Add the new call to the hashtable */
	rxrpc_call_hash_add(call);

	_net("CALL incoming %d on CONN %d", call->debug_id, call->conn->debug_id);

	call->lifetimer.expires = jiffies + rxrpc_max_call_lifetime;
	add_timer(&call->lifetimer);
	_leave(" = %p {%d} [new]", call, call->debug_id);
	return call;

extant_call:
	write_unlock_bh(&conn->lock);
	/* As on the other failure exits, the candidate was never made
	 * visible, so free its ACK window as well as the call record.
	 */
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = %p {%d} [extant]", call, call ? call->debug_id : -1);
	return call;

aborted_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNABORTED");
	return ERR_PTR(-ECONNABORTED);

old_call:
	write_unlock_bh(&conn->lock);
	kfree(candidate->acks_window);
	kmem_cache_free(rxrpc_call_jar, candidate);
	_leave(" = -ECONNRESET [old]");
	return ERR_PTR(-ECONNRESET);
}

/*
 * find an extant server call
 * - called in process context with IRQs enabled
 */
struct rxrpc_call *rxrpc_find_server_call(struct rxrpc_sock *rx,
					  unsigned long user_call_ID)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p,%lx", rx, user_call_ID);

	/* search the extant calls for one that matches the specified user
	 * ID */
	read_lock(&rx->call_lock);

	p = rx->calls.rb_node;
	while (p) {
		call = rb_entry(p, struct rxrpc_call, sock_node);

		if (user_call_ID < call->user_call_ID)
			p = p->rb_left;
		else if (user_call_ID > call->user_call_ID)
			p = p->rb_right;
		else
			goto found_extant_call;
	}

	read_unlock(&rx->call_lock);
	_leave(" = NULL");
	return NULL;

	/* we found the call in the list immediately */
found_extant_call:
	rxrpc_get_call(call);
	read_unlock(&rx->call_lock);
	_leave(" = %p [%d]", call, atomic_read(&call->usage));
	return call;
}

/*
 * detach a call from a socket and set up for release
 */
void rxrpc_release_call(struct rxrpc_call *call)
{
	struct rxrpc_connection *conn = call->conn;
	struct rxrpc_sock *rx = call->socket;

	_enter("{%d,%d,%d,%d}",
	       call->debug_id, atomic_read(&call->usage),
	       atomic_read(&call->ackr_not_idle),
	       call->rx_first_oos);

	spin_lock_bh(&call->lock);
	if (test_and_set_bit(RXRPC_CALL_RELEASED, &call->flags))
		BUG();
	spin_unlock_bh(&call->lock);

	/* dissociate from the socket
	 * - the socket's ref on the call is passed to the death timer
	 */
	_debug("RELEASE CALL %p (%d CONN %p)", call, call->debug_id, conn);

	write_lock_bh(&rx->call_lock);
	if (!list_empty(&call->accept_link)) {
		_debug("unlinking once-pending call %p { e=%lx f=%lx }",
		       call, call->events, call->flags);
		ASSERT(!test_bit(RXRPC_CALL_HAS_USERID, &call->flags));
		list_del_init(&call->accept_link);
		sk_acceptq_removed(&rx->sk);
	} else if (test_bit(RXRPC_CALL_HAS_USERID, &call->flags)) {
		rb_erase(&call->sock_node, &rx->calls);
		memset(&call->sock_node, 0xdd, sizeof(call->sock_node));
		clear_bit(RXRPC_CALL_HAS_USERID, &call->flags);
	}
	write_unlock_bh(&rx->call_lock);

	/* free up the channel for reuse */
	spin_lock(&conn->trans->client_lock);
	write_lock_bh(&conn->lock);
	write_lock(&call->state_lock);

	if (conn->channels[call->channel] == call)
		conn->channels[call->channel] = NULL;

	if (conn->out_clientflag && conn->bundle) {
		conn->avail_calls++;
		switch (conn->avail_calls) {
		case 1:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->avail_conns);
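			/* fall through */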
		case 2 ... RXRPC_MAXCALLS - 1:
			ASSERT(conn->channels[0] == NULL ||
			       conn->channels[1] == NULL ||
			       conn->channels[2] == NULL ||
			       conn->channels[3] == NULL);
			break;
		case RXRPC_MAXCALLS:
			list_move_tail(&conn->bundle_link,
				       &conn->bundle->unused_conns);
			ASSERT(conn->channels[0] == NULL &&
			       conn->channels[1] == NULL &&
			       conn->channels[2] == NULL &&
			       conn->channels[3] == NULL);
			break;
		default:
			printk(KERN_ERR "RxRPC: conn->avail_calls=%d\n",
			       conn->avail_calls);
			BUG();
		}
	}

	spin_unlock(&conn->trans->client_lock);

	if (call->state < RXRPC_CALL_COMPLETE &&
	    call->state != RXRPC_CALL_CLIENT_FINAL_ACK) {
		_debug("+++ ABORTING STATE %d +++\n", call->state);
		call->state = RXRPC_CALL_LOCALLY_ABORTED;
		call->abort_code = RX_CALL_DEAD;
		set_bit(RXRPC_CALL_EV_ABORT, &call->events);
		rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
	write_unlock_bh(&conn->lock);

	/* clean up the Rx queue */
	if (!skb_queue_empty(&call->rx_queue) ||
	    !skb_queue_empty(&call->rx_oos_queue)) {
		struct rxrpc_skb_priv *sp;
		struct sk_buff *skb;

		_debug("purge Rx queues");

		spin_lock_bh(&call->lock);
		while ((skb = skb_dequeue(&call->rx_queue)) ||
		       (skb = skb_dequeue(&call->rx_oos_queue))) {
			sp = rxrpc_skb(skb);
			if (sp->call) {
				ASSERTCMP(sp->call, ==, call);
				rxrpc_put_call(call);
				sp->call = NULL;
			}
			skb->destructor = NULL;
			spin_unlock_bh(&call->lock);

			_debug("- zap %s %%%u #%u",
			       rxrpc_pkts[sp->hdr.type],
			       sp->hdr.serial, sp->hdr.seq);
			rxrpc_free_skb(skb);
			spin_lock_bh(&call->lock);
		}
		spin_unlock_bh(&call->lock);

		ASSERTCMP(call->state, !=, RXRPC_CALL_COMPLETE);
	}

	del_timer_sync(&call->resend_timer);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->lifetimer);
	call->deadspan.expires = jiffies + rxrpc_dead_call_expiry;
	add_timer(&call->deadspan);

	_leave("");
}

/*
 * handle a dead call being ready for reaping
 */
static void rxrpc_dead_call_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	write_lock_bh(&call->state_lock);
	call->state = RXRPC_CALL_DEAD;
	write_unlock_bh(&call->state_lock);
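	/* This drops the ref that rxrpc_release_call() passed from the
	 * socket to the deadspan timer, typically queueing the destroyer.
	 */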
	rxrpc_put_call(call);
}

/*
 * mark a call as to be released, aborting it if it's still in progress
 * - called with softirqs disabled
 */
static void rxrpc_mark_call_released(struct rxrpc_call *call)
{
	bool sched;

	write_lock(&call->state_lock);
	if (call->state < RXRPC_CALL_DEAD) {
		sched = false;
		if (call->state < RXRPC_CALL_COMPLETE) {
			_debug("abort call %p", call);
			call->state = RXRPC_CALL_LOCALLY_ABORTED;
			call->abort_code = RX_CALL_DEAD;
			if (!test_and_set_bit(RXRPC_CALL_EV_ABORT, &call->events))
				sched = true;
		}
		if (!test_and_set_bit(RXRPC_CALL_EV_RELEASE, &call->events))
			sched = true;
		if (sched)
			rxrpc_queue_call(call);
	}
	write_unlock(&call->state_lock);
}

/*
 * release all the calls associated with a socket
 */
void rxrpc_release_calls_on_socket(struct rxrpc_sock *rx)
{
	struct rxrpc_call *call;
	struct rb_node *p;

	_enter("%p", rx);

	read_lock_bh(&rx->call_lock);

	/* mark all the calls as no longer wanting incoming packets */
	for (p = rb_first(&rx->calls); p; p = rb_next(p)) {
		call = rb_entry(p, struct rxrpc_call, sock_node);
		rxrpc_mark_call_released(call);
	}

	/* kill the not-yet-accepted incoming calls */
	list_for_each_entry(call, &rx->secureq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	list_for_each_entry(call, &rx->acceptq, accept_link) {
		rxrpc_mark_call_released(call);
	}

	read_unlock_bh(&rx->call_lock);
	_leave("");
}

/*
 * release a call
 */
void __rxrpc_put_call(struct rxrpc_call *call)
{
	ASSERT(call != NULL);

	_enter("%p{u=%d}", call, atomic_read(&call->usage));

	ASSERTCMP(atomic_read(&call->usage), >, 0);

	if (atomic_dec_and_test(&call->usage)) {
		_debug("call %d dead", call->debug_id);
		ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
		rxrpc_queue_work(&call->destroyer);
	}
	_leave("");
}

/*
 * clean up a call
 */
static void rxrpc_cleanup_call(struct rxrpc_call *call)
{
	_net("DESTROY CALL %d", call->debug_id);

	ASSERT(call->socket);

	memset(&call->sock_node, 0xcd, sizeof(call->sock_node));

	del_timer_sync(&call->lifetimer);
	del_timer_sync(&call->deadspan);
	del_timer_sync(&call->ack_timer);
	del_timer_sync(&call->resend_timer);

	ASSERT(test_bit(RXRPC_CALL_RELEASED, &call->flags));
	ASSERTCMP(call->events, ==, 0);
	if (work_pending(&call->processor)) {
		_debug("defer destroy");
		rxrpc_queue_work(&call->destroyer);
		return;
	}

	if (call->conn) {
		spin_lock(&call->conn->trans->peer->lock);
		list_del(&call->error_link);
		spin_unlock(&call->conn->trans->peer->lock);

		write_lock_bh(&call->conn->lock);
		rb_erase(&call->conn_node, &call->conn->calls);
		write_unlock_bh(&call->conn->lock);
		rxrpc_put_connection(call->conn);
	}

	/* Remove the call from the hash */
	rxrpc_call_hash_del(call);

	if (call->acks_window) {
		_debug("kill Tx window %d",
		       CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz));
		smp_mb();
		while (CIRC_CNT(call->acks_head, call->acks_tail,
				call->acks_winsz) > 0) {
			struct rxrpc_skb_priv *sp;
			unsigned long _skb;

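			/* Window slots hold sk_buff pointers with the bottom
			 * bit used as a flag elsewhere, so mask it off before
			 * treating the value as a pointer.
			 */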
			_skb = call->acks_window[call->acks_tail] & ~1;
			sp = rxrpc_skb((struct sk_buff *)_skb);
			_debug("+++ clear Tx %u", sp->hdr.seq);
			rxrpc_free_skb((struct sk_buff *)_skb);
			call->acks_tail =
				(call->acks_tail + 1) & (call->acks_winsz - 1);
		}

		kfree(call->acks_window);
	}

	rxrpc_free_skb(call->tx_pending);

	rxrpc_purge_queue(&call->rx_queue);
	ASSERT(skb_queue_empty(&call->rx_oos_queue));
	sock_put(&call->socket->sk);
	kmem_cache_free(rxrpc_call_jar, call);
}

/*
 * destroy a call
 */
static void rxrpc_destroy_call(struct work_struct *work)
{
	struct rxrpc_call *call =
		container_of(work, struct rxrpc_call, destroyer);

	_enter("%p{%d,%d,%p}",
	       call, atomic_read(&call->usage), call->channel, call->conn);

	ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);

	write_lock_bh(&rxrpc_call_lock);
	list_del_init(&call->link);
	write_unlock_bh(&rxrpc_call_lock);

	rxrpc_cleanup_call(call);
	_leave("");
}

/*
 * preemptively destroy all the call records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_calls(void)
{
	struct rxrpc_call *call;

	_enter("");
	write_lock_bh(&rxrpc_call_lock);

	while (!list_empty(&rxrpc_calls)) {
		call = list_entry(rxrpc_calls.next, struct rxrpc_call, link);
		_debug("Zapping call %p", call);

		list_del_init(&call->link);

		switch (atomic_read(&call->usage)) {
		case 0:
			ASSERTCMP(call->state, ==, RXRPC_CALL_DEAD);
			break;
		case 1:
			if (del_timer_sync(&call->deadspan) != 0 &&
			    call->state != RXRPC_CALL_DEAD)
				rxrpc_dead_call_expired((unsigned long) call);
			if (call->state != RXRPC_CALL_DEAD)
				break;
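			/* fall through */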
		default:
			printk(KERN_ERR "RXRPC:"
			       " Call %p still in use (%d,%d,%s,%lx,%lx)!\n",
			       call, atomic_read(&call->usage),
			       atomic_read(&call->ackr_not_idle),
			       rxrpc_call_states[call->state],
			       call->flags, call->events);
			if (!skb_queue_empty(&call->rx_queue))
				printk(KERN_ERR "RXRPC: Rx queue occupied\n");
			if (!skb_queue_empty(&call->rx_oos_queue))
				printk(KERN_ERR "RXRPC: OOS queue occupied\n");
			break;
		}

		write_unlock_bh(&rxrpc_call_lock);
		cond_resched();
		write_lock_bh(&rxrpc_call_lock);
	}

	write_unlock_bh(&rxrpc_call_lock);
	_leave("");
}

/*
 * handle call lifetime being exceeded
 */
static void rxrpc_call_life_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	_enter("{%d}", call->debug_id);
	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE) {
		set_bit(RXRPC_CALL_EV_LIFE_TIMER, &call->events);
		rxrpc_queue_call(call);
	}
	read_unlock_bh(&call->state_lock);
}

/*
 * handle resend timer expiry
 * - may not take call->state_lock as this can deadlock against del_timer_sync()
 */
static void rxrpc_resend_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	clear_bit(RXRPC_CALL_RUN_RTIMER, &call->flags);
	if (!test_and_set_bit(RXRPC_CALL_EV_RESEND_TIMER, &call->events))
		rxrpc_queue_call(call);
}

/*
 * handle ACK timer expiry
 */
static void rxrpc_ack_time_expired(unsigned long _call)
{
	struct rxrpc_call *call = (struct rxrpc_call *) _call;

	_enter("{%d}", call->debug_id);

	if (call->state >= RXRPC_CALL_COMPLETE)
		return;

	read_lock_bh(&call->state_lock);
	if (call->state < RXRPC_CALL_COMPLETE &&
	    !test_and_set_bit(RXRPC_CALL_EV_ACK, &call->events))
		rxrpc_queue_call(call);
	read_unlock_bh(&call->state_lock);
}