/* connection.c: Rx connection routines
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <rxrpc/rxrpc.h>
#include <rxrpc/transport.h>
#include <rxrpc/peer.h>
#include <rxrpc/connection.h>
#include <rxrpc/call.h>
#include <rxrpc/message.h>
#include <linux/udp.h>
#include <linux/ip.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include "internal.h"

__RXACCT_DECL(atomic_t rxrpc_connection_count);

LIST_HEAD(rxrpc_conns);
DECLARE_RWSEM(rxrpc_conns_sem);
unsigned long rxrpc_conn_timeout = 60 * 60;	/* dead connection timeout in seconds (one hour) */

static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn);

static void __rxrpc_conn_timeout(rxrpc_timer_t *timer)
{
	struct rxrpc_connection *conn =
		list_entry(timer, struct rxrpc_connection, timeout);

	_debug("Rx CONN TIMEOUT [%p{u=%d}]", conn, atomic_read(&conn->usage));

	rxrpc_conn_do_timeout(conn);
}

static const struct rxrpc_timer_ops rxrpc_conn_timer_ops = {
	.timed_out	= __rxrpc_conn_timeout,
};

/*****************************************************************************/
/*
 * create a new connection record
 */
static inline int __rxrpc_create_connection(struct rxrpc_peer *peer,
					    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *conn;

	_enter("%p", peer);

	/* allocate and initialise a connection record */
	conn = kzalloc(sizeof(struct rxrpc_connection), GFP_KERNEL);
	if (!conn) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	atomic_set(&conn->usage, 1);

	INIT_LIST_HEAD(&conn->link);
	INIT_LIST_HEAD(&conn->id_link);
	init_waitqueue_head(&conn->chanwait);
	spin_lock_init(&conn->lock);
	rxrpc_timer_init(&conn->timeout, &rxrpc_conn_timer_ops);

	do_gettimeofday(&conn->atime);
	conn->mtu_size = 1024;
	conn->peer = peer;
	conn->trans = peer->trans;

	__RXACCT(atomic_inc(&rxrpc_connection_count));
	*_conn = conn;
	_leave(" = 0 (%p)", conn);

	return 0;
} /* end __rxrpc_create_connection() */
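
/*
 * Note on lifecycle: a new connection starts with a usage count of one,
 * held by the creator.  rxrpc_put_connection() moves the record to the
 * peer's graveyard when that count reaches zero, and
 * rxrpc_conn_do_timeout() only frees it for good if it is still unused
 * once rxrpc_conn_timeout seconds have passed without resurrection.
 */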

/*****************************************************************************/
/*
 * create a new connection record for outgoing connections
 */
int rxrpc_create_connection(struct rxrpc_transport *trans,
			    __be16 port,
			    __be32 addr,
			    uint16_t service_id,
			    void *security,
			    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *candidate, *conn;
	struct rxrpc_peer *peer;
	struct list_head *_p;
	__be32 connid;
	int ret;

	_enter("%p{%hu},%u,%hu", trans, trans->port, ntohs(port), service_id);

	/* get a peer record */
	ret = rxrpc_peer_lookup(trans, addr, &peer);
	if (ret < 0) {
		_leave(" = %d", ret);
		return ret;
	}

	/* allocate and initialise a connection record */
	ret = __rxrpc_create_connection(peer, &candidate);
	if (ret < 0) {
		rxrpc_put_peer(peer);
		_leave(" = %d", ret);
		return ret;
	}

	/* fill in the specific bits */
	candidate->addr.sin_family = AF_INET;
	candidate->addr.sin_port = port;
	candidate->addr.sin_addr.s_addr = addr;

	candidate->in_epoch = rxrpc_epoch;
	candidate->out_epoch = rxrpc_epoch;
	candidate->in_clientflag = 0;
	candidate->out_clientflag = RXRPC_CLIENT_INITIATED;
	candidate->service_id = htons(service_id);

	/* invent a unique connection ID by stepping the per-peer counter in
	 * strides of RXRPC_MAXCALLS and rechecking the peer's ID list until
	 * no collision is found */
	write_lock(&peer->conn_idlock);

 try_next_id:
	connid = htonl(peer->conn_idcounter & RXRPC_CIDMASK);
	peer->conn_idcounter += RXRPC_MAXCALLS;

	list_for_each(_p, &peer->conn_idlist) {
		conn = list_entry(_p, struct rxrpc_connection, id_link);
		if (connid == conn->conn_id)
			goto try_next_id;
		if (connid > conn->conn_id)
			break;
	}

	_debug("selected candidate conn ID %x.%u",
	       ntohl(peer->addr.s_addr), ntohl(connid));

	candidate->conn_id = connid;
	list_add_tail(&candidate->id_link, _p);

	write_unlock(&peer->conn_idlock);

	/* attach to peer */
	candidate->peer = peer;

	write_lock(&peer->conn_lock);

	/* search the peer's transport graveyard list */
	spin_lock(&peer->conn_gylock);
	list_for_each(_p, &peer->conn_graveyard) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == candidate->addr.sin_port &&
		    conn->security_ix == candidate->security_ix &&
		    conn->service_id == candidate->service_id &&
		    conn->in_clientflag == 0)
			goto found_in_graveyard;
	}
	spin_unlock(&peer->conn_gylock);

	/* pick the new candidate */
	_debug("created connection: {%08x} [out]", ntohl(candidate->conn_id));
	atomic_inc(&peer->conn_count);
	conn = candidate;
	candidate = NULL;

 make_active:
	list_add_tail(&conn->link, &peer->conn_active);
	write_unlock(&peer->conn_lock);

	if (candidate) {
		write_lock(&peer->conn_idlock);
		list_del(&candidate->id_link);
		write_unlock(&peer->conn_idlock);

		__RXACCT(atomic_dec(&rxrpc_connection_count));
		kfree(candidate);
	}
	else {
		down_write(&rxrpc_conns_sem);
		list_add_tail(&conn->proc_link, &rxrpc_conns);
		up_write(&rxrpc_conns_sem);
	}

	*_conn = conn;
	_leave(" = 0 (%p)", conn);

	return 0;

	/* handle resurrecting a connection from the graveyard */
 found_in_graveyard:
	_debug("resurrecting connection: {%08x} [out]", ntohl(conn->conn_id));
	rxrpc_get_connection(conn);
	rxrpc_krxtimod_del_timer(&conn->timeout);
	list_del_init(&conn->link);
	spin_unlock(&peer->conn_gylock);
	goto make_active;
} /* end rxrpc_create_connection() */
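
/*
 * Hypothetical usage sketch (identifiers and values invented for
 * illustration; a real caller would supply its own transport, peer address
 * and service ID):
 *
 *	struct rxrpc_connection *conn;
 *	int ret;
 *
 *	ret = rxrpc_create_connection(trans, htons(7001),
 *				      htonl(0x7f000001), 52, NULL, &conn);
 *	if (ret < 0)
 *		return ret;
 *	...
 *	rxrpc_put_connection(conn);
 */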

/*****************************************************************************/
/*
 * lookup the connection for an incoming packet
 * - create a new connection record for unrecorded incoming connections
 */
int rxrpc_connection_lookup(struct rxrpc_peer *peer,
			    struct rxrpc_message *msg,
			    struct rxrpc_connection **_conn)
{
	struct rxrpc_connection *conn, *candidate = NULL;
	struct list_head *_p;
	struct sk_buff *pkt = msg->pkt;
	int ret, fresh = 0;
	__be32 x_epoch, x_connid;
	__be16 x_port, x_servid;
	__u32 x_secix;
	u8 x_clflag;

	_enter("%p{{%hu}},%u,%hu",
	       peer,
	       peer->trans->port,
	       ntohs(pkt->h.uh->source),
	       ntohs(msg->hdr.serviceId));

	x_port = pkt->h.uh->source;
	x_epoch = msg->hdr.epoch;
	x_clflag = msg->hdr.flags & RXRPC_CLIENT_INITIATED;
	x_connid = htonl(ntohl(msg->hdr.cid) & RXRPC_CIDMASK);
	x_servid = msg->hdr.serviceId;
	x_secix = msg->hdr.securityIndex;

	/* [common case] search the transport's active list first */
	read_lock(&peer->conn_lock);
	list_for_each(_p, &peer->conn_active) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_active;
	}
	read_unlock(&peer->conn_lock);

	/* [uncommon case] not active
	 * - create a candidate for a new record if an inbound connection
	 * - only examine the graveyard for an outbound connection
	 */
	if (x_clflag) {
		ret = __rxrpc_create_connection(peer, &candidate);
		if (ret < 0) {
			_leave(" = %d", ret);
			return ret;
		}

		/* fill in the specifics */
		candidate->addr.sin_family = AF_INET;
		candidate->addr.sin_port = x_port;
		candidate->addr.sin_addr.s_addr = pkt->nh.iph->saddr;
		candidate->in_epoch = x_epoch;
		candidate->out_epoch = x_epoch;
		candidate->in_clientflag = RXRPC_CLIENT_INITIATED;
		candidate->out_clientflag = 0;
		candidate->conn_id = x_connid;
		candidate->service_id = x_servid;
		candidate->security_ix = x_secix;
	}

	/* search the active list again, just in case it appeared whilst we
	 * were busy */
	write_lock(&peer->conn_lock);
	list_for_each(_p, &peer->conn_active) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_active_second_chance;
	}

	/* search the transport's graveyard list */
	spin_lock(&peer->conn_gylock);
	list_for_each(_p, &peer->conn_graveyard) {
		conn = list_entry(_p, struct rxrpc_connection, link);
		if (conn->addr.sin_port == x_port &&
		    conn->in_epoch == x_epoch &&
		    conn->conn_id == x_connid &&
		    conn->security_ix == x_secix &&
		    conn->service_id == x_servid &&
		    conn->in_clientflag == x_clflag)
			goto found_in_graveyard;
	}
	spin_unlock(&peer->conn_gylock);

	/* outbound connections aren't created here */
	if (!x_clflag) {
		write_unlock(&peer->conn_lock);
		_leave(" = -ENOENT");
		return -ENOENT;
	}

	/* we can now add the new candidate to the list */
	_debug("created connection: {%08x} [in]", ntohl(candidate->conn_id));
	rxrpc_get_peer(peer);
	conn = candidate;
	candidate = NULL;
	atomic_inc(&peer->conn_count);
	fresh = 1;

 make_active:
	list_add_tail(&conn->link, &peer->conn_active);

 success_uwfree:
	write_unlock(&peer->conn_lock);

	if (candidate) {
		write_lock(&peer->conn_idlock);
		list_del(&candidate->id_link);
		write_unlock(&peer->conn_idlock);

		__RXACCT(atomic_dec(&rxrpc_connection_count));
		kfree(candidate);
	}

	if (fresh) {
		down_write(&rxrpc_conns_sem);
		list_add_tail(&conn->proc_link, &rxrpc_conns);
		up_write(&rxrpc_conns_sem);
	}

 success:
	*_conn = conn;
	_leave(" = 0 (%p)", conn);
	return 0;

	/* handle the connection being found in the active list straight off */
 found_active:
	rxrpc_get_connection(conn);
	read_unlock(&peer->conn_lock);
	goto success;

	/* handle resurrecting a connection from the graveyard */
 found_in_graveyard:
	_debug("resurrecting connection: {%08x} [in]", ntohl(conn->conn_id));
	rxrpc_get_peer(peer);
	rxrpc_get_connection(conn);
	rxrpc_krxtimod_del_timer(&conn->timeout);
	list_del_init(&conn->link);
	spin_unlock(&peer->conn_gylock);
	goto make_active;

	/* handle finding the connection on the second time through the active
	 * list */
 found_active_second_chance:
	rxrpc_get_connection(conn);
	goto success_uwfree;

} /* end rxrpc_connection_lookup() */
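
/*
 * To summarise the paths through rxrpc_connection_lookup(): a packet can hit
 * a live record on either pass over the active list, revive a buried record
 * from the graveyard, or, if it is client-initiated, cause a fresh record to
 * be created; a packet for an unknown outbound connection yields -ENOENT.
 */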

/*****************************************************************************/
/*
 * finish using a connection record
 * - it will be transferred to the peer's connection graveyard when its
 *   refcount reaches 0
 */
void rxrpc_put_connection(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	if (!conn)
		return;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;
	spin_lock(&peer->conn_gylock);

	/* sanity check */
	if (atomic_read(&conn->usage) <= 0)
		BUG();

	if (likely(!atomic_dec_and_test(&conn->usage))) {
		spin_unlock(&peer->conn_gylock);
		_leave("");
		return;
	}

	/* move to graveyard queue */
	_debug("burying connection: {%08x}", ntohl(conn->conn_id));
	list_move_tail(&conn->link, &peer->conn_graveyard);

	rxrpc_krxtimod_add_timer(&conn->timeout, rxrpc_conn_timeout * HZ);

	spin_unlock(&peer->conn_gylock);

	rxrpc_put_peer(conn->peer);

	_leave(" [killed]");
} /* end rxrpc_put_connection() */
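
/*
 * The final decrement above is performed with conn_gylock held so that
 * burial (the list move plus the arming of the timeout timer) appears
 * atomic to the graveyard scans in rxrpc_connection_lookup() and to
 * rxrpc_conn_do_timeout().
 */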

/*****************************************************************************/
/*
 * free a connection record
 */
static void rxrpc_conn_do_timeout(struct rxrpc_connection *conn)
{
	struct rxrpc_peer *peer;

	_enter("%p{u=%d p=%hu}",
	       conn, atomic_read(&conn->usage), ntohs(conn->addr.sin_port));

	peer = conn->peer;

	if (atomic_read(&conn->usage) < 0)
		BUG();

	/* remove from graveyard if still dead */
	spin_lock(&peer->conn_gylock);
	if (atomic_read(&conn->usage) == 0) {
		list_del_init(&conn->link);
	}
	else {
		conn = NULL;
	}
	spin_unlock(&peer->conn_gylock);

	if (!conn) {
		_leave("");
		return; /* resurrected */
	}

	_debug("--- Destroying Connection %p{%08x} ---",
	       conn, ntohl(conn->conn_id));

	down_write(&rxrpc_conns_sem);
	list_del(&conn->proc_link);
	up_write(&rxrpc_conns_sem);

	write_lock(&peer->conn_idlock);
	list_del(&conn->id_link);
	write_unlock(&peer->conn_idlock);

	__RXACCT(atomic_dec(&rxrpc_connection_count));
	kfree(conn);

	/* if the graveyard is now empty, wake up anyone waiting for that */
	if (atomic_dec_and_test(&peer->conn_count))
		wake_up(&peer->conn_gy_waitq);

	_leave(" [destroyed]");
} /* end rxrpc_conn_do_timeout() */

/*****************************************************************************/
/*
 * clear all connection records from a peer endpoint
 */
void rxrpc_conn_clearall(struct rxrpc_peer *peer)
{
	DECLARE_WAITQUEUE(myself, current);

	struct rxrpc_connection *conn;
	int err;

	_enter("%p", peer);

	/* there shouldn't be any active conns remaining */
	if (!list_empty(&peer->conn_active))
		BUG();

	/* manually timeout all conns in the graveyard */
	spin_lock(&peer->conn_gylock);
	while (!list_empty(&peer->conn_graveyard)) {
		conn = list_entry(peer->conn_graveyard.next,
				  struct rxrpc_connection, link);
		err = rxrpc_krxtimod_del_timer(&conn->timeout);
		spin_unlock(&peer->conn_gylock);

		if (err == 0)
			rxrpc_conn_do_timeout(conn);

		spin_lock(&peer->conn_gylock);
	}
	spin_unlock(&peer->conn_gylock);

	/* wait for the conn graveyard to be completely cleared */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&peer->conn_gy_waitq, &myself);

	while (atomic_read(&peer->conn_count) != 0) {
		schedule();
		set_current_state(TASK_UNINTERRUPTIBLE);
	}

	remove_wait_queue(&peer->conn_gy_waitq, &myself);
	set_current_state(TASK_RUNNING);

	_leave("");
} /* end rxrpc_conn_clearall() */
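
/*
 * In other words: rxrpc_conn_clearall() cancels each graveyard timer, runs
 * the timeout handler directly where the cancel succeeded (a failed cancel
 * means the timer is already firing and will reap the record itself), and
 * then sleeps on conn_gy_waitq until the peer's connection count hits zero.
 */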

/*****************************************************************************/
/*
 * allocate and prepare a message for sending out through the transport
 * endpoint
 */
int rxrpc_conn_newmsg(struct rxrpc_connection *conn,
		      struct rxrpc_call *call,
		      uint8_t type,
		      int dcount,
		      struct kvec diov[],
		      gfp_t alloc_flags,
		      struct rxrpc_message **_msg)
{
	struct rxrpc_message *msg;
	int loop;

	_enter("%p{%d},%p,%u", conn, ntohs(conn->addr.sin_port), call, type);

	if (dcount > 3) {
		_leave(" = -EINVAL");
		return -EINVAL;
	}

	msg = kzalloc(sizeof(struct rxrpc_message), alloc_flags);
	if (!msg) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	atomic_set(&msg->usage, 1);

	INIT_LIST_HEAD(&msg->link);

	msg->state = RXRPC_MSG_PREPARED;

	msg->hdr.epoch = conn->out_epoch;
	msg->hdr.cid = conn->conn_id | (call ? call->chan_ix : 0);
	msg->hdr.callNumber = call ? call->call_id : 0;
	msg->hdr.type = type;
	msg->hdr.flags = conn->out_clientflag;
	msg->hdr.securityIndex = conn->security_ix;
	msg->hdr.serviceId = conn->service_id;

	/* generate sequence numbers for data packets */
	if (call) {
		switch (type) {
		case RXRPC_PACKET_TYPE_DATA:
			msg->seq = ++call->snd_seq_count;
			msg->hdr.seq = htonl(msg->seq);
			break;
		case RXRPC_PACKET_TYPE_ACK:
			/* ACK sequence numbers are complicated. The following
			 * may be wrong:
			 * - jumbo packet ACKs should have a seq number
			 * - normal ACKs should not
			 */
		default:
			break;
		}
	}

	/* the wire header occupies the first iovec; the caller's data
	 * fragments follow it */
	msg->dcount = dcount + 1;
	msg->dsize = sizeof(msg->hdr);
	msg->data[0].iov_len = sizeof(msg->hdr);
	msg->data[0].iov_base = &msg->hdr;

	for (loop = 0; loop < dcount; loop++) {
		msg->dsize += diov[loop].iov_len;
		msg->data[loop + 1].iov_len = diov[loop].iov_len;
		msg->data[loop + 1].iov_base = diov[loop].iov_base;
	}

	__RXACCT(atomic_inc(&rxrpc_message_count));
	*_msg = msg;
	_leave(" = 0 (%p) #%d", msg, atomic_read(&rxrpc_message_count));
	return 0;
} /* end rxrpc_conn_newmsg() */
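
/*
 * Hypothetical usage sketch (buffer names invented for illustration; assumes
 * the rxrpc_put_message() helper declared alongside struct rxrpc_message):
 * build and transmit a single-fragment DATA packet on a call:
 *
 *	struct kvec iov = { .iov_base = buf, .iov_len = len };
 *	struct rxrpc_message *msg;
 *	int ret;
 *
 *	ret = rxrpc_conn_newmsg(conn, call, RXRPC_PACKET_TYPE_DATA,
 *				1, &iov, GFP_KERNEL, &msg);
 *	if (ret == 0) {
 *		ret = rxrpc_conn_sendmsg(conn, msg);
 *		rxrpc_put_message(msg);
 *	}
 */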

/*****************************************************************************/
/*
 * free a message
 */
void __rxrpc_put_message(struct rxrpc_message *msg)
{
	int loop;

	_enter("%p #%d", msg, atomic_read(&rxrpc_message_count));

	if (msg->pkt)
		kfree_skb(msg->pkt);
	rxrpc_put_connection(msg->conn);

	for (loop = 0; loop < 8; loop++)
		if (test_bit(loop, &msg->dfree))
			kfree(msg->data[loop].iov_base);

	__RXACCT(atomic_dec(&rxrpc_message_count));
	kfree(msg);

	_leave("");
} /* end __rxrpc_put_message() */

/*****************************************************************************/
/*
 * send a message out through the transport endpoint
 */
int rxrpc_conn_sendmsg(struct rxrpc_connection *conn,
		       struct rxrpc_message *msg)
{
	struct msghdr msghdr;
	int ret;

	_enter("%p{%d}", conn, ntohs(conn->addr.sin_port));

	/* fill in some fields in the header */
	spin_lock(&conn->lock);
	msg->hdr.serial = htonl(++conn->serial_counter);
	msg->rttdone = 0;
	spin_unlock(&conn->lock);

	/* set up the message to be transmitted */
	msghdr.msg_name = &conn->addr;
	msghdr.msg_namelen = sizeof(conn->addr);
	msghdr.msg_control = NULL;
	msghdr.msg_controllen = 0;
	msghdr.msg_flags = MSG_CONFIRM | MSG_DONTWAIT;

	_net("Sending message type %d of %Zd bytes to %08x:%d",
	     msg->hdr.type,
	     msg->dsize,
	     ntohl(conn->addr.sin_addr.s_addr),
	     ntohs(conn->addr.sin_port));

	/* send the message */
	ret = kernel_sendmsg(conn->trans->socket, &msghdr,
			     msg->data, msg->dcount, msg->dsize);
	if (ret < 0) {
		msg->state = RXRPC_MSG_ERROR;
	} else {
		msg->state = RXRPC_MSG_SENT;
		ret = 0;

		spin_lock(&conn->lock);
		do_gettimeofday(&conn->atime);
		msg->stamp = conn->atime;
		spin_unlock(&conn->lock);
	}

	_leave(" = %d", ret);

	return ret;
} /* end rxrpc_conn_sendmsg() */
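
/*
 * Each transmission claims the next connection-wide serial number under
 * conn->lock, and a successful send records the wall-clock time in both
 * conn->atime and msg->stamp; msg->rttdone is cleared at the same point so
 * that a later matching reply can be used for an RTT estimate.
 */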

/*****************************************************************************/
/*
 * deal with a subsequent call packet
 */
int rxrpc_conn_receive_call_packet(struct rxrpc_connection *conn,
				   struct rxrpc_call *call,
				   struct rxrpc_message *msg)
{
	struct rxrpc_message *pmsg;
	struct dst_entry *dst;
	struct list_head *_p;
	unsigned cix, seq;
	int ret = 0;

	_enter("%p,%p,%p", conn, call, msg);

	if (!call) {
		cix = ntohl(msg->hdr.cid) & RXRPC_CHANNELMASK;

		spin_lock(&conn->lock);
		call = conn->channels[cix];

		if (!call || call->call_id != msg->hdr.callNumber) {
			spin_unlock(&conn->lock);
			rxrpc_trans_immediate_abort(conn->trans, msg, -ENOENT);
			goto out;
		}
		else {
			rxrpc_get_call(call);
			spin_unlock(&conn->lock);
		}
	}
	else {
		rxrpc_get_call(call);
	}

	_proto("Received packet %%%u [%u] on call %hu:%u:%u",
	       ntohl(msg->hdr.serial),
	       ntohl(msg->hdr.seq),
	       ntohs(msg->hdr.serviceId),
	       ntohl(conn->conn_id),
	       ntohl(call->call_id));

	call->pkt_rcv_count++;

	dst = msg->pkt->dst;
	if (dst && dst->dev)
		conn->peer->if_mtu =
			dst->dev->mtu - dst->dev->hard_header_len;

	/* queue on the call in seq order */
	rxrpc_get_message(msg);
	seq = msg->seq;

	spin_lock(&call->lock);
	list_for_each(_p, &call->rcv_receiveq) {
		pmsg = list_entry(_p, struct rxrpc_message, link);
		if (pmsg->seq > seq)
			break;
	}
	list_add_tail(&msg->link, _p);

	/* reset the activity timeout */
	call->flags |= RXRPC_CALL_RCV_PKT;
	mod_timer(&call->rcv_timeout, jiffies + rxrpc_call_rcv_timeout * HZ);

	spin_unlock(&call->lock);

	rxrpc_krxiod_queue_call(call);

	rxrpc_put_call(call);
 out:
	_leave(" = %d", ret);
	return ret;
} /* end rxrpc_conn_receive_call_packet() */
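
/*
 * Two side effects of reception worth noting: the peer's interface MTU
 * estimate is refreshed from the route attached to the incoming skb, and
 * the message is inserted into the call's receive queue in ascending
 * sequence order before the krxiod daemon is poked to process the call.
 */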

/*****************************************************************************/
/*
 * handle an ICMP error being applied to a connection
 */
void rxrpc_conn_handle_error(struct rxrpc_connection *conn,
			     int local, int errno)
{
	struct rxrpc_call *calls[4];
	int loop;

	_enter("%p{%d},%d", conn, ntohs(conn->addr.sin_port), errno);

	/* get a ref to all my calls in one go */
	memset(calls, 0, sizeof(calls));
	spin_lock(&conn->lock);

	for (loop = 3; loop >= 0; loop--) {
		if (conn->channels[loop]) {
			calls[loop] = conn->channels[loop];
			rxrpc_get_call(calls[loop]);
		}
	}

	spin_unlock(&conn->lock);

	/* now kick them all */
	for (loop = 3; loop >= 0; loop--) {
		if (calls[loop]) {
			rxrpc_call_handle_error(calls[loop], local, errno);
			rxrpc_put_call(calls[loop]);
		}
	}

	_leave("");
} /* end rxrpc_conn_handle_error() */