1 /*
2 * net/tipc/link.c: TIPC link code
3 *
4 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 #include "monitor.h"
46
47 #include <linux/pkt_sched.h>
48
49 struct tipc_stats {
50 u32 sent_info; /* used in counting # sent packets */
51 u32 recv_info; /* used in counting # recv'd packets */
52 u32 sent_states;
53 u32 recv_states;
54 u32 sent_probes;
55 u32 recv_probes;
56 u32 sent_nacks;
57 u32 recv_nacks;
58 u32 sent_acks;
59 u32 sent_bundled;
60 u32 sent_bundles;
61 u32 recv_bundled;
62 u32 recv_bundles;
63 u32 retransmitted;
64 u32 sent_fragmented;
65 u32 sent_fragments;
66 u32 recv_fragmented;
67 u32 recv_fragments;
68 u32 link_congs; /* # port sends blocked by congestion */
69 u32 deferred_recv;
70 u32 duplicates;
71 u32 max_queue_sz; /* send queue size high water mark */
72 u32 accu_queue_sz; /* used for send queue size profiling */
73 u32 queue_sz_counts; /* used for send queue size profiling */
74 u32 msg_length_counts; /* used for message length profiling */
75 u32 msg_lengths_total; /* used for message length profiling */
76 u32 msg_length_profile[7]; /* used for msg. length profiling */
77 };
78
79 /**
80 * struct tipc_link - TIPC link data structure
81 * @addr: network address of link's peer node
82 * @name: link name character string
83 * @media_addr: media address to use when sending messages over link
84 * @timer: link timer
85 * @net: pointer to namespace struct
86 * @refcnt: reference counter for permanent references (owner node & timer)
87 * @peer_session: link session # being used by peer end of link
88 * @peer_bearer_id: bearer id used by link's peer endpoint
89 * @bearer_id: local bearer id used by link
90 * @tolerance: minimum link continuity loss needed to reset link [in ms]
91 * @abort_limit: # of unacknowledged continuity probes needed to reset link
92 * @state: current state of link FSM
93 * @peer_caps: bitmap describing capabilities of peer node
94 * @silent_intv_cnt: # of timer intervals without any reception from peer
95 * @proto_msg: template for control messages generated by link
96 * @pmsg: convenience pointer to "proto_msg" field
97 * @priority: current link priority
98 * @net_plane: current link network plane ('A' through 'H')
99 * @mon_state: cookie with information needed by link monitor
100 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
101 * @exp_msg_count: # of tunnelled messages expected during link changeover
102 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
103 * @mtu: current maximum packet size for this link
104 * @advertised_mtu: advertised own mtu when link is being established
105  * @transmq: queue for sent, non-acked messages
106  * @backlogq: queue for messages waiting to be sent
107  * @snd_nxt: next sequence number to use for outbound messages
108  * @last_retransm: sequence number of most recently retransmitted message
109  * @stale_count: # of identical retransmit requests made by peer
110  * @ackers: # of peers that need to ack each packet before it can be released
111  * @acked: # last packet acked by a certain peer. Used for broadcast.
112  * @rcv_nxt: next sequence number to expect for inbound messages
113  * @deferdq: deferred queue of out-of-sequence messages received from peer
114  * @rcv_unacked: # of inbound messages rx'd without ack'ing back to peer
115 * @inputq: buffer queue for messages to be delivered upwards
116 * @namedq: buffer queue for name table messages to be delivered upwards
117 * @next_out: ptr to first unsent outbound message in queue
118 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
119 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
120 * @reasm_buf: head of partially reassembled inbound message fragments
121 * @bc_rcvr: marks that this is a broadcast receiver link
122 * @stats: collects statistics regarding link activity
123 */
124 struct tipc_link {
125 u32 addr;
126 char name[TIPC_MAX_LINK_NAME];
127 struct net *net;
128
129 /* Management and link supervision data */
130 u32 peer_session;
131 u32 session;
132 u32 peer_bearer_id;
133 u32 bearer_id;
134 u32 tolerance;
135 u32 abort_limit;
136 u32 state;
137 u16 peer_caps;
138 bool active;
139 u32 silent_intv_cnt;
140 char if_name[TIPC_MAX_IF_NAME];
141 u32 priority;
142 char net_plane;
143 struct tipc_mon_state mon_state;
144 u16 rst_cnt;
145
146 /* Failover/synch */
147 u16 drop_point;
148 struct sk_buff *failover_reasm_skb;
149
150 /* Max packet negotiation */
151 u16 mtu;
152 u16 advertised_mtu;
153
154 /* Sending */
155 struct sk_buff_head transmq;
156 struct sk_buff_head backlogq;
157 struct {
158 u16 len;
159 u16 limit;
160 } backlog[5];
161 u16 snd_nxt;
162 u16 last_retransm;
163 u16 window;
164 u32 stale_count;
165
166 /* Reception */
167 u16 rcv_nxt;
168 u32 rcv_unacked;
169 struct sk_buff_head deferdq;
170 struct sk_buff_head *inputq;
171 struct sk_buff_head *namedq;
172
173 /* Congestion handling */
174 struct sk_buff_head wakeupq;
175
176 /* Fragmentation/reassembly */
177 struct sk_buff *reasm_buf;
178
179 /* Broadcast */
180 u16 ackers;
181 u16 acked;
182 struct tipc_link *bc_rcvlink;
183 struct tipc_link *bc_sndlink;
184 unsigned long prev_retr;
185 u16 prev_from;
186 u16 prev_to;
187 u8 nack_state;
188 bool bc_peer_is_up;
189
190 /* Statistics */
191 struct tipc_stats stats;
192 };
193
194 /*
195 * Error message prefixes
196 */
197 static const char *link_co_err = "Link tunneling error, ";
198 static const char *link_rst_msg = "Resetting link ";
199
200 /* Send states for broadcast NACKs
201 */
202 enum {
203 BC_NACK_SND_CONDITIONAL,
204 BC_NACK_SND_UNCONDITIONAL,
205 BC_NACK_SND_SUPPRESS,
206 };
207
208 #define TIPC_BC_RETR_LIMIT 10 /* [ms] */
209
210 /*
211 * Interval between NACKs when packets arrive out of order
212 */
213 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
214
215 /* Wildcard value for link session numbers. When it is known that
216 * peer endpoint is down, any session number must be accepted.
217 */
218 #define ANY_SESSION 0x10000
219
220 /* Link FSM states:
221 */
222 enum {
223 LINK_ESTABLISHED = 0xe,
224 LINK_ESTABLISHING = 0xe << 4,
225 LINK_RESET = 0x1 << 8,
226 LINK_RESETTING = 0x2 << 12,
227 LINK_PEER_RESET = 0xd << 16,
228 LINK_FAILINGOVER = 0xf << 20,
229 LINK_SYNCHING = 0xc << 24
230 };
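/* Each FSM state above occupies its own hex digit of the 32-bit state word,
 * so no two states share bits. Membership in a set of states can therefore
 * be tested with a single bitwise AND, as done by link_is_up(),
 * tipc_link_is_reset() and tipc_link_is_blocked() below.
 */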
231
232 /* Link FSM state checking routines
233 */
234 static int link_is_up(struct tipc_link *l)
235 {
236 return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
237 }
238
239 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
240 struct sk_buff_head *xmitq);
241 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
242 u16 rcvgap, int tolerance, int priority,
243 struct sk_buff_head *xmitq);
244 static void link_print(struct tipc_link *l, const char *str);
245 static int tipc_link_build_nack_msg(struct tipc_link *l,
246 struct sk_buff_head *xmitq);
247 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
248 struct sk_buff_head *xmitq);
249 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
250
251 /*
252 * Simple non-static link routines (i.e. referenced outside this file)
253 */
254 bool tipc_link_is_up(struct tipc_link *l)
255 {
256 return link_is_up(l);
257 }
258
259 bool tipc_link_peer_is_down(struct tipc_link *l)
260 {
261 return l->state == LINK_PEER_RESET;
262 }
263
264 bool tipc_link_is_reset(struct tipc_link *l)
265 {
266 return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
267 }
268
269 bool tipc_link_is_establishing(struct tipc_link *l)
270 {
271 return l->state == LINK_ESTABLISHING;
272 }
273
274 bool tipc_link_is_synching(struct tipc_link *l)
275 {
276 return l->state == LINK_SYNCHING;
277 }
278
279 bool tipc_link_is_failingover(struct tipc_link *l)
280 {
281 return l->state == LINK_FAILINGOVER;
282 }
283
284 bool tipc_link_is_blocked(struct tipc_link *l)
285 {
286 return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
287 }
288
289 static bool link_is_bc_sndlink(struct tipc_link *l)
290 {
291 return !l->bc_sndlink;
292 }
293
294 static bool link_is_bc_rcvlink(struct tipc_link *l)
295 {
296 return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
297 }
298
299 int tipc_link_is_active(struct tipc_link *l)
300 {
301 return l->active;
302 }
303
304 void tipc_link_set_active(struct tipc_link *l, bool active)
305 {
306 l->active = active;
307 }
308
309 u32 tipc_link_id(struct tipc_link *l)
310 {
311 return l->peer_bearer_id << 16 | l->bearer_id;
312 }
313
314 int tipc_link_window(struct tipc_link *l)
315 {
316 return l->window;
317 }
318
319 int tipc_link_prio(struct tipc_link *l)
320 {
321 return l->priority;
322 }
323
324 unsigned long tipc_link_tolerance(struct tipc_link *l)
325 {
326 return l->tolerance;
327 }
328
329 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
330 {
331 return l->inputq;
332 }
333
334 char tipc_link_plane(struct tipc_link *l)
335 {
336 return l->net_plane;
337 }
338
339 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
340 struct tipc_link *uc_l,
341 struct sk_buff_head *xmitq)
342 {
343 struct tipc_link *rcv_l = uc_l->bc_rcvlink;
344
345 snd_l->ackers++;
346 rcv_l->acked = snd_l->snd_nxt - 1;
347 snd_l->state = LINK_ESTABLISHED;
348 tipc_link_build_bc_init_msg(uc_l, xmitq);
349 }
350
351 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
352 struct tipc_link *rcv_l,
353 struct sk_buff_head *xmitq)
354 {
355 u16 ack = snd_l->snd_nxt - 1;
356
357 snd_l->ackers--;
358 rcv_l->bc_peer_is_up = true;
359 rcv_l->state = LINK_ESTABLISHED;
360 tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
361 tipc_link_reset(rcv_l);
362 rcv_l->state = LINK_RESET;
363 if (!snd_l->ackers) {
364 tipc_link_reset(snd_l);
365 snd_l->state = LINK_RESET;
366 __skb_queue_purge(xmitq);
367 }
368 }
369
370 int tipc_link_bc_peers(struct tipc_link *l)
371 {
372 return l->ackers;
373 }
374
375 u16 link_bc_rcv_gap(struct tipc_link *l)
376 {
377 struct sk_buff *skb = skb_peek(&l->deferdq);
378 u16 gap = 0;
379
380 if (more(l->snd_nxt, l->rcv_nxt))
381 gap = l->snd_nxt - l->rcv_nxt;
382 if (skb)
383 gap = buf_seqno(skb) - l->rcv_nxt;
384 return gap;
385 }
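/* Link sequence numbers are u16 and wrap at 65536. The helpers more() and
 * less() used throughout this file (from msg.h) compare them modulo 2^16,
 * treating a value as newer when the forward distance from the other value
 * is small (below half the number space). A minimal sketch of that idea,
 * using a hypothetical helper name:
 *
 *	static inline bool seqno_after(u16 a, u16 b)
 *	{
 *		u16 diff = a - b;	// wraps naturally in 16 bits
 *
 *		return diff != 0 && diff < 32768;
 *	}
 *
 * This is why differences such as buf_seqno(skb) - l->rcv_nxt above stay
 * well defined across wrap-around.
 */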
386
387 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
388 {
389 l->mtu = mtu;
390 }
391
392 int tipc_link_mtu(struct tipc_link *l)
393 {
394 return l->mtu;
395 }
396
397 u16 tipc_link_rcv_nxt(struct tipc_link *l)
398 {
399 return l->rcv_nxt;
400 }
401
402 u16 tipc_link_acked(struct tipc_link *l)
403 {
404 return l->acked;
405 }
406
407 char *tipc_link_name(struct tipc_link *l)
408 {
409 return l->name;
410 }
411
412 /**
413 * tipc_link_create - create a new link
414  * @net: pointer to the applicable net namespace
415 * @if_name: associated interface name
416 * @bearer_id: id (index) of associated bearer
417 * @tolerance: link tolerance to be used by link
418  * @net_plane: network plane (A, B, C, ...) this link belongs to
419 * @mtu: mtu to be advertised by link
420 * @priority: priority to be used by link
421 * @window: send window to be used by link
422 * @session: session to be used by link
423 * @ownnode: identity of own node
424 * @peer: node id of peer node
425 * @peer_caps: bitmap describing peer node capabilities
426 * @bc_sndlink: the namespace global link used for broadcast sending
427 * @bc_rcvlink: the peer specific link used for broadcast reception
428 * @inputq: queue to put messages ready for delivery
429 * @namedq: queue to put binding table update messages ready for delivery
430 * @link: return value, pointer to put the created link
431 *
432 * Returns true if link was created, otherwise false
433 */
434 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
435 int tolerance, char net_plane, u32 mtu, int priority,
436 int window, u32 session, u32 ownnode, u32 peer,
437 u16 peer_caps,
438 struct tipc_link *bc_sndlink,
439 struct tipc_link *bc_rcvlink,
440 struct sk_buff_head *inputq,
441 struct sk_buff_head *namedq,
442 struct tipc_link **link)
443 {
444 struct tipc_link *l;
445
446 l = kzalloc(sizeof(*l), GFP_ATOMIC);
447 if (!l)
448 return false;
449 *link = l;
450 l->session = session;
451
452 /* Note: peer i/f name is completed by reset/activate message */
453 sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
454 tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
455 if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
456 strcpy(l->if_name, if_name);
457 l->addr = peer;
458 l->peer_caps = peer_caps;
459 l->net = net;
460 l->peer_session = ANY_SESSION;
461 l->bearer_id = bearer_id;
462 l->tolerance = tolerance;
463 l->net_plane = net_plane;
464 l->advertised_mtu = mtu;
465 l->mtu = mtu;
466 l->priority = priority;
467 tipc_link_set_queue_limits(l, window);
468 l->ackers = 1;
469 l->bc_sndlink = bc_sndlink;
470 l->bc_rcvlink = bc_rcvlink;
471 l->inputq = inputq;
472 l->namedq = namedq;
473 l->state = LINK_RESETTING;
474 __skb_queue_head_init(&l->transmq);
475 __skb_queue_head_init(&l->backlogq);
476 __skb_queue_head_init(&l->deferdq);
477 skb_queue_head_init(&l->wakeupq);
478 skb_queue_head_init(l->inputq);
479 return true;
480 }
481
482 /**
483 * tipc_link_bc_create - create new link to be used for broadcast
484  * @net: pointer to the applicable net namespace
485 * @mtu: mtu to be used
486 * @window: send window to be used
487 * @inputq: queue to put messages ready for delivery
488 * @namedq: queue to put binding table update messages ready for delivery
489 * @link: return value, pointer to put the created link
490 *
491 * Returns true if link was created, otherwise false
492 */
493 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
494 int mtu, int window, u16 peer_caps,
495 struct sk_buff_head *inputq,
496 struct sk_buff_head *namedq,
497 struct tipc_link *bc_sndlink,
498 struct tipc_link **link)
499 {
500 struct tipc_link *l;
501
502 if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
503 0, ownnode, peer, peer_caps, bc_sndlink,
504 NULL, inputq, namedq, link))
505 return false;
506
507 l = *link;
508 strcpy(l->name, tipc_bclink_name);
509 tipc_link_reset(l);
510 l->state = LINK_RESET;
511 l->ackers = 0;
512 l->bc_rcvlink = l;
513
514 /* Broadcast send link is always up */
515 if (link_is_bc_sndlink(l))
516 l->state = LINK_ESTABLISHED;
517
518 return true;
519 }
520
521 /**
522 * tipc_link_fsm_evt - link finite state machine
523 * @l: pointer to link
524 * @evt: state machine event to be processed
525 */
526 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
527 {
528 int rc = 0;
529
530 switch (l->state) {
531 case LINK_RESETTING:
532 switch (evt) {
533 case LINK_PEER_RESET_EVT:
534 l->state = LINK_PEER_RESET;
535 break;
536 case LINK_RESET_EVT:
537 l->state = LINK_RESET;
538 break;
539 case LINK_FAILURE_EVT:
540 case LINK_FAILOVER_BEGIN_EVT:
541 case LINK_ESTABLISH_EVT:
542 case LINK_FAILOVER_END_EVT:
543 case LINK_SYNCH_BEGIN_EVT:
544 case LINK_SYNCH_END_EVT:
545 default:
546 goto illegal_evt;
547 }
548 break;
549 case LINK_RESET:
550 switch (evt) {
551 case LINK_PEER_RESET_EVT:
552 l->state = LINK_ESTABLISHING;
553 break;
554 case LINK_FAILOVER_BEGIN_EVT:
555 l->state = LINK_FAILINGOVER;
556 case LINK_FAILURE_EVT:
557 case LINK_RESET_EVT:
558 case LINK_ESTABLISH_EVT:
559 case LINK_FAILOVER_END_EVT:
560 break;
561 case LINK_SYNCH_BEGIN_EVT:
562 case LINK_SYNCH_END_EVT:
563 default:
564 goto illegal_evt;
565 }
566 break;
567 case LINK_PEER_RESET:
568 switch (evt) {
569 case LINK_RESET_EVT:
570 l->state = LINK_ESTABLISHING;
571 break;
572 case LINK_PEER_RESET_EVT:
573 case LINK_ESTABLISH_EVT:
574 case LINK_FAILURE_EVT:
575 break;
576 case LINK_SYNCH_BEGIN_EVT:
577 case LINK_SYNCH_END_EVT:
578 case LINK_FAILOVER_BEGIN_EVT:
579 case LINK_FAILOVER_END_EVT:
580 default:
581 goto illegal_evt;
582 }
583 break;
584 case LINK_FAILINGOVER:
585 switch (evt) {
586 case LINK_FAILOVER_END_EVT:
587 l->state = LINK_RESET;
588 break;
589 case LINK_PEER_RESET_EVT:
590 case LINK_RESET_EVT:
591 case LINK_ESTABLISH_EVT:
592 case LINK_FAILURE_EVT:
593 break;
594 case LINK_FAILOVER_BEGIN_EVT:
595 case LINK_SYNCH_BEGIN_EVT:
596 case LINK_SYNCH_END_EVT:
597 default:
598 goto illegal_evt;
599 }
600 break;
601 case LINK_ESTABLISHING:
602 switch (evt) {
603 case LINK_ESTABLISH_EVT:
604 l->state = LINK_ESTABLISHED;
605 break;
606 case LINK_FAILOVER_BEGIN_EVT:
607 l->state = LINK_FAILINGOVER;
608 break;
609 case LINK_RESET_EVT:
610 l->state = LINK_RESET;
611 break;
612 case LINK_FAILURE_EVT:
613 case LINK_PEER_RESET_EVT:
614 case LINK_SYNCH_BEGIN_EVT:
615 case LINK_FAILOVER_END_EVT:
616 break;
617 case LINK_SYNCH_END_EVT:
618 default:
619 goto illegal_evt;
620 }
621 break;
622 case LINK_ESTABLISHED:
623 switch (evt) {
624 case LINK_PEER_RESET_EVT:
625 l->state = LINK_PEER_RESET;
626 rc |= TIPC_LINK_DOWN_EVT;
627 break;
628 case LINK_FAILURE_EVT:
629 l->state = LINK_RESETTING;
630 rc |= TIPC_LINK_DOWN_EVT;
631 break;
632 case LINK_RESET_EVT:
633 l->state = LINK_RESET;
634 break;
635 case LINK_ESTABLISH_EVT:
636 case LINK_SYNCH_END_EVT:
637 break;
638 case LINK_SYNCH_BEGIN_EVT:
639 l->state = LINK_SYNCHING;
640 break;
641 case LINK_FAILOVER_BEGIN_EVT:
642 case LINK_FAILOVER_END_EVT:
643 default:
644 goto illegal_evt;
645 }
646 break;
647 case LINK_SYNCHING:
648 switch (evt) {
649 case LINK_PEER_RESET_EVT:
650 l->state = LINK_PEER_RESET;
651 rc |= TIPC_LINK_DOWN_EVT;
652 break;
653 case LINK_FAILURE_EVT:
654 l->state = LINK_RESETTING;
655 rc |= TIPC_LINK_DOWN_EVT;
656 break;
657 case LINK_RESET_EVT:
658 l->state = LINK_RESET;
659 break;
660 case LINK_ESTABLISH_EVT:
661 case LINK_SYNCH_BEGIN_EVT:
662 break;
663 case LINK_SYNCH_END_EVT:
664 l->state = LINK_ESTABLISHED;
665 break;
666 case LINK_FAILOVER_BEGIN_EVT:
667 case LINK_FAILOVER_END_EVT:
668 default:
669 goto illegal_evt;
670 }
671 break;
672 default:
673 pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
674 }
675 return rc;
676 illegal_evt:
677 pr_err("Illegal FSM event %x in state %x on link %s\n",
678 evt, l->state, l->name);
679 return rc;
680 }
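/* Note that the value returned by tipc_link_fsm_evt() is a bit field rather
 * than an error code: TIPC_LINK_DOWN_EVT (and, from other link calls,
 * TIPC_LINK_UP_EVT or TIPC_LINK_SND_STATE) is OR'ed into it so that callers,
 * ultimately in node.c, can act on the resulting state change.
 */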
681
682 /* link_profile_stats - update statistical profiling of traffic
683 */
684 static void link_profile_stats(struct tipc_link *l)
685 {
686 struct sk_buff *skb;
687 struct tipc_msg *msg;
688 int length;
689
690 /* Update counters used in statistical profiling of send traffic */
691 l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
692 l->stats.queue_sz_counts++;
693
694 skb = skb_peek(&l->transmq);
695 if (!skb)
696 return;
697 msg = buf_msg(skb);
698 length = msg_size(msg);
699
700 if (msg_user(msg) == MSG_FRAGMENTER) {
701 if (msg_type(msg) != FIRST_FRAGMENT)
702 return;
703 length = msg_size(msg_get_wrapped(msg));
704 }
705 l->stats.msg_lengths_total += length;
706 l->stats.msg_length_counts++;
707 if (length <= 64)
708 l->stats.msg_length_profile[0]++;
709 else if (length <= 256)
710 l->stats.msg_length_profile[1]++;
711 else if (length <= 1024)
712 l->stats.msg_length_profile[2]++;
713 else if (length <= 4096)
714 l->stats.msg_length_profile[3]++;
715 else if (length <= 16384)
716 l->stats.msg_length_profile[4]++;
717 else if (length <= 32768)
718 l->stats.msg_length_profile[5]++;
719 else
720 l->stats.msg_length_profile[6]++;
721 }
722
723 /* tipc_link_timeout - perform periodic task as instructed from node timeout
724 */
725 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
726 {
727 int mtyp = 0;
728 int rc = 0;
729 bool state = false;
730 bool probe = false;
731 bool setup = false;
732 u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
733 u16 bc_acked = l->bc_rcvlink->acked;
734 struct tipc_mon_state *mstate = &l->mon_state;
735
736 switch (l->state) {
737 case LINK_ESTABLISHED:
738 case LINK_SYNCHING:
739 mtyp = STATE_MSG;
740 link_profile_stats(l);
741 tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
742 if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
743 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
744 state = bc_acked != bc_snt;
745 state |= l->bc_rcvlink->rcv_unacked;
746 state |= l->rcv_unacked;
747 state |= !skb_queue_empty(&l->transmq);
748 state |= !skb_queue_empty(&l->deferdq);
749 probe = mstate->probing;
750 probe |= l->silent_intv_cnt;
751 if (probe || mstate->monitoring)
752 l->silent_intv_cnt++;
753 break;
754 case LINK_RESET:
755 setup = l->rst_cnt++ <= 4;
756 setup |= !(l->rst_cnt % 16);
757 mtyp = RESET_MSG;
758 break;
759 case LINK_ESTABLISHING:
760 setup = true;
761 mtyp = ACTIVATE_MSG;
762 break;
763 case LINK_PEER_RESET:
764 case LINK_RESETTING:
765 case LINK_FAILINGOVER:
766 break;
767 default:
768 break;
769 }
770
771 if (state || probe || setup)
772 tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);
773
774 return rc;
775 }
776
777 /**
778 * link_schedule_user - schedule a message sender for wakeup after congestion
779 * @link: congested link
780  * @list: message that was attempted to be sent
781 * Create pseudo msg to send back to user when congestion abates
782 * Does not consume buffer list
783 */
784 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
785 {
786 struct tipc_msg *msg = buf_msg(skb_peek(list));
787 int imp = msg_importance(msg);
788 u32 oport = msg_origport(msg);
789 u32 addr = tipc_own_addr(link->net);
790 struct sk_buff *skb;
791
792 /* This really cannot happen... */
793 if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
794 		pr_warn("%s<%s>, send queue full\n", link_rst_msg, link->name);
795 return -ENOBUFS;
796 }
797 /* Non-blocking sender: */
798 if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
799 return -ELINKCONG;
800
801 /* Create and schedule wakeup pseudo message */
802 skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
803 addr, addr, oport, 0, 0);
804 if (!skb)
805 return -ENOBUFS;
806 TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
807 TIPC_SKB_CB(skb)->chain_imp = imp;
808 skb_queue_tail(&link->wakeupq, skb);
809 link->stats.link_congs++;
810 return -ELINKCONG;
811 }
812
813 /**
814 * link_prepare_wakeup - prepare users for wakeup after congestion
815 * @link: congested link
816 * Move a number of waiting users, as permitted by available space in
817 * the send queue, from link wait queue to node wait queue for wakeup
818 */
819 void link_prepare_wakeup(struct tipc_link *l)
820 {
821 int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
822 int imp, lim;
823 struct sk_buff *skb, *tmp;
824
825 skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
826 imp = TIPC_SKB_CB(skb)->chain_imp;
827 lim = l->backlog[imp].limit;
828 pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
829 if ((pnd[imp] + l->backlog[imp].len) >= lim)
830 break;
831 skb_unlink(skb, &l->wakeupq);
832 skb_queue_tail(l->inputq, skb);
833 }
834 }
835
836 void tipc_link_reset(struct tipc_link *l)
837 {
838 l->peer_session = ANY_SESSION;
839 l->session++;
840 l->mtu = l->advertised_mtu;
841 __skb_queue_purge(&l->transmq);
842 __skb_queue_purge(&l->deferdq);
843 skb_queue_splice_init(&l->wakeupq, l->inputq);
844 __skb_queue_purge(&l->backlogq);
845 l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
846 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
847 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
848 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
849 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
850 kfree_skb(l->reasm_buf);
851 kfree_skb(l->failover_reasm_skb);
852 l->reasm_buf = NULL;
853 l->failover_reasm_skb = NULL;
854 l->rcv_unacked = 0;
855 l->snd_nxt = 1;
856 l->rcv_nxt = 1;
857 l->acked = 0;
858 l->silent_intv_cnt = 0;
859 l->rst_cnt = 0;
860 l->stats.recv_info = 0;
861 l->stale_count = 0;
862 l->bc_peer_is_up = false;
863 memset(&l->mon_state, 0, sizeof(l->mon_state));
864 tipc_link_reset_stats(l);
865 }
866
867 /**
868 * tipc_link_xmit(): enqueue buffer list according to queue situation
869 * @link: link to use
870 * @list: chain of buffers containing message
871 * @xmitq: returned list of packets to be sent by caller
872 *
873 * Consumes the buffer chain, except when returning -ELINKCONG,
874 * since the caller then may want to make more send attempts.
875 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
876 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
877 */
878 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
879 struct sk_buff_head *xmitq)
880 {
881 struct tipc_msg *hdr = buf_msg(skb_peek(list));
882 unsigned int maxwin = l->window;
883 unsigned int i, imp = msg_importance(hdr);
884 unsigned int mtu = l->mtu;
885 u16 ack = l->rcv_nxt - 1;
886 u16 seqno = l->snd_nxt;
887 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
888 struct sk_buff_head *transmq = &l->transmq;
889 struct sk_buff_head *backlogq = &l->backlogq;
890 struct sk_buff *skb, *_skb, *bskb;
891
892 /* Match msg importance against this and all higher backlog limits: */
893 if (!skb_queue_empty(backlogq)) {
894 for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
895 if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
896 return link_schedule_user(l, list);
897 }
898 }
899 if (unlikely(msg_size(hdr) > mtu)) {
900 skb_queue_purge(list);
901 return -EMSGSIZE;
902 }
903
904 /* Prepare each packet for sending, and add to relevant queue: */
905 while (skb_queue_len(list)) {
906 skb = skb_peek(list);
907 hdr = buf_msg(skb);
908 msg_set_seqno(hdr, seqno);
909 msg_set_ack(hdr, ack);
910 msg_set_bcast_ack(hdr, bc_ack);
911
912 if (likely(skb_queue_len(transmq) < maxwin)) {
913 _skb = skb_clone(skb, GFP_ATOMIC);
914 if (!_skb) {
915 skb_queue_purge(list);
916 return -ENOBUFS;
917 }
918 __skb_dequeue(list);
919 __skb_queue_tail(transmq, skb);
920 __skb_queue_tail(xmitq, _skb);
921 TIPC_SKB_CB(skb)->ackers = l->ackers;
922 l->rcv_unacked = 0;
923 seqno++;
924 continue;
925 }
926 if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
927 kfree_skb(__skb_dequeue(list));
928 l->stats.sent_bundled++;
929 continue;
930 }
931 if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
932 kfree_skb(__skb_dequeue(list));
933 __skb_queue_tail(backlogq, bskb);
934 l->backlog[msg_importance(buf_msg(bskb))].len++;
935 l->stats.sent_bundled++;
936 l->stats.sent_bundles++;
937 continue;
938 }
939 l->backlog[imp].len += skb_queue_len(list);
940 skb_queue_splice_tail_init(list, backlogq);
941 }
942 l->snd_nxt = seqno;
943 return 0;
944 }
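/* A simplified sketch of the typical send path (roughly what callers such as
 * node.c do; locking and error handling elided): build the buffer chain,
 * pass it to tipc_link_xmit() under the link lock, then hand the returned
 * xmitq to the bearer:
 *
 *	struct sk_buff_head xmitq;
 *	int rc;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	if (!skb_queue_empty(&xmitq))
 *		tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
 */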
945
946 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
947 {
948 struct sk_buff *skb, *_skb;
949 struct tipc_msg *hdr;
950 u16 seqno = l->snd_nxt;
951 u16 ack = l->rcv_nxt - 1;
952 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
953
954 while (skb_queue_len(&l->transmq) < l->window) {
955 skb = skb_peek(&l->backlogq);
956 if (!skb)
957 break;
958 _skb = skb_clone(skb, GFP_ATOMIC);
959 if (!_skb)
960 break;
961 __skb_dequeue(&l->backlogq);
962 hdr = buf_msg(skb);
963 l->backlog[msg_importance(hdr)].len--;
964 __skb_queue_tail(&l->transmq, skb);
965 __skb_queue_tail(xmitq, _skb);
966 TIPC_SKB_CB(skb)->ackers = l->ackers;
967 msg_set_seqno(hdr, seqno);
968 msg_set_ack(hdr, ack);
969 msg_set_bcast_ack(hdr, bc_ack);
970 l->rcv_unacked = 0;
971 seqno++;
972 }
973 l->snd_nxt = seqno;
974 }
975
976 static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
977 {
978 struct tipc_msg *hdr = buf_msg(skb);
979
980 pr_warn("Retransmission failure on link <%s>\n", l->name);
981 link_print(l, "Resetting link ");
982 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
983 msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
984 pr_info("sqno %u, prev: %x, src: %x\n",
985 msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
986 }
987
988 int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
989 struct sk_buff_head *xmitq)
990 {
991 struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
992 struct tipc_msg *hdr;
993 u16 ack = l->rcv_nxt - 1;
994 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
995
996 if (!skb)
997 return 0;
998
999 /* Detect repeated retransmit failures on same packet */
1000 if (likely(l->last_retransm != buf_seqno(skb))) {
1001 l->last_retransm = buf_seqno(skb);
1002 l->stale_count = 1;
1003 } else if (++l->stale_count > 100) {
1004 link_retransmit_failure(l, skb);
1005 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1006 }
1007
1008 /* Move forward to where retransmission should start */
1009 skb_queue_walk(&l->transmq, skb) {
1010 if (!less(buf_seqno(skb), from))
1011 break;
1012 }
1013
1014 skb_queue_walk_from(&l->transmq, skb) {
1015 if (more(buf_seqno(skb), to))
1016 break;
1017 hdr = buf_msg(skb);
1018 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1019 if (!_skb)
1020 return 0;
1021 hdr = buf_msg(_skb);
1022 msg_set_ack(hdr, ack);
1023 msg_set_bcast_ack(hdr, bc_ack);
1024 _skb->priority = TC_PRIO_CONTROL;
1025 __skb_queue_tail(xmitq, _skb);
1026 l->stats.retransmitted++;
1027 }
1028 return 0;
1029 }
1030
1031 /* tipc_data_input - deliver data and name distr msgs to upper layer
1032 *
1033 * Consumes buffer if message is of right type
1034 * Node lock must be held
1035 */
1036 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1037 struct sk_buff_head *inputq)
1038 {
1039 switch (msg_user(buf_msg(skb))) {
1040 case TIPC_LOW_IMPORTANCE:
1041 case TIPC_MEDIUM_IMPORTANCE:
1042 case TIPC_HIGH_IMPORTANCE:
1043 case TIPC_CRITICAL_IMPORTANCE:
1044 case CONN_MANAGER:
1045 skb_queue_tail(inputq, skb);
1046 return true;
1047 case NAME_DISTRIBUTOR:
1048 l->bc_rcvlink->state = LINK_ESTABLISHED;
1049 skb_queue_tail(l->namedq, skb);
1050 return true;
1051 case MSG_BUNDLER:
1052 case TUNNEL_PROTOCOL:
1053 case MSG_FRAGMENTER:
1054 case BCAST_PROTOCOL:
1055 return false;
1056 default:
1057 pr_warn("Dropping received illegal msg type\n");
1058 kfree_skb(skb);
1059 return false;
1060 	}
1061 }
1062
1063 /* tipc_link_input - process packet that has passed link protocol check
1064 *
1065 * Consumes buffer
1066 */
1067 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1068 struct sk_buff_head *inputq)
1069 {
1070 struct tipc_msg *hdr = buf_msg(skb);
1071 struct sk_buff **reasm_skb = &l->reasm_buf;
1072 struct sk_buff *iskb;
1073 struct sk_buff_head tmpq;
1074 int usr = msg_user(hdr);
1075 int rc = 0;
1076 int pos = 0;
1077 int ipos = 0;
1078
1079 if (unlikely(usr == TUNNEL_PROTOCOL)) {
1080 if (msg_type(hdr) == SYNCH_MSG) {
1081 __skb_queue_purge(&l->deferdq);
1082 goto drop;
1083 }
1084 if (!tipc_msg_extract(skb, &iskb, &ipos))
1085 return rc;
1086 kfree_skb(skb);
1087 skb = iskb;
1088 hdr = buf_msg(skb);
1089 if (less(msg_seqno(hdr), l->drop_point))
1090 goto drop;
1091 if (tipc_data_input(l, skb, inputq))
1092 return rc;
1093 usr = msg_user(hdr);
1094 reasm_skb = &l->failover_reasm_skb;
1095 }
1096
1097 if (usr == MSG_BUNDLER) {
1098 skb_queue_head_init(&tmpq);
1099 l->stats.recv_bundles++;
1100 l->stats.recv_bundled += msg_msgcnt(hdr);
1101 while (tipc_msg_extract(skb, &iskb, &pos))
1102 tipc_data_input(l, iskb, &tmpq);
1103 tipc_skb_queue_splice_tail(&tmpq, inputq);
1104 return 0;
1105 } else if (usr == MSG_FRAGMENTER) {
1106 l->stats.recv_fragments++;
1107 if (tipc_buf_append(reasm_skb, &skb)) {
1108 l->stats.recv_fragmented++;
1109 tipc_data_input(l, skb, inputq);
1110 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1111 pr_warn_ratelimited("Unable to build fragment list\n");
1112 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1113 }
1114 return 0;
1115 } else if (usr == BCAST_PROTOCOL) {
1116 tipc_bcast_lock(l->net);
1117 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1118 tipc_bcast_unlock(l->net);
1119 }
1120 drop:
1121 kfree_skb(skb);
1122 return 0;
1123 }
1124
1125 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1126 {
1127 bool released = false;
1128 struct sk_buff *skb, *tmp;
1129
1130 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1131 if (more(buf_seqno(skb), acked))
1132 break;
1133 __skb_unlink(skb, &l->transmq);
1134 kfree_skb(skb);
1135 released = true;
1136 }
1137 return released;
1138 }
1139
1140 /* tipc_link_build_state_msg: prepare link state message for transmission
1141 *
1142 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1143 * risk of ack storms towards the sender
1144 */
1145 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1146 {
1147 if (!l)
1148 return 0;
1149
1150 /* Broadcast ACK must be sent via a unicast link => defer to caller */
1151 if (link_is_bc_rcvlink(l)) {
1152 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1153 return 0;
1154 l->rcv_unacked = 0;
1155
1156 /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
1157 l->snd_nxt = l->rcv_nxt;
1158 return TIPC_LINK_SND_STATE;
1159 }
1160
1161 /* Unicast ACK */
1162 l->rcv_unacked = 0;
1163 l->stats.sent_acks++;
1164 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1165 return 0;
1166 }
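/* The test ((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf above makes a
 * broadcast receiver ack only one in every sixteen packet numbers, and nodes
 * whose addresses differ in the low nibble ack at different packet numbers.
 * Acks from the receivers are thereby spread out rather than all hitting the
 * sender at once.
 */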
1167
1168 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1169 */
1170 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1171 {
1172 int mtyp = RESET_MSG;
1173 struct sk_buff *skb;
1174
1175 if (l->state == LINK_ESTABLISHING)
1176 mtyp = ACTIVATE_MSG;
1177
1178 tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
1179
1180 /* Inform peer that this endpoint is going down if applicable */
1181 skb = skb_peek_tail(xmitq);
1182 if (skb && (l->state == LINK_RESET))
1183 msg_set_peer_stopping(buf_msg(skb), 1);
1184 }
1185
1186 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1187 * Note that sending of broadcast NACK is coordinated among nodes, to
1188 * reduce the risk of NACK storms towards the sender
1189 */
1190 static int tipc_link_build_nack_msg(struct tipc_link *l,
1191 struct sk_buff_head *xmitq)
1192 {
1193 u32 def_cnt = ++l->stats.deferred_recv;
1194 int match1, match2;
1195
1196 if (link_is_bc_rcvlink(l)) {
1197 match1 = def_cnt & 0xf;
1198 match2 = tipc_own_addr(l->net) & 0xf;
1199 if (match1 == match2)
1200 return TIPC_LINK_SND_STATE;
1201 return 0;
1202 }
1203
1204 if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
1205 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1206 return 0;
1207 }
1208
1209 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1210 * @l: the link that should handle the message
1211 * @skb: TIPC packet
1212 * @xmitq: queue to place packets to be sent after this call
1213 */
1214 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1215 struct sk_buff_head *xmitq)
1216 {
1217 struct sk_buff_head *defq = &l->deferdq;
1218 struct tipc_msg *hdr;
1219 u16 seqno, rcv_nxt, win_lim;
1220 int rc = 0;
1221
1222 do {
1223 hdr = buf_msg(skb);
1224 seqno = msg_seqno(hdr);
1225 rcv_nxt = l->rcv_nxt;
1226 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1227
1228 /* Verify and update link state */
1229 if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1230 return tipc_link_proto_rcv(l, skb, xmitq);
1231
1232 if (unlikely(!link_is_up(l))) {
1233 if (l->state == LINK_ESTABLISHING)
1234 rc = TIPC_LINK_UP_EVT;
1235 goto drop;
1236 }
1237
1238 /* Don't send probe at next timeout expiration */
1239 l->silent_intv_cnt = 0;
1240
1241 /* Drop if outside receive window */
1242 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1243 l->stats.duplicates++;
1244 goto drop;
1245 }
1246
1247 /* Forward queues and wake up waiting users */
1248 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1249 tipc_link_advance_backlog(l, xmitq);
1250 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1251 link_prepare_wakeup(l);
1252 }
1253
1254 /* Defer delivery if sequence gap */
1255 if (unlikely(seqno != rcv_nxt)) {
1256 __tipc_skb_queue_sorted(defq, seqno, skb);
1257 rc |= tipc_link_build_nack_msg(l, xmitq);
1258 break;
1259 }
1260
1261 /* Deliver packet */
1262 l->rcv_nxt++;
1263 l->stats.recv_info++;
1264 if (!tipc_data_input(l, skb, l->inputq))
1265 rc |= tipc_link_input(l, skb, l->inputq);
1266 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1267 rc |= tipc_link_build_state_msg(l, xmitq);
1268 if (unlikely(rc & ~TIPC_LINK_SND_STATE))
1269 break;
1270 } while ((skb = __skb_dequeue(defq)));
1271
1272 return rc;
1273 drop:
1274 kfree_skb(skb);
1275 return rc;
1276 }
1277
1278 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1279 u16 rcvgap, int tolerance, int priority,
1280 struct sk_buff_head *xmitq)
1281 {
1282 struct tipc_link *bcl = l->bc_rcvlink;
1283 struct sk_buff *skb;
1284 struct tipc_msg *hdr;
1285 struct sk_buff_head *dfq = &l->deferdq;
1286 bool node_up = link_is_up(bcl);
1287 struct tipc_mon_state *mstate = &l->mon_state;
1288 int dlen = 0;
1289 void *data;
1290
1291 /* Don't send protocol message during reset or link failover */
1292 if (tipc_link_is_blocked(l))
1293 return;
1294
1295 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1296 return;
1297
1298 if (!skb_queue_empty(dfq))
1299 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1300
1301 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1302 tipc_max_domain_size, l->addr,
1303 tipc_own_addr(l->net), 0, 0, 0);
1304 if (!skb)
1305 return;
1306
1307 hdr = buf_msg(skb);
1308 data = msg_data(hdr);
1309 msg_set_session(hdr, l->session);
1310 msg_set_bearer_id(hdr, l->bearer_id);
1311 msg_set_net_plane(hdr, l->net_plane);
1312 msg_set_next_sent(hdr, l->snd_nxt);
1313 msg_set_ack(hdr, l->rcv_nxt - 1);
1314 msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
1315 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1316 msg_set_link_tolerance(hdr, tolerance);
1317 msg_set_linkprio(hdr, priority);
1318 msg_set_redundant_link(hdr, node_up);
1319 msg_set_seq_gap(hdr, 0);
1320 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1321
1322 if (mtyp == STATE_MSG) {
1323 msg_set_seq_gap(hdr, rcvgap);
1324 msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
1325 msg_set_probe(hdr, probe);
1326 tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
1327 msg_set_size(hdr, INT_H_SIZE + dlen);
1328 skb_trim(skb, INT_H_SIZE + dlen);
1329 l->stats.sent_states++;
1330 l->rcv_unacked = 0;
1331 } else {
1332 /* RESET_MSG or ACTIVATE_MSG */
1333 msg_set_max_pkt(hdr, l->advertised_mtu);
1334 strcpy(data, l->if_name);
1335 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1336 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1337 }
1338 if (probe)
1339 l->stats.sent_probes++;
1340 if (rcvgap)
1341 l->stats.sent_nacks++;
1342 skb->priority = TC_PRIO_CONTROL;
1343 __skb_queue_tail(xmitq, skb);
1344 }
1345
1346 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1347 * with contents of the link's transmit and backlog queues.
1348 */
1349 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1350 int mtyp, struct sk_buff_head *xmitq)
1351 {
1352 struct sk_buff *skb, *tnlskb;
1353 struct tipc_msg *hdr, tnlhdr;
1354 struct sk_buff_head *queue = &l->transmq;
1355 struct sk_buff_head tmpxq, tnlq;
1356 u16 pktlen, pktcnt, seqno = l->snd_nxt;
1357
1358 if (!tnl)
1359 return;
1360
1361 skb_queue_head_init(&tnlq);
1362 skb_queue_head_init(&tmpxq);
1363
1364 /* At least one packet required for safe algorithm => add dummy */
1365 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1366 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1367 0, 0, TIPC_ERR_NO_PORT);
1368 if (!skb) {
1369 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1370 return;
1371 }
1372 skb_queue_tail(&tnlq, skb);
1373 tipc_link_xmit(l, &tnlq, &tmpxq);
1374 __skb_queue_purge(&tmpxq);
1375
1376 /* Initialize reusable tunnel packet header */
1377 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1378 mtyp, INT_H_SIZE, l->addr);
1379 pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1380 msg_set_msgcnt(&tnlhdr, pktcnt);
1381 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1382 tnl:
1383 /* Wrap each packet into a tunnel packet */
1384 skb_queue_walk(queue, skb) {
1385 hdr = buf_msg(skb);
1386 if (queue == &l->backlogq)
1387 msg_set_seqno(hdr, seqno++);
1388 pktlen = msg_size(hdr);
1389 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1390 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
1391 if (!tnlskb) {
1392 pr_warn("%sunable to send packet\n", link_co_err);
1393 return;
1394 }
1395 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1396 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1397 __skb_queue_tail(&tnlq, tnlskb);
1398 }
1399 if (queue != &l->backlogq) {
1400 queue = &l->backlogq;
1401 goto tnl;
1402 }
1403
1404 tipc_link_xmit(tnl, &tnlq, xmitq);
1405
1406 if (mtyp == FAILOVER_MSG) {
1407 tnl->drop_point = l->rcv_nxt;
1408 tnl->failover_reasm_skb = l->reasm_buf;
1409 l->reasm_buf = NULL;
1410 }
1411 }
1412
1413 /* tipc_link_proto_rcv(): receive link level protocol message :
1414 * Note that network plane id propagates through the network, and may
1415 * change at any time. The node with lowest numerical id determines
1416 * network plane
1417 */
1418 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1419 struct sk_buff_head *xmitq)
1420 {
1421 struct tipc_msg *hdr = buf_msg(skb);
1422 u16 rcvgap = 0;
1423 u16 ack = msg_ack(hdr);
1424 u16 gap = msg_seq_gap(hdr);
1425 u16 peers_snd_nxt = msg_next_sent(hdr);
1426 u16 peers_tol = msg_link_tolerance(hdr);
1427 u16 peers_prio = msg_linkprio(hdr);
1428 u16 rcv_nxt = l->rcv_nxt;
1429 u16 dlen = msg_data_sz(hdr);
1430 int mtyp = msg_type(hdr);
1431 void *data;
1432 char *if_name;
1433 int rc = 0;
1434
1435 if (tipc_link_is_blocked(l) || !xmitq)
1436 goto exit;
1437
1438 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1439 l->net_plane = msg_net_plane(hdr);
1440
1441 skb_linearize(skb);
1442 hdr = buf_msg(skb);
1443 data = msg_data(hdr);
1444
1445 switch (mtyp) {
1446 case RESET_MSG:
1447
1448 /* Ignore duplicate RESET with old session number */
1449 if ((less_eq(msg_session(hdr), l->peer_session)) &&
1450 (l->peer_session != ANY_SESSION))
1451 break;
1452 /* fall thru' */
1453
1454 case ACTIVATE_MSG:
1455
1456 /* Complete own link name with peer's interface name */
1457 if_name = strrchr(l->name, ':') + 1;
1458 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1459 break;
1460 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1461 break;
1462 strncpy(if_name, data, TIPC_MAX_IF_NAME);
1463
1464 /* Update own tolerance if peer indicates a non-zero value */
1465 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1466 l->tolerance = peers_tol;
1467
1468 /* Update own priority if peer's priority is higher */
1469 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1470 l->priority = peers_prio;
1471
1472 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1473 if (msg_peer_stopping(hdr))
1474 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1475 else if ((mtyp == RESET_MSG) || !link_is_up(l))
1476 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1477
1478 /* ACTIVATE_MSG takes up link if it was already locally reset */
1479 if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1480 rc = TIPC_LINK_UP_EVT;
1481
1482 l->peer_session = msg_session(hdr);
1483 l->peer_bearer_id = msg_bearer_id(hdr);
1484 if (l->mtu > msg_max_pkt(hdr))
1485 l->mtu = msg_max_pkt(hdr);
1486 break;
1487
1488 case STATE_MSG:
1489
1490 /* Update own tolerance if peer indicates a non-zero value */
1491 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1492 l->tolerance = peers_tol;
1493
1494 if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
1495 TIPC_MAX_LINK_PRI)) {
1496 l->priority = peers_prio;
1497 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1498 }
1499
1500 l->silent_intv_cnt = 0;
1501 l->stats.recv_states++;
1502 if (msg_probe(hdr))
1503 l->stats.recv_probes++;
1504
1505 if (!link_is_up(l)) {
1506 if (l->state == LINK_ESTABLISHING)
1507 rc = TIPC_LINK_UP_EVT;
1508 break;
1509 }
1510 tipc_mon_rcv(l->net, data, dlen, l->addr,
1511 &l->mon_state, l->bearer_id);
1512
1513 /* Send NACK if peer has sent pkts we haven't received yet */
1514 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1515 rcvgap = peers_snd_nxt - l->rcv_nxt;
1516 if (rcvgap || (msg_probe(hdr)))
1517 tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
1518 0, 0, xmitq);
1519 tipc_link_release_pkts(l, ack);
1520
1521 /* If NACK, retransmit will now start at right position */
1522 if (gap) {
1523 rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
1524 l->stats.recv_nacks++;
1525 }
1526
1527 tipc_link_advance_backlog(l, xmitq);
1528 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1529 link_prepare_wakeup(l);
1530 }
1531 exit:
1532 kfree_skb(skb);
1533 return rc;
1534 }
1535
1536 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1537 */
1538 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1539 u16 peers_snd_nxt,
1540 struct sk_buff_head *xmitq)
1541 {
1542 struct sk_buff *skb;
1543 struct tipc_msg *hdr;
1544 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1545 u16 ack = l->rcv_nxt - 1;
1546 u16 gap_to = peers_snd_nxt - 1;
1547
1548 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1549 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
1550 if (!skb)
1551 return false;
1552 hdr = buf_msg(skb);
1553 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1554 msg_set_bcast_ack(hdr, ack);
1555 msg_set_bcgap_after(hdr, ack);
1556 if (dfrd_skb)
1557 gap_to = buf_seqno(dfrd_skb) - 1;
1558 msg_set_bcgap_to(hdr, gap_to);
1559 msg_set_non_seq(hdr, bcast);
1560 __skb_queue_tail(xmitq, skb);
1561 return true;
1562 }
1563
1564 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1565 *
1566 * Give a newly added peer node the sequence number where it should
1567 * start receiving and acking broadcast packets.
1568 */
1569 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1570 struct sk_buff_head *xmitq)
1571 {
1572 struct sk_buff_head list;
1573
1574 __skb_queue_head_init(&list);
1575 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1576 return;
1577 tipc_link_xmit(l, &list, xmitq);
1578 }
1579
1580 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1581 */
1582 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1583 {
1584 int mtyp = msg_type(hdr);
1585 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1586
1587 if (link_is_up(l))
1588 return;
1589
1590 if (msg_user(hdr) == BCAST_PROTOCOL) {
1591 l->rcv_nxt = peers_snd_nxt;
1592 l->state = LINK_ESTABLISHED;
1593 return;
1594 }
1595
1596 if (l->peer_caps & TIPC_BCAST_SYNCH)
1597 return;
1598
1599 if (msg_peer_node_is_up(hdr))
1600 return;
1601
1602 /* Compatibility: accept older, less safe initial synch data */
1603 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1604 l->rcv_nxt = peers_snd_nxt;
1605 }
1606
1607 /* link_bc_retr_eval() - check if the indicated range can be retransmitted now
1608 * - Adjust permitted range if there is overlap with previous retransmission
1609 */
1610 static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
1611 {
1612 unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);
1613
1614 if (less(*to, *from))
1615 return false;
1616
1617 /* New retransmission request */
1618 if ((elapsed > TIPC_BC_RETR_LIMIT) ||
1619 less(*to, l->prev_from) || more(*from, l->prev_to)) {
1620 l->prev_from = *from;
1621 l->prev_to = *to;
1622 l->prev_retr = jiffies;
1623 return true;
1624 }
1625
1626 /* Inside range of previous retransmit */
1627 if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
1628 return false;
1629
1630 /* Fully or partially outside previous range => exclude overlap */
1631 if (less(*from, l->prev_from)) {
1632 *to = l->prev_from - 1;
1633 l->prev_from = *from;
1634 }
1635 if (more(*to, l->prev_to)) {
1636 *from = l->prev_to + 1;
1637 l->prev_to = *to;
1638 }
1639 l->prev_retr = jiffies;
1640 return true;
1641 }
1642
1643 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1644 */
1645 int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1646 struct sk_buff_head *xmitq)
1647 {
1648 struct tipc_link *snd_l = l->bc_sndlink;
1649 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1650 u16 from = msg_bcast_ack(hdr) + 1;
1651 u16 to = from + msg_bc_gap(hdr) - 1;
1652 int rc = 0;
1653
1654 if (!link_is_up(l))
1655 return rc;
1656
1657 if (!msg_peer_node_is_up(hdr))
1658 return rc;
1659
1660 	/* Open when peer acknowledges our bcast init msg (pkt #1) */
1661 if (msg_ack(hdr))
1662 l->bc_peer_is_up = true;
1663
1664 if (!l->bc_peer_is_up)
1665 return rc;
1666
1667 l->stats.recv_nacks++;
1668
1669 /* Ignore if peers_snd_nxt goes beyond receive window */
1670 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
1671 return rc;
1672
1673 if (link_bc_retr_eval(snd_l, &from, &to))
1674 rc = tipc_link_retrans(snd_l, from, to, xmitq);
1675
1676 l->snd_nxt = peers_snd_nxt;
1677 if (link_bc_rcv_gap(l))
1678 rc |= TIPC_LINK_SND_STATE;
1679
1680 /* Return now if sender supports nack via STATE messages */
1681 if (l->peer_caps & TIPC_BCAST_STATE_NACK)
1682 return rc;
1683
1684 /* Otherwise, be backwards compatible */
1685
1686 if (!more(peers_snd_nxt, l->rcv_nxt)) {
1687 l->nack_state = BC_NACK_SND_CONDITIONAL;
1688 return 0;
1689 }
1690
1691 /* Don't NACK if one was recently sent or peeked */
1692 if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1693 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1694 return 0;
1695 }
1696
1697 /* Conditionally delay NACK sending until next synch rcv */
1698 if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1699 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1700 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
1701 return 0;
1702 }
1703
1704 /* Send NACK now but suppress next one */
1705 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1706 l->nack_state = BC_NACK_SND_SUPPRESS;
1707 return 0;
1708 }
1709
1710 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1711 struct sk_buff_head *xmitq)
1712 {
1713 struct sk_buff *skb, *tmp;
1714 struct tipc_link *snd_l = l->bc_sndlink;
1715
1716 if (!link_is_up(l) || !l->bc_peer_is_up)
1717 return;
1718
1719 if (!more(acked, l->acked))
1720 return;
1721
1722 /* Skip over packets peer has already acked */
1723 skb_queue_walk(&snd_l->transmq, skb) {
1724 if (more(buf_seqno(skb), l->acked))
1725 break;
1726 }
1727
1728 /* Update/release the packets peer is acking now */
1729 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1730 if (more(buf_seqno(skb), acked))
1731 break;
1732 if (!--TIPC_SKB_CB(skb)->ackers) {
1733 __skb_unlink(skb, &snd_l->transmq);
1734 kfree_skb(skb);
1735 }
1736 }
1737 l->acked = acked;
1738 tipc_link_advance_backlog(snd_l, xmitq);
1739 if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1740 link_prepare_wakeup(snd_l);
1741 }
1742
1743 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
1744 * This function is here for backwards compatibility, since
1745 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
1746 */
1747 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1748 struct sk_buff_head *xmitq)
1749 {
1750 struct tipc_msg *hdr = buf_msg(skb);
1751 u32 dnode = msg_destnode(hdr);
1752 int mtyp = msg_type(hdr);
1753 u16 acked = msg_bcast_ack(hdr);
1754 u16 from = acked + 1;
1755 u16 to = msg_bcgap_to(hdr);
1756 u16 peers_snd_nxt = to + 1;
1757 int rc = 0;
1758
1759 kfree_skb(skb);
1760
1761 if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1762 return 0;
1763
1764 if (mtyp != STATE_MSG)
1765 return 0;
1766
1767 if (dnode == tipc_own_addr(l->net)) {
1768 tipc_link_bc_ack_rcv(l, acked, xmitq);
1769 rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
1770 l->stats.recv_nacks++;
1771 return rc;
1772 }
1773
1774 /* Msg for other node => suppress own NACK at next sync if applicable */
1775 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1776 l->nack_state = BC_NACK_SND_SUPPRESS;
1777
1778 return 0;
1779 }
1780
1781 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1782 {
1783 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
1784
1785 l->window = win;
1786 l->backlog[TIPC_LOW_IMPORTANCE].limit = max_t(u16, 50, win);
1787 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = max_t(u16, 100, win * 2);
1788 l->backlog[TIPC_HIGH_IMPORTANCE].limit = max_t(u16, 150, win * 3);
1789 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
1790 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
1791 }
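/* Worked example: with a window of 50 (the TIPC default), the backlog limits
 * above become 50 (LOW), 100 (MEDIUM), 150 (HIGH) and 200 (CRITICAL) packets,
 * while the SYSTEM limit equals max_bulk, roughly the number of packets
 * needed to carry TIPC_MAX_PUBLICATIONS name table items at the current MTU,
 * so a full bulk name table distribution is never refused.
 */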
1792
1793 /**
1794  * tipc_link_reset_stats - reset link statistics
1795 * @l: pointer to link
1796 */
1797 void tipc_link_reset_stats(struct tipc_link *l)
1798 {
1799 memset(&l->stats, 0, sizeof(l->stats));
1800 if (!link_is_bc_sndlink(l)) {
1801 l->stats.sent_info = l->snd_nxt;
1802 l->stats.recv_info = l->rcv_nxt;
1803 }
1804 }
1805
1806 static void link_print(struct tipc_link *l, const char *str)
1807 {
1808 struct sk_buff *hskb = skb_peek(&l->transmq);
1809 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
1810 u16 tail = l->snd_nxt - 1;
1811
1812 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
1813 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1814 skb_queue_len(&l->transmq), head, tail,
1815 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
1816 }
1817
1818 /* Parse and validate nested (link) properties valid for media, bearer and link
1819 */
1820 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1821 {
1822 int err;
1823
1824 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1825 tipc_nl_prop_policy);
1826 if (err)
1827 return err;
1828
1829 if (props[TIPC_NLA_PROP_PRIO]) {
1830 u32 prio;
1831
1832 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1833 if (prio > TIPC_MAX_LINK_PRI)
1834 return -EINVAL;
1835 }
1836
1837 if (props[TIPC_NLA_PROP_TOL]) {
1838 u32 tol;
1839
1840 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1841 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1842 return -EINVAL;
1843 }
1844
1845 if (props[TIPC_NLA_PROP_WIN]) {
1846 u32 win;
1847
1848 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1849 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1850 return -EINVAL;
1851 }
1852
1853 return 0;
1854 }
1855
1856 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1857 {
1858 int i;
1859 struct nlattr *stats;
1860
1861 struct nla_map {
1862 u32 key;
1863 u32 val;
1864 };
1865
1866 struct nla_map map[] = {
1867 {TIPC_NLA_STATS_RX_INFO, s->recv_info},
1868 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1869 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1870 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1871 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1872 {TIPC_NLA_STATS_TX_INFO, s->sent_info},
1873 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1874 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1875 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1876 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1877 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1878 s->msg_length_counts : 1},
1879 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1880 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1881 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1882 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1883 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1884 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1885 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1886 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1887 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1888 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
1889 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1890 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1891 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1892 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
1893 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1894 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1895 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1896 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1897 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1898 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1899 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1900 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1901 (s->accu_queue_sz / s->queue_sz_counts) : 0}
1902 };
1903
1904 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1905 if (!stats)
1906 return -EMSGSIZE;
1907
1908 for (i = 0; i < ARRAY_SIZE(map); i++)
1909 if (nla_put_u32(skb, map[i].key, map[i].val))
1910 goto msg_full;
1911
1912 nla_nest_end(skb, stats);
1913
1914 return 0;
1915 msg_full:
1916 nla_nest_cancel(skb, stats);
1917
1918 return -EMSGSIZE;
1919 }
1920
1921 /* Caller should hold appropriate locks to protect the link */
1922 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1923 struct tipc_link *link, int nlflags)
1924 {
1925 int err;
1926 void *hdr;
1927 struct nlattr *attrs;
1928 struct nlattr *prop;
1929 struct tipc_net *tn = net_generic(net, tipc_net_id);
1930
1931 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1932 nlflags, TIPC_NL_LINK_GET);
1933 if (!hdr)
1934 return -EMSGSIZE;
1935
1936 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1937 if (!attrs)
1938 goto msg_full;
1939
1940 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1941 goto attr_msg_full;
1942 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
1943 tipc_cluster_mask(tn->own_addr)))
1944 goto attr_msg_full;
1945 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
1946 goto attr_msg_full;
1947 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
1948 goto attr_msg_full;
1949 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
1950 goto attr_msg_full;
1951
1952 if (tipc_link_is_up(link))
1953 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1954 goto attr_msg_full;
1955 if (link->active)
1956 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1957 goto attr_msg_full;
1958
1959 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1960 if (!prop)
1961 goto attr_msg_full;
1962 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1963 goto prop_msg_full;
1964 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1965 goto prop_msg_full;
1966 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
1967 link->window))
1968 goto prop_msg_full;
1971 nla_nest_end(msg->skb, prop);
1972
1973 err = __tipc_nl_add_stats(msg->skb, &link->stats);
1974 if (err)
1975 goto attr_msg_full;
1976
1977 nla_nest_end(msg->skb, attrs);
1978 genlmsg_end(msg->skb, hdr);
1979
1980 return 0;
1981
1982 prop_msg_full:
1983 nla_nest_cancel(msg->skb, prop);
1984 attr_msg_full:
1985 nla_nest_cancel(msg->skb, attrs);
1986 msg_full:
1987 genlmsg_cancel(msg->skb, hdr);
1988
1989 return -EMSGSIZE;
1990 }
1991
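/* Nest the statistics counters of the broadcast link as TIPC_NLA_LINK_STATS
 * attributes; only the counters relevant to the broadcast link are included
 */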
1992 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
1993 struct tipc_stats *stats)
1994 {
1995 int i;
1996 struct nlattr *nest;
1997
1998 struct nla_map {
1999 		u32 key;
2000 		u32 val;
2001 };
2002
2003 struct nla_map map[] = {
2004 {TIPC_NLA_STATS_RX_INFO, stats->recv_info},
2005 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
2006 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
2007 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
2008 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
2009 {TIPC_NLA_STATS_TX_INFO, stats->sent_info},
2010 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
2011 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
2012 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
2013 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
2014 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
2015 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
2016 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
2017 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
2018 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
2019 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
2020 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
2021 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
2022 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
2023 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
2024 };
2025
2026 nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
2027 if (!nest)
2028 return -EMSGSIZE;
2029
2030 for (i = 0; i < ARRAY_SIZE(map); i++)
2031 if (nla_put_u32(skb, map[i].key, map[i].val))
2032 goto msg_full;
2033
2034 nla_nest_end(skb, nest);
2035
2036 return 0;
2037 msg_full:
2038 nla_nest_cancel(skb, nest);
2039
2040 return -EMSGSIZE;
2041 }
2042
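/* Build a TIPC_NL_LINK_GET message describing the broadcast link, including
 * its properties and statistics; returns 0 without adding anything if no
 * broadcast link exists. The broadcast lock is taken internally.
 */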
2043 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
2044 {
2045 int err;
2046 void *hdr;
2047 struct nlattr *attrs;
2048 struct nlattr *prop;
2049 struct tipc_net *tn = net_generic(net, tipc_net_id);
2050 struct tipc_link *bcl = tn->bcl;
2051
2052 if (!bcl)
2053 return 0;
2054
2055 tipc_bcast_lock(net);
2056
2057 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
2058 NLM_F_MULTI, TIPC_NL_LINK_GET);
2059 if (!hdr) {
2060 tipc_bcast_unlock(net);
2061 return -EMSGSIZE;
2062 }
2063
2064 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
2065 if (!attrs)
2066 goto msg_full;
2067
2068 /* The broadcast link is always up */
2069 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
2070 goto attr_msg_full;
2071
2072 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
2073 goto attr_msg_full;
2074 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
2075 goto attr_msg_full;
2076 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
2077 goto attr_msg_full;
2078 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
2079 goto attr_msg_full;
2080
2081 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
2082 if (!prop)
2083 goto attr_msg_full;
2084 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
2085 goto prop_msg_full;
2086 nla_nest_end(msg->skb, prop);
2087
2088 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
2089 if (err)
2090 goto attr_msg_full;
2091
2092 tipc_bcast_unlock(net);
2093 nla_nest_end(msg->skb, attrs);
2094 genlmsg_end(msg->skb, hdr);
2095
2096 return 0;
2097
2098 prop_msg_full:
2099 nla_nest_cancel(msg->skb, prop);
2100 attr_msg_full:
2101 nla_nest_cancel(msg->skb, attrs);
2102 msg_full:
2103 tipc_bcast_unlock(net);
2104 genlmsg_cancel(msg->skb, hdr);
2105
2106 return -EMSGSIZE;
2107 }
2108
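/* Update link tolerance and build a STATE_MSG on xmitq so that the new
 * value is propagated to the peer node
 */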
2109 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2110 struct sk_buff_head *xmitq)
2111 {
2112 l->tolerance = tol;
2113 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
2114 }
2115
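/* Update link priority and build a STATE_MSG on xmitq so that the new
 * value is propagated to the peer node
 */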
2116 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2117 struct sk_buff_head *xmitq)
2118 {
2119 l->priority = prio;
2120 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
2121 }
2122
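/* Set the number of silent (probe) intervals tolerated before the link
 * is declared failed
 */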
2123 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2124 {
2125 l->abort_limit = limit;
2126 }