1 /*
2 * net/tipc/link.c: TIPC link code
3 *
4 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
6 * All rights reserved.
7 *
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
10 *
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
19 *
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
23 *
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
35 */
36
37 #include "core.h"
38 #include "subscr.h"
39 #include "link.h"
40 #include "bcast.h"
41 #include "socket.h"
42 #include "name_distr.h"
43 #include "discover.h"
44 #include "netlink.h"
45 #include "monitor.h"
46
47 #include <linux/pkt_sched.h>
48
49 struct tipc_stats {
50 u32 sent_info; /* used in counting # sent packets */
51 u32 recv_info; /* used in counting # recv'd packets */
52 u32 sent_states;
53 u32 recv_states;
54 u32 sent_probes;
55 u32 recv_probes;
56 u32 sent_nacks;
57 u32 recv_nacks;
58 u32 sent_acks;
59 u32 sent_bundled;
60 u32 sent_bundles;
61 u32 recv_bundled;
62 u32 recv_bundles;
63 u32 retransmitted;
64 u32 sent_fragmented;
65 u32 sent_fragments;
66 u32 recv_fragmented;
67 u32 recv_fragments;
68 u32 link_congs; /* # port sends blocked by congestion */
69 u32 deferred_recv;
70 u32 duplicates;
71 u32 max_queue_sz; /* send queue size high water mark */
72 u32 accu_queue_sz; /* used for send queue size profiling */
73 u32 queue_sz_counts; /* used for send queue size profiling */
74 u32 msg_length_counts; /* used for message length profiling */
75 u32 msg_lengths_total; /* used for message length profiling */
76 u32 msg_length_profile[7]; /* used for msg. length profiling */
77 };
78
79 /**
80 * struct tipc_link - TIPC link data structure
81 * @addr: network address of link's peer node
82 * @name: link name character string
83 * @media_addr: media address to use when sending messages over link
84 * @timer: link timer
85 * @net: pointer to namespace struct
86 * @refcnt: reference counter for permanent references (owner node & timer)
87 * @peer_session: link session # being used by peer end of link
88 * @peer_bearer_id: bearer id used by link's peer endpoint
89 * @bearer_id: local bearer id used by link
90 * @tolerance: minimum link continuity loss needed to reset link [in ms]
91 * @abort_limit: # of unacknowledged continuity probes needed to reset link
92 * @state: current state of link FSM
93 * @peer_caps: bitmap describing capabilities of peer node
94 * @silent_intv_cnt: # of timer intervals without any reception from peer
95 * @proto_msg: template for control messages generated by link
96 * @pmsg: convenience pointer to "proto_msg" field
97 * @priority: current link priority
98 * @net_plane: current link network plane ('A' through 'H')
99 * @mon_state: cookie with information needed by link monitor
100 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
101 * @exp_msg_count: # of tunnelled messages expected during link changeover
102 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
103 * @mtu: current maximum packet size for this link
104 * @advertised_mtu: advertised own mtu when link is being established
105 * @transmitq: queue for sent, non-acked messages
106 * @backlogq: queue for messages waiting to be sent
107 * @snt_nxt: next sequence number to use for outbound messages
108 * @last_retransmitted: sequence number of most recently retransmitted message
109 * @stale_count: # of identical retransmit requests made by peer
110 * @ackers: # of peers that need to ack each packet before it can be released
111 * @acked: # last packet acked by a certain peer. Used for broadcast.
112 * @rcv_nxt: next sequence number to expect for inbound messages
113 * @deferred_queue: deferred queue of saved out-of-sequence b'cast messages received from node
114 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
115 * @inputq: buffer queue for messages to be delivered upwards
116 * @namedq: buffer queue for name table messages to be delivered upwards
117 * @next_out: ptr to first unsent outbound message in queue
118 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
119 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
120 * @reasm_buf: head of partially reassembled inbound message fragments
121 * @bc_rcvr: marks that this is a broadcast receiver link
122 * @stats: collects statistics regarding link activity
123 */
124 struct tipc_link {
125 u32 addr;
126 char name[TIPC_MAX_LINK_NAME];
127 struct net *net;
128
129 /* Management and link supervision data */
130 u32 peer_session;
131 u32 session;
132 u32 peer_bearer_id;
133 u32 bearer_id;
134 u32 tolerance;
135 u32 abort_limit;
136 u32 state;
137 u16 peer_caps;
138 bool active;
139 u32 silent_intv_cnt;
140 char if_name[TIPC_MAX_IF_NAME];
141 u32 priority;
142 char net_plane;
143 struct tipc_mon_state mon_state;
144 u16 rst_cnt;
145
146 /* Failover/synch */
147 u16 drop_point;
148 struct sk_buff *failover_reasm_skb;
149
150 /* Max packet negotiation */
151 u16 mtu;
152 u16 advertised_mtu;
153
154 /* Sending */
155 struct sk_buff_head transmq;
156 struct sk_buff_head backlogq;
157 struct {
158 u16 len;
159 u16 limit;
160 } backlog[5];
161 u16 snd_nxt;
162 u16 last_retransm;
163 u16 window;
164 u32 stale_count;
165
166 /* Reception */
167 u16 rcv_nxt;
168 u32 rcv_unacked;
169 struct sk_buff_head deferdq;
170 struct sk_buff_head *inputq;
171 struct sk_buff_head *namedq;
172
173 /* Congestion handling */
174 struct sk_buff_head wakeupq;
175
176 /* Fragmentation/reassembly */
177 struct sk_buff *reasm_buf;
178
179 /* Broadcast */
180 u16 ackers;
181 u16 acked;
182 struct tipc_link *bc_rcvlink;
183 struct tipc_link *bc_sndlink;
184 int nack_state;
185 bool bc_peer_is_up;
186
187 /* Statistics */
188 struct tipc_stats stats;
189 };
190
191 /*
192 * Error message prefixes
193 */
194 static const char *link_co_err = "Link tunneling error, ";
195 static const char *link_rst_msg = "Resetting link ";
196
197 /* Send states for broadcast NACKs
198 */
199 enum {
200 BC_NACK_SND_CONDITIONAL,
201 BC_NACK_SND_UNCONDITIONAL,
202 BC_NACK_SND_SUPPRESS,
203 };
204
205 /*
206 * Interval between NACKs when packets arrive out of order
207 */
208 #define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)
209
210 /* Wildcard value for link session numbers. When it is known that
211 * peer endpoint is down, any session number must be accepted.
212 */
213 #define ANY_SESSION 0x10000
214
215 /* Link FSM states:
216 */
217 enum {
218 LINK_ESTABLISHED = 0xe,
219 LINK_ESTABLISHING = 0xe << 4,
220 LINK_RESET = 0x1 << 8,
221 LINK_RESETTING = 0x2 << 12,
222 LINK_PEER_RESET = 0xd << 16,
223 LINK_FAILINGOVER = 0xf << 20,
224 LINK_SYNCHING = 0xc << 24
225 };
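/* Editorial note (not part of the original file): each state value above
 * occupies its own nibble, so groups of states can be tested with a single
 * bitmask. For example, link_is_up() below is effectively:
 *
 *	l->state & (LINK_ESTABLISHED | LINK_SYNCHING)
 */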
226
227 /* Link FSM state checking routines
228 */
229 static int link_is_up(struct tipc_link *l)
230 {
231 return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
232 }
233
234 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
235 struct sk_buff_head *xmitq);
236 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
237 u16 rcvgap, int tolerance, int priority,
238 struct sk_buff_head *xmitq);
239 static void link_print(struct tipc_link *l, const char *str);
240 static void tipc_link_build_nack_msg(struct tipc_link *l,
241 struct sk_buff_head *xmitq);
242 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
243 struct sk_buff_head *xmitq);
244 static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
245
246 /*
247 * Simple non-static link routines (i.e. referenced outside this file)
248 */
249 bool tipc_link_is_up(struct tipc_link *l)
250 {
251 return link_is_up(l);
252 }
253
254 bool tipc_link_peer_is_down(struct tipc_link *l)
255 {
256 return l->state == LINK_PEER_RESET;
257 }
258
259 bool tipc_link_is_reset(struct tipc_link *l)
260 {
261 return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
262 }
263
264 bool tipc_link_is_establishing(struct tipc_link *l)
265 {
266 return l->state == LINK_ESTABLISHING;
267 }
268
269 bool tipc_link_is_synching(struct tipc_link *l)
270 {
271 return l->state == LINK_SYNCHING;
272 }
273
274 bool tipc_link_is_failingover(struct tipc_link *l)
275 {
276 return l->state == LINK_FAILINGOVER;
277 }
278
279 bool tipc_link_is_blocked(struct tipc_link *l)
280 {
281 return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
282 }
283
284 static bool link_is_bc_sndlink(struct tipc_link *l)
285 {
286 return !l->bc_sndlink;
287 }
288
289 static bool link_is_bc_rcvlink(struct tipc_link *l)
290 {
291 return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
292 }
293
294 int tipc_link_is_active(struct tipc_link *l)
295 {
296 return l->active;
297 }
298
299 void tipc_link_set_active(struct tipc_link *l, bool active)
300 {
301 l->active = active;
302 }
303
304 u32 tipc_link_id(struct tipc_link *l)
305 {
306 return l->peer_bearer_id << 16 | l->bearer_id;
307 }
308
309 int tipc_link_window(struct tipc_link *l)
310 {
311 return l->window;
312 }
313
314 int tipc_link_prio(struct tipc_link *l)
315 {
316 return l->priority;
317 }
318
319 unsigned long tipc_link_tolerance(struct tipc_link *l)
320 {
321 return l->tolerance;
322 }
323
324 struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
325 {
326 return l->inputq;
327 }
328
329 char tipc_link_plane(struct tipc_link *l)
330 {
331 return l->net_plane;
332 }
333
334 void tipc_link_add_bc_peer(struct tipc_link *snd_l,
335 struct tipc_link *uc_l,
336 struct sk_buff_head *xmitq)
337 {
338 struct tipc_link *rcv_l = uc_l->bc_rcvlink;
339
340 snd_l->ackers++;
341 rcv_l->acked = snd_l->snd_nxt - 1;
342 snd_l->state = LINK_ESTABLISHED;
343 tipc_link_build_bc_init_msg(uc_l, xmitq);
344 }
345
346 void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
347 struct tipc_link *rcv_l,
348 struct sk_buff_head *xmitq)
349 {
350 u16 ack = snd_l->snd_nxt - 1;
351
352 snd_l->ackers--;
353 rcv_l->bc_peer_is_up = true;
354 rcv_l->state = LINK_ESTABLISHED;
355 tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
356 tipc_link_reset(rcv_l);
357 rcv_l->state = LINK_RESET;
358 if (!snd_l->ackers) {
359 tipc_link_reset(snd_l);
360 snd_l->state = LINK_RESET;
361 __skb_queue_purge(xmitq);
362 }
363 }
364
365 int tipc_link_bc_peers(struct tipc_link *l)
366 {
367 return l->ackers;
368 }
369
370 void tipc_link_set_mtu(struct tipc_link *l, int mtu)
371 {
372 l->mtu = mtu;
373 }
374
375 int tipc_link_mtu(struct tipc_link *l)
376 {
377 return l->mtu;
378 }
379
380 u16 tipc_link_rcv_nxt(struct tipc_link *l)
381 {
382 return l->rcv_nxt;
383 }
384
385 u16 tipc_link_acked(struct tipc_link *l)
386 {
387 return l->acked;
388 }
389
390 char *tipc_link_name(struct tipc_link *l)
391 {
392 return l->name;
393 }
394
395 /**
396 * tipc_link_create - create a new link
397 * @net: pointer to network namespace
398 * @if_name: associated interface name
399 * @bearer_id: id (index) of associated bearer
400 * @tolerance: link tolerance to be used by link
401 * @net_plane: network plane (A,B,C...) this link belongs to
402 * @mtu: mtu to be advertised by link
403 * @priority: priority to be used by link
404 * @window: send window to be used by link
405 * @session: session to be used by link
406 * @ownnode: identity of own node
407 * @peer: node id of peer node
408 * @peer_caps: bitmap describing peer node capabilities
409 * @bc_sndlink: the namespace global link used for broadcast sending
410 * @bc_rcvlink: the peer specific link used for broadcast reception
411 * @inputq: queue to put messages ready for delivery
412 * @namedq: queue to put binding table update messages ready for delivery
413 * @link: return value, pointer to put the created link
414 *
415 * Returns true if link was created, otherwise false
416 */
417 bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
418 int tolerance, char net_plane, u32 mtu, int priority,
419 int window, u32 session, u32 ownnode, u32 peer,
420 u16 peer_caps,
421 struct tipc_link *bc_sndlink,
422 struct tipc_link *bc_rcvlink,
423 struct sk_buff_head *inputq,
424 struct sk_buff_head *namedq,
425 struct tipc_link **link)
426 {
427 struct tipc_link *l;
428
429 l = kzalloc(sizeof(*l), GFP_ATOMIC);
430 if (!l)
431 return false;
432 *link = l;
433 l->session = session;
434
435 /* Note: peer i/f name is completed by reset/activate message */
436 sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
437 tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
438 if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
439 strcpy(l->if_name, if_name);
440 l->addr = peer;
441 l->peer_caps = peer_caps;
442 l->net = net;
443 l->peer_session = ANY_SESSION;
444 l->bearer_id = bearer_id;
445 l->tolerance = tolerance;
446 l->net_plane = net_plane;
447 l->advertised_mtu = mtu;
448 l->mtu = mtu;
449 l->priority = priority;
450 tipc_link_set_queue_limits(l, window);
451 l->ackers = 1;
452 l->bc_sndlink = bc_sndlink;
453 l->bc_rcvlink = bc_rcvlink;
454 l->inputq = inputq;
455 l->namedq = namedq;
456 l->state = LINK_RESETTING;
457 __skb_queue_head_init(&l->transmq);
458 __skb_queue_head_init(&l->backlogq);
459 __skb_queue_head_init(&l->deferdq);
460 skb_queue_head_init(&l->wakeupq);
461 skb_queue_head_init(l->inputq);
462 return true;
463 }
464
465 /**
466 * tipc_link_bc_create - create new link to be used for broadcast
467 * @net: pointer to network namespace
468 * @mtu: mtu to be used
469 * @window: send window to be used
470 * @inputq: queue to put messages ready for delivery
471 * @namedq: queue to put binding table update messages ready for delivery
472 * @link: return value, pointer to put the created link
473 *
474 * Returns true if link was created, otherwise false
475 */
476 bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
477 int mtu, int window, u16 peer_caps,
478 struct sk_buff_head *inputq,
479 struct sk_buff_head *namedq,
480 struct tipc_link *bc_sndlink,
481 struct tipc_link **link)
482 {
483 struct tipc_link *l;
484
485 if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
486 0, ownnode, peer, peer_caps, bc_sndlink,
487 NULL, inputq, namedq, link))
488 return false;
489
490 l = *link;
491 strcpy(l->name, tipc_bclink_name);
492 tipc_link_reset(l);
493 l->state = LINK_RESET;
494 l->ackers = 0;
495 l->bc_rcvlink = l;
496
497 /* Broadcast send link is always up */
498 if (link_is_bc_sndlink(l))
499 l->state = LINK_ESTABLISHED;
500
501 return true;
502 }
503
504 /**
505 * tipc_link_fsm_evt - link finite state machine
506 * @l: pointer to link
507 * @evt: state machine event to be processed
508 */
509 int tipc_link_fsm_evt(struct tipc_link *l, int evt)
510 {
511 int rc = 0;
512
513 switch (l->state) {
514 case LINK_RESETTING:
515 switch (evt) {
516 case LINK_PEER_RESET_EVT:
517 l->state = LINK_PEER_RESET;
518 break;
519 case LINK_RESET_EVT:
520 l->state = LINK_RESET;
521 break;
522 case LINK_FAILURE_EVT:
523 case LINK_FAILOVER_BEGIN_EVT:
524 case LINK_ESTABLISH_EVT:
525 case LINK_FAILOVER_END_EVT:
526 case LINK_SYNCH_BEGIN_EVT:
527 case LINK_SYNCH_END_EVT:
528 default:
529 goto illegal_evt;
530 }
531 break;
532 case LINK_RESET:
533 switch (evt) {
534 case LINK_PEER_RESET_EVT:
535 l->state = LINK_ESTABLISHING;
536 break;
537 case LINK_FAILOVER_BEGIN_EVT:
538 l->state = LINK_FAILINGOVER;
539 case LINK_FAILURE_EVT:
540 case LINK_RESET_EVT:
541 case LINK_ESTABLISH_EVT:
542 case LINK_FAILOVER_END_EVT:
543 break;
544 case LINK_SYNCH_BEGIN_EVT:
545 case LINK_SYNCH_END_EVT:
546 default:
547 goto illegal_evt;
548 }
549 break;
550 case LINK_PEER_RESET:
551 switch (evt) {
552 case LINK_RESET_EVT:
553 l->state = LINK_ESTABLISHING;
554 break;
555 case LINK_PEER_RESET_EVT:
556 case LINK_ESTABLISH_EVT:
557 case LINK_FAILURE_EVT:
558 break;
559 case LINK_SYNCH_BEGIN_EVT:
560 case LINK_SYNCH_END_EVT:
561 case LINK_FAILOVER_BEGIN_EVT:
562 case LINK_FAILOVER_END_EVT:
563 default:
564 goto illegal_evt;
565 }
566 break;
567 case LINK_FAILINGOVER:
568 switch (evt) {
569 case LINK_FAILOVER_END_EVT:
570 l->state = LINK_RESET;
571 break;
572 case LINK_PEER_RESET_EVT:
573 case LINK_RESET_EVT:
574 case LINK_ESTABLISH_EVT:
575 case LINK_FAILURE_EVT:
576 break;
577 case LINK_FAILOVER_BEGIN_EVT:
578 case LINK_SYNCH_BEGIN_EVT:
579 case LINK_SYNCH_END_EVT:
580 default:
581 goto illegal_evt;
582 }
583 break;
584 case LINK_ESTABLISHING:
585 switch (evt) {
586 case LINK_ESTABLISH_EVT:
587 l->state = LINK_ESTABLISHED;
588 break;
589 case LINK_FAILOVER_BEGIN_EVT:
590 l->state = LINK_FAILINGOVER;
591 break;
592 case LINK_RESET_EVT:
593 l->state = LINK_RESET;
594 break;
595 case LINK_FAILURE_EVT:
596 case LINK_PEER_RESET_EVT:
597 case LINK_SYNCH_BEGIN_EVT:
598 case LINK_FAILOVER_END_EVT:
599 break;
600 case LINK_SYNCH_END_EVT:
601 default:
602 goto illegal_evt;
603 }
604 break;
605 case LINK_ESTABLISHED:
606 switch (evt) {
607 case LINK_PEER_RESET_EVT:
608 l->state = LINK_PEER_RESET;
609 rc |= TIPC_LINK_DOWN_EVT;
610 break;
611 case LINK_FAILURE_EVT:
612 l->state = LINK_RESETTING;
613 rc |= TIPC_LINK_DOWN_EVT;
614 break;
615 case LINK_RESET_EVT:
616 l->state = LINK_RESET;
617 break;
618 case LINK_ESTABLISH_EVT:
619 case LINK_SYNCH_END_EVT:
620 break;
621 case LINK_SYNCH_BEGIN_EVT:
622 l->state = LINK_SYNCHING;
623 break;
624 case LINK_FAILOVER_BEGIN_EVT:
625 case LINK_FAILOVER_END_EVT:
626 default:
627 goto illegal_evt;
628 }
629 break;
630 case LINK_SYNCHING:
631 switch (evt) {
632 case LINK_PEER_RESET_EVT:
633 l->state = LINK_PEER_RESET;
634 rc |= TIPC_LINK_DOWN_EVT;
635 break;
636 case LINK_FAILURE_EVT:
637 l->state = LINK_RESETTING;
638 rc |= TIPC_LINK_DOWN_EVT;
639 break;
640 case LINK_RESET_EVT:
641 l->state = LINK_RESET;
642 break;
643 case LINK_ESTABLISH_EVT:
644 case LINK_SYNCH_BEGIN_EVT:
645 break;
646 case LINK_SYNCH_END_EVT:
647 l->state = LINK_ESTABLISHED;
648 break;
649 case LINK_FAILOVER_BEGIN_EVT:
650 case LINK_FAILOVER_END_EVT:
651 default:
652 goto illegal_evt;
653 }
654 break;
655 default:
656 pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
657 }
658 return rc;
659 illegal_evt:
660 pr_err("Illegal FSM event %x in state %x on link %s\n",
661 evt, l->state, l->name);
662 return rc;
663 }
664
665 /* link_profile_stats - update statistical profiling of traffic
666 */
667 static void link_profile_stats(struct tipc_link *l)
668 {
669 struct sk_buff *skb;
670 struct tipc_msg *msg;
671 int length;
672
673 /* Update counters used in statistical profiling of send traffic */
674 l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
675 l->stats.queue_sz_counts++;
676
677 skb = skb_peek(&l->transmq);
678 if (!skb)
679 return;
680 msg = buf_msg(skb);
681 length = msg_size(msg);
682
683 if (msg_user(msg) == MSG_FRAGMENTER) {
684 if (msg_type(msg) != FIRST_FRAGMENT)
685 return;
686 length = msg_size(msg_get_wrapped(msg));
687 }
688 l->stats.msg_lengths_total += length;
689 l->stats.msg_length_counts++;
690 if (length <= 64)
691 l->stats.msg_length_profile[0]++;
692 else if (length <= 256)
693 l->stats.msg_length_profile[1]++;
694 else if (length <= 1024)
695 l->stats.msg_length_profile[2]++;
696 else if (length <= 4096)
697 l->stats.msg_length_profile[3]++;
698 else if (length <= 16384)
699 l->stats.msg_length_profile[4]++;
700 else if (length <= 32768)
701 l->stats.msg_length_profile[5]++;
702 else
703 l->stats.msg_length_profile[6]++;
704 }
705
706 /* tipc_link_timeout - perform periodic task as instructed from node timeout
707 */
708 int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
709 {
710 int mtyp = 0;
711 int rc = 0;
712 bool state = false;
713 bool probe = false;
714 bool setup = false;
715 u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
716 u16 bc_acked = l->bc_rcvlink->acked;
717 struct tipc_mon_state *mstate = &l->mon_state;
718
719 switch (l->state) {
720 case LINK_ESTABLISHED:
721 case LINK_SYNCHING:
722 mtyp = STATE_MSG;
723 link_profile_stats(l);
724 tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
725 if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
726 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
727 state = bc_acked != bc_snt;
728 state |= l->bc_rcvlink->rcv_unacked;
729 state |= l->rcv_unacked;
730 state |= !skb_queue_empty(&l->transmq);
731 state |= !skb_queue_empty(&l->deferdq);
732 probe = mstate->probing;
733 probe |= l->silent_intv_cnt;
734 if (probe || mstate->monitoring)
735 l->silent_intv_cnt++;
736 break;
737 case LINK_RESET:
738 setup = l->rst_cnt++ <= 4;
739 setup |= !(l->rst_cnt % 16);
740 mtyp = RESET_MSG;
741 break;
742 case LINK_ESTABLISHING:
743 setup = true;
744 mtyp = ACTIVATE_MSG;
745 break;
746 case LINK_PEER_RESET:
747 case LINK_RESETTING:
748 case LINK_FAILINGOVER:
749 break;
750 default:
751 break;
752 }
753
754 if (state || probe || setup)
755 tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);
756
757 return rc;
758 }
759
760 /**
761 * link_schedule_user - schedule a message sender for wakeup after congestion
762 * @link: congested link
763 * @list: message whose transmission was attempted
764 * Create pseudo msg to send back to user when congestion abates
765 * Does not consume buffer list
766 */
767 static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
768 {
769 struct tipc_msg *msg = buf_msg(skb_peek(list));
770 int imp = msg_importance(msg);
771 u32 oport = msg_origport(msg);
772 u32 addr = tipc_own_addr(link->net);
773 struct sk_buff *skb;
774
775 /* This really cannot happen... */
776 if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
777 pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
778 return -ENOBUFS;
779 }
780 /* Non-blocking sender: */
781 if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
782 return -ELINKCONG;
783
784 /* Create and schedule wakeup pseudo message */
785 skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
786 addr, addr, oport, 0, 0);
787 if (!skb)
788 return -ENOBUFS;
789 TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
790 TIPC_SKB_CB(skb)->chain_imp = imp;
791 skb_queue_tail(&link->wakeupq, skb);
792 link->stats.link_congs++;
793 return -ELINKCONG;
794 }
795
796 /**
797 * link_prepare_wakeup - prepare users for wakeup after congestion
798 * @link: congested link
799 * Move a number of waiting users, as permitted by available space in
800 * the send queue, from link wait queue to node wait queue for wakeup
801 */
802 void link_prepare_wakeup(struct tipc_link *l)
803 {
804 int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
805 int imp, lim;
806 struct sk_buff *skb, *tmp;
807
808 skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
809 imp = TIPC_SKB_CB(skb)->chain_imp;
810 lim = l->window + l->backlog[imp].limit;
811 pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
812 if ((pnd[imp] + l->backlog[imp].len) >= lim)
813 break;
814 skb_unlink(skb, &l->wakeupq);
815 skb_queue_tail(l->inputq, skb);
816 }
817 }
818
819 void tipc_link_reset(struct tipc_link *l)
820 {
821 l->peer_session = ANY_SESSION;
822 l->session++;
823 l->mtu = l->advertised_mtu;
824 __skb_queue_purge(&l->transmq);
825 __skb_queue_purge(&l->deferdq);
826 skb_queue_splice_init(&l->wakeupq, l->inputq);
827 __skb_queue_purge(&l->backlogq);
828 l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
829 l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
830 l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
831 l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
832 l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
833 kfree_skb(l->reasm_buf);
834 kfree_skb(l->failover_reasm_skb);
835 l->reasm_buf = NULL;
836 l->failover_reasm_skb = NULL;
837 l->rcv_unacked = 0;
838 l->snd_nxt = 1;
839 l->rcv_nxt = 1;
840 l->acked = 0;
841 l->silent_intv_cnt = 0;
842 l->rst_cnt = 0;
843 l->stats.recv_info = 0;
844 l->stale_count = 0;
845 l->bc_peer_is_up = false;
846 memset(&l->mon_state, 0, sizeof(l->mon_state));
847 tipc_link_reset_stats(l);
848 }
849
850 /**
851 * tipc_link_xmit(): enqueue buffer list according to queue situation
852 * @link: link to use
853 * @list: chain of buffers containing message
854 * @xmitq: returned list of packets to be sent by caller
855 *
856 * Consumes the buffer chain, except when returning -ELINKCONG,
857 * since the caller then may want to make more send attempts.
858 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
859 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
860 */
861 int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
862 struct sk_buff_head *xmitq)
863 {
864 struct tipc_msg *hdr = buf_msg(skb_peek(list));
865 unsigned int maxwin = l->window;
866 unsigned int i, imp = msg_importance(hdr);
867 unsigned int mtu = l->mtu;
868 u16 ack = l->rcv_nxt - 1;
869 u16 seqno = l->snd_nxt;
870 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
871 struct sk_buff_head *transmq = &l->transmq;
872 struct sk_buff_head *backlogq = &l->backlogq;
873 struct sk_buff *skb, *_skb, *bskb;
874
875 /* Match msg importance against this and all higher backlog limits: */
876 for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
877 if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
878 return link_schedule_user(l, list);
879 }
880 if (unlikely(msg_size(hdr) > mtu)) {
881 skb_queue_purge(list);
882 return -EMSGSIZE;
883 }
884
885 /* Prepare each packet for sending, and add to relevant queue: */
886 while (skb_queue_len(list)) {
887 skb = skb_peek(list);
888 hdr = buf_msg(skb);
889 msg_set_seqno(hdr, seqno);
890 msg_set_ack(hdr, ack);
891 msg_set_bcast_ack(hdr, bc_ack);
892
893 if (likely(skb_queue_len(transmq) < maxwin)) {
894 _skb = skb_clone(skb, GFP_ATOMIC);
895 if (!_skb) {
896 skb_queue_purge(list);
897 return -ENOBUFS;
898 }
899 __skb_dequeue(list);
900 __skb_queue_tail(transmq, skb);
901 __skb_queue_tail(xmitq, _skb);
902 TIPC_SKB_CB(skb)->ackers = l->ackers;
903 l->rcv_unacked = 0;
904 seqno++;
905 continue;
906 }
907 if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
908 kfree_skb(__skb_dequeue(list));
909 l->stats.sent_bundled++;
910 continue;
911 }
912 if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
913 kfree_skb(__skb_dequeue(list));
914 __skb_queue_tail(backlogq, bskb);
915 l->backlog[msg_importance(buf_msg(bskb))].len++;
916 l->stats.sent_bundled++;
917 l->stats.sent_bundles++;
918 continue;
919 }
920 l->backlog[imp].len += skb_queue_len(list);
921 skb_queue_splice_tail_init(list, backlogq);
922 }
923 l->snd_nxt = seqno;
924 return 0;
925 }
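/* Editorial usage sketch (not part of the original file). Callers typically
 * collect the packets produced by tipc_link_xmit() in an on-stack queue and
 * hand that queue to the bearer afterwards, roughly:
 *
 *	struct sk_buff_head xmitq;
 *
 *	__skb_queue_head_init(&xmitq);
 *	rc = tipc_link_xmit(l, &list, &xmitq);
 *	tipc_bearer_xmit(net, bearer_id, &xmitq, maddr);
 *
 * Here tipc_bearer_xmit() is assumed to be the caller-side send helper; on
 * -ELINKCONG the original list is left intact so the caller can retry.
 */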
926
927 void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
928 {
929 struct sk_buff *skb, *_skb;
930 struct tipc_msg *hdr;
931 u16 seqno = l->snd_nxt;
932 u16 ack = l->rcv_nxt - 1;
933 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
934
935 while (skb_queue_len(&l->transmq) < l->window) {
936 skb = skb_peek(&l->backlogq);
937 if (!skb)
938 break;
939 _skb = skb_clone(skb, GFP_ATOMIC);
940 if (!_skb)
941 break;
942 __skb_dequeue(&l->backlogq);
943 hdr = buf_msg(skb);
944 l->backlog[msg_importance(hdr)].len--;
945 __skb_queue_tail(&l->transmq, skb);
946 __skb_queue_tail(xmitq, _skb);
947 TIPC_SKB_CB(skb)->ackers = l->ackers;
948 msg_set_seqno(hdr, seqno);
949 msg_set_ack(hdr, ack);
950 msg_set_bcast_ack(hdr, bc_ack);
951 l->rcv_unacked = 0;
952 seqno++;
953 }
954 l->snd_nxt = seqno;
955 }
956
957 static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
958 {
959 struct tipc_msg *hdr = buf_msg(skb);
960
961 pr_warn("Retransmission failure on link <%s>\n", l->name);
962 link_print(l, "Resetting link ");
963 pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
964 msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
965 pr_info("sqno %u, prev: %x, src: %x\n",
966 msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
967 }
968
969 int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
970 struct sk_buff_head *xmitq)
971 {
972 struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
973 struct tipc_msg *hdr;
974 u16 ack = l->rcv_nxt - 1;
975 u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
976
977 if (!skb)
978 return 0;
979
980 /* Detect repeated retransmit failures on same packet */
981 if (likely(l->last_retransm != buf_seqno(skb))) {
982 l->last_retransm = buf_seqno(skb);
983 l->stale_count = 1;
984 } else if (++l->stale_count > 100) {
985 link_retransmit_failure(l, skb);
986 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
987 }
988
989 /* Move forward to where retransmission should start */
990 skb_queue_walk(&l->transmq, skb) {
991 if (!less(buf_seqno(skb), from))
992 break;
993 }
994
995 skb_queue_walk_from(&l->transmq, skb) {
996 if (more(buf_seqno(skb), to))
997 break;
998 hdr = buf_msg(skb);
999 _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
1000 if (!_skb)
1001 return 0;
1002 hdr = buf_msg(_skb);
1003 msg_set_ack(hdr, ack);
1004 msg_set_bcast_ack(hdr, bc_ack);
1005 _skb->priority = TC_PRIO_CONTROL;
1006 __skb_queue_tail(xmitq, _skb);
1007 l->stats.retransmitted++;
1008 }
1009 return 0;
1010 }
1011
1012 /* tipc_data_input - deliver data and name distr msgs to upper layer
1013 *
1014 * Consumes buffer if message is of right type
1015 * Node lock must be held
1016 */
1017 static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
1018 struct sk_buff_head *inputq)
1019 {
1020 switch (msg_user(buf_msg(skb))) {
1021 case TIPC_LOW_IMPORTANCE:
1022 case TIPC_MEDIUM_IMPORTANCE:
1023 case TIPC_HIGH_IMPORTANCE:
1024 case TIPC_CRITICAL_IMPORTANCE:
1025 case CONN_MANAGER:
1026 skb_queue_tail(inputq, skb);
1027 return true;
1028 case NAME_DISTRIBUTOR:
1029 l->bc_rcvlink->state = LINK_ESTABLISHED;
1030 skb_queue_tail(l->namedq, skb);
1031 return true;
1032 case MSG_BUNDLER:
1033 case TUNNEL_PROTOCOL:
1034 case MSG_FRAGMENTER:
1035 case BCAST_PROTOCOL:
1036 return false;
1037 default:
1038 pr_warn("Dropping received illegal msg type\n");
1039 kfree_skb(skb);
1040 return false;
1041 };
1042 }
1043
1044 /* tipc_link_input - process packet that has passed link protocol check
1045 *
1046 * Consumes buffer
1047 */
1048 static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
1049 struct sk_buff_head *inputq)
1050 {
1051 struct tipc_msg *hdr = buf_msg(skb);
1052 struct sk_buff **reasm_skb = &l->reasm_buf;
1053 struct sk_buff *iskb;
1054 struct sk_buff_head tmpq;
1055 int usr = msg_user(hdr);
1056 int rc = 0;
1057 int pos = 0;
1058 int ipos = 0;
1059
1060 if (unlikely(usr == TUNNEL_PROTOCOL)) {
1061 if (msg_type(hdr) == SYNCH_MSG) {
1062 __skb_queue_purge(&l->deferdq);
1063 goto drop;
1064 }
1065 if (!tipc_msg_extract(skb, &iskb, &ipos))
1066 return rc;
1067 kfree_skb(skb);
1068 skb = iskb;
1069 hdr = buf_msg(skb);
1070 if (less(msg_seqno(hdr), l->drop_point))
1071 goto drop;
1072 if (tipc_data_input(l, skb, inputq))
1073 return rc;
1074 usr = msg_user(hdr);
1075 reasm_skb = &l->failover_reasm_skb;
1076 }
1077
1078 if (usr == MSG_BUNDLER) {
1079 skb_queue_head_init(&tmpq);
1080 l->stats.recv_bundles++;
1081 l->stats.recv_bundled += msg_msgcnt(hdr);
1082 while (tipc_msg_extract(skb, &iskb, &pos))
1083 tipc_data_input(l, iskb, &tmpq);
1084 tipc_skb_queue_splice_tail(&tmpq, inputq);
1085 return 0;
1086 } else if (usr == MSG_FRAGMENTER) {
1087 l->stats.recv_fragments++;
1088 if (tipc_buf_append(reasm_skb, &skb)) {
1089 l->stats.recv_fragmented++;
1090 tipc_data_input(l, skb, inputq);
1091 } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
1092 pr_warn_ratelimited("Unable to build fragment list\n");
1093 return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1094 }
1095 return 0;
1096 } else if (usr == BCAST_PROTOCOL) {
1097 tipc_bcast_lock(l->net);
1098 tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
1099 tipc_bcast_unlock(l->net);
1100 }
1101 drop:
1102 kfree_skb(skb);
1103 return 0;
1104 }
1105
1106 static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
1107 {
1108 bool released = false;
1109 struct sk_buff *skb, *tmp;
1110
1111 skb_queue_walk_safe(&l->transmq, skb, tmp) {
1112 if (more(buf_seqno(skb), acked))
1113 break;
1114 __skb_unlink(skb, &l->transmq);
1115 kfree_skb(skb);
1116 released = true;
1117 }
1118 return released;
1119 }
1120
1121 /* tipc_link_build_state_msg: prepare link state message for transmission
1122 *
1123 * Note that sending of broadcast ack is coordinated among nodes, to reduce
1124 * risk of ack storms towards the sender
1125 */
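/* Editorial note (not part of the original file): the broadcast-receiver
 * branch below only acks when the low four bits of (rcv_nxt ^ own address)
 * are all ones, i.e. each node acks roughly one packet in sixteen, at a
 * point in the sequence determined by its own address, staggering the acks.
 */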
1126 int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1127 {
1128 if (!l)
1129 return 0;
1130
1131 /* Broadcast ACK must be sent via a unicast link => defer to caller */
1132 if (link_is_bc_rcvlink(l)) {
1133 if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
1134 return 0;
1135 l->rcv_unacked = 0;
1136 return TIPC_LINK_SND_BC_ACK;
1137 }
1138
1139 /* Unicast ACK */
1140 l->rcv_unacked = 0;
1141 l->stats.sent_acks++;
1142 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1143 return 0;
1144 }
1145
1146 /* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
1147 */
1148 void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
1149 {
1150 int mtyp = RESET_MSG;
1151 struct sk_buff *skb;
1152
1153 if (l->state == LINK_ESTABLISHING)
1154 mtyp = ACTIVATE_MSG;
1155
1156 tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);
1157
1158 /* Inform peer that this endpoint is going down if applicable */
1159 skb = skb_peek_tail(xmitq);
1160 if (skb && (l->state == LINK_RESET))
1161 msg_set_peer_stopping(buf_msg(skb), 1);
1162 }
1163
1164 /* tipc_link_build_nack_msg: prepare link nack message for transmission
1165 */
1166 static void tipc_link_build_nack_msg(struct tipc_link *l,
1167 struct sk_buff_head *xmitq)
1168 {
1169 u32 def_cnt = ++l->stats.deferred_recv;
1170
1171 if (link_is_bc_rcvlink(l))
1172 return;
1173
1174 if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
1175 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
1176 }
1177
1178 /* tipc_link_rcv - process TIPC packets/messages arriving from off-node
1179 * @l: the link that should handle the message
1180 * @skb: TIPC packet
1181 * @xmitq: queue to place packets to be sent after this call
1182 */
1183 int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
1184 struct sk_buff_head *xmitq)
1185 {
1186 struct sk_buff_head *defq = &l->deferdq;
1187 struct tipc_msg *hdr;
1188 u16 seqno, rcv_nxt, win_lim;
1189 int rc = 0;
1190
1191 do {
1192 hdr = buf_msg(skb);
1193 seqno = msg_seqno(hdr);
1194 rcv_nxt = l->rcv_nxt;
1195 win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;
1196
1197 /* Verify and update link state */
1198 if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
1199 return tipc_link_proto_rcv(l, skb, xmitq);
1200
1201 if (unlikely(!link_is_up(l))) {
1202 if (l->state == LINK_ESTABLISHING)
1203 rc = TIPC_LINK_UP_EVT;
1204 goto drop;
1205 }
1206
1207 /* Don't send probe at next timeout expiration */
1208 l->silent_intv_cnt = 0;
1209
1210 /* Drop if outside receive window */
1211 if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
1212 l->stats.duplicates++;
1213 goto drop;
1214 }
1215
1216 /* Forward queues and wake up waiting users */
1217 if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
1218 tipc_link_advance_backlog(l, xmitq);
1219 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1220 link_prepare_wakeup(l);
1221 }
1222
1223 /* Defer delivery if sequence gap */
1224 if (unlikely(seqno != rcv_nxt)) {
1225 __tipc_skb_queue_sorted(defq, seqno, skb);
1226 tipc_link_build_nack_msg(l, xmitq);
1227 break;
1228 }
1229
1230 /* Deliver packet */
1231 l->rcv_nxt++;
1232 l->stats.recv_info++;
1233 if (!tipc_data_input(l, skb, l->inputq))
1234 rc |= tipc_link_input(l, skb, l->inputq);
1235 if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
1236 rc |= tipc_link_build_state_msg(l, xmitq);
1237 if (unlikely(rc & ~TIPC_LINK_SND_BC_ACK))
1238 break;
1239 } while ((skb = __skb_dequeue(defq)));
1240
1241 return rc;
1242 drop:
1243 kfree_skb(skb);
1244 return rc;
1245 }
1246
1247 static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
1248 u16 rcvgap, int tolerance, int priority,
1249 struct sk_buff_head *xmitq)
1250 {
1251 struct sk_buff *skb;
1252 struct tipc_msg *hdr;
1253 struct sk_buff_head *dfq = &l->deferdq;
1254 bool node_up = link_is_up(l->bc_rcvlink);
1255 struct tipc_mon_state *mstate = &l->mon_state;
1256 int dlen = 0;
1257 void *data;
1258
1259 /* Don't send protocol message during reset or link failover */
1260 if (tipc_link_is_blocked(l))
1261 return;
1262
1263 if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
1264 return;
1265
1266 if (!skb_queue_empty(dfq))
1267 rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;
1268
1269 skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
1270 tipc_max_domain_size, l->addr,
1271 tipc_own_addr(l->net), 0, 0, 0);
1272 if (!skb)
1273 return;
1274
1275 hdr = buf_msg(skb);
1276 data = msg_data(hdr);
1277 msg_set_session(hdr, l->session);
1278 msg_set_bearer_id(hdr, l->bearer_id);
1279 msg_set_net_plane(hdr, l->net_plane);
1280 msg_set_next_sent(hdr, l->snd_nxt);
1281 msg_set_ack(hdr, l->rcv_nxt - 1);
1282 msg_set_bcast_ack(hdr, l->bc_rcvlink->rcv_nxt - 1);
1283 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1284 msg_set_link_tolerance(hdr, tolerance);
1285 msg_set_linkprio(hdr, priority);
1286 msg_set_redundant_link(hdr, node_up);
1287 msg_set_seq_gap(hdr, 0);
1288 msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);
1289
1290 if (mtyp == STATE_MSG) {
1291 msg_set_seq_gap(hdr, rcvgap);
1292 msg_set_probe(hdr, probe);
1293 tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
1294 msg_set_size(hdr, INT_H_SIZE + dlen);
1295 skb_trim(skb, INT_H_SIZE + dlen);
1296 l->stats.sent_states++;
1297 l->rcv_unacked = 0;
1298 } else {
1299 /* RESET_MSG or ACTIVATE_MSG */
1300 msg_set_max_pkt(hdr, l->advertised_mtu);
1301 strcpy(data, l->if_name);
1302 msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
1303 skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
1304 }
1305 if (probe)
1306 l->stats.sent_probes++;
1307 if (rcvgap)
1308 l->stats.sent_nacks++;
1309 skb->priority = TC_PRIO_CONTROL;
1310 __skb_queue_tail(xmitq, skb);
1311 }
1312
1313 /* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
1314 * with contents of the link's transmit and backlog queues.
1315 */
1316 void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
1317 int mtyp, struct sk_buff_head *xmitq)
1318 {
1319 struct sk_buff *skb, *tnlskb;
1320 struct tipc_msg *hdr, tnlhdr;
1321 struct sk_buff_head *queue = &l->transmq;
1322 struct sk_buff_head tmpxq, tnlq;
1323 u16 pktlen, pktcnt, seqno = l->snd_nxt;
1324
1325 if (!tnl)
1326 return;
1327
1328 skb_queue_head_init(&tnlq);
1329 skb_queue_head_init(&tmpxq);
1330
1331 /* At least one packet required for safe algorithm => add dummy */
1332 skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
1333 BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
1334 0, 0, TIPC_ERR_NO_PORT);
1335 if (!skb) {
1336 pr_warn("%sunable to create tunnel packet\n", link_co_err);
1337 return;
1338 }
1339 skb_queue_tail(&tnlq, skb);
1340 tipc_link_xmit(l, &tnlq, &tmpxq);
1341 __skb_queue_purge(&tmpxq);
1342
1343 /* Initialize reusable tunnel packet header */
1344 tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
1345 mtyp, INT_H_SIZE, l->addr);
1346 pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
1347 msg_set_msgcnt(&tnlhdr, pktcnt);
1348 msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
1349 tnl:
1350 /* Wrap each packet into a tunnel packet */
1351 skb_queue_walk(queue, skb) {
1352 hdr = buf_msg(skb);
1353 if (queue == &l->backlogq)
1354 msg_set_seqno(hdr, seqno++);
1355 pktlen = msg_size(hdr);
1356 msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
1357 tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
1358 if (!tnlskb) {
1359 pr_warn("%sunable to send packet\n", link_co_err);
1360 return;
1361 }
1362 skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
1363 skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
1364 __skb_queue_tail(&tnlq, tnlskb);
1365 }
1366 if (queue != &l->backlogq) {
1367 queue = &l->backlogq;
1368 goto tnl;
1369 }
1370
1371 tipc_link_xmit(tnl, &tnlq, xmitq);
1372
1373 if (mtyp == FAILOVER_MSG) {
1374 tnl->drop_point = l->rcv_nxt;
1375 tnl->failover_reasm_skb = l->reasm_buf;
1376 l->reasm_buf = NULL;
1377 }
1378 }
1379
1380 /* tipc_link_proto_rcv(): receive link level protocol message :
1381 * Note that network plane id propagates through the network, and may
1382 * change at any time. The node with lowest numerical id determines
1383 * network plane
1384 */
1385 static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
1386 struct sk_buff_head *xmitq)
1387 {
1388 struct tipc_msg *hdr = buf_msg(skb);
1389 u16 rcvgap = 0;
1390 u16 ack = msg_ack(hdr);
1391 u16 gap = msg_seq_gap(hdr);
1392 u16 peers_snd_nxt = msg_next_sent(hdr);
1393 u16 peers_tol = msg_link_tolerance(hdr);
1394 u16 peers_prio = msg_linkprio(hdr);
1395 u16 rcv_nxt = l->rcv_nxt;
1396 u16 dlen = msg_data_sz(hdr);
1397 int mtyp = msg_type(hdr);
1398 void *data;
1399 char *if_name;
1400 int rc = 0;
1401
1402 if (tipc_link_is_blocked(l) || !xmitq)
1403 goto exit;
1404
1405 if (tipc_own_addr(l->net) > msg_prevnode(hdr))
1406 l->net_plane = msg_net_plane(hdr);
1407
1408 skb_linearize(skb);
1409 hdr = buf_msg(skb);
1410 data = msg_data(hdr);
1411
1412 switch (mtyp) {
1413 case RESET_MSG:
1414
1415 /* Ignore duplicate RESET with old session number */
1416 if ((less_eq(msg_session(hdr), l->peer_session)) &&
1417 (l->peer_session != ANY_SESSION))
1418 break;
1419 /* fall thru' */
1420
1421 case ACTIVATE_MSG:
1422
1423 /* Complete own link name with peer's interface name */
1424 if_name = strrchr(l->name, ':') + 1;
1425 if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
1426 break;
1427 if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
1428 break;
1429 strncpy(if_name, data, TIPC_MAX_IF_NAME);
1430
1431 /* Update own tolerance if peer indicates a non-zero value */
1432 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1433 l->tolerance = peers_tol;
1434
1435 /* Update own priority if peer's priority is higher */
1436 if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
1437 l->priority = peers_prio;
1438
1439 /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
1440 if (msg_peer_stopping(hdr))
1441 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1442 else if ((mtyp == RESET_MSG) || !link_is_up(l))
1443 rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);
1444
1445 /* ACTIVATE_MSG takes up link if it was already locally reset */
1446 if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
1447 rc = TIPC_LINK_UP_EVT;
1448
1449 l->peer_session = msg_session(hdr);
1450 l->peer_bearer_id = msg_bearer_id(hdr);
1451 if (l->mtu > msg_max_pkt(hdr))
1452 l->mtu = msg_max_pkt(hdr);
1453 break;
1454
1455 case STATE_MSG:
1456
1457 /* Update own tolerance if peer indicates a non-zero value */
1458 if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
1459 l->tolerance = peers_tol;
1460
1461 if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
1462 TIPC_MAX_LINK_PRI)) {
1463 l->priority = peers_prio;
1464 rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
1465 }
1466
1467 l->silent_intv_cnt = 0;
1468 l->stats.recv_states++;
1469 if (msg_probe(hdr))
1470 l->stats.recv_probes++;
1471
1472 if (!link_is_up(l)) {
1473 if (l->state == LINK_ESTABLISHING)
1474 rc = TIPC_LINK_UP_EVT;
1475 break;
1476 }
1477 tipc_mon_rcv(l->net, data, dlen, l->addr,
1478 &l->mon_state, l->bearer_id);
1479
1480 /* Send NACK if peer has sent pkts we haven't received yet */
1481 if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
1482 rcvgap = peers_snd_nxt - l->rcv_nxt;
1483 if (rcvgap || (msg_probe(hdr)))
1484 tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
1485 0, 0, xmitq);
1486 tipc_link_release_pkts(l, ack);
1487
1488 /* If NACK, retransmit will now start at right position */
1489 if (gap) {
1490 rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
1491 l->stats.recv_nacks++;
1492 }
1493
1494 tipc_link_advance_backlog(l, xmitq);
1495 if (unlikely(!skb_queue_empty(&l->wakeupq)))
1496 link_prepare_wakeup(l);
1497 }
1498 exit:
1499 kfree_skb(skb);
1500 return rc;
1501 }
1502
1503 /* tipc_link_build_bc_proto_msg() - create broadcast protocol message
1504 */
1505 static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
1506 u16 peers_snd_nxt,
1507 struct sk_buff_head *xmitq)
1508 {
1509 struct sk_buff *skb;
1510 struct tipc_msg *hdr;
1511 struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
1512 u16 ack = l->rcv_nxt - 1;
1513 u16 gap_to = peers_snd_nxt - 1;
1514
1515 skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
1516 0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
1517 if (!skb)
1518 return false;
1519 hdr = buf_msg(skb);
1520 msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
1521 msg_set_bcast_ack(hdr, ack);
1522 msg_set_bcgap_after(hdr, ack);
1523 if (dfrd_skb)
1524 gap_to = buf_seqno(dfrd_skb) - 1;
1525 msg_set_bcgap_to(hdr, gap_to);
1526 msg_set_non_seq(hdr, bcast);
1527 __skb_queue_tail(xmitq, skb);
1528 return true;
1529 }
1530
1531 /* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
1532 *
1533 * Give a newly added peer node the sequence number where it should
1534 * start receiving and acking broadcast packets.
1535 */
1536 static void tipc_link_build_bc_init_msg(struct tipc_link *l,
1537 struct sk_buff_head *xmitq)
1538 {
1539 struct sk_buff_head list;
1540
1541 __skb_queue_head_init(&list);
1542 if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
1543 return;
1544 tipc_link_xmit(l, &list, xmitq);
1545 }
1546
1547 /* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
1548 */
1549 void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
1550 {
1551 int mtyp = msg_type(hdr);
1552 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1553
1554 if (link_is_up(l))
1555 return;
1556
1557 if (msg_user(hdr) == BCAST_PROTOCOL) {
1558 l->rcv_nxt = peers_snd_nxt;
1559 l->state = LINK_ESTABLISHED;
1560 return;
1561 }
1562
1563 if (l->peer_caps & TIPC_BCAST_SYNCH)
1564 return;
1565
1566 if (msg_peer_node_is_up(hdr))
1567 return;
1568
1569 /* Compatibility: accept older, less safe initial synch data */
1570 if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
1571 l->rcv_nxt = peers_snd_nxt;
1572 }
1573
1574 /* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
1575 */
1576 void tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
1577 struct sk_buff_head *xmitq)
1578 {
1579 u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
1580
1581 if (!link_is_up(l))
1582 return;
1583
1584 if (!msg_peer_node_is_up(hdr))
1585 return;
1586
1587 /* Open when peer acknowledges our bcast init msg (pkt #1) */
1588 if (msg_ack(hdr))
1589 l->bc_peer_is_up = true;
1590
1591 if (!l->bc_peer_is_up)
1592 return;
1593
1594 /* Ignore if peers_snd_nxt goes beyond receive window */
1595 if (more(peers_snd_nxt, l->rcv_nxt + l->window))
1596 return;
1597
1598 if (!more(peers_snd_nxt, l->rcv_nxt)) {
1599 l->nack_state = BC_NACK_SND_CONDITIONAL;
1600 return;
1601 }
1602
1603 /* Don't NACK if one was recently sent or peeked */
1604 if (l->nack_state == BC_NACK_SND_SUPPRESS) {
1605 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1606 return;
1607 }
1608
1609 /* Conditionally delay NACK sending until next synch rcv */
1610 if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
1611 l->nack_state = BC_NACK_SND_UNCONDITIONAL;
1612 if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
1613 return;
1614 }
1615
1616 /* Send NACK now but suppress next one */
1617 tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
1618 l->nack_state = BC_NACK_SND_SUPPRESS;
1619 }
1620
1621 void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
1622 struct sk_buff_head *xmitq)
1623 {
1624 struct sk_buff *skb, *tmp;
1625 struct tipc_link *snd_l = l->bc_sndlink;
1626
1627 if (!link_is_up(l) || !l->bc_peer_is_up)
1628 return;
1629
1630 if (!more(acked, l->acked))
1631 return;
1632
1633 /* Skip over packets peer has already acked */
1634 skb_queue_walk(&snd_l->transmq, skb) {
1635 if (more(buf_seqno(skb), l->acked))
1636 break;
1637 }
1638
1639 /* Update/release the packets peer is acking now */
1640 skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
1641 if (more(buf_seqno(skb), acked))
1642 break;
1643 if (!--TIPC_SKB_CB(skb)->ackers) {
1644 __skb_unlink(skb, &snd_l->transmq);
1645 kfree_skb(skb);
1646 }
1647 }
1648 l->acked = acked;
1649 tipc_link_advance_backlog(snd_l, xmitq);
1650 if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
1651 link_prepare_wakeup(snd_l);
1652 }
1653
1654 /* tipc_link_bc_nack_rcv(): receive broadcast nack message
1655 */
1656 int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
1657 struct sk_buff_head *xmitq)
1658 {
1659 struct tipc_msg *hdr = buf_msg(skb);
1660 u32 dnode = msg_destnode(hdr);
1661 int mtyp = msg_type(hdr);
1662 u16 acked = msg_bcast_ack(hdr);
1663 u16 from = acked + 1;
1664 u16 to = msg_bcgap_to(hdr);
1665 u16 peers_snd_nxt = to + 1;
1666 int rc = 0;
1667
1668 kfree_skb(skb);
1669
1670 if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
1671 return 0;
1672
1673 if (mtyp != STATE_MSG)
1674 return 0;
1675
1676 if (dnode == tipc_own_addr(l->net)) {
1677 tipc_link_bc_ack_rcv(l, acked, xmitq);
1678 rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
1679 l->stats.recv_nacks++;
1680 return rc;
1681 }
1682
1683 /* Msg for other node => suppress own NACK at next sync if applicable */
1684 if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
1685 l->nack_state = BC_NACK_SND_SUPPRESS;
1686
1687 return 0;
1688 }
1689
1690 void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
1691 {
1692 int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);
1693
1694 l->window = win;
1695 l->backlog[TIPC_LOW_IMPORTANCE].limit = win / 2;
1696 l->backlog[TIPC_MEDIUM_IMPORTANCE].limit = win;
1697 l->backlog[TIPC_HIGH_IMPORTANCE].limit = win / 2 * 3;
1698 l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = win * 2;
1699 l->backlog[TIPC_SYSTEM_IMPORTANCE].limit = max_bulk;
1700 }
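/* Editorial example (not part of the original file): with win = 50 the
 * backlog limits become 25 (LOW), 50 (MEDIUM), 75 (HIGH) and 100 (CRITICAL)
 * packets, while the SYSTEM limit is the number of packets needed to carry
 * a maximal bulk name-table distribution (TIPC_MAX_PUBLICATIONS items, with
 * mtu / ITEM_SIZE items per packet).
 */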
1701
1702 /**
1703 * tipc_link_reset_stats - reset link statistics
1704 * @l: pointer to link
1705 */
1706 void tipc_link_reset_stats(struct tipc_link *l)
1707 {
1708 memset(&l->stats, 0, sizeof(l->stats));
1709 if (!link_is_bc_sndlink(l)) {
1710 l->stats.sent_info = l->snd_nxt;
1711 l->stats.recv_info = l->rcv_nxt;
1712 }
1713 }
1714
1715 static void link_print(struct tipc_link *l, const char *str)
1716 {
1717 struct sk_buff *hskb = skb_peek(&l->transmq);
1718 u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
1719 u16 tail = l->snd_nxt - 1;
1720
1721 pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
1722 pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
1723 skb_queue_len(&l->transmq), head, tail,
1724 skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
1725 }
1726
1727 /* Parse and validate nested (link) properties valid for media, bearer and link
1728 */
1729 int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
1730 {
1731 int err;
1732
1733 err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
1734 tipc_nl_prop_policy);
1735 if (err)
1736 return err;
1737
1738 if (props[TIPC_NLA_PROP_PRIO]) {
1739 u32 prio;
1740
1741 prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
1742 if (prio > TIPC_MAX_LINK_PRI)
1743 return -EINVAL;
1744 }
1745
1746 if (props[TIPC_NLA_PROP_TOL]) {
1747 u32 tol;
1748
1749 tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
1750 if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
1751 return -EINVAL;
1752 }
1753
1754 if (props[TIPC_NLA_PROP_WIN]) {
1755 u32 win;
1756
1757 win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
1758 if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
1759 return -EINVAL;
1760 }
1761
1762 return 0;
1763 }
1764
1765 static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
1766 {
1767 int i;
1768 struct nlattr *stats;
1769
1770 struct nla_map {
1771 u32 key;
1772 u32 val;
1773 };
1774
1775 struct nla_map map[] = {
1776 {TIPC_NLA_STATS_RX_INFO, s->recv_info},
1777 {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
1778 {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
1779 {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
1780 {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
1781 {TIPC_NLA_STATS_TX_INFO, s->sent_info},
1782 {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
1783 {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
1784 {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
1785 {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
1786 {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
1787 s->msg_length_counts : 1},
1788 {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
1789 {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
1790 {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
1791 {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
1792 {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
1793 {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
1794 {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
1795 {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
1796 {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
1797 {TIPC_NLA_STATS_RX_STATES, s->recv_states},
1798 {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
1799 {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
1800 {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
1801 {TIPC_NLA_STATS_TX_STATES, s->sent_states},
1802 {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
1803 {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
1804 {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
1805 {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
1806 {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
1807 {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
1808 {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
1809 {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
1810 (s->accu_queue_sz / s->queue_sz_counts) : 0}
1811 };
1812
1813 stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1814 if (!stats)
1815 return -EMSGSIZE;
1816
1817 for (i = 0; i < ARRAY_SIZE(map); i++)
1818 if (nla_put_u32(skb, map[i].key, map[i].val))
1819 goto msg_full;
1820
1821 nla_nest_end(skb, stats);
1822
1823 return 0;
1824 msg_full:
1825 nla_nest_cancel(skb, stats);
1826
1827 return -EMSGSIZE;
1828 }
1829
1830 /* Caller should hold appropriate locks to protect the link */
1831 int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
1832 struct tipc_link *link, int nlflags)
1833 {
1834 int err;
1835 void *hdr;
1836 struct nlattr *attrs;
1837 struct nlattr *prop;
1838 struct tipc_net *tn = net_generic(net, tipc_net_id);
1839
1840 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1841 nlflags, TIPC_NL_LINK_GET);
1842 if (!hdr)
1843 return -EMSGSIZE;
1844
1845 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1846 if (!attrs)
1847 goto msg_full;
1848
1849 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
1850 goto attr_msg_full;
1851 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
1852 tipc_cluster_mask(tn->own_addr)))
1853 goto attr_msg_full;
1854 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
1855 goto attr_msg_full;
1856 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
1857 goto attr_msg_full;
1858 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
1859 goto attr_msg_full;
1860
1861 if (tipc_link_is_up(link))
1862 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1863 goto attr_msg_full;
1864 if (link->active)
1865 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
1866 goto attr_msg_full;
1867
1868 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1869 if (!prop)
1870 goto attr_msg_full;
1871 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1872 goto prop_msg_full;
1873 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
1874 goto prop_msg_full;
1875 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
1876 link->window))
1877 goto prop_msg_full;
1878 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
1879 goto prop_msg_full;
1880 nla_nest_end(msg->skb, prop);
1881
1882 err = __tipc_nl_add_stats(msg->skb, &link->stats);
1883 if (err)
1884 goto attr_msg_full;
1885
1886 nla_nest_end(msg->skb, attrs);
1887 genlmsg_end(msg->skb, hdr);
1888
1889 return 0;
1890
1891 prop_msg_full:
1892 nla_nest_cancel(msg->skb, prop);
1893 attr_msg_full:
1894 nla_nest_cancel(msg->skb, attrs);
1895 msg_full:
1896 genlmsg_cancel(msg->skb, hdr);
1897
1898 return -EMSGSIZE;
1899 }
1900
1901 static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
1902 struct tipc_stats *stats)
1903 {
1904 int i;
1905 struct nlattr *nest;
1906
1907 struct nla_map {
1908 __u32 key;
1909 __u32 val;
1910 };
1911
1912 struct nla_map map[] = {
1913 {TIPC_NLA_STATS_RX_INFO, stats->recv_info},
1914 {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
1915 {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
1916 {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
1917 {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
1918 {TIPC_NLA_STATS_TX_INFO, stats->sent_info},
1919 {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
1920 {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
1921 {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
1922 {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
1923 {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
1924 {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
1925 {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
1926 {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
1927 {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
1928 {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
1929 {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
1930 {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
1931 {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
1932 (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
1933 };
1934
1935 nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
1936 if (!nest)
1937 return -EMSGSIZE;
1938
1939 for (i = 0; i < ARRAY_SIZE(map); i++)
1940 if (nla_put_u32(skb, map[i].key, map[i].val))
1941 goto msg_full;
1942
1943 nla_nest_end(skb, nest);
1944
1945 return 0;
1946 msg_full:
1947 nla_nest_cancel(skb, nest);
1948
1949 return -EMSGSIZE;
1950 }
1951
1952 int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
1953 {
1954 int err;
1955 void *hdr;
1956 struct nlattr *attrs;
1957 struct nlattr *prop;
1958 struct tipc_net *tn = net_generic(net, tipc_net_id);
1959 struct tipc_link *bcl = tn->bcl;
1960
1961 if (!bcl)
1962 return 0;
1963
1964 tipc_bcast_lock(net);
1965
1966 hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
1967 NLM_F_MULTI, TIPC_NL_LINK_GET);
1968 if (!hdr) {
1969 tipc_bcast_unlock(net);
1970 return -EMSGSIZE;
1971 }
1972
1973 attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
1974 if (!attrs)
1975 goto msg_full;
1976
1977 /* The broadcast link is always up */
1978 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
1979 goto attr_msg_full;
1980
1981 if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
1982 goto attr_msg_full;
1983 if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
1984 goto attr_msg_full;
1985 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
1986 goto attr_msg_full;
1987 if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
1988 goto attr_msg_full;
1989
1990 prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
1991 if (!prop)
1992 goto attr_msg_full;
1993 if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
1994 goto prop_msg_full;
1995 nla_nest_end(msg->skb, prop);
1996
1997 err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
1998 if (err)
1999 goto attr_msg_full;
2000
2001 tipc_bcast_unlock(net);
2002 nla_nest_end(msg->skb, attrs);
2003 genlmsg_end(msg->skb, hdr);
2004
2005 return 0;
2006
2007 prop_msg_full:
2008 nla_nest_cancel(msg->skb, prop);
2009 attr_msg_full:
2010 nla_nest_cancel(msg->skb, attrs);
2011 msg_full:
2012 tipc_bcast_unlock(net);
2013 genlmsg_cancel(msg->skb, hdr);
2014
2015 return -EMSGSIZE;
2016 }
2017
2018 void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
2019 struct sk_buff_head *xmitq)
2020 {
2021 l->tolerance = tol;
2022 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
2023 }
2024
2025 void tipc_link_set_prio(struct tipc_link *l, u32 prio,
2026 struct sk_buff_head *xmitq)
2027 {
2028 l->priority = prio;
2029 tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
2030 }
2031
2032 void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
2033 {
2034 l->abort_limit = limit;
2035 }