/*
 * net/tipc/link.c: TIPC link code
 *
 * Copyright (c) 1996-2007, 2012-2016, Ericsson AB
 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "name_distr.h"
#include <linux/pkt_sched.h>
    u32 sent_info;              /* used in counting # sent packets */
    u32 recv_info;              /* used in counting # recv'd packets */
    u32 link_congs;             /* # port sends blocked by congestion */
    u32 max_queue_sz;           /* send queue size high water mark */
    u32 accu_queue_sz;          /* used for send queue size profiling */
    u32 queue_sz_counts;        /* used for send queue size profiling */
    u32 msg_length_counts;      /* used for message length profiling */
    u32 msg_lengths_total;      /* used for message length profiling */
    u32 msg_length_profile[7];  /* used for msg. length profiling */
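
/* Illustrative note (not an upstream comment): accu_queue_sz and
 * queue_sz_counts are only meaningful as a pair; the average send queue
 * depth reported over netlink is accu_queue_sz / queue_sz_counts
 * (see __tipc_nl_add_stats() further down in this file).
 */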
/**
 * struct tipc_link - TIPC link data structure
 * @addr: network address of link's peer node
 * @name: link name character string
 * @media_addr: media address to use when sending messages over link
 * @net: pointer to namespace struct
 * @refcnt: reference counter for permanent references (owner node & timer)
 * @peer_session: link session # being used by peer end of link
 * @peer_bearer_id: bearer id used by link's peer endpoint
 * @bearer_id: local bearer id used by link
 * @tolerance: minimum link continuity loss needed to reset link [in ms]
 * @abort_limit: # of unacknowledged continuity probes needed to reset link
 * @state: current state of link FSM
 * @peer_caps: bitmap describing capabilities of peer node
 * @silent_intv_cnt: # of timer intervals without any reception from peer
 * @proto_msg: template for control messages generated by link
 * @pmsg: convenience pointer to "proto_msg" field
 * @priority: current link priority
 * @net_plane: current link network plane ('A' through 'H')
 * @mon_state: cookie with information needed by link monitor
 * @backlog_limit: backlog queue congestion thresholds (indexed by importance)
 * @exp_msg_count: # of tunnelled messages expected during link changeover
 * @reset_rcv_checkpt: seq # of last acknowledged message at time of link reset
 * @mtu: current maximum packet size for this link
 * @advertised_mtu: advertised own mtu when link is being established
 * @transmitq: queue for sent, non-acked messages
 * @backlogq: queue for messages waiting to be sent
 * @snt_nxt: next sequence number to use for outbound messages
 * @last_retransmitted: sequence number of most recently retransmitted message
 * @stale_count: # of identical retransmit requests made by peer
 * @ackers: # of peers that need to ack each packet before it can be released
 * @acked: last packet acked by a certain peer. Used for broadcast.
 * @rcv_nxt: next sequence number to expect for inbound messages
 * @deferred_queue: deferred queue saving OOS b'cast messages received from node
 * @unacked_window: # of inbound messages rx'd without ack'ing back to peer
 * @inputq: buffer queue for messages to be delivered upwards
 * @namedq: buffer queue for name table messages to be delivered upwards
 * @next_out: ptr to first unsent outbound message in queue
 * @wakeupq: linked list of wakeup msgs waiting for link congestion to abate
 * @long_msg_seq_no: next identifier to use for outbound fragmented messages
 * @reasm_buf: head of partially reassembled inbound message fragments
 * @bc_rcvr: marks that this is a broadcast receiver link
 * @stats: collects statistics regarding link activity
 */
struct tipc_link {
    char name[TIPC_MAX_LINK_NAME];

    /* Management and link supervision data */
    char if_name[TIPC_MAX_IF_NAME];
    struct tipc_mon_state mon_state;
    struct sk_buff *failover_reasm_skb;

    /* Max packet negotiation */

    struct sk_buff_head transmq;
    struct sk_buff_head backlogq;
    struct sk_buff_head deferdq;
    struct sk_buff_head *inputq;
    struct sk_buff_head *namedq;

    /* Congestion handling */
    struct sk_buff_head wakeupq;

    /* Fragmentation/reassembly */
    struct sk_buff *reasm_buf;

    struct tipc_link *bc_rcvlink;
    struct tipc_link *bc_sndlink;
    unsigned long prev_retr;

    struct tipc_stats stats;
};
/*
 * Error message prefixes
 */
static const char *link_co_err = "Link tunneling error, ";
static const char *link_rst_msg = "Resetting link ";

/* Send states for broadcast NACKs
 */
enum {
    BC_NACK_SND_CONDITIONAL,
    BC_NACK_SND_UNCONDITIONAL,
    BC_NACK_SND_SUPPRESS,
};

#define TIPC_BC_RETR_LIMIT 10   /* [ms] */

/*
 * Interval between NACKs when packets arrive out of order
 */
#define TIPC_NACK_INTV (TIPC_MIN_LINK_WIN * 2)

/* Wildcard value for link session numbers. When it is known that
 * peer endpoint is down, any session number must be accepted.
 */
#define ANY_SESSION 0x10000
enum {
    LINK_ESTABLISHED  = 0xe,
    LINK_ESTABLISHING = 0xe << 4,
    LINK_RESET        = 0x1 << 8,
    LINK_RESETTING    = 0x2 << 12,
    LINK_PEER_RESET   = 0xd << 16,
    LINK_FAILINGOVER  = 0xf << 20,
    LINK_SYNCHING     = 0xc << 24
};
/* Link FSM state checking routines
 */
static int link_is_up(struct tipc_link *l)
{
    return l->state & (LINK_ESTABLISHED | LINK_SYNCHING);
}
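
/* Illustrative note (not from the upstream sources): the FSM states above
 * are bit-coded so that a single mask test can classify a link. For example,
 * link_is_up() is non-zero only for LINK_ESTABLISHED (0x0000000e) and
 * LINK_SYNCHING (0x0c000000); a state such as LINK_RESET (0x00000100)
 * shares no bits with (LINK_ESTABLISHED | LINK_SYNCHING) == 0x0c00000e.
 */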
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq);
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      u16 rcvgap, int tolerance, int priority,
                                      struct sk_buff_head *xmitq);
static void link_print(struct tipc_link *l, const char *str);
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq);
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq);
static bool tipc_link_release_pkts(struct tipc_link *l, u16 to);
/*
 *  Simple non-static link routines (i.e. referenced outside this file)
 */
bool tipc_link_is_up(struct tipc_link *l)
{
    return link_is_up(l);
}

bool tipc_link_peer_is_down(struct tipc_link *l)
{
    return l->state == LINK_PEER_RESET;
}

bool tipc_link_is_reset(struct tipc_link *l)
{
    return l->state & (LINK_RESET | LINK_FAILINGOVER | LINK_ESTABLISHING);
}

bool tipc_link_is_establishing(struct tipc_link *l)
{
    return l->state == LINK_ESTABLISHING;
}

bool tipc_link_is_synching(struct tipc_link *l)
{
    return l->state == LINK_SYNCHING;
}

bool tipc_link_is_failingover(struct tipc_link *l)
{
    return l->state == LINK_FAILINGOVER;
}

bool tipc_link_is_blocked(struct tipc_link *l)
{
    return l->state & (LINK_RESETTING | LINK_PEER_RESET | LINK_FAILINGOVER);
}

static bool link_is_bc_sndlink(struct tipc_link *l)
{
    return !l->bc_sndlink;
}

static bool link_is_bc_rcvlink(struct tipc_link *l)
{
    return ((l->bc_rcvlink == l) && !link_is_bc_sndlink(l));
}
int tipc_link_is_active(struct tipc_link *l)
{
    return l->active;
}

void tipc_link_set_active(struct tipc_link *l, bool active)
{
    l->active = active;
}

u32 tipc_link_id(struct tipc_link *l)
{
    return l->peer_bearer_id << 16 | l->bearer_id;
}

int tipc_link_window(struct tipc_link *l)
{
    return l->window;
}

int tipc_link_prio(struct tipc_link *l)
{
    return l->priority;
}

unsigned long tipc_link_tolerance(struct tipc_link *l)
{
    return l->tolerance;
}

struct sk_buff_head *tipc_link_inputq(struct tipc_link *l)
{
    return l->inputq;
}

char tipc_link_plane(struct tipc_link *l)
{
    return l->net_plane;
}
void tipc_link_add_bc_peer(struct tipc_link *snd_l,
                           struct tipc_link *uc_l,
                           struct sk_buff_head *xmitq)
{
    struct tipc_link *rcv_l = uc_l->bc_rcvlink;

    rcv_l->acked = snd_l->snd_nxt - 1;
    snd_l->state = LINK_ESTABLISHED;
    tipc_link_build_bc_init_msg(uc_l, xmitq);
}
void tipc_link_remove_bc_peer(struct tipc_link *snd_l,
                              struct tipc_link *rcv_l,
                              struct sk_buff_head *xmitq)
{
    u16 ack = snd_l->snd_nxt - 1;

    rcv_l->bc_peer_is_up = true;
    rcv_l->state = LINK_ESTABLISHED;
    tipc_link_bc_ack_rcv(rcv_l, ack, xmitq);
    tipc_link_reset(rcv_l);
    rcv_l->state = LINK_RESET;
    if (!snd_l->ackers) {
        tipc_link_reset(snd_l);
        snd_l->state = LINK_RESET;
        __skb_queue_purge(xmitq);
    }
}
int tipc_link_bc_peers(struct tipc_link *l)
{
    return l->ackers;
}

static u16 link_bc_rcv_gap(struct tipc_link *l)
{
    struct sk_buff *skb = skb_peek(&l->deferdq);
    u16 gap = 0;

    if (more(l->snd_nxt, l->rcv_nxt))
        gap = l->snd_nxt - l->rcv_nxt;
    if (skb)
        gap = buf_seqno(skb) - l->rcv_nxt;
    return gap;
}
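
/* Worked example (illustrative, assuming the reconstruction above): with
 * rcv_nxt == 100, peer's snd_nxt == 105 and a deferred packet with seqno 103
 * at the head of deferdq, the first test yields gap = 5 and the second
 * overrides it with gap = 103 - 100 = 3, i.e. the gap up to the first
 * deferred packet.
 */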
void tipc_link_set_mtu(struct tipc_link *l, int mtu)
{
    l->mtu = mtu;
}

int tipc_link_mtu(struct tipc_link *l)
{
    return l->mtu;
}

u16 tipc_link_rcv_nxt(struct tipc_link *l)
{
    return l->rcv_nxt;
}

u16 tipc_link_acked(struct tipc_link *l)
{
    return l->acked;
}

char *tipc_link_name(struct tipc_link *l)
{
    return l->name;
}
/**
 * tipc_link_create - create a new link
 * @n: pointer to associated node
 * @if_name: associated interface name
 * @bearer_id: id (index) of associated bearer
 * @tolerance: link tolerance to be used by link
 * @net_plane: network plane (A, B, C...) this link belongs to
 * @mtu: mtu to be advertised by link
 * @priority: priority to be used by link
 * @window: send window to be used by link
 * @session: session to be used by link
 * @ownnode: identity of own node
 * @peer: node id of peer node
 * @peer_caps: bitmap describing peer node capabilities
 * @bc_sndlink: the namespace global link used for broadcast sending
 * @bc_rcvlink: the peer specific link used for broadcast reception
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_create(struct net *net, char *if_name, int bearer_id,
                      int tolerance, char net_plane, u32 mtu, int priority,
                      int window, u32 session, u32 ownnode, u32 peer,
                      u16 peer_caps,
                      struct tipc_link *bc_sndlink,
                      struct tipc_link *bc_rcvlink,
                      struct sk_buff_head *inputq,
                      struct sk_buff_head *namedq,
                      struct tipc_link **link)
{
    struct tipc_link *l;

    l = kzalloc(sizeof(*l), GFP_ATOMIC);
    if (!l)
        return false;
    *link = l;
    l->session = session;

    /* Note: peer i/f name is completed by reset/activate message */
    sprintf(l->name, "%u.%u.%u:%s-%u.%u.%u:unknown",
            tipc_zone(ownnode), tipc_cluster(ownnode), tipc_node(ownnode),
            if_name, tipc_zone(peer), tipc_cluster(peer), tipc_node(peer));
    strcpy(l->if_name, if_name);
    l->peer_caps = peer_caps;
    l->peer_session = ANY_SESSION;
    l->bearer_id = bearer_id;
    l->tolerance = tolerance;
    l->net_plane = net_plane;
    l->advertised_mtu = mtu;
    l->priority = priority;
    tipc_link_set_queue_limits(l, window);
    l->bc_sndlink = bc_sndlink;
    l->bc_rcvlink = bc_rcvlink;
    l->state = LINK_RESETTING;
    __skb_queue_head_init(&l->transmq);
    __skb_queue_head_init(&l->backlogq);
    __skb_queue_head_init(&l->deferdq);
    skb_queue_head_init(&l->wakeupq);
    skb_queue_head_init(l->inputq);
    return true;
}
/**
 * tipc_link_bc_create - create new link to be used for broadcast
 * @n: pointer to associated node
 * @mtu: mtu to be used
 * @window: send window to be used
 * @inputq: queue to put messages ready for delivery
 * @namedq: queue to put binding table update messages ready for delivery
 * @link: return value, pointer to put the created link
 *
 * Returns true if link was created, otherwise false
 */
bool tipc_link_bc_create(struct net *net, u32 ownnode, u32 peer,
                         int mtu, int window, u16 peer_caps,
                         struct sk_buff_head *inputq,
                         struct sk_buff_head *namedq,
                         struct tipc_link *bc_sndlink,
                         struct tipc_link **link)
{
    struct tipc_link *l;

    if (!tipc_link_create(net, "", MAX_BEARERS, 0, 'Z', mtu, 0, window,
                          0, ownnode, peer, peer_caps, bc_sndlink,
                          NULL, inputq, namedq, link))
        return false;

    l = *link;
    strcpy(l->name, tipc_bclink_name);
    tipc_link_reset(l);
    l->state = LINK_RESET;

    /* Broadcast send link is always up */
    if (link_is_bc_sndlink(l))
        l->state = LINK_ESTABLISHED;

    return true;
}
/**
 * tipc_link_fsm_evt - link finite state machine
 * @l: pointer to link
 * @evt: state machine event to be processed
 */
int tipc_link_fsm_evt(struct tipc_link *l, int evt)
{
    int rc = 0;

    switch (l->state) {
    case LINK_RESETTING:
        switch (evt) {
        case LINK_PEER_RESET_EVT:
            l->state = LINK_PEER_RESET;
            break;
        case LINK_RESET_EVT:
            l->state = LINK_RESET;
            break;
        case LINK_FAILURE_EVT:
        case LINK_FAILOVER_BEGIN_EVT:
        case LINK_ESTABLISH_EVT:
        case LINK_FAILOVER_END_EVT:
        case LINK_SYNCH_BEGIN_EVT:
        case LINK_SYNCH_END_EVT:
        default:
            goto illegal_evt;
        }
        break;
    case LINK_RESET:
        switch (evt) {
        case LINK_PEER_RESET_EVT:
            l->state = LINK_ESTABLISHING;
            break;
        case LINK_FAILOVER_BEGIN_EVT:
            l->state = LINK_FAILINGOVER;
        case LINK_FAILURE_EVT:
        case LINK_RESET_EVT:
        case LINK_ESTABLISH_EVT:
        case LINK_FAILOVER_END_EVT:
            break;
        case LINK_SYNCH_BEGIN_EVT:
        case LINK_SYNCH_END_EVT:
        default:
            goto illegal_evt;
        }
        break;
    case LINK_PEER_RESET:
        switch (evt) {
        case LINK_RESET_EVT:
            l->state = LINK_ESTABLISHING;
            break;
        case LINK_PEER_RESET_EVT:
        case LINK_ESTABLISH_EVT:
        case LINK_FAILURE_EVT:
            break;
        case LINK_SYNCH_BEGIN_EVT:
        case LINK_SYNCH_END_EVT:
        case LINK_FAILOVER_BEGIN_EVT:
        case LINK_FAILOVER_END_EVT:
        default:
            goto illegal_evt;
        }
        break;
    case LINK_FAILINGOVER:
        switch (evt) {
        case LINK_FAILOVER_END_EVT:
            l->state = LINK_RESET;
            break;
        case LINK_PEER_RESET_EVT:
        case LINK_RESET_EVT:
        case LINK_ESTABLISH_EVT:
        case LINK_FAILURE_EVT:
            break;
        case LINK_FAILOVER_BEGIN_EVT:
        case LINK_SYNCH_BEGIN_EVT:
        case LINK_SYNCH_END_EVT:
        default:
            goto illegal_evt;
        }
        break;
    case LINK_ESTABLISHING:
        switch (evt) {
        case LINK_ESTABLISH_EVT:
            l->state = LINK_ESTABLISHED;
            break;
        case LINK_FAILOVER_BEGIN_EVT:
            l->state = LINK_FAILINGOVER;
            break;
        case LINK_RESET_EVT:
            l->state = LINK_RESET;
            break;
        case LINK_FAILURE_EVT:
        case LINK_PEER_RESET_EVT:
        case LINK_SYNCH_BEGIN_EVT:
        case LINK_FAILOVER_END_EVT:
            break;
        case LINK_SYNCH_END_EVT:
        default:
            goto illegal_evt;
        }
        break;
    case LINK_ESTABLISHED:
        switch (evt) {
        case LINK_PEER_RESET_EVT:
            l->state = LINK_PEER_RESET;
            rc |= TIPC_LINK_DOWN_EVT;
            break;
        case LINK_FAILURE_EVT:
            l->state = LINK_RESETTING;
            rc |= TIPC_LINK_DOWN_EVT;
            break;
        case LINK_RESET_EVT:
            l->state = LINK_RESET;
            break;
        case LINK_ESTABLISH_EVT:
        case LINK_SYNCH_END_EVT:
            break;
        case LINK_SYNCH_BEGIN_EVT:
            l->state = LINK_SYNCHING;
            break;
        case LINK_FAILOVER_BEGIN_EVT:
        case LINK_FAILOVER_END_EVT:
        default:
            goto illegal_evt;
        }
        break;
    case LINK_SYNCHING:
        switch (evt) {
        case LINK_PEER_RESET_EVT:
            l->state = LINK_PEER_RESET;
            rc |= TIPC_LINK_DOWN_EVT;
            break;
        case LINK_FAILURE_EVT:
            l->state = LINK_RESETTING;
            rc |= TIPC_LINK_DOWN_EVT;
            break;
        case LINK_RESET_EVT:
            l->state = LINK_RESET;
            break;
        case LINK_ESTABLISH_EVT:
        case LINK_SYNCH_BEGIN_EVT:
            break;
        case LINK_SYNCH_END_EVT:
            l->state = LINK_ESTABLISHED;
            break;
        case LINK_FAILOVER_BEGIN_EVT:
        case LINK_FAILOVER_END_EVT:
        default:
            goto illegal_evt;
        }
        break;
    default:
        pr_err("Unknown FSM state %x in %s\n", l->state, l->name);
    }
    return rc;
illegal_evt:
    pr_err("Illegal FSM event %x in state %x on link %s\n",
           evt, l->state, l->name);
    return rc;
}
/* link_profile_stats - update statistical profiling of traffic
 */
static void link_profile_stats(struct tipc_link *l)
{
    struct sk_buff *skb;
    struct tipc_msg *msg;
    int length;

    /* Update counters used in statistical profiling of send traffic */
    l->stats.accu_queue_sz += skb_queue_len(&l->transmq);
    l->stats.queue_sz_counts++;

    skb = skb_peek(&l->transmq);
    if (!skb)
        return;
    msg = buf_msg(skb);
    length = msg_size(msg);

    if (msg_user(msg) == MSG_FRAGMENTER) {
        if (msg_type(msg) != FIRST_FRAGMENT)
            return;
        length = msg_size(msg_get_wrapped(msg));
    }
    l->stats.msg_lengths_total += length;
    l->stats.msg_length_counts++;
    if (length <= 64)
        l->stats.msg_length_profile[0]++;
    else if (length <= 256)
        l->stats.msg_length_profile[1]++;
    else if (length <= 1024)
        l->stats.msg_length_profile[2]++;
    else if (length <= 4096)
        l->stats.msg_length_profile[3]++;
    else if (length <= 16384)
        l->stats.msg_length_profile[4]++;
    else if (length <= 32768)
        l->stats.msg_length_profile[5]++;
    else
        l->stats.msg_length_profile[6]++;
}
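
/* Illustrative note (assuming the "<= 64" lower bucket reconstructed above):
 * the profile buckets are cumulative upper bounds, so e.g. a 300-byte
 * message falls through "<= 64" and "<= 256" and is counted in
 * msg_length_profile[2] ("<= 1024"); anything above 32768 bytes ends up in
 * msg_length_profile[6].
 */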
/* tipc_link_timeout - perform periodic task as instructed from node timeout
 */
int tipc_link_timeout(struct tipc_link *l, struct sk_buff_head *xmitq)
{
    int mtyp = 0;
    int rc = 0;
    bool state = false;
    bool probe = false;
    bool setup = false;
    u16 bc_snt = l->bc_sndlink->snd_nxt - 1;
    u16 bc_acked = l->bc_rcvlink->acked;
    struct tipc_mon_state *mstate = &l->mon_state;

    switch (l->state) {
    case LINK_ESTABLISHED:
    case LINK_SYNCHING:
        mtyp = STATE_MSG;
        link_profile_stats(l);
        tipc_mon_get_state(l->net, l->addr, mstate, l->bearer_id);
        if (mstate->reset || (l->silent_intv_cnt > l->abort_limit))
            return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        state = bc_acked != bc_snt;
        state |= l->bc_rcvlink->rcv_unacked;
        state |= l->rcv_unacked;
        state |= !skb_queue_empty(&l->transmq);
        state |= !skb_queue_empty(&l->deferdq);
        probe = mstate->probing;
        probe |= l->silent_intv_cnt;
        if (probe || mstate->monitoring)
            l->silent_intv_cnt++;
        break;
    case LINK_RESET:
        setup = l->rst_cnt++ <= 4;
        setup |= !(l->rst_cnt % 16);
        mtyp = RESET_MSG;
        break;
    case LINK_ESTABLISHING:
        setup = true;
        mtyp = ACTIVATE_MSG;
        break;
    case LINK_PEER_RESET:
    case LINK_RESETTING:
    case LINK_FAILINGOVER:
        break;
    default:
        break;
    }

    if (state || probe || setup)
        tipc_link_build_proto_msg(l, mtyp, probe, 0, 0, 0, xmitq);

    return rc;
}
/**
 * link_schedule_user - schedule a message sender for wakeup after congestion
 * @link: congested link
 * @list: message that was attempted sent
 * Create pseudo msg to send back to user when congestion abates
 * Does not consume buffer list
 */
static int link_schedule_user(struct tipc_link *link, struct sk_buff_head *list)
{
    struct tipc_msg *msg = buf_msg(skb_peek(list));
    int imp = msg_importance(msg);
    u32 oport = msg_origport(msg);
    u32 addr = tipc_own_addr(link->net);
    struct sk_buff *skb;

    /* This really cannot happen... */
    if (unlikely(imp > TIPC_CRITICAL_IMPORTANCE)) {
        pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
        return -ENOBUFS;
    }
    /* Non-blocking sender: */
    if (TIPC_SKB_CB(skb_peek(list))->wakeup_pending)
        return -ELINKCONG;

    /* Create and schedule wakeup pseudo message */
    skb = tipc_msg_create(SOCK_WAKEUP, 0, INT_H_SIZE, 0,
                          addr, addr, oport, 0, 0);
    if (!skb)
        return -ENOBUFS;
    TIPC_SKB_CB(skb)->chain_sz = skb_queue_len(list);
    TIPC_SKB_CB(skb)->chain_imp = imp;
    skb_queue_tail(&link->wakeupq, skb);
    link->stats.link_congs++;
    return -ELINKCONG;
}
/**
 * link_prepare_wakeup - prepare users for wakeup after congestion
 * @link: congested link
 * Move a number of waiting users, as permitted by available space in
 * the send queue, from link wait queue to node wait queue for wakeup
 */
void link_prepare_wakeup(struct tipc_link *l)
{
    int pnd[TIPC_SYSTEM_IMPORTANCE + 1] = {0,};
    int imp, lim;
    struct sk_buff *skb, *tmp;

    skb_queue_walk_safe(&l->wakeupq, skb, tmp) {
        imp = TIPC_SKB_CB(skb)->chain_imp;
        lim = l->backlog[imp].limit;
        pnd[imp] += TIPC_SKB_CB(skb)->chain_sz;
        if ((pnd[imp] + l->backlog[imp].len) >= lim)
            break;
        skb_unlink(skb, &l->wakeupq);
        skb_queue_tail(l->inputq, skb);
    }
}
void tipc_link_reset(struct tipc_link *l)
{
    l->peer_session = ANY_SESSION;
    l->mtu = l->advertised_mtu;
    __skb_queue_purge(&l->transmq);
    __skb_queue_purge(&l->deferdq);
    skb_queue_splice_init(&l->wakeupq, l->inputq);
    __skb_queue_purge(&l->backlogq);
    l->backlog[TIPC_LOW_IMPORTANCE].len = 0;
    l->backlog[TIPC_MEDIUM_IMPORTANCE].len = 0;
    l->backlog[TIPC_HIGH_IMPORTANCE].len = 0;
    l->backlog[TIPC_CRITICAL_IMPORTANCE].len = 0;
    l->backlog[TIPC_SYSTEM_IMPORTANCE].len = 0;
    kfree_skb(l->reasm_buf);
    kfree_skb(l->failover_reasm_skb);
    l->failover_reasm_skb = NULL;
    l->silent_intv_cnt = 0;
    l->stats.recv_info = 0;
    l->bc_peer_is_up = false;
    memset(&l->mon_state, 0, sizeof(l->mon_state));
    tipc_link_reset_stats(l);
}
/**
 * tipc_link_xmit(): enqueue buffer list according to queue situation
 * @list: chain of buffers containing message
 * @xmitq: returned list of packets to be sent by caller
 *
 * Consumes the buffer chain, except when returning -ELINKCONG,
 * since the caller then may want to make more send attempts.
 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
 */
int tipc_link_xmit(struct tipc_link *l, struct sk_buff_head *list,
                   struct sk_buff_head *xmitq)
{
    struct tipc_msg *hdr = buf_msg(skb_peek(list));
    unsigned int maxwin = l->window;
    unsigned int i, imp = msg_importance(hdr);
    unsigned int mtu = l->mtu;
    u16 ack = l->rcv_nxt - 1;
    u16 seqno = l->snd_nxt;
    u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;
    struct sk_buff_head *transmq = &l->transmq;
    struct sk_buff_head *backlogq = &l->backlogq;
    struct sk_buff *skb, *_skb, *bskb;

    /* Match msg importance against this and all higher backlog limits: */
    if (!skb_queue_empty(backlogq)) {
        for (i = imp; i <= TIPC_SYSTEM_IMPORTANCE; i++) {
            if (unlikely(l->backlog[i].len >= l->backlog[i].limit))
                return link_schedule_user(l, list);
        }
    }
    if (unlikely(msg_size(hdr) > mtu)) {
        skb_queue_purge(list);
        return -EMSGSIZE;
    }

    /* Prepare each packet for sending, and add to relevant queue: */
    while (skb_queue_len(list)) {
        skb = skb_peek(list);
        hdr = buf_msg(skb);
        msg_set_seqno(hdr, seqno);
        msg_set_ack(hdr, ack);
        msg_set_bcast_ack(hdr, bc_ack);

        if (likely(skb_queue_len(transmq) < maxwin)) {
            _skb = skb_clone(skb, GFP_ATOMIC);
            if (!_skb) {
                skb_queue_purge(list);
                return -ENOBUFS;
            }
            __skb_dequeue(list);
            __skb_queue_tail(transmq, skb);
            __skb_queue_tail(xmitq, _skb);
            TIPC_SKB_CB(skb)->ackers = l->ackers;
            seqno++;
            continue;
        }
        if (tipc_msg_bundle(skb_peek_tail(backlogq), hdr, mtu)) {
            kfree_skb(__skb_dequeue(list));
            l->stats.sent_bundled++;
            continue;
        }
        if (tipc_msg_make_bundle(&bskb, hdr, mtu, l->addr)) {
            kfree_skb(__skb_dequeue(list));
            __skb_queue_tail(backlogq, bskb);
            l->backlog[msg_importance(buf_msg(bskb))].len++;
            l->stats.sent_bundled++;
            l->stats.sent_bundles++;
            continue;
        }
        l->backlog[imp].len += skb_queue_len(list);
        skb_queue_splice_tail_init(list, backlogq);
        break;
    }
    l->snd_nxt = seqno;
    return 0;
}
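
/* Usage sketch (hypothetical caller, not part of this file): a sender builds
 * a buffer chain, lets the link enqueue it, and then pushes the returned
 * packets onto the bearer itself, roughly:
 *
 *     struct sk_buff_head list, xmitq;
 *     int rc;
 *
 *     __skb_queue_head_init(&list);
 *     __skb_queue_head_init(&xmitq);
 *     // ... fill "list" with one message, e.g. via tipc_msg_build() ...
 *     rc = tipc_link_xmit(l, &list, &xmitq);
 *     if (rc == -ELINKCONG)
 *         ; // "list" was not consumed; the sender may block or retry
 *     else
 *         tipc_bearer_xmit(net, bearer_id, &xmitq, addr);
 */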
void tipc_link_advance_backlog(struct tipc_link *l, struct sk_buff_head *xmitq)
{
    struct sk_buff *skb, *_skb;
    struct tipc_msg *hdr;
    u16 seqno = l->snd_nxt;
    u16 ack = l->rcv_nxt - 1;
    u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

    while (skb_queue_len(&l->transmq) < l->window) {
        skb = skb_peek(&l->backlogq);
        if (!skb)
            break;
        _skb = skb_clone(skb, GFP_ATOMIC);
        if (!_skb)
            break;
        __skb_dequeue(&l->backlogq);
        hdr = buf_msg(skb);
        l->backlog[msg_importance(hdr)].len--;
        __skb_queue_tail(&l->transmq, skb);
        __skb_queue_tail(xmitq, _skb);
        TIPC_SKB_CB(skb)->ackers = l->ackers;
        msg_set_seqno(hdr, seqno);
        msg_set_ack(hdr, ack);
        msg_set_bcast_ack(hdr, bc_ack);
        seqno++;
    }
    l->snd_nxt = seqno;
}
static void link_retransmit_failure(struct tipc_link *l, struct sk_buff *skb)
{
    struct tipc_msg *hdr = buf_msg(skb);

    pr_warn("Retransmission failure on link <%s>\n", l->name);
    link_print(l, "Resetting link ");
    pr_info("Failed msg: usr %u, typ %u, len %u, err %u\n",
            msg_user(hdr), msg_type(hdr), msg_size(hdr), msg_errcode(hdr));
    pr_info("sqno %u, prev: %x, src: %x\n",
            msg_seqno(hdr), msg_prevnode(hdr), msg_orignode(hdr));
}
int tipc_link_retrans(struct tipc_link *l, u16 from, u16 to,
                      struct sk_buff_head *xmitq)
{
    struct sk_buff *_skb, *skb = skb_peek(&l->transmq);
    struct tipc_msg *hdr;
    u16 ack = l->rcv_nxt - 1;
    u16 bc_ack = l->bc_rcvlink->rcv_nxt - 1;

    if (!skb)
        return 0;

    /* Detect repeated retransmit failures on same packet */
    if (likely(l->last_retransm != buf_seqno(skb))) {
        l->last_retransm = buf_seqno(skb);
        l->stale_count = 1;
    } else if (++l->stale_count > 100) {
        link_retransmit_failure(l, skb);
        return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
    }

    /* Move forward to where retransmission should start */
    skb_queue_walk(&l->transmq, skb) {
        if (!less(buf_seqno(skb), from))
            break;
    }

    skb_queue_walk_from(&l->transmq, skb) {
        if (more(buf_seqno(skb), to))
            break;
        _skb = __pskb_copy(skb, MIN_H_SIZE, GFP_ATOMIC);
        if (!_skb)
            return 0;
        hdr = buf_msg(_skb);
        msg_set_ack(hdr, ack);
        msg_set_bcast_ack(hdr, bc_ack);
        _skb->priority = TC_PRIO_CONTROL;
        __skb_queue_tail(xmitq, _skb);
        l->stats.retransmitted++;
    }
    return 0;
}
/* tipc_data_input - deliver data and name distr msgs to upper layer
 *
 * Consumes buffer if message is of right type
 * Node lock must be held
 */
static bool tipc_data_input(struct tipc_link *l, struct sk_buff *skb,
                            struct sk_buff_head *inputq)
{
    switch (msg_user(buf_msg(skb))) {
    case TIPC_LOW_IMPORTANCE:
    case TIPC_MEDIUM_IMPORTANCE:
    case TIPC_HIGH_IMPORTANCE:
    case TIPC_CRITICAL_IMPORTANCE:
        skb_queue_tail(inputq, skb);
        return true;
    case NAME_DISTRIBUTOR:
        l->bc_rcvlink->state = LINK_ESTABLISHED;
        skb_queue_tail(l->namedq, skb);
        return true;
    case TUNNEL_PROTOCOL:
    case MSG_FRAGMENTER:
    case BCAST_PROTOCOL:
        return false;
    default:
        pr_warn("Dropping received illegal msg type\n");
        kfree_skb(skb);
        return false;
    }
}
/* tipc_link_input - process packet that has passed link protocol check
 *
 * Consumes buffer
 */
static int tipc_link_input(struct tipc_link *l, struct sk_buff *skb,
                           struct sk_buff_head *inputq)
{
    struct tipc_msg *hdr = buf_msg(skb);
    struct sk_buff **reasm_skb = &l->reasm_buf;
    struct sk_buff *iskb;
    struct sk_buff_head tmpq;
    int usr = msg_user(hdr);
    int rc = 0;
    int pos = 0;
    int ipos = 0;

    if (unlikely(usr == TUNNEL_PROTOCOL)) {
        if (msg_type(hdr) == SYNCH_MSG) {
            __skb_queue_purge(&l->deferdq);
            goto drop;
        }
        if (!tipc_msg_extract(skb, &iskb, &ipos))
            return rc;
        kfree_skb(skb);
        skb = iskb;
        hdr = buf_msg(skb);
        if (less(msg_seqno(hdr), l->drop_point))
            goto drop;
        if (tipc_data_input(l, skb, inputq))
            return rc;
        usr = msg_user(hdr);
        reasm_skb = &l->failover_reasm_skb;
    }

    if (usr == MSG_BUNDLER) {
        skb_queue_head_init(&tmpq);
        l->stats.recv_bundles++;
        l->stats.recv_bundled += msg_msgcnt(hdr);
        while (tipc_msg_extract(skb, &iskb, &pos))
            tipc_data_input(l, iskb, &tmpq);
        tipc_skb_queue_splice_tail(&tmpq, inputq);
        return 0;
    } else if (usr == MSG_FRAGMENTER) {
        l->stats.recv_fragments++;
        if (tipc_buf_append(reasm_skb, &skb)) {
            l->stats.recv_fragmented++;
            tipc_data_input(l, skb, inputq);
        } else if (!*reasm_skb && !link_is_bc_rcvlink(l)) {
            pr_warn_ratelimited("Unable to build fragment list\n");
            return tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        }
        return 0;
    } else if (usr == BCAST_PROTOCOL) {
        tipc_bcast_lock(l->net);
        tipc_link_bc_init_rcv(l->bc_rcvlink, hdr);
        tipc_bcast_unlock(l->net);
    }
drop:
    kfree_skb(skb);
    return 0;
}
static bool tipc_link_release_pkts(struct tipc_link *l, u16 acked)
{
    bool released = false;
    struct sk_buff *skb, *tmp;

    skb_queue_walk_safe(&l->transmq, skb, tmp) {
        if (more(buf_seqno(skb), acked))
            break;
        __skb_unlink(skb, &l->transmq);
        kfree_skb(skb);
        released = true;
    }
    return released;
}
/* tipc_link_build_state_msg: prepare link state message for transmission
 *
 * Note that sending of broadcast ack is coordinated among nodes, to reduce
 * risk of ack storms towards the sender
 */
int tipc_link_build_state_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
    /* Broadcast ACK must be sent via a unicast link => defer to caller */
    if (link_is_bc_rcvlink(l)) {
        if (((l->rcv_nxt ^ tipc_own_addr(l->net)) & 0xf) != 0xf)
            return 0;

        /* Use snd_nxt to store peer's snd_nxt in broadcast rcv link */
        l->snd_nxt = l->rcv_nxt;
        return TIPC_LINK_SND_STATE;
    }

    l->stats.sent_acks++;
    tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
    return 0;
}
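
/* Illustrative note: the test ((rcv_nxt ^ own_addr) & 0xf) != 0xf staggers
 * broadcast ACKs among receivers. A node whose address ends in nibble 0x3
 * only acks when rcv_nxt ends in nibble 0xc (0x3 ^ 0xc == 0xf), so at most
 * one in sixteen received broadcast packets triggers an ACK from any given
 * peer.
 */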
/* tipc_link_build_reset_msg: prepare link RESET or ACTIVATE message
 */
void tipc_link_build_reset_msg(struct tipc_link *l, struct sk_buff_head *xmitq)
{
    int mtyp = RESET_MSG;
    struct sk_buff *skb;

    if (l->state == LINK_ESTABLISHING)
        mtyp = ACTIVATE_MSG;

    tipc_link_build_proto_msg(l, mtyp, 0, 0, 0, 0, xmitq);

    /* Inform peer that this endpoint is going down if applicable */
    skb = skb_peek_tail(xmitq);
    if (skb && (l->state == LINK_RESET))
        msg_set_peer_stopping(buf_msg(skb), 1);
}
/* tipc_link_build_nack_msg: prepare link nack message for transmission
 * Note that sending of broadcast NACK is coordinated among nodes, to
 * reduce the risk of NACK storms towards the sender
 */
static int tipc_link_build_nack_msg(struct tipc_link *l,
                                    struct sk_buff_head *xmitq)
{
    u32 def_cnt = ++l->stats.deferred_recv;
    int match1, match2;

    if (link_is_bc_rcvlink(l)) {
        match1 = def_cnt & 0xf;
        match2 = tipc_own_addr(l->net) & 0xf;
        if (match1 == match2)
            return TIPC_LINK_SND_STATE;
        return 0;
    }

    if ((skb_queue_len(&l->deferdq) == 1) || !(def_cnt % TIPC_NACK_INTV))
        tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, 0, xmitq);
    return 0;
}
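
/* Illustrative note: for broadcast receive links the NACK is only signalled
 * when (def_cnt & 0xf) equals (own_addr & 0xf), so nodes with different low
 * address nibbles raise their NACKs after a different number of deferrals,
 * which spreads NACKs from many receivers over time instead of bursting
 * them all at once towards the sender.
 */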
/* tipc_link_rcv - process TIPC packets/messages arriving from off-node
 * @l: the link that should handle the message
 * @xmitq: queue to place packets to be sent after this call
 */
int tipc_link_rcv(struct tipc_link *l, struct sk_buff *skb,
                  struct sk_buff_head *xmitq)
{
    struct sk_buff_head *defq = &l->deferdq;
    struct tipc_msg *hdr;
    u16 seqno, rcv_nxt, win_lim;
    int rc = 0;

    do {
        hdr = buf_msg(skb);
        seqno = msg_seqno(hdr);
        rcv_nxt = l->rcv_nxt;
        win_lim = rcv_nxt + TIPC_MAX_LINK_WIN;

        /* Verify and update link state */
        if (unlikely(msg_user(hdr) == LINK_PROTOCOL))
            return tipc_link_proto_rcv(l, skb, xmitq);

        if (unlikely(!link_is_up(l))) {
            if (l->state == LINK_ESTABLISHING)
                rc = TIPC_LINK_UP_EVT;
            goto drop;
        }

        /* Don't send probe at next timeout expiration */
        l->silent_intv_cnt = 0;

        /* Drop if outside receive window */
        if (unlikely(less(seqno, rcv_nxt) || more(seqno, win_lim))) {
            l->stats.duplicates++;
            goto drop;
        }

        /* Forward queues and wake up waiting users */
        if (likely(tipc_link_release_pkts(l, msg_ack(hdr)))) {
            tipc_link_advance_backlog(l, xmitq);
            if (unlikely(!skb_queue_empty(&l->wakeupq)))
                link_prepare_wakeup(l);
        }

        /* Defer delivery if sequence gap */
        if (unlikely(seqno != rcv_nxt)) {
            __tipc_skb_queue_sorted(defq, seqno, skb);
            rc |= tipc_link_build_nack_msg(l, xmitq);
            break;
        }

        /* Deliver packet */
        l->rcv_nxt++;
        l->stats.recv_info++;
        if (!tipc_data_input(l, skb, l->inputq))
            rc |= tipc_link_input(l, skb, l->inputq);
        if (unlikely(++l->rcv_unacked >= TIPC_MIN_LINK_WIN))
            rc |= tipc_link_build_state_msg(l, xmitq);
        if (unlikely(rc & ~TIPC_LINK_SND_STATE))
            break;
    } while ((skb = __skb_dequeue(defq)));

    return rc;
drop:
    kfree_skb(skb);
    return rc;
}
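
/* Illustrative note: sequence numbers are mod 2^16, so the window check uses
 * less()/more() rather than plain comparisons. E.g. with rcv_nxt == 65530 a
 * packet with seqno == 2 is still "after" rcv_nxt (it wrapped); only packets
 * behind rcv_nxt or more than TIPC_MAX_LINK_WIN ahead of it are dropped and
 * counted in the duplicates statistic.
 */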
static void tipc_link_build_proto_msg(struct tipc_link *l, int mtyp, bool probe,
                                      u16 rcvgap, int tolerance, int priority,
                                      struct sk_buff_head *xmitq)
{
    struct tipc_link *bcl = l->bc_rcvlink;
    struct sk_buff *skb;
    struct tipc_msg *hdr;
    struct sk_buff_head *dfq = &l->deferdq;
    bool node_up = link_is_up(bcl);
    struct tipc_mon_state *mstate = &l->mon_state;
    int dlen = 0;
    void *data;

    /* Don't send protocol message during reset or link failover */
    if (tipc_link_is_blocked(l))
        return;

    if (!tipc_link_is_up(l) && (mtyp == STATE_MSG))
        return;

    if (!skb_queue_empty(dfq))
        rcvgap = buf_seqno(skb_peek(dfq)) - l->rcv_nxt;

    skb = tipc_msg_create(LINK_PROTOCOL, mtyp, INT_H_SIZE,
                          tipc_max_domain_size, l->addr,
                          tipc_own_addr(l->net), 0, 0, 0);
    if (!skb)
        return;

    hdr = buf_msg(skb);
    data = msg_data(hdr);
    msg_set_session(hdr, l->session);
    msg_set_bearer_id(hdr, l->bearer_id);
    msg_set_net_plane(hdr, l->net_plane);
    msg_set_next_sent(hdr, l->snd_nxt);
    msg_set_ack(hdr, l->rcv_nxt - 1);
    msg_set_bcast_ack(hdr, bcl->rcv_nxt - 1);
    msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
    msg_set_link_tolerance(hdr, tolerance);
    msg_set_linkprio(hdr, priority);
    msg_set_redundant_link(hdr, node_up);
    msg_set_seq_gap(hdr, 0);
    msg_set_seqno(hdr, l->snd_nxt + U16_MAX / 2);

    if (mtyp == STATE_MSG) {
        msg_set_seq_gap(hdr, rcvgap);
        msg_set_bc_gap(hdr, link_bc_rcv_gap(bcl));
        msg_set_probe(hdr, probe);
        tipc_mon_prep(l->net, data, &dlen, mstate, l->bearer_id);
        msg_set_size(hdr, INT_H_SIZE + dlen);
        skb_trim(skb, INT_H_SIZE + dlen);
        l->stats.sent_states++;
    } else {
        /* RESET_MSG or ACTIVATE_MSG */
        msg_set_max_pkt(hdr, l->advertised_mtu);
        strcpy(data, l->if_name);
        msg_set_size(hdr, INT_H_SIZE + TIPC_MAX_IF_NAME);
        skb_trim(skb, INT_H_SIZE + TIPC_MAX_IF_NAME);
    }
    if (probe)
        l->stats.sent_probes++;
    if (rcvgap)
        l->stats.sent_nacks++;
    skb->priority = TC_PRIO_CONTROL;
    __skb_queue_tail(xmitq, skb);
}
/* tipc_link_tnl_prepare(): prepare and return a list of tunnel packets
 * with contents of the link's transmit and backlog queues.
 */
void tipc_link_tnl_prepare(struct tipc_link *l, struct tipc_link *tnl,
                           int mtyp, struct sk_buff_head *xmitq)
{
    struct sk_buff *skb, *tnlskb;
    struct tipc_msg *hdr, tnlhdr;
    struct sk_buff_head *queue = &l->transmq;
    struct sk_buff_head tmpxq, tnlq;
    u16 pktlen, pktcnt, seqno = l->snd_nxt;

    skb_queue_head_init(&tnlq);
    skb_queue_head_init(&tmpxq);

    /* At least one packet required for safe algorithm => add dummy */
    skb = tipc_msg_create(TIPC_LOW_IMPORTANCE, TIPC_DIRECT_MSG,
                          BASIC_H_SIZE, 0, l->addr, tipc_own_addr(l->net),
                          0, 0, TIPC_ERR_NO_PORT);
    if (!skb) {
        pr_warn("%sunable to create tunnel packet\n", link_co_err);
        return;
    }
    skb_queue_tail(&tnlq, skb);
    tipc_link_xmit(l, &tnlq, &tmpxq);
    __skb_queue_purge(&tmpxq);

    /* Initialize reusable tunnel packet header */
    tipc_msg_init(tipc_own_addr(l->net), &tnlhdr, TUNNEL_PROTOCOL,
                  mtyp, INT_H_SIZE, l->addr);
    pktcnt = skb_queue_len(&l->transmq) + skb_queue_len(&l->backlogq);
    msg_set_msgcnt(&tnlhdr, pktcnt);
    msg_set_bearer_id(&tnlhdr, l->peer_bearer_id);
tnl:
    /* Wrap each packet into a tunnel packet */
    skb_queue_walk(queue, skb) {
        hdr = buf_msg(skb);
        if (queue == &l->backlogq)
            msg_set_seqno(hdr, seqno++);
        pktlen = msg_size(hdr);
        msg_set_size(&tnlhdr, pktlen + INT_H_SIZE);
        tnlskb = tipc_buf_acquire(pktlen + INT_H_SIZE);
        if (!tnlskb) {
            pr_warn("%sunable to send packet\n", link_co_err);
            return;
        }
        skb_copy_to_linear_data(tnlskb, &tnlhdr, INT_H_SIZE);
        skb_copy_to_linear_data_offset(tnlskb, INT_H_SIZE, hdr, pktlen);
        __skb_queue_tail(&tnlq, tnlskb);
    }
    if (queue != &l->backlogq) {
        queue = &l->backlogq;
        goto tnl;
    }

    tipc_link_xmit(tnl, &tnlq, xmitq);

    if (mtyp == FAILOVER_MSG) {
        tnl->drop_point = l->rcv_nxt;
        tnl->failover_reasm_skb = l->reasm_buf;
        l->reasm_buf = NULL;
    }
}
/* tipc_link_proto_rcv(): receive link level protocol message :
 * Note that network plane id propagates through the network, and may
 * change at any time. The node with lowest numerical id determines
 * network plane
 */
static int tipc_link_proto_rcv(struct tipc_link *l, struct sk_buff *skb,
                               struct sk_buff_head *xmitq)
{
    struct tipc_msg *hdr = buf_msg(skb);
    u16 rcvgap = 0;
    u16 ack = msg_ack(hdr);
    u16 gap = msg_seq_gap(hdr);
    u16 peers_snd_nxt = msg_next_sent(hdr);
    u16 peers_tol = msg_link_tolerance(hdr);
    u16 peers_prio = msg_linkprio(hdr);
    u16 rcv_nxt = l->rcv_nxt;
    u16 dlen = msg_data_sz(hdr);
    int mtyp = msg_type(hdr);
    void *data;
    char *if_name;
    int rc = 0;

    if (tipc_link_is_blocked(l) || !xmitq)
        goto exit;

    if (tipc_own_addr(l->net) > msg_prevnode(hdr))
        l->net_plane = msg_net_plane(hdr);

    data = msg_data(hdr);

    switch (mtyp) {
    case RESET_MSG:

        /* Ignore duplicate RESET with old session number */
        if ((less_eq(msg_session(hdr), l->peer_session)) &&
            (l->peer_session != ANY_SESSION))
            break;
        /* fall thru' */

    case ACTIVATE_MSG:

        /* Complete own link name with peer's interface name */
        if_name = strrchr(l->name, ':') + 1;
        if (sizeof(l->name) - (if_name - l->name) <= TIPC_MAX_IF_NAME)
            break;
        if (msg_data_sz(hdr) < TIPC_MAX_IF_NAME)
            break;
        strncpy(if_name, data, TIPC_MAX_IF_NAME);

        /* Update own tolerance if peer indicates a non-zero value */
        if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
            l->tolerance = peers_tol;

        /* Update own priority if peer's priority is higher */
        if (in_range(peers_prio, l->priority + 1, TIPC_MAX_LINK_PRI))
            l->priority = peers_prio;

        /* ACTIVATE_MSG serves as PEER_RESET if link is already down */
        if (msg_peer_stopping(hdr))
            rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        else if ((mtyp == RESET_MSG) || !link_is_up(l))
            rc = tipc_link_fsm_evt(l, LINK_PEER_RESET_EVT);

        /* ACTIVATE_MSG takes up link if it was already locally reset */
        if ((mtyp == ACTIVATE_MSG) && (l->state == LINK_ESTABLISHING))
            rc = TIPC_LINK_UP_EVT;

        l->peer_session = msg_session(hdr);
        l->peer_bearer_id = msg_bearer_id(hdr);
        if (l->mtu > msg_max_pkt(hdr))
            l->mtu = msg_max_pkt(hdr);
        break;

    case STATE_MSG:

        /* Update own tolerance if peer indicates a non-zero value */
        if (in_range(peers_tol, TIPC_MIN_LINK_TOL, TIPC_MAX_LINK_TOL))
            l->tolerance = peers_tol;

        if (peers_prio && in_range(peers_prio, TIPC_MIN_LINK_PRI,
                                   TIPC_MAX_LINK_PRI)) {
            l->priority = peers_prio;
            rc = tipc_link_fsm_evt(l, LINK_FAILURE_EVT);
        }

        l->silent_intv_cnt = 0;
        l->stats.recv_states++;
        if (msg_probe(hdr))
            l->stats.recv_probes++;

        if (!link_is_up(l)) {
            if (l->state == LINK_ESTABLISHING)
                rc = TIPC_LINK_UP_EVT;
            break;
        }
        tipc_mon_rcv(l->net, data, dlen, l->addr,
                     &l->mon_state, l->bearer_id);

        /* Send NACK if peer has sent pkts we haven't received yet */
        if (more(peers_snd_nxt, rcv_nxt) && !tipc_link_is_synching(l))
            rcvgap = peers_snd_nxt - l->rcv_nxt;
        if (rcvgap || (msg_probe(hdr)))
            tipc_link_build_proto_msg(l, STATE_MSG, 0, rcvgap,
                                      0, 0, xmitq);
        tipc_link_release_pkts(l, ack);

        /* If NACK, retransmit will now start at right position */
        if (gap) {
            rc = tipc_link_retrans(l, ack + 1, ack + gap, xmitq);
            l->stats.recv_nacks++;
        }

        tipc_link_advance_backlog(l, xmitq);
        if (unlikely(!skb_queue_empty(&l->wakeupq)))
            link_prepare_wakeup(l);
    }
exit:
    kfree_skb(skb);
    return rc;
}
/* tipc_link_build_bc_proto_msg() - create broadcast protocol message
 */
static bool tipc_link_build_bc_proto_msg(struct tipc_link *l, bool bcast,
                                         u16 peers_snd_nxt,
                                         struct sk_buff_head *xmitq)
{
    struct sk_buff *skb;
    struct tipc_msg *hdr;
    struct sk_buff *dfrd_skb = skb_peek(&l->deferdq);
    u16 ack = l->rcv_nxt - 1;
    u16 gap_to = peers_snd_nxt - 1;

    skb = tipc_msg_create(BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE,
                          0, l->addr, tipc_own_addr(l->net), 0, 0, 0);
    if (!skb)
        return false;
    hdr = buf_msg(skb);
    msg_set_last_bcast(hdr, l->bc_sndlink->snd_nxt - 1);
    msg_set_bcast_ack(hdr, ack);
    msg_set_bcgap_after(hdr, ack);
    if (dfrd_skb)
        gap_to = buf_seqno(dfrd_skb) - 1;
    msg_set_bcgap_to(hdr, gap_to);
    msg_set_non_seq(hdr, bcast);
    __skb_queue_tail(xmitq, skb);
    return true;
}
/* tipc_link_build_bc_init_msg() - synchronize broadcast link endpoints.
 *
 * Give a newly added peer node the sequence number where it should
 * start receiving and acking broadcast packets.
 */
static void tipc_link_build_bc_init_msg(struct tipc_link *l,
                                        struct sk_buff_head *xmitq)
{
    struct sk_buff_head list;

    __skb_queue_head_init(&list);
    if (!tipc_link_build_bc_proto_msg(l->bc_rcvlink, false, 0, &list))
        return;
    tipc_link_xmit(l, &list, xmitq);
}
/* tipc_link_bc_init_rcv - receive initial broadcast synch data from peer
 */
void tipc_link_bc_init_rcv(struct tipc_link *l, struct tipc_msg *hdr)
{
    int mtyp = msg_type(hdr);
    u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);

    if (link_is_up(l))
        return;

    if (msg_user(hdr) == BCAST_PROTOCOL) {
        l->rcv_nxt = peers_snd_nxt;
        l->state = LINK_ESTABLISHED;
        return;
    }

    if (l->peer_caps & TIPC_BCAST_SYNCH)
        return;

    if (msg_peer_node_is_up(hdr))
        return;

    /* Compatibility: accept older, less safe initial synch data */
    if ((mtyp == RESET_MSG) || (mtyp == ACTIVATE_MSG))
        l->rcv_nxt = peers_snd_nxt;
}
/* link_bc_retr_eval() - check if the indicated range can be retransmitted now
 * - Adjust permitted range if there is overlap with previous retransmission
 */
static bool link_bc_retr_eval(struct tipc_link *l, u16 *from, u16 *to)
{
    unsigned long elapsed = jiffies_to_msecs(jiffies - l->prev_retr);

    if (less(*to, *from))
        return false;

    /* New retransmission request */
    if ((elapsed > TIPC_BC_RETR_LIMIT) ||
        less(*to, l->prev_from) || more(*from, l->prev_to)) {
        l->prev_from = *from;
        l->prev_to = *to;
        l->prev_retr = jiffies;
        return true;
    }

    /* Inside range of previous retransmit */
    if (!less(*from, l->prev_from) && !more(*to, l->prev_to))
        return false;

    /* Fully or partially outside previous range => exclude overlap */
    if (less(*from, l->prev_from)) {
        *to = l->prev_from - 1;
        l->prev_from = *from;
    }
    if (more(*to, l->prev_to)) {
        *from = l->prev_to + 1;
        l->prev_to = *to;
    }
    l->prev_retr = jiffies;
    return true;
}
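
/* Worked example (illustrative, assuming the reconstruction above): if the
 * previous retransmit covered [prev_from = 10, prev_to = 20] less than
 * TIPC_BC_RETR_LIMIT ms ago and a new request asks for [15, 25], the overlap
 * is excluded: *from becomes prev_to + 1 = 21, so only [21, 25] is
 * retransmitted now, and prev_to is advanced to 25.
 */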
/* tipc_link_bc_sync_rcv - update rcv link according to peer's send state
 */
int tipc_link_bc_sync_rcv(struct tipc_link *l, struct tipc_msg *hdr,
                          struct sk_buff_head *xmitq)
{
    struct tipc_link *snd_l = l->bc_sndlink;
    u16 peers_snd_nxt = msg_bc_snd_nxt(hdr);
    u16 from = msg_bcast_ack(hdr) + 1;
    u16 to = from + msg_bc_gap(hdr) - 1;
    int rc = 0;

    if (!link_is_up(l))
        return rc;

    if (!msg_peer_node_is_up(hdr))
        return rc;

    /* Open when peer acknowledges our bcast init msg (pkt #1) */
    if (msg_ack(hdr))
        l->bc_peer_is_up = true;

    if (!l->bc_peer_is_up)
        return rc;

    l->stats.recv_nacks++;

    /* Ignore if peers_snd_nxt goes beyond receive window */
    if (more(peers_snd_nxt, l->rcv_nxt + l->window))
        return rc;

    if (link_bc_retr_eval(snd_l, &from, &to))
        rc = tipc_link_retrans(snd_l, from, to, xmitq);

    l->snd_nxt = peers_snd_nxt;
    if (link_bc_rcv_gap(l))
        rc |= TIPC_LINK_SND_STATE;

    /* Return now if sender supports nack via STATE messages */
    if (l->peer_caps & TIPC_BCAST_STATE_NACK)
        return rc;

    /* Otherwise, be backwards compatible */

    if (!more(peers_snd_nxt, l->rcv_nxt)) {
        l->nack_state = BC_NACK_SND_CONDITIONAL;
        return 0;
    }

    /* Don't NACK if one was recently sent or peeked */
    if (l->nack_state == BC_NACK_SND_SUPPRESS) {
        l->nack_state = BC_NACK_SND_UNCONDITIONAL;
        return 0;
    }

    /* Conditionally delay NACK sending until next synch rcv */
    if (l->nack_state == BC_NACK_SND_CONDITIONAL) {
        l->nack_state = BC_NACK_SND_UNCONDITIONAL;
        if ((peers_snd_nxt - l->rcv_nxt) < TIPC_MIN_LINK_WIN)
            return 0;
    }

    /* Send NACK now but suppress next one */
    tipc_link_build_bc_proto_msg(l, true, peers_snd_nxt, xmitq);
    l->nack_state = BC_NACK_SND_SUPPRESS;
    return 0;
}
void tipc_link_bc_ack_rcv(struct tipc_link *l, u16 acked,
                          struct sk_buff_head *xmitq)
{
    struct sk_buff *skb, *tmp;
    struct tipc_link *snd_l = l->bc_sndlink;

    if (!link_is_up(l) || !l->bc_peer_is_up)
        return;

    if (!more(acked, l->acked))
        return;

    /* Skip over packets peer has already acked */
    skb_queue_walk(&snd_l->transmq, skb) {
        if (more(buf_seqno(skb), l->acked))
            break;
    }

    /* Update/release the packets peer is acking now */
    skb_queue_walk_from_safe(&snd_l->transmq, skb, tmp) {
        if (more(buf_seqno(skb), acked))
            break;
        if (!--TIPC_SKB_CB(skb)->ackers) {
            __skb_unlink(skb, &snd_l->transmq);
            kfree_skb(skb);
        }
    }
    l->acked = acked;
    tipc_link_advance_backlog(snd_l, xmitq);
    if (unlikely(!skb_queue_empty(&snd_l->wakeupq)))
        link_prepare_wakeup(snd_l);
}
/* tipc_link_bc_nack_rcv(): receive broadcast nack message
 * This function is here for backwards compatibility, since
 * no BCAST_PROTOCOL/STATE messages occur from TIPC v2.5.
 */
int tipc_link_bc_nack_rcv(struct tipc_link *l, struct sk_buff *skb,
                          struct sk_buff_head *xmitq)
{
    struct tipc_msg *hdr = buf_msg(skb);
    u32 dnode = msg_destnode(hdr);
    int mtyp = msg_type(hdr);
    u16 acked = msg_bcast_ack(hdr);
    u16 from = acked + 1;
    u16 to = msg_bcgap_to(hdr);
    u16 peers_snd_nxt = to + 1;
    int rc = 0;

    kfree_skb(skb);

    if (!tipc_link_is_up(l) || !l->bc_peer_is_up)
        return 0;

    if (mtyp != STATE_MSG)
        return 0;

    if (dnode == tipc_own_addr(l->net)) {
        tipc_link_bc_ack_rcv(l, acked, xmitq);
        rc = tipc_link_retrans(l->bc_sndlink, from, to, xmitq);
        l->stats.recv_nacks++;
        return rc;
    }

    /* Msg for other node => suppress own NACK at next sync if applicable */
    if (more(peers_snd_nxt, l->rcv_nxt) && !less(l->rcv_nxt, from))
        l->nack_state = BC_NACK_SND_SUPPRESS;

    return 0;
}
void tipc_link_set_queue_limits(struct tipc_link *l, u32 win)
{
    int max_bulk = TIPC_MAX_PUBLICATIONS / (l->mtu / ITEM_SIZE);

    l->window = win;
    l->backlog[TIPC_LOW_IMPORTANCE].limit      = max_t(u16, 50, win);
    l->backlog[TIPC_MEDIUM_IMPORTANCE].limit   = max_t(u16, 100, win * 2);
    l->backlog[TIPC_HIGH_IMPORTANCE].limit     = max_t(u16, 150, win * 3);
    l->backlog[TIPC_CRITICAL_IMPORTANCE].limit = max_t(u16, 200, win * 4);
    l->backlog[TIPC_SYSTEM_IMPORTANCE].limit   = max_bulk;
}
/**
 * link_reset_stats - reset link statistics
 * @l: pointer to link
 */
void tipc_link_reset_stats(struct tipc_link *l)
{
    memset(&l->stats, 0, sizeof(l->stats));
    if (!link_is_bc_sndlink(l)) {
        l->stats.sent_info = l->snd_nxt;
        l->stats.recv_info = l->rcv_nxt;
    }
}
static void link_print(struct tipc_link *l, const char *str)
{
    struct sk_buff *hskb = skb_peek(&l->transmq);
    u16 head = hskb ? msg_seqno(buf_msg(hskb)) : l->snd_nxt - 1;
    u16 tail = l->snd_nxt - 1;

    pr_info("%s Link <%s> state %x\n", str, l->name, l->state);
    pr_info("XMTQ: %u [%u-%u], BKLGQ: %u, SNDNX: %u, RCVNX: %u\n",
            skb_queue_len(&l->transmq), head, tail,
            skb_queue_len(&l->backlogq), l->snd_nxt, l->rcv_nxt);
}
/* Parse and validate nested (link) properties valid for media, bearer and link
 */
int tipc_nl_parse_link_prop(struct nlattr *prop, struct nlattr *props[])
{
    int err;

    err = nla_parse_nested(props, TIPC_NLA_PROP_MAX, prop,
                           tipc_nl_prop_policy);
    if (err)
        return err;

    if (props[TIPC_NLA_PROP_PRIO]) {
        u32 prio;

        prio = nla_get_u32(props[TIPC_NLA_PROP_PRIO]);
        if (prio > TIPC_MAX_LINK_PRI)
            return -EINVAL;
    }

    if (props[TIPC_NLA_PROP_TOL]) {
        u32 tol;

        tol = nla_get_u32(props[TIPC_NLA_PROP_TOL]);
        if ((tol < TIPC_MIN_LINK_TOL) || (tol > TIPC_MAX_LINK_TOL))
            return -EINVAL;
    }

    if (props[TIPC_NLA_PROP_WIN]) {
        u32 win;

        win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);
        if ((win < TIPC_MIN_LINK_WIN) || (win > TIPC_MAX_LINK_WIN))
            return -EINVAL;
    }

    return 0;
}
static int __tipc_nl_add_stats(struct sk_buff *skb, struct tipc_stats *s)
{
    int i;
    struct nlattr *stats;

    struct nla_map {
        u32 key;
        u32 val;
    };

    struct nla_map map[] = {
        {TIPC_NLA_STATS_RX_INFO, s->recv_info},
        {TIPC_NLA_STATS_RX_FRAGMENTS, s->recv_fragments},
        {TIPC_NLA_STATS_RX_FRAGMENTED, s->recv_fragmented},
        {TIPC_NLA_STATS_RX_BUNDLES, s->recv_bundles},
        {TIPC_NLA_STATS_RX_BUNDLED, s->recv_bundled},
        {TIPC_NLA_STATS_TX_INFO, s->sent_info},
        {TIPC_NLA_STATS_TX_FRAGMENTS, s->sent_fragments},
        {TIPC_NLA_STATS_TX_FRAGMENTED, s->sent_fragmented},
        {TIPC_NLA_STATS_TX_BUNDLES, s->sent_bundles},
        {TIPC_NLA_STATS_TX_BUNDLED, s->sent_bundled},
        {TIPC_NLA_STATS_MSG_PROF_TOT, (s->msg_length_counts) ?
            s->msg_length_counts : 1},
        {TIPC_NLA_STATS_MSG_LEN_CNT, s->msg_length_counts},
        {TIPC_NLA_STATS_MSG_LEN_TOT, s->msg_lengths_total},
        {TIPC_NLA_STATS_MSG_LEN_P0, s->msg_length_profile[0]},
        {TIPC_NLA_STATS_MSG_LEN_P1, s->msg_length_profile[1]},
        {TIPC_NLA_STATS_MSG_LEN_P2, s->msg_length_profile[2]},
        {TIPC_NLA_STATS_MSG_LEN_P3, s->msg_length_profile[3]},
        {TIPC_NLA_STATS_MSG_LEN_P4, s->msg_length_profile[4]},
        {TIPC_NLA_STATS_MSG_LEN_P5, s->msg_length_profile[5]},
        {TIPC_NLA_STATS_MSG_LEN_P6, s->msg_length_profile[6]},
        {TIPC_NLA_STATS_RX_STATES, s->recv_states},
        {TIPC_NLA_STATS_RX_PROBES, s->recv_probes},
        {TIPC_NLA_STATS_RX_NACKS, s->recv_nacks},
        {TIPC_NLA_STATS_RX_DEFERRED, s->deferred_recv},
        {TIPC_NLA_STATS_TX_STATES, s->sent_states},
        {TIPC_NLA_STATS_TX_PROBES, s->sent_probes},
        {TIPC_NLA_STATS_TX_NACKS, s->sent_nacks},
        {TIPC_NLA_STATS_TX_ACKS, s->sent_acks},
        {TIPC_NLA_STATS_RETRANSMITTED, s->retransmitted},
        {TIPC_NLA_STATS_DUPLICATES, s->duplicates},
        {TIPC_NLA_STATS_LINK_CONGS, s->link_congs},
        {TIPC_NLA_STATS_MAX_QUEUE, s->max_queue_sz},
        {TIPC_NLA_STATS_AVG_QUEUE, s->queue_sz_counts ?
            (s->accu_queue_sz / s->queue_sz_counts) : 0}
    };

    stats = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
    if (!stats)
        return -EMSGSIZE;

    for (i = 0; i < ARRAY_SIZE(map); i++)
        if (nla_put_u32(skb, map[i].key, map[i].val))
            goto msg_full;

    nla_nest_end(skb, stats);

    return 0;
msg_full:
    nla_nest_cancel(skb, stats);

    return -EMSGSIZE;
}
/* Caller should hold appropriate locks to protect the link */
int __tipc_nl_add_link(struct net *net, struct tipc_nl_msg *msg,
                       struct tipc_link *link, int nlflags)
{
    int err;
    void *hdr;
    struct nlattr *attrs;
    struct nlattr *prop;
    struct tipc_net *tn = net_generic(net, tipc_net_id);

    hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
                      nlflags, TIPC_NL_LINK_GET);
    if (!hdr)
        return -EMSGSIZE;

    attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
    if (!attrs)
        goto msg_full;

    if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, link->name))
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_LINK_DEST,
                    tipc_cluster_mask(tn->own_addr)))
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_LINK_MTU, link->mtu))
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, link->rcv_nxt))
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, link->snd_nxt))
        goto attr_msg_full;

    if (tipc_link_is_up(link))
        if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
            goto attr_msg_full;
    if (link->active)
        if (nla_put_flag(msg->skb, TIPC_NLA_LINK_ACTIVE))
            goto attr_msg_full;

    prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
    if (!prop)
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
        goto prop_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_PROP_TOL, link->tolerance))
        goto prop_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN,
                    link->window))
        goto prop_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_PROP_PRIO, link->priority))
        goto prop_msg_full;
    nla_nest_end(msg->skb, prop);

    err = __tipc_nl_add_stats(msg->skb, &link->stats);
    if (err)
        goto attr_msg_full;

    nla_nest_end(msg->skb, attrs);
    genlmsg_end(msg->skb, hdr);

    return 0;

prop_msg_full:
    nla_nest_cancel(msg->skb, prop);
attr_msg_full:
    nla_nest_cancel(msg->skb, attrs);
msg_full:
    genlmsg_cancel(msg->skb, hdr);

    return -EMSGSIZE;
}
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
                                      struct tipc_stats *stats)
{
    int i;
    struct nlattr *nest;

    struct nla_map {
        u32 key;
        u32 val;
    };

    struct nla_map map[] = {
        {TIPC_NLA_STATS_RX_INFO, stats->recv_info},
        {TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
        {TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
        {TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
        {TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
        {TIPC_NLA_STATS_TX_INFO, stats->sent_info},
        {TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
        {TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
        {TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
        {TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
        {TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
        {TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
        {TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
        {TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
        {TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
        {TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
        {TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
        {TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
        {TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
            (stats->accu_queue_sz / stats->queue_sz_counts) : 0}
    };

    nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
    if (!nest)
        return -EMSGSIZE;

    for (i = 0; i < ARRAY_SIZE(map); i++)
        if (nla_put_u32(skb, map[i].key, map[i].val))
            goto msg_full;

    nla_nest_end(skb, nest);

    return 0;
msg_full:
    nla_nest_cancel(skb, nest);

    return -EMSGSIZE;
}
int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
    int err;
    void *hdr;
    struct nlattr *attrs;
    struct nlattr *prop;
    struct tipc_net *tn = net_generic(net, tipc_net_id);
    struct tipc_link *bcl = tn->bcl;

    if (!bcl)
        return 0;

    tipc_bcast_lock(net);

    hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
                      NLM_F_MULTI, TIPC_NL_LINK_GET);
    if (!hdr) {
        tipc_bcast_unlock(net);
        return -EMSGSIZE;
    }

    attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
    if (!attrs)
        goto msg_full;

    /* The broadcast link is always up */
    if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
        goto attr_msg_full;

    if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
        goto attr_msg_full;
    if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
        goto attr_msg_full;

    prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
    if (!prop)
        goto attr_msg_full;
    if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
        goto prop_msg_full;
    nla_nest_end(msg->skb, prop);

    err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
    if (err)
        goto attr_msg_full;

    tipc_bcast_unlock(net);
    nla_nest_end(msg->skb, attrs);
    genlmsg_end(msg->skb, hdr);

    return 0;

prop_msg_full:
    nla_nest_cancel(msg->skb, prop);
attr_msg_full:
    nla_nest_cancel(msg->skb, attrs);
msg_full:
    tipc_bcast_unlock(net);
    genlmsg_cancel(msg->skb, hdr);

    return -EMSGSIZE;
}
void tipc_link_set_tolerance(struct tipc_link *l, u32 tol,
                             struct sk_buff_head *xmitq)
{
    l->tolerance = tol;
    tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, tol, 0, xmitq);
}

void tipc_link_set_prio(struct tipc_link *l, u32 prio,
                        struct sk_buff_head *xmitq)
{
    l->priority = prio;
    tipc_link_build_proto_msg(l, STATE_MSG, 0, 0, 0, prio, xmitq);
}

void tipc_link_set_abort_limit(struct tipc_link *l, u32 limit)
{
    l->abort_limit = limit;
}