/*
 * net/tipc/bcast.c: TIPC broadcast code
 *
 * Copyright (c) 2004-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004, Intel Corporation.
 * Copyright (c) 2005, 2010-2011, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "socket.h"
#include "msg.h"
#include "bcast.h"
#include "name_distr.h"
#include "core.h"

#define MAX_PKT_DEFAULT_MCAST	1500	/* bcast link max packet size (fixed) */
#define BCLINK_WIN_DEFAULT	20	/* bcast link window size (default) */

const char tipc_bclink_name[] = "broadcast-link";

static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff);
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node);
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node);
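
/* Note: all broadcast link state hanging off tn->bclink (transmit queue,
 * node map, arrival/input queues) is serialized by a single per-namespace
 * spinlock; the two helpers below are its only lock/unlock points.
 */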
static void tipc_bclink_lock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->bclink->lock);
}

static void tipc_bclink_unlock(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_unlock_bh(&tn->bclink->lock);
}

void tipc_bclink_input(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_mcast_rcv(net, &tn->bclink->arrvq, &tn->bclink->inputq);
}

uint tipc_bclink_get_mtu(void)
{
	return MAX_PKT_DEFAULT_MCAST;
}
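
/* Each buffer on the broadcast transmit queue tracks how many cluster nodes
 * still have to acknowledge it. The counter is stashed in the skb control
 * block's 'handle' field by the helpers below; once it drops to zero the
 * buffer can be unlinked and freed (see tipc_bclink_acknowledge()).
 */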
static u32 bcbuf_acks(struct sk_buff *buf)
{
	return (u32)(unsigned long)TIPC_SKB_CB(buf)->handle;
}

static void bcbuf_set_acks(struct sk_buff *buf, u32 acks)
{
	TIPC_SKB_CB(buf)->handle = (void *)(unsigned long)acks;
}

static void bcbuf_decr_acks(struct sk_buff *buf)
{
	bcbuf_set_acks(buf, bcbuf_acks(buf) - 1);
}

void tipc_bclink_add_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_add(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}

void tipc_bclink_remove_node(struct net *net, u32 addr)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_nmap_remove(&tn->bclink->bcast_nodes, addr);
	tipc_bclink_unlock(net);
}
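
/* On the broadcast link the silent_intv_cnt field is reused to cache the
 * sequence number of the most recently sent packet, i.e. snd_nxt - 1
 * wrapped into the 16-bit sequence number space by mod().
 */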
static void bclink_set_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	bcl->silent_intv_cnt = mod(bcl->snd_nxt - 1);
}

u32 tipc_bclink_get_last_sent(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bcl->silent_intv_cnt;
}

static void bclink_update_last_sent(struct tipc_node *node, u32 seqno)
{
	node->bclink.last_sent = less_eq(node->bclink.last_sent, seqno) ?
						seqno : node->bclink.last_sent;
}

/**
 * tipc_bclink_retransmit_to - get most recent node to request retransmission
 *
 * Called with bclink_lock locked
 */
struct tipc_node *tipc_bclink_retransmit_to(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	return tn->bclink->retransmit_to;
}

/**
 * bclink_retransmit_pkt - retransmit broadcast packets
 * @after: sequence number of last packet to *not* retransmit
 * @to: sequence number of last packet to retransmit
 *
 * Called with bclink_lock locked
 */
static void bclink_retransmit_pkt(struct tipc_net *tn, u32 after, u32 to)
{
	struct sk_buff *skb;
	struct tipc_link *bcl = tn->bcl;

	skb_queue_walk(&bcl->transmq, skb) {
		if (more(buf_seqno(skb), after)) {
			tipc_link_retransmit(bcl, skb, mod(to - after));
			break;
		}
	}
}

/**
 * tipc_bclink_wakeup_users - wake up pending users
 *
 * Called with no locks taken
 */
void tipc_bclink_wakeup_users(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_sk_rcv(net, &tn->bclink->link.wakeupq);
}
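
/* Example of the validity window enforced below: if the first unacked
 * packet in the transmit queue has seqno 100 and the last sent packet has
 * seqno 120, only an 'acked' value in 100..120 that also exceeds the
 * node's previous ack can release buffers; anything else is stale or
 * bogus and is ignored.
 */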
/**
 * tipc_bclink_acknowledge - handle acknowledgement of broadcast packets
 * @n_ptr: node that sent acknowledgement info
 * @acked: broadcast sequence # that has been acknowledged
 *
 * Node is locked, bclink_lock unlocked.
 */
void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked)
{
	struct sk_buff *skb, *tmp;
	unsigned int released = 0;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	if (unlikely(!n_ptr->bclink.recv_permitted))
		return;

	tipc_bclink_lock(net);

	/* Bail out if tx queue is empty (no clean up is required) */
	skb = skb_peek(&tn->bcl->transmq);
	if (!skb)
		goto exit;

	/* Determine which messages need to be acknowledged */
	if (acked == INVALID_LINK_SEQ) {
		/*
		 * Contact with specified node has been lost, so need to
		 * acknowledge sent messages only (if other nodes still exist)
		 * or both sent and unsent messages (otherwise)
		 */
		if (tn->bclink->bcast_nodes.count)
			acked = tn->bcl->silent_intv_cnt;
		else
			acked = tn->bcl->snd_nxt;
	} else {
		/*
		 * Bail out if specified sequence number does not correspond
		 * to a message that has been sent and not yet acknowledged
		 */
		if (less(acked, buf_seqno(skb)) ||
		    less(tn->bcl->silent_intv_cnt, acked) ||
		    less_eq(acked, n_ptr->bclink.acked))
			goto exit;
	}

	/* Skip over packets that node has previously acknowledged */
	skb_queue_walk(&tn->bcl->transmq, skb) {
		if (more(buf_seqno(skb), n_ptr->bclink.acked))
			break;
	}

	/* Update packets that node is now acknowledging */
	skb_queue_walk_from_safe(&tn->bcl->transmq, skb, tmp) {
		if (more(buf_seqno(skb), acked))
			break;
		bcbuf_decr_acks(skb);
		bclink_set_last_sent(net);
		if (bcbuf_acks(skb) == 0) {
			__skb_unlink(skb, &tn->bcl->transmq);
			kfree_skb(skb);
			released = 1;
		}
	}
	n_ptr->bclink.acked = acked;

	/* Try resolving broadcast link congestion, if necessary */
	if (unlikely(skb_peek(&tn->bcl->backlogq))) {
		tipc_link_push_packets(tn->bcl);
		bclink_set_last_sent(net);
	}
	if (unlikely(released && !skb_queue_empty(&tn->bcl->wakeupq)))
		n_ptr->action_flags |= TIPC_WAKEUP_BCAST_USERS;
exit:
	tipc_bclink_unlock(net);
}
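
/* The oos_state counter below acts as a simple NACK throttle: it is bumped
 * each time a sequence gap is (re)observed, and a NACK is only sent when
 * the count is even; bclink_peek_nack() can pre-load it so that a NACK
 * already requested by another node delays this node's own request by one
 * round.
 */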
/**
 * tipc_bclink_update_link_state - update broadcast link state
 *
 * RCU and node lock set
 */
void tipc_bclink_update_link_state(struct tipc_node *n_ptr,
				   u32 last_sent)
{
	struct sk_buff *buf;
	struct net *net = n_ptr->net;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	/* Ignore "stale" link state info */
	if (less_eq(last_sent, n_ptr->bclink.last_in))
		return;

	/* Update link synchronization state; quit if in sync */
	bclink_update_last_sent(n_ptr, last_sent);

	if (n_ptr->bclink.last_sent == n_ptr->bclink.last_in)
		return;

	/* Update out-of-sync state; quit if loss is still unconfirmed */
	if ((++n_ptr->bclink.oos_state) == 1) {
		if (n_ptr->bclink.deferred_size < (TIPC_MIN_LINK_WIN / 2))
			return;
		n_ptr->bclink.oos_state++;
	}

	/* Don't NACK if one has been recently sent (or seen) */
	if (n_ptr->bclink.oos_state & 0x1)
		return;

	/* Send NACK */
	buf = tipc_buf_acquire(INT_H_SIZE);
	if (buf) {
		struct tipc_msg *msg = buf_msg(buf);
		struct sk_buff *skb = skb_peek(&n_ptr->bclink.deferdq);
		u32 to = skb ? buf_seqno(skb) - 1 : n_ptr->bclink.last_sent;

		tipc_msg_init(tn->own_addr, msg, BCAST_PROTOCOL, STATE_MSG,
			      INT_H_SIZE, n_ptr->addr);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		msg_set_bcast_ack(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_after(msg, n_ptr->bclink.last_in);
		msg_set_bcgap_to(msg, to);

		tipc_bclink_lock(net);
		tipc_bearer_send(net, MAX_BEARERS, buf, NULL);
		tn->bcl->stats.sent_nacks++;
		tipc_bclink_unlock(net);
		kfree_skb(buf);

		n_ptr->bclink.oos_state++;
	}
}

/**
 * bclink_peek_nack - monitor retransmission requests sent by other nodes
 *
 * Delay any upcoming NACK by this node if another node has already
 * requested the first message this node is going to ask for.
 */
static void bclink_peek_nack(struct net *net, struct tipc_msg *msg)
{
	struct tipc_node *n_ptr = tipc_node_find(net, msg_destnode(msg));

	if (unlikely(!n_ptr))
		return;

	tipc_node_lock(n_ptr);
	if (n_ptr->bclink.recv_permitted &&
	    (n_ptr->bclink.last_in != n_ptr->bclink.last_sent) &&
	    (n_ptr->bclink.last_in == msg_bcgap_after(msg)))
		n_ptr->bclink.oos_state = 2;
	tipc_node_unlock(n_ptr);
	tipc_node_put(n_ptr);
}
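
/* Note on tipc_bclink_xmit() below: the sender is itself a cluster member,
 * so a reassembled clone of the outgoing chain is kept aside and fed
 * through tipc_sk_mcast_rcv() for local sockets, while the original chain
 * goes out on the broadcast link.
 */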
/* tipc_bclink_xmit - deliver buffer chain to all nodes in cluster
 *                    and to identified node local sockets
 * @net: the applicable net namespace
 * @list: chain of buffers containing message
 * Consumes the buffer chain, except when returning -ELINKCONG
 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
 */
int tipc_bclink_xmit(struct net *net, struct sk_buff_head *list)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_bclink *bclink = tn->bclink;
	int rc = 0;
	int bc = 0;
	struct sk_buff *skb;
	struct sk_buff_head arrvq;
	struct sk_buff_head inputq;

	/* Prepare clone of message for local node */
	skb = tipc_msg_reassemble(list);
	if (unlikely(!skb)) {
		__skb_queue_purge(list);
		return -EHOSTUNREACH;
	}
	/* Broadcast to all nodes */
	if (likely(bclink)) {
		tipc_bclink_lock(net);
		if (likely(bclink->bcast_nodes.count)) {
			rc = __tipc_link_xmit(net, bcl, list);
			if (likely(!rc)) {
				u32 len = skb_queue_len(&bcl->transmq);

				bclink_set_last_sent(net);
				bcl->stats.queue_sz_counts++;
				bcl->stats.accu_queue_sz += len;
			}
			bc = 1;
		}
		tipc_bclink_unlock(net);
	}

	if (unlikely(!bc))
		__skb_queue_purge(list);

	if (unlikely(rc)) {
		kfree_skb(skb);
		return rc;
	}
	/* Deliver message clone */
	__skb_queue_head_init(&arrvq);
	skb_queue_head_init(&inputq);
	__skb_queue_tail(&arrvq, skb);
	tipc_sk_mcast_rcv(net, &arrvq, &inputq);
	return rc;
}

/**
 * bclink_accept_pkt - accept an incoming, in-sequence broadcast packet
 *
 * Called with both sending node's lock and bclink_lock taken.
 */
static void bclink_accept_pkt(struct tipc_node *node, u32 seqno)
{
	struct tipc_net *tn = net_generic(node->net, tipc_net_id);

	bclink_update_last_sent(node, seqno);
	node->bclink.last_in = seqno;
	node->bclink.oos_state = 0;
	tn->bcl->stats.recv_info++;

	/*
	 * Unicast an ACK periodically, ensuring that
	 * all nodes in the cluster don't ACK at the same time
	 */
	if (((seqno - tn->own_addr) % TIPC_MIN_LINK_WIN) == 0) {
		tipc_link_proto_xmit(node->active_links[node->addr & 1],
				     STATE_MSG, 0, 0, 0, 0);
		tn->bcl->stats.sent_acks++;
	}
}
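
/* tipc_bclink_rcv() below distinguishes three cases: broadcast protocol
 * (NACK/state) messages, which may trigger retransmission; in-sequence
 * data, bundle or fragment messages, which are accepted and delivered;
 * and out-of-sequence messages, which are either deferred or counted as
 * duplicates.
 */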
/**
 * tipc_bclink_rcv - receive a broadcast packet, and deliver upwards
 *
 * RCU is locked, no other locks set
 */
void tipc_bclink_rcv(struct net *net, struct sk_buff *buf)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_node *node;
	u32 next_in;
	u32 seqno;
	int deferred = 0;
	int pos = 0;
	struct sk_buff *iskb;
	struct sk_buff_head *arrvq, *inputq;

	/* Screen out unwanted broadcast messages */
	if (msg_mc_netid(msg) != tn->net_id)
		goto exit;

	node = tipc_node_find(net, msg_prevnode(msg));
	if (unlikely(!node))
		goto exit;

	tipc_node_lock(node);
	if (unlikely(!node->bclink.recv_permitted))
		goto unlock;

	/* Handle broadcast protocol message */
	if (unlikely(msg_user(msg) == BCAST_PROTOCOL)) {
		if (msg_type(msg) != STATE_MSG)
			goto unlock;
		if (msg_destnode(msg) == tn->own_addr) {
			tipc_bclink_acknowledge(node, msg_bcast_ack(msg));
			tipc_bclink_lock(net);
			bcl->stats.recv_nacks++;
			tn->bclink->retransmit_to = node;
			bclink_retransmit_pkt(tn, msg_bcgap_after(msg),
					      msg_bcgap_to(msg));
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_node_unlock(node);
			bclink_peek_nack(net, msg);
		}
		tipc_node_put(node);
		goto exit;
	}

	/* Handle in-sequence broadcast message */
	seqno = msg_seqno(msg);
	next_in = mod(node->bclink.last_in + 1);
	arrvq = &tn->bclink->arrvq;
	inputq = &tn->bclink->inputq;

	if (likely(seqno == next_in)) {
receive:
		/* Deliver message to destination */
		if (likely(msg_isdata(msg))) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			spin_lock_bh(&inputq->lock);
			__skb_queue_tail(arrvq, buf);
			spin_unlock_bh(&inputq->lock);
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_BUNDLER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			bcl->stats.recv_bundles++;
			bcl->stats.recv_bundled += msg_msgcnt(msg);
			pos = 0;
			while (tipc_msg_extract(buf, &iskb, &pos)) {
				spin_lock_bh(&inputq->lock);
				__skb_queue_tail(arrvq, iskb);
				spin_unlock_bh(&inputq->lock);
			}
			node->action_flags |= TIPC_BCAST_MSG_EVT;
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else if (msg_user(msg) == MSG_FRAGMENTER) {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_buf_append(&node->bclink.reasm_buf, &buf);
			if (unlikely(!buf && !node->bclink.reasm_buf)) {
				tipc_bclink_unlock(net);
				goto unlock;
			}
			bcl->stats.recv_fragments++;
			if (buf) {
				bcl->stats.recv_fragmented++;
				msg = buf_msg(buf);
				tipc_bclink_unlock(net);
				goto receive;
			}
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
		} else {
			tipc_bclink_lock(net);
			bclink_accept_pkt(node, seqno);
			tipc_bclink_unlock(net);
			tipc_node_unlock(node);
			kfree_skb(buf);
		}
		buf = NULL;

		/* Determine new synchronization state */
		tipc_node_lock(node);
		if (unlikely(!tipc_node_is_up(node)))
			goto unlock;

		if (node->bclink.last_in == node->bclink.last_sent)
			goto unlock;

		if (skb_queue_empty(&node->bclink.deferdq)) {
			node->bclink.oos_state = 1;
			goto unlock;
		}

		msg = buf_msg(skb_peek(&node->bclink.deferdq));
		seqno = msg_seqno(msg);
		next_in = mod(next_in + 1);
		if (seqno != next_in)
			goto unlock;

		/* Take in-sequence message from deferred queue & deliver it */
		buf = __skb_dequeue(&node->bclink.deferdq);
		goto receive;
	}

	/* Handle out-of-sequence broadcast message */
	if (less(next_in, seqno)) {
		deferred = tipc_link_defer_pkt(&node->bclink.deferdq,
					       buf);
		bclink_update_last_sent(node, seqno);
		buf = NULL;
	}

	tipc_bclink_lock(net);

	if (deferred)
		bcl->stats.deferred_recv++;
	else
		bcl->stats.duplicates++;

	tipc_bclink_unlock(net);

unlock:
	tipc_node_unlock(node);
	tipc_node_put(node);
exit:
	kfree_skb(buf);
}

u32 tipc_bclink_acks_missing(struct tipc_node *n_ptr)
{
	return (n_ptr->bclink.recv_permitted &&
		(tipc_bclink_get_last_sent(n_ptr->net) != n_ptr->bclink.acked));
}
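
/* Worked example for tipc_bcbearer_send() below: with two bearers, where
 * bearer A reaches nodes {1,2,3} and bearer B reaches {3,4}, the remains
 * map starts as {1,2,3,4}; after sending on A it shrinks to {4}, so the
 * packet is also sent on B, after which the map is empty and the loop
 * stops.
 */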
/**
 * tipc_bcbearer_send - send a packet through the broadcast pseudo-bearer
 *
 * Send packet over as many bearers as necessary to reach all nodes
 * that have joined the broadcast link.
 *
 * Returns 0 (packet sent successfully) under all circumstances,
 * since the broadcast link's pseudo-bearer never blocks
 */
static int tipc_bcbearer_send(struct net *net, struct sk_buff *buf,
			      struct tipc_bearer *unused1,
			      struct tipc_media_addr *unused2)
{
	int bp_index;
	struct tipc_msg *msg = buf_msg(buf);
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bclink *bclink = tn->bclink;

	/* Prepare broadcast link message for reliable transmission,
	 * if first time trying to send it;
	 * preparation is skipped for broadcast link protocol messages
	 * since they are sent in an unreliable manner and don't need it
	 */
	if (likely(!msg_non_seq(buf_msg(buf)))) {
		bcbuf_set_acks(buf, bclink->bcast_nodes.count);
		msg_set_non_seq(msg, 1);
		msg_set_mc_netid(msg, tn->net_id);
		tn->bcl->stats.sent_info++;
		if (WARN_ON(!bclink->bcast_nodes.count)) {
			dump_stack();
			return 0;
		}
	}

	/* Send buffer over bearers until all targets reached */
	bcbearer->remains = bclink->bcast_nodes;

	for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
		struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
		struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
		struct tipc_bearer *bp[2] = {p, s};
		struct tipc_bearer *b = bp[msg_link_selector(msg)];
		struct sk_buff *tbuf;

		if (!p)
			break; /* No more bearers to try */
		if (!b)
			b = p;
		tipc_nmap_diff(&bcbearer->remains, &b->nodes,
			       &bcbearer->remains_new);
		if (bcbearer->remains_new.count == bcbearer->remains.count)
			continue; /* Nothing added by bearer pair */

		if (bp_index == 0) {
			/* Use original buffer for first bearer */
			tipc_bearer_send(net, b->identity, buf, &b->bcast_addr);
		} else {
			/* Avoid concurrent buffer access */
			tbuf = pskb_copy_for_clone(buf, GFP_ATOMIC);
			if (!tbuf)
				break;
			tipc_bearer_send(net, b->identity, tbuf,
					 &b->bcast_addr);
			kfree_skb(tbuf); /* Bearer keeps a clone */
		}
		if (bcbearer->remains_new.count == 0)
			break; /* All targets reached */

		bcbearer->remains = bcbearer->remains_new;
	}

	return 0;
}
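
/* tipc_bcbearer_sort() below rebuilds the primary/secondary pairs walked by
 * tipc_bcbearer_send(): bearers are first grouped per priority level, then
 * emitted from highest to lowest priority; a secondary bearer that reaches
 * a different node set than its primary is promoted to a pair of its own.
 */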
/**
 * tipc_bcbearer_sort - create sets of bearer pairs used by broadcast bearer
 */
void tipc_bcbearer_sort(struct net *net, struct tipc_node_map *nm_ptr,
			u32 node, bool action)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer = tn->bcbearer;
	struct tipc_bcbearer_pair *bp_temp = bcbearer->bpairs_temp;
	struct tipc_bcbearer_pair *bp_curr;
	struct tipc_bearer *b;
	int b_index;
	int pri;

	tipc_bclink_lock(net);

	if (action)
		tipc_nmap_add(nm_ptr, node);
	else
		tipc_nmap_remove(nm_ptr, node);

	/* Group bearers by priority (can assume max of two per priority) */
	memset(bp_temp, 0, sizeof(bcbearer->bpairs_temp));

	rcu_read_lock();
	for (b_index = 0; b_index < MAX_BEARERS; b_index++) {
		b = rcu_dereference_rtnl(tn->bearer_list[b_index]);
		if (!b || !b->nodes.count)
			continue;

		if (!bp_temp[b->priority].primary)
			bp_temp[b->priority].primary = b;
		else
			bp_temp[b->priority].secondary = b;
	}
	rcu_read_unlock();

	/* Create array of bearer pairs for broadcasting */
	bp_curr = bcbearer->bpairs;
	memset(bcbearer->bpairs, 0, sizeof(bcbearer->bpairs));

	for (pri = TIPC_MAX_LINK_PRI; pri >= 0; pri--) {

		if (!bp_temp[pri].primary)
			continue;

		bp_curr->primary = bp_temp[pri].primary;

		if (bp_temp[pri].secondary) {
			if (tipc_nmap_equal(&bp_temp[pri].primary->nodes,
					    &bp_temp[pri].secondary->nodes)) {
				bp_curr->secondary = bp_temp[pri].secondary;
			} else {
				bp_curr++;
				bp_curr->primary = bp_temp[pri].secondary;
			}
		}

		bp_curr++;
	}

	tipc_bclink_unlock(net);
}
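
/* The netlink dump helpers below export the broadcast link statistics as a
 * nested TIPC_NLA_LINK_STATS attribute; the key/value map keeps the
 * attribute emission loop table-driven instead of one nla_put_u32() call
 * per counter.
 */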
static int __tipc_nl_add_bc_link_stat(struct sk_buff *skb,
				      struct tipc_stats *stats)
{
	int i;
	struct nlattr *nest;

	struct nla_map {
		__u32 key;
		__u32 val;
	};

	struct nla_map map[] = {
		{TIPC_NLA_STATS_RX_INFO, stats->recv_info},
		{TIPC_NLA_STATS_RX_FRAGMENTS, stats->recv_fragments},
		{TIPC_NLA_STATS_RX_FRAGMENTED, stats->recv_fragmented},
		{TIPC_NLA_STATS_RX_BUNDLES, stats->recv_bundles},
		{TIPC_NLA_STATS_RX_BUNDLED, stats->recv_bundled},
		{TIPC_NLA_STATS_TX_INFO, stats->sent_info},
		{TIPC_NLA_STATS_TX_FRAGMENTS, stats->sent_fragments},
		{TIPC_NLA_STATS_TX_FRAGMENTED, stats->sent_fragmented},
		{TIPC_NLA_STATS_TX_BUNDLES, stats->sent_bundles},
		{TIPC_NLA_STATS_TX_BUNDLED, stats->sent_bundled},
		{TIPC_NLA_STATS_RX_NACKS, stats->recv_nacks},
		{TIPC_NLA_STATS_RX_DEFERRED, stats->deferred_recv},
		{TIPC_NLA_STATS_TX_NACKS, stats->sent_nacks},
		{TIPC_NLA_STATS_TX_ACKS, stats->sent_acks},
		{TIPC_NLA_STATS_RETRANSMITTED, stats->retransmitted},
		{TIPC_NLA_STATS_DUPLICATES, stats->duplicates},
		{TIPC_NLA_STATS_LINK_CONGS, stats->link_congs},
		{TIPC_NLA_STATS_MAX_QUEUE, stats->max_queue_sz},
		{TIPC_NLA_STATS_AVG_QUEUE, stats->queue_sz_counts ?
			(stats->accu_queue_sz / stats->queue_sz_counts) : 0}
	};

	nest = nla_nest_start(skb, TIPC_NLA_LINK_STATS);
	if (!nest)
		return -EMSGSIZE;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (nla_put_u32(skb, map[i].key, map[i].val))
			goto msg_full;

	nla_nest_end(skb, nest);

	return 0;
msg_full:
	nla_nest_cancel(skb, nest);

	return -EMSGSIZE;
}

int tipc_nl_add_bc_link(struct net *net, struct tipc_nl_msg *msg)
{
	int err;
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *prop;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return 0;

	tipc_bclink_lock(net);

	hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
			  NLM_F_MULTI, TIPC_NL_LINK_GET);
	if (!hdr) {
		tipc_bclink_unlock(net);
		return -EMSGSIZE;
	}

	attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
	if (!attrs)
		goto msg_full;

	/* The broadcast link is always up */
	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_UP))
		goto attr_msg_full;

	if (nla_put_flag(msg->skb, TIPC_NLA_LINK_BROADCAST))
		goto attr_msg_full;
	if (nla_put_string(msg->skb, TIPC_NLA_LINK_NAME, bcl->name))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_RX, bcl->rcv_nxt))
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_LINK_TX, bcl->snd_nxt))
		goto attr_msg_full;

	prop = nla_nest_start(msg->skb, TIPC_NLA_LINK_PROP);
	if (!prop)
		goto attr_msg_full;
	if (nla_put_u32(msg->skb, TIPC_NLA_PROP_WIN, bcl->window))
		goto prop_msg_full;
	nla_nest_end(msg->skb, prop);

	err = __tipc_nl_add_bc_link_stat(msg->skb, &bcl->stats);
	if (err)
		goto attr_msg_full;

	tipc_bclink_unlock(net);
	nla_nest_end(msg->skb, attrs);
	genlmsg_end(msg->skb, hdr);

	return 0;

prop_msg_full:
	nla_nest_cancel(msg->skb, prop);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	tipc_bclink_unlock(net);
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

int tipc_bclink_reset_stats(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;

	tipc_bclink_lock(net);
	memset(&bcl->stats, 0, sizeof(bcl->stats));
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_bclink_set_queue_limits(struct net *net, u32 limit)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_link *bcl = tn->bcl;

	if (!bcl)
		return -ENOPROTOOPT;
	if ((limit < TIPC_MIN_LINK_WIN) || (limit > TIPC_MAX_LINK_WIN))
		return -EINVAL;

	tipc_bclink_lock(net);
	tipc_link_set_queue_limits(bcl, limit);
	tipc_bclink_unlock(net);
	return 0;
}

int tipc_nl_bc_link_set(struct net *net, struct nlattr *attrs[])
{
	int err;
	u32 win;
	struct nlattr *props[TIPC_NLA_PROP_MAX + 1];

	if (!attrs[TIPC_NLA_LINK_PROP])
		return -EINVAL;

	err = tipc_nl_parse_link_prop(attrs[TIPC_NLA_LINK_PROP], props);
	if (err)
		return err;

	if (!props[TIPC_NLA_PROP_WIN])
		return -EOPNOTSUPP;

	win = nla_get_u32(props[TIPC_NLA_PROP_WIN]);

	return tipc_bclink_set_queue_limits(net, win);
}
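
/* tipc_bclink_init() below wires the pieces together: the broadcast link is
 * owned by a dummy node embedded in struct tipc_bclink, and the broadcast
 * pseudo-bearer registers itself in bearer_list[MAX_BEARERS], the slot one
 * past the real bearers.
 */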
int tipc_bclink_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct tipc_bcbearer *bcbearer;
	struct tipc_bclink *bclink;
	struct tipc_link *bcl;

	bcbearer = kzalloc(sizeof(*bcbearer), GFP_ATOMIC);
	if (!bcbearer)
		return -ENOMEM;

	bclink = kzalloc(sizeof(*bclink), GFP_ATOMIC);
	if (!bclink) {
		kfree(bcbearer);
		return -ENOMEM;
	}

	bcl = &bclink->link;
	bcbearer->bearer.media = &bcbearer->media;
	bcbearer->media.send_msg = tipc_bcbearer_send;
	sprintf(bcbearer->media.name, "tipc-broadcast");

	spin_lock_init(&bclink->lock);
	__skb_queue_head_init(&bcl->transmq);
	__skb_queue_head_init(&bcl->backlogq);
	__skb_queue_head_init(&bcl->deferdq);
	skb_queue_head_init(&bcl->wakeupq);
	bcl->snd_nxt = 1;
	spin_lock_init(&bclink->node.lock);
	__skb_queue_head_init(&bclink->arrvq);
	skb_queue_head_init(&bclink->inputq);
	bcl->owner = &bclink->node;
	bcl->owner->net = net;
	bcl->mtu = MAX_PKT_DEFAULT_MCAST;
	tipc_link_set_queue_limits(bcl, BCLINK_WIN_DEFAULT);
	bcl->bearer_id = MAX_BEARERS;
	rcu_assign_pointer(tn->bearer_list[MAX_BEARERS], &bcbearer->bearer);
	bcl->state = WORKING_WORKING;
	bcl->pmsg = (struct tipc_msg *)&bcl->proto_msg;
	msg_set_prevnode(bcl->pmsg, tn->own_addr);
	strlcpy(bcl->name, tipc_bclink_name, TIPC_MAX_LINK_NAME);
	tn->bcbearer = bcbearer;
	tn->bclink = bclink;
	tn->bcl = bcl;
	return 0;
}

void tipc_bclink_stop(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	tipc_bclink_lock(net);
	tipc_link_purge_queues(tn->bcl);
	tipc_bclink_unlock(net);

	RCU_INIT_POINTER(tn->bearer_list[BCBEARER], NULL);
	synchronize_net();
	kfree(tn->bcbearer);
	kfree(tn->bclink);
}
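
/* The node map helpers below treat struct tipc_node_map as a bitmap of
 * WSIZE-bit words plus a population count. Worked example: with WSIZE == 32,
 * node number 37 lands in word 37 / 32 == 1, bit 37 % 32 == 5.
 */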
/**
 * tipc_nmap_add - add a node to a node map
 */
static void tipc_nmap_add(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) == 0) {
		nm_ptr->count++;
		nm_ptr->map[w] |= mask;
	}
}

/**
 * tipc_nmap_remove - remove a node from a node map
 */
static void tipc_nmap_remove(struct tipc_node_map *nm_ptr, u32 node)
{
	int n = tipc_node(node);
	int w = n / WSIZE;
	u32 mask = (1 << (n % WSIZE));

	if ((nm_ptr->map[w] & mask) != 0) {
		nm_ptr->map[w] &= ~mask;
		nm_ptr->count--;
	}
}

/**
 * tipc_nmap_diff - find differences between node maps
 * @nm_a: input node map A
 * @nm_b: input node map B
 * @nm_diff: output node map A-B (i.e. nodes of A that are not in B)
 */
static void tipc_nmap_diff(struct tipc_node_map *nm_a,
			   struct tipc_node_map *nm_b,
			   struct tipc_node_map *nm_diff)
{
	int stop = ARRAY_SIZE(nm_a->map);
	int w;
	int b;
	u32 map;

	memset(nm_diff, 0, sizeof(*nm_diff));
	for (w = 0; w < stop; w++) {
		map = nm_a->map[w] ^ (nm_a->map[w] & nm_b->map[w]);
		nm_diff->map[w] = map;
		if (map != 0) {
			for (b = 0 ; b < WSIZE; b++) {
				if (map & (1 << b))
					nm_diff->count++;
			}
		}
	}
}