2 * net/tipc/link.c: TIPC link code
4 * Copyright (c) 1996-2007, 2012-2015, Ericsson AB
5 * Copyright (c) 2004-2007, 2010-2013, Wind River Systems
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the names of the copyright holders nor the names of its
17 * contributors may be used to endorse or promote products derived from
18 * this software without specific prior written permission.
20 * Alternatively, this software may be distributed under the terms of the
21 * GNU General Public License ("GPL") version 2 as published by the Free
22 * Software Foundation.
24 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
25 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
28 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
29 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
30 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
31 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
32 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
33 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
34 * POSSIBILITY OF SUCH DAMAGE.
42 #include "name_distr.h"
46 #include <linux/pkt_sched.h>
49 * Error message prefixes
51 static const char *link_co_err
= "Link changeover error, ";
52 static const char *link_rst_msg
= "Resetting link ";
53 static const char *link_unk_evt
= "Unknown link event ";
55 static const struct nla_policy tipc_nl_link_policy
[TIPC_NLA_LINK_MAX
+ 1] = {
56 [TIPC_NLA_LINK_UNSPEC
] = { .type
= NLA_UNSPEC
},
57 [TIPC_NLA_LINK_NAME
] = {
59 .len
= TIPC_MAX_LINK_NAME
61 [TIPC_NLA_LINK_MTU
] = { .type
= NLA_U32
},
62 [TIPC_NLA_LINK_BROADCAST
] = { .type
= NLA_FLAG
},
63 [TIPC_NLA_LINK_UP
] = { .type
= NLA_FLAG
},
64 [TIPC_NLA_LINK_ACTIVE
] = { .type
= NLA_FLAG
},
65 [TIPC_NLA_LINK_PROP
] = { .type
= NLA_NESTED
},
66 [TIPC_NLA_LINK_STATS
] = { .type
= NLA_NESTED
},
67 [TIPC_NLA_LINK_RX
] = { .type
= NLA_U32
},
68 [TIPC_NLA_LINK_TX
] = { .type
= NLA_U32
}
71 /* Properties valid for media, bearer and link */
72 static const struct nla_policy tipc_nl_prop_policy
[TIPC_NLA_PROP_MAX
+ 1] = {
73 [TIPC_NLA_PROP_UNSPEC
] = { .type
= NLA_UNSPEC
},
74 [TIPC_NLA_PROP_PRIO
] = { .type
= NLA_U32
},
75 [TIPC_NLA_PROP_TOL
] = { .type
= NLA_U32
},
76 [TIPC_NLA_PROP_WIN
] = { .type
= NLA_U32
}
80 * Out-of-range value for link session numbers
82 #define INVALID_SESSION 0x10000
87 #define STARTING_EVT 856384768 /* link processing trigger */
88 #define TRAFFIC_MSG_EVT 560815u /* rx'd ??? */
89 #define TIMEOUT_EVT 560817u /* link timer expired */
92 * State value stored in 'failover_pkts'
94 #define FIRST_FAILOVER 0xffffu
96 static void link_handle_out_of_seq_msg(struct tipc_link
*link
,
98 static void tipc_link_proto_rcv(struct tipc_link
*link
,
100 static void link_set_supervision_props(struct tipc_link
*l_ptr
, u32 tol
);
101 static void link_state_event(struct tipc_link
*l_ptr
, u32 event
);
102 static void link_reset_statistics(struct tipc_link
*l_ptr
);
103 static void link_print(struct tipc_link
*l_ptr
, const char *str
);
104 static void tipc_link_sync_xmit(struct tipc_link
*l
);
105 static void tipc_link_sync_rcv(struct tipc_node
*n
, struct sk_buff
*buf
);
106 static void tipc_link_input(struct tipc_link
*l
, struct sk_buff
*skb
);
107 static bool tipc_data_input(struct tipc_link
*l
, struct sk_buff
*skb
);
108 static bool tipc_link_failover_rcv(struct tipc_link
*l
, struct sk_buff
**skb
);
110 * Simple link routines
/* align - round a byte count up to the next multiple of 4 */
static unsigned int align(unsigned int i)
{
	unsigned int rem = i & 3u;

	return rem ? i + (4u - rem) : i;
}
117 static void tipc_link_release(struct kref
*kref
)
119 kfree(container_of(kref
, struct tipc_link
, ref
));
122 static void tipc_link_get(struct tipc_link
*l_ptr
)
124 kref_get(&l_ptr
->ref
);
127 static void tipc_link_put(struct tipc_link
*l_ptr
)
129 kref_put(&l_ptr
->ref
, tipc_link_release
);
132 static struct tipc_link
*tipc_parallel_link(struct tipc_link
*l
)
134 if (l
->owner
->active_links
[0] != l
)
135 return l
->owner
->active_links
[0];
136 return l
->owner
->active_links
[1];
140 * Simple non-static link routines (i.e. referenced outside this file)
142 int tipc_link_is_up(struct tipc_link
*l_ptr
)
146 return link_working_working(l_ptr
) || link_working_unknown(l_ptr
);
149 int tipc_link_is_active(struct tipc_link
*l_ptr
)
151 return (l_ptr
->owner
->active_links
[0] == l_ptr
) ||
152 (l_ptr
->owner
->active_links
[1] == l_ptr
);
156 * link_timeout - handle expiration of link timer
157 * @l_ptr: pointer to link
159 static void link_timeout(unsigned long data
)
161 struct tipc_link
*l_ptr
= (struct tipc_link
*)data
;
164 tipc_node_lock(l_ptr
->owner
);
166 /* update counters used in statistical profiling of send traffic */
167 l_ptr
->stats
.accu_queue_sz
+= skb_queue_len(&l_ptr
->transmq
);
168 l_ptr
->stats
.queue_sz_counts
++;
170 skb
= skb_peek(&l_ptr
->transmq
);
172 struct tipc_msg
*msg
= buf_msg(skb
);
173 u32 length
= msg_size(msg
);
175 if ((msg_user(msg
) == MSG_FRAGMENTER
) &&
176 (msg_type(msg
) == FIRST_FRAGMENT
)) {
177 length
= msg_size(msg_get_wrapped(msg
));
180 l_ptr
->stats
.msg_lengths_total
+= length
;
181 l_ptr
->stats
.msg_length_counts
++;
183 l_ptr
->stats
.msg_length_profile
[0]++;
184 else if (length
<= 256)
185 l_ptr
->stats
.msg_length_profile
[1]++;
186 else if (length
<= 1024)
187 l_ptr
->stats
.msg_length_profile
[2]++;
188 else if (length
<= 4096)
189 l_ptr
->stats
.msg_length_profile
[3]++;
190 else if (length
<= 16384)
191 l_ptr
->stats
.msg_length_profile
[4]++;
192 else if (length
<= 32768)
193 l_ptr
->stats
.msg_length_profile
[5]++;
195 l_ptr
->stats
.msg_length_profile
[6]++;
199 /* do all other link processing performed on a periodic basis */
200 link_state_event(l_ptr
, TIMEOUT_EVT
);
202 if (skb_queue_len(&l_ptr
->backlogq
))
203 tipc_link_push_packets(l_ptr
);
205 tipc_node_unlock(l_ptr
->owner
);
206 tipc_link_put(l_ptr
);
209 static void link_set_timer(struct tipc_link
*link
, unsigned long time
)
211 if (!mod_timer(&link
->timer
, jiffies
+ time
))
216 * tipc_link_create - create a new link
217 * @n_ptr: pointer to associated node
218 * @b_ptr: pointer to associated bearer
219 * @media_addr: media address to use when sending messages over link
221 * Returns pointer to link.
223 struct tipc_link
*tipc_link_create(struct tipc_node
*n_ptr
,
224 struct tipc_bearer
*b_ptr
,
225 const struct tipc_media_addr
*media_addr
)
227 struct tipc_net
*tn
= net_generic(n_ptr
->net
, tipc_net_id
);
228 struct tipc_link
*l_ptr
;
229 struct tipc_msg
*msg
;
231 char addr_string
[16];
232 u32 peer
= n_ptr
->addr
;
234 if (n_ptr
->link_cnt
>= MAX_BEARERS
) {
235 tipc_addr_string_fill(addr_string
, n_ptr
->addr
);
236 pr_err("Attempt to establish %uth link to %s. Max %u allowed.\n",
237 n_ptr
->link_cnt
, addr_string
, MAX_BEARERS
);
241 if (n_ptr
->links
[b_ptr
->identity
]) {
242 tipc_addr_string_fill(addr_string
, n_ptr
->addr
);
243 pr_err("Attempt to establish second link on <%s> to %s\n",
244 b_ptr
->name
, addr_string
);
248 l_ptr
= kzalloc(sizeof(*l_ptr
), GFP_ATOMIC
);
250 pr_warn("Link creation failed, no memory\n");
253 kref_init(&l_ptr
->ref
);
255 if_name
= strchr(b_ptr
->name
, ':') + 1;
256 sprintf(l_ptr
->name
, "%u.%u.%u:%s-%u.%u.%u:unknown",
257 tipc_zone(tn
->own_addr
), tipc_cluster(tn
->own_addr
),
258 tipc_node(tn
->own_addr
),
260 tipc_zone(peer
), tipc_cluster(peer
), tipc_node(peer
));
261 /* note: peer i/f name is updated by reset/activate message */
262 memcpy(&l_ptr
->media_addr
, media_addr
, sizeof(*media_addr
));
263 l_ptr
->owner
= n_ptr
;
264 l_ptr
->checkpoint
= 1;
265 l_ptr
->peer_session
= INVALID_SESSION
;
266 l_ptr
->bearer_id
= b_ptr
->identity
;
267 link_set_supervision_props(l_ptr
, b_ptr
->tolerance
);
268 l_ptr
->state
= RESET_UNKNOWN
;
270 l_ptr
->pmsg
= (struct tipc_msg
*)&l_ptr
->proto_msg
;
272 tipc_msg_init(tn
->own_addr
, msg
, LINK_PROTOCOL
, RESET_MSG
, INT_H_SIZE
,
274 msg_set_size(msg
, sizeof(l_ptr
->proto_msg
));
275 msg_set_session(msg
, (tn
->random
& 0xffff));
276 msg_set_bearer_id(msg
, b_ptr
->identity
);
277 strcpy((char *)msg_data(msg
), if_name
);
278 l_ptr
->net_plane
= b_ptr
->net_plane
;
279 l_ptr
->advertised_mtu
= b_ptr
->mtu
;
280 l_ptr
->mtu
= l_ptr
->advertised_mtu
;
281 l_ptr
->priority
= b_ptr
->priority
;
282 tipc_link_set_queue_limits(l_ptr
, b_ptr
->window
);
283 l_ptr
->next_out_no
= 1;
284 __skb_queue_head_init(&l_ptr
->transmq
);
285 __skb_queue_head_init(&l_ptr
->backlogq
);
286 __skb_queue_head_init(&l_ptr
->deferdq
);
287 skb_queue_head_init(&l_ptr
->wakeupq
);
288 skb_queue_head_init(&l_ptr
->inputq
);
289 skb_queue_head_init(&l_ptr
->namedq
);
290 link_reset_statistics(l_ptr
);
291 tipc_node_attach_link(n_ptr
, l_ptr
);
292 setup_timer(&l_ptr
->timer
, link_timeout
, (unsigned long)l_ptr
);
293 link_state_event(l_ptr
, STARTING_EVT
);
299 * tipc_link_delete - Delete a link
300 * @l: link to be deleted
302 void tipc_link_delete(struct tipc_link
*l
)
305 if (del_timer(&l
->timer
))
307 l
->flags
|= LINK_STOPPED
;
308 /* Delete link now, or when timer is finished: */
309 tipc_link_reset_fragments(l
);
310 tipc_node_detach_link(l
->owner
, l
);
314 void tipc_link_delete_list(struct net
*net
, unsigned int bearer_id
,
317 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
318 struct tipc_link
*link
;
319 struct tipc_node
*node
;
322 list_for_each_entry_rcu(node
, &tn
->node_list
, list
) {
323 tipc_node_lock(node
);
324 link
= node
->links
[bearer_id
];
326 tipc_link_delete(link
);
327 tipc_node_unlock(node
);
333 * link_schedule_user - schedule a message sender for wakeup after congestion
334 * @link: congested link
335 * @list: message that was attempted sent
336 * Create pseudo msg to send back to user when congestion abates
337 * Only consumes message if there is an error
339 static int link_schedule_user(struct tipc_link
*link
, struct sk_buff_head
*list
)
341 struct tipc_msg
*msg
= buf_msg(skb_peek(list
));
342 int imp
= msg_importance(msg
);
343 u32 oport
= msg_origport(msg
);
344 u32 addr
= link_own_addr(link
);
347 /* This really cannot happen... */
348 if (unlikely(imp
> TIPC_CRITICAL_IMPORTANCE
)) {
349 pr_warn("%s<%s>, send queue full", link_rst_msg
, link
->name
);
350 tipc_link_reset(link
);
353 /* Non-blocking sender: */
354 if (TIPC_SKB_CB(skb_peek(list
))->wakeup_pending
)
357 /* Create and schedule wakeup pseudo message */
358 skb
= tipc_msg_create(SOCK_WAKEUP
, 0, INT_H_SIZE
, 0,
359 addr
, addr
, oport
, 0, 0);
362 TIPC_SKB_CB(skb
)->chain_sz
= skb_queue_len(list
);
363 TIPC_SKB_CB(skb
)->chain_imp
= imp
;
364 skb_queue_tail(&link
->wakeupq
, skb
);
365 link
->stats
.link_congs
++;
368 __skb_queue_purge(list
);
373 * link_prepare_wakeup - prepare users for wakeup after congestion
374 * @link: congested link
375 * Move a number of waiting users, as permitted by available space in
376 * the send queue, from link wait queue to node wait queue for wakeup
378 void link_prepare_wakeup(struct tipc_link
*l
)
380 int pnd
[TIPC_SYSTEM_IMPORTANCE
+ 1] = {0,};
382 struct sk_buff
*skb
, *tmp
;
384 skb_queue_walk_safe(&l
->wakeupq
, skb
, tmp
) {
385 imp
= TIPC_SKB_CB(skb
)->chain_imp
;
386 lim
= l
->window
+ l
->backlog
[imp
].limit
;
387 pnd
[imp
] += TIPC_SKB_CB(skb
)->chain_sz
;
388 if ((pnd
[imp
] + l
->backlog
[imp
].len
) >= lim
)
390 skb_unlink(skb
, &l
->wakeupq
);
391 skb_queue_tail(&l
->inputq
, skb
);
392 l
->owner
->inputq
= &l
->inputq
;
393 l
->owner
->action_flags
|= TIPC_MSG_EVT
;
398 * tipc_link_reset_fragments - purge link's inbound message fragments queue
399 * @l_ptr: pointer to link
401 void tipc_link_reset_fragments(struct tipc_link
*l_ptr
)
403 kfree_skb(l_ptr
->reasm_buf
);
404 l_ptr
->reasm_buf
= NULL
;
407 static void tipc_link_purge_backlog(struct tipc_link
*l
)
409 __skb_queue_purge(&l
->backlogq
);
410 l
->backlog
[TIPC_LOW_IMPORTANCE
].len
= 0;
411 l
->backlog
[TIPC_MEDIUM_IMPORTANCE
].len
= 0;
412 l
->backlog
[TIPC_HIGH_IMPORTANCE
].len
= 0;
413 l
->backlog
[TIPC_CRITICAL_IMPORTANCE
].len
= 0;
414 l
->backlog
[TIPC_SYSTEM_IMPORTANCE
].len
= 0;
418 * tipc_link_purge_queues - purge all pkt queues associated with link
419 * @l_ptr: pointer to link
421 void tipc_link_purge_queues(struct tipc_link
*l_ptr
)
423 __skb_queue_purge(&l_ptr
->deferdq
);
424 __skb_queue_purge(&l_ptr
->transmq
);
425 tipc_link_purge_backlog(l_ptr
);
426 tipc_link_reset_fragments(l_ptr
);
429 void tipc_link_reset(struct tipc_link
*l_ptr
)
431 u32 prev_state
= l_ptr
->state
;
432 int was_active_link
= tipc_link_is_active(l_ptr
);
433 struct tipc_node
*owner
= l_ptr
->owner
;
434 struct tipc_link
*pl
= tipc_parallel_link(l_ptr
);
436 msg_set_session(l_ptr
->pmsg
, ((msg_session(l_ptr
->pmsg
) + 1) & 0xffff));
438 /* Link is down, accept any session */
439 l_ptr
->peer_session
= INVALID_SESSION
;
441 /* Prepare for renewed mtu size negotiation */
442 l_ptr
->mtu
= l_ptr
->advertised_mtu
;
444 l_ptr
->state
= RESET_UNKNOWN
;
446 if ((prev_state
== RESET_UNKNOWN
) || (prev_state
== RESET_RESET
))
449 tipc_node_link_down(l_ptr
->owner
, l_ptr
);
450 tipc_bearer_remove_dest(owner
->net
, l_ptr
->bearer_id
, l_ptr
->addr
);
452 if (was_active_link
&& tipc_node_is_up(l_ptr
->owner
) && (pl
!= l_ptr
)) {
453 l_ptr
->flags
|= LINK_FAILINGOVER
;
454 l_ptr
->failover_checkpt
= l_ptr
->next_in_no
;
455 pl
->failover_pkts
= FIRST_FAILOVER
;
456 pl
->failover_checkpt
= l_ptr
->next_in_no
;
457 pl
->failover_skb
= l_ptr
->reasm_buf
;
459 kfree_skb(l_ptr
->reasm_buf
);
461 /* Clean up all queues, except inputq: */
462 __skb_queue_purge(&l_ptr
->transmq
);
463 __skb_queue_purge(&l_ptr
->deferdq
);
465 owner
->inputq
= &l_ptr
->inputq
;
466 skb_queue_splice_init(&l_ptr
->wakeupq
, owner
->inputq
);
467 if (!skb_queue_empty(owner
->inputq
))
468 owner
->action_flags
|= TIPC_MSG_EVT
;
469 tipc_link_purge_backlog(l_ptr
);
470 l_ptr
->reasm_buf
= NULL
;
471 l_ptr
->rcv_unacked
= 0;
472 l_ptr
->checkpoint
= 1;
473 l_ptr
->next_out_no
= 1;
474 l_ptr
->fsm_msg_cnt
= 0;
475 l_ptr
->stale_count
= 0;
476 link_reset_statistics(l_ptr
);
479 void tipc_link_reset_list(struct net
*net
, unsigned int bearer_id
)
481 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
482 struct tipc_link
*l_ptr
;
483 struct tipc_node
*n_ptr
;
486 list_for_each_entry_rcu(n_ptr
, &tn
->node_list
, list
) {
487 tipc_node_lock(n_ptr
);
488 l_ptr
= n_ptr
->links
[bearer_id
];
490 tipc_link_reset(l_ptr
);
491 tipc_node_unlock(n_ptr
);
496 static void link_activate(struct tipc_link
*link
)
498 struct tipc_node
*node
= link
->owner
;
500 link
->next_in_no
= 1;
501 link
->stats
.recv_info
= 1;
502 tipc_node_link_up(node
, link
);
503 tipc_bearer_add_dest(node
->net
, link
->bearer_id
, link
->addr
);
507 * link_state_event - link finite state machine
508 * @l_ptr: pointer to link
509 * @event: state machine event to process
511 static void link_state_event(struct tipc_link
*l_ptr
, unsigned int event
)
513 struct tipc_link
*other
;
514 unsigned long cont_intv
= l_ptr
->cont_intv
;
516 if (l_ptr
->flags
& LINK_STOPPED
)
519 if (!(l_ptr
->flags
& LINK_STARTED
) && (event
!= STARTING_EVT
))
520 return; /* Not yet. */
522 if (l_ptr
->flags
& LINK_FAILINGOVER
) {
523 if (event
== TIMEOUT_EVT
)
524 link_set_timer(l_ptr
, cont_intv
);
528 switch (l_ptr
->state
) {
529 case WORKING_WORKING
:
531 case TRAFFIC_MSG_EVT
:
535 if (l_ptr
->next_in_no
!= l_ptr
->checkpoint
) {
536 l_ptr
->checkpoint
= l_ptr
->next_in_no
;
537 if (tipc_bclink_acks_missing(l_ptr
->owner
)) {
538 tipc_link_proto_xmit(l_ptr
, STATE_MSG
,
540 l_ptr
->fsm_msg_cnt
++;
542 link_set_timer(l_ptr
, cont_intv
);
545 l_ptr
->state
= WORKING_UNKNOWN
;
546 l_ptr
->fsm_msg_cnt
= 0;
547 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 1, 0, 0, 0);
548 l_ptr
->fsm_msg_cnt
++;
549 link_set_timer(l_ptr
, cont_intv
/ 4);
552 pr_debug("%s<%s>, requested by peer\n",
553 link_rst_msg
, l_ptr
->name
);
554 tipc_link_reset(l_ptr
);
555 l_ptr
->state
= RESET_RESET
;
556 l_ptr
->fsm_msg_cnt
= 0;
557 tipc_link_proto_xmit(l_ptr
, ACTIVATE_MSG
,
559 l_ptr
->fsm_msg_cnt
++;
560 link_set_timer(l_ptr
, cont_intv
);
563 pr_debug("%s%u in WW state\n", link_unk_evt
, event
);
566 case WORKING_UNKNOWN
:
568 case TRAFFIC_MSG_EVT
:
570 l_ptr
->state
= WORKING_WORKING
;
571 l_ptr
->fsm_msg_cnt
= 0;
572 link_set_timer(l_ptr
, cont_intv
);
575 pr_debug("%s<%s>, requested by peer while probing\n",
576 link_rst_msg
, l_ptr
->name
);
577 tipc_link_reset(l_ptr
);
578 l_ptr
->state
= RESET_RESET
;
579 l_ptr
->fsm_msg_cnt
= 0;
580 tipc_link_proto_xmit(l_ptr
, ACTIVATE_MSG
,
582 l_ptr
->fsm_msg_cnt
++;
583 link_set_timer(l_ptr
, cont_intv
);
586 if (l_ptr
->next_in_no
!= l_ptr
->checkpoint
) {
587 l_ptr
->state
= WORKING_WORKING
;
588 l_ptr
->fsm_msg_cnt
= 0;
589 l_ptr
->checkpoint
= l_ptr
->next_in_no
;
590 if (tipc_bclink_acks_missing(l_ptr
->owner
)) {
591 tipc_link_proto_xmit(l_ptr
, STATE_MSG
,
593 l_ptr
->fsm_msg_cnt
++;
595 link_set_timer(l_ptr
, cont_intv
);
596 } else if (l_ptr
->fsm_msg_cnt
< l_ptr
->abort_limit
) {
597 tipc_link_proto_xmit(l_ptr
, STATE_MSG
,
599 l_ptr
->fsm_msg_cnt
++;
600 link_set_timer(l_ptr
, cont_intv
/ 4);
601 } else { /* Link has failed */
602 pr_debug("%s<%s>, peer not responding\n",
603 link_rst_msg
, l_ptr
->name
);
604 tipc_link_reset(l_ptr
);
605 l_ptr
->state
= RESET_UNKNOWN
;
606 l_ptr
->fsm_msg_cnt
= 0;
607 tipc_link_proto_xmit(l_ptr
, RESET_MSG
,
609 l_ptr
->fsm_msg_cnt
++;
610 link_set_timer(l_ptr
, cont_intv
);
614 pr_err("%s%u in WU state\n", link_unk_evt
, event
);
619 case TRAFFIC_MSG_EVT
:
622 other
= l_ptr
->owner
->active_links
[0];
623 if (other
&& link_working_unknown(other
))
625 l_ptr
->state
= WORKING_WORKING
;
626 l_ptr
->fsm_msg_cnt
= 0;
627 link_activate(l_ptr
);
628 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 1, 0, 0, 0);
629 l_ptr
->fsm_msg_cnt
++;
630 if (l_ptr
->owner
->working_links
== 1)
631 tipc_link_sync_xmit(l_ptr
);
632 link_set_timer(l_ptr
, cont_intv
);
635 l_ptr
->state
= RESET_RESET
;
636 l_ptr
->fsm_msg_cnt
= 0;
637 tipc_link_proto_xmit(l_ptr
, ACTIVATE_MSG
,
639 l_ptr
->fsm_msg_cnt
++;
640 link_set_timer(l_ptr
, cont_intv
);
643 l_ptr
->flags
|= LINK_STARTED
;
644 l_ptr
->fsm_msg_cnt
++;
645 link_set_timer(l_ptr
, cont_intv
);
648 tipc_link_proto_xmit(l_ptr
, RESET_MSG
, 0, 0, 0, 0);
649 l_ptr
->fsm_msg_cnt
++;
650 link_set_timer(l_ptr
, cont_intv
);
653 pr_err("%s%u in RU state\n", link_unk_evt
, event
);
658 case TRAFFIC_MSG_EVT
:
660 other
= l_ptr
->owner
->active_links
[0];
661 if (other
&& link_working_unknown(other
))
663 l_ptr
->state
= WORKING_WORKING
;
664 l_ptr
->fsm_msg_cnt
= 0;
665 link_activate(l_ptr
);
666 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 1, 0, 0, 0);
667 l_ptr
->fsm_msg_cnt
++;
668 if (l_ptr
->owner
->working_links
== 1)
669 tipc_link_sync_xmit(l_ptr
);
670 link_set_timer(l_ptr
, cont_intv
);
675 tipc_link_proto_xmit(l_ptr
, ACTIVATE_MSG
,
677 l_ptr
->fsm_msg_cnt
++;
678 link_set_timer(l_ptr
, cont_intv
);
681 pr_err("%s%u in RR state\n", link_unk_evt
, event
);
685 pr_err("Unknown link state %u/%u\n", l_ptr
->state
, event
);
690 * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
692 * @list: chain of buffers containing message
694 * Consumes the buffer chain, except when returning -ELINKCONG,
695 * since the caller then may want to make more send attempts.
696 * Returns 0 if success, or errno: -ELINKCONG, -EMSGSIZE or -ENOBUFS
697 * Messages at TIPC_SYSTEM_IMPORTANCE are always accepted
699 int __tipc_link_xmit(struct net
*net
, struct tipc_link
*link
,
700 struct sk_buff_head
*list
)
702 struct tipc_msg
*msg
= buf_msg(skb_peek(list
));
703 unsigned int maxwin
= link
->window
;
704 unsigned int imp
= msg_importance(msg
);
705 uint mtu
= link
->mtu
;
706 uint ack
= mod(link
->next_in_no
- 1);
707 uint seqno
= link
->next_out_no
;
708 uint bc_last_in
= link
->owner
->bclink
.last_in
;
709 struct tipc_media_addr
*addr
= &link
->media_addr
;
710 struct sk_buff_head
*transmq
= &link
->transmq
;
711 struct sk_buff_head
*backlogq
= &link
->backlogq
;
712 struct sk_buff
*skb
, *tmp
;
714 /* Match backlog limit against msg importance: */
715 if (unlikely(link
->backlog
[imp
].len
>= link
->backlog
[imp
].limit
))
716 return link_schedule_user(link
, list
);
718 if (unlikely(msg_size(msg
) > mtu
)) {
719 __skb_queue_purge(list
);
722 /* Prepare each packet for sending, and add to relevant queue: */
723 skb_queue_walk_safe(list
, skb
, tmp
) {
724 __skb_unlink(skb
, list
);
726 msg_set_seqno(msg
, seqno
);
727 msg_set_ack(msg
, ack
);
728 msg_set_bcast_ack(msg
, bc_last_in
);
730 if (likely(skb_queue_len(transmq
) < maxwin
)) {
731 __skb_queue_tail(transmq
, skb
);
732 tipc_bearer_send(net
, link
->bearer_id
, skb
, addr
);
733 link
->rcv_unacked
= 0;
737 if (tipc_msg_bundle(skb_peek_tail(backlogq
), skb
, mtu
)) {
738 link
->stats
.sent_bundled
++;
741 if (tipc_msg_make_bundle(&skb
, mtu
, link
->addr
)) {
742 link
->stats
.sent_bundled
++;
743 link
->stats
.sent_bundles
++;
744 imp
= msg_importance(buf_msg(skb
));
746 __skb_queue_tail(backlogq
, skb
);
747 link
->backlog
[imp
].len
++;
750 link
->next_out_no
= seqno
;
/* skb2list - initialize a queue head and put a single buffer on it */
static void skb2list(struct sk_buff *skb, struct sk_buff_head *list)
{
	skb_queue_head_init(list);
	__skb_queue_tail(list, skb);
}
760 static int __tipc_link_xmit_skb(struct tipc_link
*link
, struct sk_buff
*skb
)
762 struct sk_buff_head head
;
764 skb2list(skb
, &head
);
765 return __tipc_link_xmit(link
->owner
->net
, link
, &head
);
768 /* tipc_link_xmit_skb(): send single buffer to destination
769 * Buffers sent via this functon are generally TIPC_SYSTEM_IMPORTANCE
770 * messages, which will not be rejected
771 * The only exception is datagram messages rerouted after secondary
772 * lookup, which are rare and safe to dispose of anyway.
773 * TODO: Return real return value, and let callers use
774 * tipc_wait_for_sendpkt() where applicable
776 int tipc_link_xmit_skb(struct net
*net
, struct sk_buff
*skb
, u32 dnode
,
779 struct sk_buff_head head
;
782 skb2list(skb
, &head
);
783 rc
= tipc_link_xmit(net
, &head
, dnode
, selector
);
784 if (rc
== -ELINKCONG
)
790 * tipc_link_xmit() is the general link level function for message sending
791 * @net: the applicable net namespace
792 * @list: chain of buffers containing message
793 * @dsz: amount of user data to be sent
794 * @dnode: address of destination node
795 * @selector: a number used for deterministic link selection
796 * Consumes the buffer chain, except when returning -ELINKCONG
797 * Returns 0 if success, otherwise errno: -ELINKCONG,-EHOSTUNREACH,-EMSGSIZE
799 int tipc_link_xmit(struct net
*net
, struct sk_buff_head
*list
, u32 dnode
,
802 struct tipc_link
*link
= NULL
;
803 struct tipc_node
*node
;
804 int rc
= -EHOSTUNREACH
;
806 node
= tipc_node_find(net
, dnode
);
808 tipc_node_lock(node
);
809 link
= node
->active_links
[selector
& 1];
811 rc
= __tipc_link_xmit(net
, link
, list
);
812 tipc_node_unlock(node
);
818 if (likely(in_own_node(net
, dnode
))) {
819 tipc_sk_rcv(net
, list
);
823 __skb_queue_purge(list
);
828 * tipc_link_sync_xmit - synchronize broadcast link endpoints.
830 * Give a newly added peer node the sequence number where it should
831 * start receiving and acking broadcast packets.
833 * Called with node locked
835 static void tipc_link_sync_xmit(struct tipc_link
*link
)
838 struct tipc_msg
*msg
;
840 skb
= tipc_buf_acquire(INT_H_SIZE
);
845 tipc_msg_init(link_own_addr(link
), msg
, BCAST_PROTOCOL
, STATE_MSG
,
846 INT_H_SIZE
, link
->addr
);
847 msg_set_last_bcast(msg
, link
->owner
->bclink
.acked
);
848 __tipc_link_xmit_skb(link
, skb
);
852 * tipc_link_sync_rcv - synchronize broadcast link endpoints.
853 * Receive the sequence number where we should start receiving and
854 * acking broadcast packets from a newly added peer node, and open
855 * up for reception of such packets.
857 * Called with node locked
859 static void tipc_link_sync_rcv(struct tipc_node
*n
, struct sk_buff
*buf
)
861 struct tipc_msg
*msg
= buf_msg(buf
);
863 n
->bclink
.last_sent
= n
->bclink
.last_in
= msg_last_bcast(msg
);
864 n
->bclink
.recv_permitted
= true;
869 * tipc_link_push_packets - push unsent packets to bearer
871 * Push out the unsent messages of a link where congestion
872 * has abated. Node is locked.
874 * Called with node locked
876 void tipc_link_push_packets(struct tipc_link
*link
)
879 struct tipc_msg
*msg
;
880 unsigned int ack
= mod(link
->next_in_no
- 1);
882 while (skb_queue_len(&link
->transmq
) < link
->window
) {
883 skb
= __skb_dequeue(&link
->backlogq
);
887 link
->backlog
[msg_importance(msg
)].len
--;
888 msg_set_ack(msg
, ack
);
889 msg_set_bcast_ack(msg
, link
->owner
->bclink
.last_in
);
890 link
->rcv_unacked
= 0;
891 __skb_queue_tail(&link
->transmq
, skb
);
892 tipc_bearer_send(link
->owner
->net
, link
->bearer_id
,
893 skb
, &link
->media_addr
);
897 void tipc_link_reset_all(struct tipc_node
*node
)
899 char addr_string
[16];
902 tipc_node_lock(node
);
904 pr_warn("Resetting all links to %s\n",
905 tipc_addr_string_fill(addr_string
, node
->addr
));
907 for (i
= 0; i
< MAX_BEARERS
; i
++) {
908 if (node
->links
[i
]) {
909 link_print(node
->links
[i
], "Resetting link\n");
910 tipc_link_reset(node
->links
[i
]);
914 tipc_node_unlock(node
);
917 static void link_retransmit_failure(struct tipc_link
*l_ptr
,
920 struct tipc_msg
*msg
= buf_msg(buf
);
921 struct net
*net
= l_ptr
->owner
->net
;
923 pr_warn("Retransmission failure on link <%s>\n", l_ptr
->name
);
926 /* Handle failure on standard link */
927 link_print(l_ptr
, "Resetting link\n");
928 tipc_link_reset(l_ptr
);
931 /* Handle failure on broadcast link */
932 struct tipc_node
*n_ptr
;
933 char addr_string
[16];
935 pr_info("Msg seq number: %u, ", msg_seqno(msg
));
936 pr_cont("Outstanding acks: %lu\n",
937 (unsigned long) TIPC_SKB_CB(buf
)->handle
);
939 n_ptr
= tipc_bclink_retransmit_to(net
);
941 tipc_addr_string_fill(addr_string
, n_ptr
->addr
);
942 pr_info("Broadcast link info for %s\n", addr_string
);
943 pr_info("Reception permitted: %d, Acked: %u\n",
944 n_ptr
->bclink
.recv_permitted
,
945 n_ptr
->bclink
.acked
);
946 pr_info("Last in: %u, Oos state: %u, Last sent: %u\n",
947 n_ptr
->bclink
.last_in
,
948 n_ptr
->bclink
.oos_state
,
949 n_ptr
->bclink
.last_sent
);
951 n_ptr
->action_flags
|= TIPC_BCAST_RESET
;
952 l_ptr
->stale_count
= 0;
956 void tipc_link_retransmit(struct tipc_link
*l_ptr
, struct sk_buff
*skb
,
959 struct tipc_msg
*msg
;
966 /* Detect repeated retransmit failures */
967 if (l_ptr
->last_retransmitted
== msg_seqno(msg
)) {
968 if (++l_ptr
->stale_count
> 100) {
969 link_retransmit_failure(l_ptr
, skb
);
973 l_ptr
->last_retransmitted
= msg_seqno(msg
);
974 l_ptr
->stale_count
= 1;
977 skb_queue_walk_from(&l_ptr
->transmq
, skb
) {
981 msg_set_ack(msg
, mod(l_ptr
->next_in_no
- 1));
982 msg_set_bcast_ack(msg
, l_ptr
->owner
->bclink
.last_in
);
983 tipc_bearer_send(l_ptr
->owner
->net
, l_ptr
->bearer_id
, skb
,
986 l_ptr
->stats
.retransmitted
++;
990 /* link_synch(): check if all packets arrived before the synch
991 * point have been consumed
992 * Returns true if the parallel links are synched, otherwise false
994 static bool link_synch(struct tipc_link
*l
)
996 unsigned int post_synch
;
997 struct tipc_link
*pl
;
999 pl
= tipc_parallel_link(l
);
1003 /* Was last pre-synch packet added to input queue ? */
1004 if (less_eq(pl
->next_in_no
, l
->synch_point
))
1007 /* Is it still in the input queue ? */
1008 post_synch
= mod(pl
->next_in_no
- l
->synch_point
) - 1;
1009 if (skb_queue_len(&pl
->inputq
) > post_synch
)
1012 l
->flags
&= ~LINK_SYNCHING
;
1016 static void link_retrieve_defq(struct tipc_link
*link
,
1017 struct sk_buff_head
*list
)
1021 if (skb_queue_empty(&link
->deferdq
))
1024 seq_no
= buf_seqno(skb_peek(&link
->deferdq
));
1025 if (seq_no
== mod(link
->next_in_no
))
1026 skb_queue_splice_tail_init(&link
->deferdq
, list
);
1030 * tipc_rcv - process TIPC packets/messages arriving from off-node
1031 * @net: the applicable net namespace
1033 * @b_ptr: pointer to bearer message arrived on
1035 * Invoked with no locks held. Bearer pointer must point to a valid bearer
1036 * structure (i.e. cannot be NULL), but bearer can be inactive.
1038 void tipc_rcv(struct net
*net
, struct sk_buff
*skb
, struct tipc_bearer
*b_ptr
)
1040 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
1041 struct sk_buff_head head
;
1042 struct tipc_node
*n_ptr
;
1043 struct tipc_link
*l_ptr
;
1044 struct sk_buff
*skb1
, *tmp
;
1045 struct tipc_msg
*msg
;
1050 skb2list(skb
, &head
);
1052 while ((skb
= __skb_dequeue(&head
))) {
1053 /* Ensure message is well-formed */
1054 if (unlikely(!tipc_msg_validate(skb
)))
1057 /* Handle arrival of a non-unicast link message */
1059 if (unlikely(msg_non_seq(msg
))) {
1060 if (msg_user(msg
) == LINK_CONFIG
)
1061 tipc_disc_rcv(net
, skb
, b_ptr
);
1063 tipc_bclink_rcv(net
, skb
);
1067 /* Discard unicast link messages destined for another node */
1068 if (unlikely(!msg_short(msg
) &&
1069 (msg_destnode(msg
) != tn
->own_addr
)))
1072 /* Locate neighboring node that sent message */
1073 n_ptr
= tipc_node_find(net
, msg_prevnode(msg
));
1074 if (unlikely(!n_ptr
))
1077 tipc_node_lock(n_ptr
);
1078 /* Locate unicast link endpoint that should handle message */
1079 l_ptr
= n_ptr
->links
[b_ptr
->identity
];
1080 if (unlikely(!l_ptr
))
1083 /* Verify that communication with node is currently allowed */
1084 if ((n_ptr
->action_flags
& TIPC_WAIT_PEER_LINKS_DOWN
) &&
1085 msg_user(msg
) == LINK_PROTOCOL
&&
1086 (msg_type(msg
) == RESET_MSG
||
1087 msg_type(msg
) == ACTIVATE_MSG
) &&
1088 !msg_redundant_link(msg
))
1089 n_ptr
->action_flags
&= ~TIPC_WAIT_PEER_LINKS_DOWN
;
1091 if (tipc_node_blocked(n_ptr
))
1094 /* Validate message sequence number info */
1095 seq_no
= msg_seqno(msg
);
1096 ackd
= msg_ack(msg
);
1098 /* Release acked messages */
1099 if (unlikely(n_ptr
->bclink
.acked
!= msg_bcast_ack(msg
)))
1100 tipc_bclink_acknowledge(n_ptr
, msg_bcast_ack(msg
));
1103 skb_queue_walk_safe(&l_ptr
->transmq
, skb1
, tmp
) {
1104 if (more(buf_seqno(skb1
), ackd
))
1106 __skb_unlink(skb1
, &l_ptr
->transmq
);
1111 /* Try sending any messages link endpoint has pending */
1112 if (unlikely(skb_queue_len(&l_ptr
->backlogq
)))
1113 tipc_link_push_packets(l_ptr
);
1115 if (released
&& !skb_queue_empty(&l_ptr
->wakeupq
))
1116 link_prepare_wakeup(l_ptr
);
1118 /* Process the incoming packet */
1119 if (unlikely(!link_working_working(l_ptr
))) {
1120 if (msg_user(msg
) == LINK_PROTOCOL
) {
1121 tipc_link_proto_rcv(l_ptr
, skb
);
1122 link_retrieve_defq(l_ptr
, &head
);
1127 /* Traffic message. Conditionally activate link */
1128 link_state_event(l_ptr
, TRAFFIC_MSG_EVT
);
1130 if (link_working_working(l_ptr
)) {
1131 /* Re-insert buffer in front of queue */
1132 __skb_queue_head(&head
, skb
);
1139 /* Link is now in state WORKING_WORKING */
1140 if (unlikely(seq_no
!= mod(l_ptr
->next_in_no
))) {
1141 link_handle_out_of_seq_msg(l_ptr
, skb
);
1142 link_retrieve_defq(l_ptr
, &head
);
1146 /* Synchronize with parallel link if applicable */
1147 if (unlikely((l_ptr
->flags
& LINK_SYNCHING
) && !msg_dup(msg
))) {
1148 if (!link_synch(l_ptr
))
1151 l_ptr
->next_in_no
++;
1152 if (unlikely(!skb_queue_empty(&l_ptr
->deferdq
)))
1153 link_retrieve_defq(l_ptr
, &head
);
1154 if (unlikely(++l_ptr
->rcv_unacked
>= TIPC_MIN_LINK_WIN
)) {
1155 l_ptr
->stats
.sent_acks
++;
1156 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 0, 0, 0, 0);
1158 tipc_link_input(l_ptr
, skb
);
1161 tipc_node_unlock(n_ptr
);
1162 tipc_node_put(n_ptr
);
1169 /* tipc_data_input - deliver data and name distr msgs to upper layer
1171 * Consumes buffer if message is of right type
1172 * Node lock must be held
/* NOTE(review): this extraction is garbled -- line breaks are broken and some
 * original lines (braces, returns, extra case labels) are missing. Comments
 * below annotate only the visible logic.
 */
1174 static bool tipc_data_input(struct tipc_link
*link
, struct sk_buff
*skb
)
1176 struct tipc_node
*node
= link
->owner
;
1177 struct tipc_msg
*msg
= buf_msg(skb
);
1178 u32 dport
= msg_destport(msg
);
/* Dispatch on the message user: connection-level data messages are queued
 * on the link input queue for the destination port; name-table updates go
 * to the named queue. */
1180 switch (msg_user(msg
)) {
1181 case TIPC_LOW_IMPORTANCE
:
1182 case TIPC_MEDIUM_IMPORTANCE
:
1183 case TIPC_HIGH_IMPORTANCE
:
1184 case TIPC_CRITICAL_IMPORTANCE
:
1186 if (tipc_skb_queue_tail(&link
->inputq
, skb
, dport
)) {
/* First buffer on the queue: tell the node layer where to read from
 * and flag that a message event is pending. */
1187 node
->inputq
= &link
->inputq
;
1188 node
->action_flags
|= TIPC_MSG_EVT
;
1191 case NAME_DISTRIBUTOR
:
/* Peer is publishing names, so it can evidently receive broadcasts. */
1192 node
->bclink
.recv_permitted
= true;
1193 node
->namedq
= &link
->namedq
;
1194 skb_queue_tail(&link
->namedq
, skb
);
1195 if (skb_queue_len(&link
->namedq
) == 1)
1196 node
->action_flags
|= TIPC_NAMED_MSG_EVT
;
/* Internal protocol users are not data; presumably handled by the caller
 * (tipc_link_input) -- TODO confirm against upstream. */
1199 case TUNNEL_PROTOCOL
:
1200 case MSG_FRAGMENTER
:
1201 case BCAST_PROTOCOL
:
1204 pr_warn("Dropping received illegal msg type\n");
1210 /* tipc_link_input - process packet that has passed link protocol check
1213 * Node lock must be held
/* NOTE(review): garbled extraction -- break/return/case lines are missing.
 * Comments annotate the visible logic only.
 */
1215 static void tipc_link_input(struct tipc_link
*link
, struct sk_buff
*skb
)
1217 struct tipc_node
*node
= link
->owner
;
1218 struct tipc_msg
*msg
= buf_msg(skb
);
1219 struct sk_buff
*iskb
;
/* Ordinary data / name-distr messages are consumed directly. */
1222 if (likely(tipc_data_input(link
, skb
)))
1225 switch (msg_user(msg
)) {
1226 case TUNNEL_PROTOCOL
:
/* Start of a parallel-link synch sequence: remember the peer's
 * synch point taken from the wrapped message. */
1228 link
->flags
|= LINK_SYNCHING
;
1229 link
->synch_point
= msg_seqno(msg_get_wrapped(msg
));
/* Failover tunnel packet: extract the inner packet, if any. */
1233 if (!tipc_link_failover_rcv(link
, &skb
))
1235 if (msg_user(buf_msg(skb
)) != MSG_BUNDLER
) {
1236 tipc_data_input(link
, skb
);
/* Bundle: unwrap and deliver each bundled message individually. */
1240 link
->stats
.recv_bundles
++;
1241 link
->stats
.recv_bundled
+= msg_msgcnt(msg
);
1243 while (tipc_msg_extract(skb
, &iskb
, &pos
))
1244 tipc_data_input(link
, iskb
);
1246 case MSG_FRAGMENTER
:
1247 link
->stats
.recv_fragments
++;
1248 if (tipc_buf_append(&link
->reasm_buf
, &skb
)) {
1249 link
->stats
.recv_fragmented
++;
1250 tipc_data_input(link
, skb
);
1251 } else if (!link
->reasm_buf
) {
/* Reassembly failed irrecoverably -- reset the link. */
1252 tipc_link_reset(link
);
1255 case BCAST_PROTOCOL
:
1256 tipc_link_sync_rcv(node
, skb
);
1264 * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
1266 * Returns increase in queue length (i.e. 0 or 1)
1268 u32
tipc_link_defer_pkt(struct sk_buff_head
*list
, struct sk_buff
*skb
)
1270 struct sk_buff
*skb1
;
1271 u32 seq_no
= buf_seqno(skb
);
1274 if (skb_queue_empty(list
)) {
1275 __skb_queue_tail(list
, skb
);
1280 if (less(buf_seqno(skb_peek_tail(list
)), seq_no
)) {
1281 __skb_queue_tail(list
, skb
);
1285 /* Locate insertion point in queue, then insert; discard if duplicate */
1286 skb_queue_walk(list
, skb1
) {
1287 u32 curr_seqno
= buf_seqno(skb1
);
1289 if (seq_no
== curr_seqno
) {
1294 if (less(seq_no
, curr_seqno
))
1298 __skb_queue_before(list
, skb1
, skb
);
1303 * link_handle_out_of_seq_msg - handle arrival of out-of-sequence packet
/* NOTE(review): garbled extraction -- some return/brace lines are missing.
 * Comments annotate the visible logic only.
 */
1305 static void link_handle_out_of_seq_msg(struct tipc_link
*l_ptr
,
1306 struct sk_buff
*buf
)
1308 u32 seq_no
= buf_seqno(buf
);
/* Protocol messages are deliberately sent out-of-sequence; hand them
 * straight to the protocol receiver. */
1310 if (likely(msg_user(buf_msg(buf
)) == LINK_PROTOCOL
)) {
1311 tipc_link_proto_rcv(l_ptr
, buf
);
1315 /* Record OOS packet arrival (force mismatch on next timeout) */
1316 l_ptr
->checkpoint
--;
1319 * Discard packet if a duplicate; otherwise add it to deferred queue
1320 * and notify peer of gap as per protocol specification
1322 if (less(seq_no
, mod(l_ptr
->next_in_no
))) {
1323 l_ptr
->stats
.duplicates
++;
1328 if (tipc_link_defer_pkt(&l_ptr
->deferdq
, buf
)) {
1329 l_ptr
->stats
.deferred_recv
++;
/* Remind the peer of the gap once per TIPC_MIN_LINK_WIN deferrals. */
1330 if ((skb_queue_len(&l_ptr
->deferdq
) % TIPC_MIN_LINK_WIN
) == 1)
1331 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 0, 0, 0, 0);
1333 l_ptr
->stats
.duplicates
++;
1338 * Send protocol message to the other endpoint.
/* NOTE(review): garbled extraction -- guard "return"s, some declarations
 * (r_flag, rec_gap) and braces are missing. Comments annotate the visible
 * logic only.
 */
1340 void tipc_link_proto_xmit(struct tipc_link
*l_ptr
, u32 msg_typ
, int probe_msg
,
1341 u32 gap
, u32 tolerance
, u32 priority
)
1343 struct sk_buff
*buf
= NULL
;
/* The protocol message template is kept in the link itself (pmsg points
 * into l_ptr->proto_msg); it is filled in and then copied into a fresh skb. */
1344 struct tipc_msg
*msg
= l_ptr
->pmsg
;
1345 u32 msg_size
= sizeof(l_ptr
->proto_msg
);
1348 /* Don't send protocol message during link failover */
1349 if (l_ptr
->flags
& LINK_FAILINGOVER
)
1352 /* Abort non-RESET send if communication with node is prohibited */
1353 if ((tipc_node_blocked(l_ptr
->owner
)) && (msg_typ
!= RESET_MSG
))
1356 /* Create protocol message with "out-of-sequence" sequence number */
1357 msg_set_type(msg
, msg_typ
);
1358 msg_set_net_plane(msg
, l_ptr
->net_plane
);
1359 msg_set_bcast_ack(msg
, l_ptr
->owner
->bclink
.last_in
);
1360 msg_set_last_bcast(msg
, tipc_bclink_get_last_sent(l_ptr
->owner
->net
));
1362 if (msg_typ
== STATE_MSG
) {
1363 u32 next_sent
= mod(l_ptr
->next_out_no
);
1365 if (!tipc_link_is_up(l_ptr
))
/* If there is a backlog, the true "next sent" is its head's seqno. */
1367 if (skb_queue_len(&l_ptr
->backlogq
))
1368 next_sent
= buf_seqno(skb_peek(&l_ptr
->backlogq
))
;
1369 msg_set_next_sent(msg
, next_sent
);
/* A non-empty deferred queue implies a reception gap: advertise it. */
1370 if (!skb_queue_empty(&l_ptr
->deferdq
)) {
1371 u32 rec
= buf_seqno(skb_peek(&l_ptr
->deferdq
));
1372 gap
= mod(rec
- mod(l_ptr
->next_in_no
));
1374 msg_set_seq_gap(msg
, gap
);
1376 l_ptr
->stats
.sent_nacks
++;
1377 msg_set_link_tolerance(msg
, tolerance
);
1378 msg_set_linkprio(msg
, priority
);
1379 msg_set_max_pkt(msg
, l_ptr
->mtu
);
1380 msg_set_ack(msg
, mod(l_ptr
->next_in_no
- 1));
1381 msg_set_probe(msg
, probe_msg
!= 0);
1383 l_ptr
->stats
.sent_probes
++;
1384 l_ptr
->stats
.sent_states
++;
1385 } else { /* RESET_MSG or ACTIVATE_MSG */
1386 msg_set_ack(msg
, mod(l_ptr
->failover_checkpt
- 1));
1387 msg_set_seq_gap(msg
, 0);
1388 msg_set_next_sent(msg
, 1);
1389 msg_set_probe(msg
, 0);
1390 msg_set_link_tolerance(msg
, l_ptr
->tolerance
);
1391 msg_set_linkprio(msg
, l_ptr
->priority
);
1392 msg_set_max_pkt(msg
, l_ptr
->advertised_mtu
);
/* Tell the peer whether we still have another working link to it. */
1395 r_flag
= (l_ptr
->owner
->working_links
> tipc_link_is_up(l_ptr
));
1396 msg_set_redundant_link(msg
, r_flag
);
1397 msg_set_linkprio(msg
, l_ptr
->priority
);
1398 msg_set_size(msg
, msg_size
);
/* Deliberately out-of-sequence so it never collides with data seqnos. */
1400 msg_set_seqno(msg
, mod(l_ptr
->next_out_no
+ (0xffff/2)));
1402 buf
= tipc_buf_acquire(msg_size
);
1406 skb_copy_to_linear_data(buf
, msg
, sizeof(l_ptr
->proto_msg
));
1407 buf
->priority
= TC_PRIO_CONTROL
;
1408 tipc_bearer_send(l_ptr
->owner
->net
, l_ptr
->bearer_id
, buf
,
1409 &l_ptr
->media_addr
);
1410 l_ptr
->rcv_unacked
= 0;
1415 * Receive protocol message :
1416 * Note that network plane id propagates through the network, and may
1417 * change at any time. The node with lowest address rules
/* NOTE(review): garbled extraction -- case labels (RESET_MSG/ACTIVATE_MSG/
 * STATE_MSG), several declarations (msg_tol, rec_gap) and exit lines are
 * missing. Comments annotate the visible logic only.
 */
1419 static void tipc_link_proto_rcv(struct tipc_link
*l_ptr
,
1420 struct sk_buff
*buf
)
1424 struct tipc_msg
*msg
= buf_msg(buf
);
/* Ignore protocol traffic while this endpoint is failing over. */
1426 if (l_ptr
->flags
& LINK_FAILINGOVER
)
/* Net plane conflict: the node with the lowest address wins. */
1429 if (l_ptr
->net_plane
!= msg_net_plane(msg
))
1430 if (link_own_addr(l_ptr
) > msg_prevnode(msg
))
1431 l_ptr
->net_plane
= msg_net_plane(msg
);
1433 switch (msg_type(msg
)) {
/* RESET (and fallthrough ACTIVATE) handling -- labels lost in extraction. */
1436 if (!link_working_unknown(l_ptr
) &&
1437 (l_ptr
->peer_session
!= INVALID_SESSION
)) {
1438 if (less_eq(msg_session(msg
), l_ptr
->peer_session
))
1439 break; /* duplicate or old reset: ignore */
1442 if (!msg_redundant_link(msg
) && (link_working_working(l_ptr
) ||
1443 link_working_unknown(l_ptr
))) {
1445 * peer has lost contact -- don't allow peer's links
1446 * to reactivate before we recognize loss & clean up
1448 l_ptr
->owner
->action_flags
|= TIPC_WAIT_OWN_LINKS_DOWN
;
1451 link_state_event(l_ptr
, RESET_MSG
);
1455 /* Update link settings according other endpoint's values */
/* Overwrite the peer-interface part of the composite link name. */
1456 strcpy((strrchr(l_ptr
->name
, ':') + 1), (char *)msg_data(msg
));
1458 msg_tol
= msg_link_tolerance(msg
);
1459 if (msg_tol
> l_ptr
->tolerance
)
1460 link_set_supervision_props(l_ptr
, msg_tol
);
1462 if (msg_linkprio(msg
) > l_ptr
->priority
)
1463 l_ptr
->priority
= msg_linkprio(msg
);
/* Use the smaller of the two endpoints' max packet sizes. */
1465 if (l_ptr
->mtu
> msg_max_pkt(msg
))
1466 l_ptr
->mtu
= msg_max_pkt(msg
);
1468 /* Synchronize broadcast link info, if not done previously */
1469 if (!tipc_node_is_up(l_ptr
->owner
)) {
1470 l_ptr
->owner
->bclink
.last_sent
=
1471 l_ptr
->owner
->bclink
.last_in
=
1472 msg_last_bcast(msg
);
1473 l_ptr
->owner
->bclink
.oos_state
= 0;
1476 l_ptr
->peer_session
= msg_session(msg
);
1477 l_ptr
->peer_bearer_id
= msg_bearer_id(msg
);
1479 if (msg_type(msg
) == ACTIVATE_MSG
)
1480 link_state_event(l_ptr
, ACTIVATE_MSG
);
/* STATE_MSG handling -- label lost in extraction. */
1484 msg_tol
= msg_link_tolerance(msg
);
1486 link_set_supervision_props(l_ptr
, msg_tol
);
1488 if (msg_linkprio(msg
) &&
1489 (msg_linkprio(msg
) != l_ptr
->priority
)) {
1490 pr_debug("%s<%s>, priority change %u->%u\n",
1491 link_rst_msg
, l_ptr
->name
,
1492 l_ptr
->priority
, msg_linkprio(msg
));
1493 l_ptr
->priority
= msg_linkprio(msg
);
1494 tipc_link_reset(l_ptr
); /* Enforce change to take effect */
1498 /* Record reception; force mismatch at next timeout: */
1499 l_ptr
->checkpoint
--;
1501 link_state_event(l_ptr
, TRAFFIC_MSG_EVT
);
1502 l_ptr
->stats
.recv_states
++;
1503 if (link_reset_unknown(l_ptr
))
/* Compute how far the peer claims to be ahead of our reception. */
1506 if (less_eq(mod(l_ptr
->next_in_no
), msg_next_sent(msg
))) {
1507 rec_gap
= mod(msg_next_sent(msg
) -
1508 mod(l_ptr
->next_in_no
));
1512 l_ptr
->stats
.recv_probes
++;
1514 /* Protocol message before retransmits, reduce loss risk */
1515 if (l_ptr
->owner
->bclink
.recv_permitted
)
1516 tipc_bclink_update_link_state(l_ptr
->owner
,
1517 msg_last_bcast(msg
));
/* Answer a probe, or report our own reception gap, with a STATE msg. */
1519 if (rec_gap
|| (msg_probe(msg
))) {
1520 tipc_link_proto_xmit(l_ptr
, STATE_MSG
, 0,
/* Peer reported a gap: retransmit from the head of the transmit queue. */
1523 if (msg_seq_gap(msg
)) {
1524 l_ptr
->stats
.recv_nacks
++;
1525 tipc_link_retransmit(l_ptr
, skb_peek(&l_ptr
->transmq
),
1535 /* tipc_link_tunnel_xmit(): Tunnel one packet via a link belonging to
1536 * a different bearer. Owner node is locked.
1538 static void tipc_link_tunnel_xmit(struct tipc_link
*l_ptr
,
1539 struct tipc_msg
*tunnel_hdr
,
1540 struct tipc_msg
*msg
,
1543 struct tipc_link
*tunnel
;
1544 struct sk_buff
*skb
;
1545 u32 length
= msg_size(msg
);
1547 tunnel
= l_ptr
->owner
->active_links
[selector
& 1];
1548 if (!tipc_link_is_up(tunnel
)) {
1549 pr_warn("%stunnel link no longer available\n", link_co_err
);
1552 msg_set_size(tunnel_hdr
, length
+ INT_H_SIZE
);
1553 skb
= tipc_buf_acquire(length
+ INT_H_SIZE
);
1555 pr_warn("%sunable to send tunnel msg\n", link_co_err
);
1558 skb_copy_to_linear_data(skb
, tunnel_hdr
, INT_H_SIZE
);
1559 skb_copy_to_linear_data_offset(skb
, INT_H_SIZE
, msg
, length
);
1560 __tipc_link_xmit_skb(tunnel
, skb
);
1564 /* tipc_link_failover_send_queue(): A link has gone down, but a second
1565 * link is still active. We can do failover. Tunnel the failing link's
1566 * whole send queue via the remaining link. This way, we don't lose
1567 * any packets, and sequence order is preserved for subsequent traffic
1568 * sent over the remaining link. Owner node is locked.
/* NOTE(review): garbled extraction -- declarations (msgcount, split_bundles),
 * guards and braces are missing. Comments annotate the visible logic only.
 */
1570 void tipc_link_failover_send_queue(struct tipc_link
*l_ptr
)
1573 struct tipc_link
*tunnel
= l_ptr
->owner
->active_links
[0];
1574 struct tipc_msg tunnel_hdr
;
1575 struct sk_buff
*skb
;
/* Build the FAILOVER tunnel header and fold the backlog into transmq so
 * the whole outstanding send queue is tunnelled in one pass. */
1581 tipc_msg_init(link_own_addr(l_ptr
), &tunnel_hdr
, TUNNEL_PROTOCOL
,
1582 FAILOVER_MSG
, INT_H_SIZE
, l_ptr
->addr
);
1583 skb_queue_splice_tail_init(&l_ptr
->backlogq
, &l_ptr
->transmq
);
1584 tipc_link_purge_backlog(l_ptr
);
1585 msgcount
= skb_queue_len(&l_ptr
->transmq
);
1586 msg_set_bearer_id(&tunnel_hdr
, l_ptr
->peer_bearer_id
);
1587 msg_set_msgcnt(&tunnel_hdr
, msgcount
);
/* Empty queue: still send one header-only packet so the peer learns the
 * (zero) failover count. */
1589 if (skb_queue_empty(&l_ptr
->transmq
)) {
1590 skb
= tipc_buf_acquire(INT_H_SIZE
);
1592 skb_copy_to_linear_data(skb
, &tunnel_hdr
, INT_H_SIZE
);
1593 msg_set_size(&tunnel_hdr
, INT_H_SIZE
);
1594 __tipc_link_xmit_skb(tunnel
, skb
);
1596 pr_warn("%sunable to send changeover msg\n",
/* Bundles must be split when the two links select their own paths. */
1602 split_bundles
= (l_ptr
->owner
->active_links
[0] !=
1603 l_ptr
->owner
->active_links
[1]);
1605 skb_queue_walk(&l_ptr
->transmq
, skb
) {
1606 struct tipc_msg
*msg
= buf_msg(skb
);
1608 if ((msg_user(msg
) == MSG_BUNDLER
) && split_bundles
) {
1609 struct tipc_msg
*m
= msg_get_wrapped(msg
);
1610 unchar
*pos
= (unchar
*)m
;
/* Tunnel each bundled message separately, inheriting the
 * bundle's sequence number. */
1612 msgcount
= msg_msgcnt(msg
);
1613 while (msgcount
--) {
1614 msg_set_seqno(m
, msg_seqno(msg
));
1615 tipc_link_tunnel_xmit(l_ptr
, &tunnel_hdr
, m
,
1616 msg_link_selector(m
));
1617 pos
+= align(msg_size(m
));
1618 m
= (struct tipc_msg
*)pos
;
1621 tipc_link_tunnel_xmit(l_ptr
, &tunnel_hdr
, msg
,
1622 msg_link_selector(msg
));
1627 /* tipc_link_dup_queue_xmit(): A second link has become active. Tunnel a
1628 * duplicate of the first link's send queue via the new link. This way, we
1629 * are guaranteed that currently queued packets from a socket are delivered
1630 * before future traffic from the same socket, even if this is using the
1631 * new link. The last arriving copy of each duplicate packet is dropped at
1632 * the receiving end by the regular protocol check, so packet cardinality
1633 * and sequence order is preserved per sender/receiver socket pair.
1634 * Owner node is locked.
/* NOTE(review): garbled extraction -- the mcnt declaration, loop-restart
 * lines and braces are missing. Comments annotate the visible logic only.
 */
1636 void tipc_link_dup_queue_xmit(struct tipc_link
*link
,
1637 struct tipc_link
*tnl
)
1639 struct sk_buff
*skb
;
1640 struct tipc_msg tnl_hdr
;
1641 struct sk_buff_head
*queue
= &link
->transmq
;
/* SYNCH header advertising how many packets the peer should expect. */
1644 tipc_msg_init(link_own_addr(link
), &tnl_hdr
, TUNNEL_PROTOCOL
,
1645 SYNCH_MSG
, INT_H_SIZE
, link
->addr
);
1646 mcnt
= skb_queue_len(&link
->transmq
) + skb_queue_len(&link
->backlogq
);
1647 msg_set_msgcnt(&tnl_hdr
, mcnt
);
1648 msg_set_bearer_id(&tnl_hdr
, link
->peer_bearer_id
);
/* Walked twice: first the transmit queue, then (see bottom) the backlog. */
1651 skb_queue_walk(queue
, skb
) {
1652 struct sk_buff
*outskb
;
1653 struct tipc_msg
*msg
= buf_msg(skb
);
1654 u32 len
= msg_size(msg
);
/* Refresh acks on the original before duplicating it. */
1656 msg_set_ack(msg
, mod(link
->next_in_no
- 1));
1657 msg_set_bcast_ack(msg
, link
->owner
->bclink
.last_in
);
1658 msg_set_size(&tnl_hdr
, len
+ INT_H_SIZE
);
1659 outskb
= tipc_buf_acquire(len
+ INT_H_SIZE
);
1660 if (outskb
== NULL
) {
1661 pr_warn("%sunable to send duplicate msg\n",
1665 skb_copy_to_linear_data(outskb
, &tnl_hdr
, INT_H_SIZE
);
1666 skb_copy_to_linear_data_offset(outskb
, INT_H_SIZE
,
1668 __tipc_link_xmit_skb(tnl
, outskb
);
1669 if (!tipc_link_is_up(link
))
/* After finishing transmq, restart the walk over the backlog queue. */
1672 if (queue
== &link
->backlogq
)
1674 queue
= &link
->backlogq
;
1678 /* tipc_link_failover_rcv(): Receive a tunnelled FAILOVER_MSG packet
1679 * Owner node is locked.
/* NOTE(review): garbled extraction -- exit/goto labels, the pos declaration
 * and return statements are missing. The function appears to return whether
 * an inner packet was unwrapped into *skb. Comments annotate visible logic.
 */
1681 static bool tipc_link_failover_rcv(struct tipc_link
*link
,
1682 struct sk_buff
**skb
)
1684 struct tipc_msg
*msg
= buf_msg(*skb
);
1685 struct sk_buff
*iskb
= NULL
;
1686 struct tipc_link
*pl
= NULL
;
1687 int bearer_id
= msg_bearer_id(msg
);
1690 if (msg_type(msg
) != FAILOVER_MSG
) {
1691 pr_warn("%sunknown tunnel pkt received\n", link_co_err
);
1694 if (bearer_id
>= MAX_BEARERS
)
/* A link cannot fail over to itself. */
1697 if (bearer_id
== link
->bearer_id
)
/* Take down the failing parallel link if it still looks up. */
1700 pl
= link
->owner
->links
[bearer_id
];
1701 if (pl
&& tipc_link_is_up(pl
))
1702 tipc_link_reset(pl
);
/* First failover packet carries the total expected packet count. */
1704 if (link
->failover_pkts
== FIRST_FAILOVER
)
1705 link
->failover_pkts
= msg_msgcnt(msg
);
1707 /* Should we expect an inner packet? */
1708 if (!link
->failover_pkts
)
1711 if (!tipc_msg_extract(*skb
, &iskb
, &pos
)) {
1712 pr_warn("%sno inner failover pkt\n", link_co_err
);
1716 link
->failover_pkts
--;
1719 /* Was this packet already delivered? */
1720 if (less(buf_seqno(iskb
), link
->failover_checkpt
)) {
/* Fragments are reassembled in a dedicated failover buffer. */
1725 if (msg_user(buf_msg(iskb
)) == MSG_FRAGMENTER
) {
1726 link
->stats
.recv_fragments
++;
1727 tipc_buf_append(&link
->failover_skb
, &iskb
);
/* All failover packets received: let the parallel link finish over. */
1730 if (!link
->failover_pkts
&& pl
)
1731 pl
->flags
&= ~LINK_FAILINGOVER
;
1737 static void link_set_supervision_props(struct tipc_link
*l_ptr
, u32 tol
)
1739 unsigned long intv
= ((tol
/ 4) > 500) ? 500 : tol
/ 4;
1741 if ((tol
< TIPC_MIN_LINK_TOL
) || (tol
> TIPC_MAX_LINK_TOL
))
1744 l_ptr
->tolerance
= tol
;
1745 l_ptr
->cont_intv
= msecs_to_jiffies(intv
);
1746 l_ptr
->abort_limit
= tol
/ (jiffies_to_msecs(l_ptr
->cont_intv
) / 4);
1749 void tipc_link_set_queue_limits(struct tipc_link
*l
, u32 win
)
1751 int max_bulk
= TIPC_MAX_PUBLICATIONS
/ (l
->mtu
/ ITEM_SIZE
);
1754 l
->backlog
[TIPC_LOW_IMPORTANCE
].limit
= win
/ 2;
1755 l
->backlog
[TIPC_MEDIUM_IMPORTANCE
].limit
= win
;
1756 l
->backlog
[TIPC_HIGH_IMPORTANCE
].limit
= win
/ 2 * 3;
1757 l
->backlog
[TIPC_CRITICAL_IMPORTANCE
].limit
= win
* 2;
1758 l
->backlog
[TIPC_SYSTEM_IMPORTANCE
].limit
= max_bulk
;
1761 /* tipc_link_find_owner - locate owner node of link by link's name
1762 * @net: the applicable net namespace
1763 * @name: pointer to link name string
1764 * @bearer_id: pointer to index in 'node->links' array where the link was found.
1766 * Returns pointer to node owning the link, or 0 if no matching link is found.
/* NOTE(review): garbled extraction -- RCU lock/unlock, the found_node
 * assignment and loop exits are missing. Comments annotate visible logic.
 */
1768 static struct tipc_node
*tipc_link_find_owner(struct net
*net
,
1769 const char *link_name
,
1770 unsigned int *bearer_id
)
1772 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
1773 struct tipc_link
*l_ptr
;
1774 struct tipc_node
*n_ptr
;
1775 struct tipc_node
*found_node
= NULL
;
/* Linear scan of all nodes, each locked while its links are checked. */
1780 list_for_each_entry_rcu(n_ptr
, &tn
->node_list
, list
) {
1781 tipc_node_lock(n_ptr
);
1782 for (i
= 0; i
< MAX_BEARERS
; i
++) {
1783 l_ptr
= n_ptr
->links
[i
];
1784 if (l_ptr
&& !strcmp(l_ptr
->name
, link_name
)) {
1790 tipc_node_unlock(n_ptr
);
1800 * link_reset_statistics - reset link statistics
1801 * @l_ptr: pointer to link
1803 static void link_reset_statistics(struct tipc_link
*l_ptr
)
1805 memset(&l_ptr
->stats
, 0, sizeof(l_ptr
->stats
));
1806 l_ptr
->stats
.sent_info
= l_ptr
->next_out_no
;
1807 l_ptr
->stats
.recv_info
= l_ptr
->next_in_no
;
/* link_print - log the link's identity and current FSM state.
 * NOTE(review): garbled extraction -- the printed state strings and the RCU
 * read lock lines are missing. Comments annotate visible logic only.
 */
1810 static void link_print(struct tipc_link
*l_ptr
, const char *str
)
1812 struct tipc_net
*tn
= net_generic(l_ptr
->owner
->net
, tipc_net_id
);
1813 struct tipc_bearer
*b_ptr
;
1816 b_ptr
= rcu_dereference_rtnl(tn
->bearer_list
[l_ptr
->bearer_id
]);
1818 pr_info("%s Link %x<%s>:", str
, l_ptr
->addr
, b_ptr
->name
);
/* One branch per link FSM state; the per-state output lines were lost. */
1821 if (link_working_unknown(l_ptr
))
1823 else if (link_reset_reset(l_ptr
))
1825 else if (link_reset_unknown(l_ptr
))
1827 else if (link_working_working(l_ptr
))
1833 /* Parse and validate nested (link) properties valid for media, bearer and link
/* NOTE(review): garbled extraction -- err checks, -EINVAL returns and local
 * declarations are missing. Comments annotate visible logic only.
 */
1835 int tipc_nl_parse_link_prop(struct nlattr
*prop
, struct nlattr
*props
[])
1839 err
= nla_parse_nested(props
, TIPC_NLA_PROP_MAX
, prop
,
1840 tipc_nl_prop_policy
);
/* Range-check each property that was supplied. */
1844 if (props
[TIPC_NLA_PROP_PRIO
]) {
1847 prio
= nla_get_u32(props
[TIPC_NLA_PROP_PRIO
]);
1848 if (prio
> TIPC_MAX_LINK_PRI
)
1852 if (props
[TIPC_NLA_PROP_TOL
]) {
1855 tol
= nla_get_u32(props
[TIPC_NLA_PROP_TOL
]);
1856 if ((tol
< TIPC_MIN_LINK_TOL
) || (tol
> TIPC_MAX_LINK_TOL
))
1860 if (props
[TIPC_NLA_PROP_WIN
]) {
1863 win
= nla_get_u32(props
[TIPC_NLA_PROP_WIN
]);
1864 if ((win
< TIPC_MIN_LINK_WIN
) || (win
> TIPC_MAX_LINK_WIN
))
/* tipc_nl_link_set - netlink handler: change tolerance/priority/window of a
 * named link.
 * NOTE(review): garbled extraction -- error returns, the name/err/bearer_id
 * declarations and braces are missing. Comments annotate visible logic only.
 */
1871 int tipc_nl_link_set(struct sk_buff
*skb
, struct genl_info
*info
)
1877 struct tipc_link
*link
;
1878 struct tipc_node
*node
;
1879 struct nlattr
*attrs
[TIPC_NLA_LINK_MAX
+ 1];
1880 struct net
*net
= sock_net(skb
->sk
);
1882 if (!info
->attrs
[TIPC_NLA_LINK
])
1885 err
= nla_parse_nested(attrs
, TIPC_NLA_LINK_MAX
,
1886 info
->attrs
[TIPC_NLA_LINK
],
1887 tipc_nl_link_policy
);
1891 if (!attrs
[TIPC_NLA_LINK_NAME
])
1894 name
= nla_data(attrs
[TIPC_NLA_LINK_NAME
]);
/* Resolve the link by name, then operate under the owner node lock. */
1896 node
= tipc_link_find_owner(net
, name
, &bearer_id
);
1900 tipc_node_lock(node
);
1902 link
= node
->links
[bearer_id
];
1908 if (attrs
[TIPC_NLA_LINK_PROP
]) {
1909 struct nlattr
*props
[TIPC_NLA_PROP_MAX
+ 1];
1911 err
= tipc_nl_parse_link_prop(attrs
[TIPC_NLA_LINK_PROP
],
/* Each changed property is also advertised to the peer via STATE msg. */
1918 if (props
[TIPC_NLA_PROP_TOL
]) {
1921 tol
= nla_get_u32(props
[TIPC_NLA_PROP_TOL
]);
1922 link_set_supervision_props(link
, tol
);
1923 tipc_link_proto_xmit(link
, STATE_MSG
, 0, 0, tol
, 0);
1925 if (props
[TIPC_NLA_PROP_PRIO
]) {
1928 prio
= nla_get_u32(props
[TIPC_NLA_PROP_PRIO
]);
1929 link
->priority
= prio
;
1930 tipc_link_proto_xmit(link
, STATE_MSG
, 0, 0, 0, prio
);
1932 if (props
[TIPC_NLA_PROP_WIN
]) {
1935 win
= nla_get_u32(props
[TIPC_NLA_PROP_WIN
]);
1936 tipc_link_set_queue_limits(link
, win
);
1941 tipc_node_unlock(node
);
/* __tipc_nl_add_stats - emit a TIPC_NLA_LINK_STATS nest from a stats struct.
 * NOTE(review): garbled extraction -- the nla_map struct definition, loop
 * braces and the failure label are missing. Comments annotate visible logic.
 */
1946 static int __tipc_nl_add_stats(struct sk_buff
*skb
, struct tipc_stats
*s
)
1949 struct nlattr
*stats
;
/* Key/value table mapping each netlink stats attribute to its counter. */
1956 struct nla_map map
[] = {
1957 {TIPC_NLA_STATS_RX_INFO
, s
->recv_info
},
1958 {TIPC_NLA_STATS_RX_FRAGMENTS
, s
->recv_fragments
},
1959 {TIPC_NLA_STATS_RX_FRAGMENTED
, s
->recv_fragmented
},
1960 {TIPC_NLA_STATS_RX_BUNDLES
, s
->recv_bundles
},
1961 {TIPC_NLA_STATS_RX_BUNDLED
, s
->recv_bundled
},
1962 {TIPC_NLA_STATS_TX_INFO
, s
->sent_info
},
1963 {TIPC_NLA_STATS_TX_FRAGMENTS
, s
->sent_fragments
},
1964 {TIPC_NLA_STATS_TX_FRAGMENTED
, s
->sent_fragmented
},
1965 {TIPC_NLA_STATS_TX_BUNDLES
, s
->sent_bundles
},
1966 {TIPC_NLA_STATS_TX_BUNDLED
, s
->sent_bundled
},
/* Guard against division by zero in profile-average consumers. */
1967 {TIPC_NLA_STATS_MSG_PROF_TOT
, (s
->msg_length_counts
) ?
1968 s
->msg_length_counts
: 1},
1969 {TIPC_NLA_STATS_MSG_LEN_CNT
, s
->msg_length_counts
},
1970 {TIPC_NLA_STATS_MSG_LEN_TOT
, s
->msg_lengths_total
},
1971 {TIPC_NLA_STATS_MSG_LEN_P0
, s
->msg_length_profile
[0]},
1972 {TIPC_NLA_STATS_MSG_LEN_P1
, s
->msg_length_profile
[1]},
1973 {TIPC_NLA_STATS_MSG_LEN_P2
, s
->msg_length_profile
[2]},
1974 {TIPC_NLA_STATS_MSG_LEN_P3
, s
->msg_length_profile
[3]},
1975 {TIPC_NLA_STATS_MSG_LEN_P4
, s
->msg_length_profile
[4]},
1976 {TIPC_NLA_STATS_MSG_LEN_P5
, s
->msg_length_profile
[5]},
1977 {TIPC_NLA_STATS_MSG_LEN_P6
, s
->msg_length_profile
[6]},
1978 {TIPC_NLA_STATS_RX_STATES
, s
->recv_states
},
1979 {TIPC_NLA_STATS_RX_PROBES
, s
->recv_probes
},
1980 {TIPC_NLA_STATS_RX_NACKS
, s
->recv_nacks
},
1981 {TIPC_NLA_STATS_RX_DEFERRED
, s
->deferred_recv
},
1982 {TIPC_NLA_STATS_TX_STATES
, s
->sent_states
},
1983 {TIPC_NLA_STATS_TX_PROBES
, s
->sent_probes
},
1984 {TIPC_NLA_STATS_TX_NACKS
, s
->sent_nacks
},
1985 {TIPC_NLA_STATS_TX_ACKS
, s
->sent_acks
},
1986 {TIPC_NLA_STATS_RETRANSMITTED
, s
->retransmitted
},
1987 {TIPC_NLA_STATS_DUPLICATES
, s
->duplicates
},
1988 {TIPC_NLA_STATS_LINK_CONGS
, s
->link_congs
},
1989 {TIPC_NLA_STATS_MAX_QUEUE
, s
->max_queue_sz
},
1990 {TIPC_NLA_STATS_AVG_QUEUE
, s
->queue_sz_counts
?
1991 (s
->accu_queue_sz
/ s
->queue_sz_counts
) : 0}
/* Open the nest, dump every map entry, close the nest; cancel on error. */
1994 stats
= nla_nest_start(skb
, TIPC_NLA_LINK_STATS
);
1998 for (i
= 0; i
< ARRAY_SIZE(map
); i
++)
1999 if (nla_put_u32(skb
, map
[i
].key
, map
[i
].val
))
2002 nla_nest_end(skb
, stats
);
2006 nla_nest_cancel(skb
, stats
);
2011 /* Caller should hold appropriate locks to protect the link */
/* NOTE(review): garbled extraction -- goto targets/labels and some error
 * returns are missing; PRIO is put twice, which matches upstream behaviour.
 * Comments annotate visible logic only.
 */
2012 static int __tipc_nl_add_link(struct net
*net
, struct tipc_nl_msg
*msg
,
2013 struct tipc_link
*link
, int nlflags
)
2017 struct nlattr
*attrs
;
2018 struct nlattr
*prop
;
2019 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
2021 hdr
= genlmsg_put(msg
->skb
, msg
->portid
, msg
->seq
, &tipc_genl_family
,
2022 nlflags
, TIPC_NL_LINK_GET
);
2026 attrs
= nla_nest_start(msg
->skb
, TIPC_NLA_LINK
);
/* Basic link attributes. */
2030 if (nla_put_string(msg
->skb
, TIPC_NLA_LINK_NAME
, link
->name
))
2032 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_DEST
,
2033 tipc_cluster_mask(tn
->own_addr
)))
2035 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_MTU
, link
->mtu
))
2037 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_RX
, link
->next_in_no
))
2039 if (nla_put_u32(msg
->skb
, TIPC_NLA_LINK_TX
, link
->next_out_no
))
/* State flags. */
2042 if (tipc_link_is_up(link
))
2043 if (nla_put_flag(msg
->skb
, TIPC_NLA_LINK_UP
))
2045 if (tipc_link_is_active(link
))
2046 if (nla_put_flag(msg
->skb
, TIPC_NLA_LINK_ACTIVE
))
/* Nested property attributes. */
2049 prop
= nla_nest_start(msg
->skb
, TIPC_NLA_LINK_PROP
);
2052 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_PRIO
, link
->priority
))
2054 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_TOL
, link
->tolerance
))
2056 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_WIN
,
2059 if (nla_put_u32(msg
->skb
, TIPC_NLA_PROP_PRIO
, link
->priority
))
2061 nla_nest_end(msg
->skb
, prop
);
2063 err
= __tipc_nl_add_stats(msg
->skb
, &link
->stats
);
2067 nla_nest_end(msg
->skb
, attrs
);
2068 genlmsg_end(msg
->skb
, hdr
);
/* Error unwinding: cancel nests/message in reverse order of creation. */
2073 nla_nest_cancel(msg
->skb
, prop
);
2075 nla_nest_cancel(msg
->skb
, attrs
);
2077 genlmsg_cancel(msg
->skb
, hdr
);
2082 /* Caller should hold node lock */
/* Dump all links of one node, resuming at *prev_link for paged dumps.
 * NOTE(review): garbled extraction -- err handling, *prev_link update and
 * return lines are missing.
 */
2083 static int __tipc_nl_add_node_links(struct net
*net
, struct tipc_nl_msg
*msg
,
2084 struct tipc_node
*node
, u32
*prev_link
)
2089 for (i
= *prev_link
; i
< MAX_BEARERS
; i
++) {
2092 if (!node
->links
[i
])
2095 err
= __tipc_nl_add_link(net
, msg
, node
->links
[i
], NLM_F_MULTI
);
/* tipc_nl_link_dump - netlink dump handler: walk all nodes and emit every
 * link, resuming from the (node, link) cursor stored in cb->args.
 * NOTE(review): garbled extraction -- rcu_read_lock/unlock, the "done" early
 * exit and several error branches are missing. Comments annotate visible
 * logic only.
 */
2104 int tipc_nl_link_dump(struct sk_buff
*skb
, struct netlink_callback
*cb
)
2106 struct net
*net
= sock_net(skb
->sk
);
2107 struct tipc_net
*tn
= net_generic(net
, tipc_net_id
);
2108 struct tipc_node
*node
;
2109 struct tipc_nl_msg msg
;
/* Resume cursor saved by the previous dump round. */
2110 u32 prev_node
= cb
->args
[0];
2111 u32 prev_link
= cb
->args
[1];
2112 int done
= cb
->args
[2];
2119 msg
.portid
= NETLINK_CB(cb
->skb
).portid
;
2120 msg
.seq
= cb
->nlh
->nlmsg_seq
;
/* Mid-dump resume: continue from the previously visited node. */
2124 node
= tipc_node_find(net
, prev_node
);
2126 /* We never set seq or call nl_dump_check_consistent()
2127 * this means that setting prev_seq here will cause the
2128 * consistence check to fail in the netlink callback
2129 * handler. Resulting in the last NLMSG_DONE message
2130 * having the NLM_F_DUMP_INTR flag set.
2135 tipc_node_put(node
);
2137 list_for_each_entry_continue_rcu(node
, &tn
->node_list
,
2139 tipc_node_lock(node
);
2140 err
= __tipc_nl_add_node_links(net
, &msg
, node
,
2142 tipc_node_unlock(node
);
2146 prev_node
= node
->addr
;
/* Fresh dump: emit the broadcast link first, then every node's links. */
2149 err
= tipc_nl_add_bc_link(net
, &msg
);
2153 list_for_each_entry_rcu(node
, &tn
->node_list
, list
) {
2154 tipc_node_lock(node
);
2155 err
= __tipc_nl_add_node_links(net
, &msg
, node
,
2157 tipc_node_unlock(node
);
2161 prev_node
= node
->addr
;
/* Persist cursor for the next round. */
2168 cb
->args
[0] = prev_node
;
2169 cb
->args
[1] = prev_link
;
/* tipc_nl_link_get - netlink handler: reply with the attributes of one
 * named link.
 * NOTE(review): garbled extraction -- declarations (name, bearer_id, err),
 * error returns and the failure label are missing. Comments annotate
 * visible logic only.
 */
2175 int tipc_nl_link_get(struct sk_buff
*skb
, struct genl_info
*info
)
2177 struct net
*net
= genl_info_net(info
);
2178 struct sk_buff
*ans_skb
;
2179 struct tipc_nl_msg msg
;
2180 struct tipc_link
*link
;
2181 struct tipc_node
*node
;
2186 if (!info
->attrs
[TIPC_NLA_LINK_NAME
])
2189 name
= nla_data(info
->attrs
[TIPC_NLA_LINK_NAME
]);
2190 node
= tipc_link_find_owner(net
, name
, &bearer_id
);
/* Build the reply message. */
2194 ans_skb
= nlmsg_new(NLMSG_GOODSIZE
, GFP_KERNEL
);
2199 msg
.portid
= info
->snd_portid
;
2200 msg
.seq
= info
->snd_seq
;
/* Read the link under the owner node lock. */
2202 tipc_node_lock(node
);
2203 link
= node
->links
[bearer_id
];
2209 err
= __tipc_nl_add_link(net
, &msg
, link
, 0);
2213 tipc_node_unlock(node
);
2215 return genlmsg_reply(ans_skb
, info
);
/* Error path: unlock and discard the partially built reply. */
2218 tipc_node_unlock(node
);
2219 nlmsg_free(ans_skb
);
2224 int tipc_nl_link_reset_stats(struct sk_buff
*skb
, struct genl_info
*info
)
2228 unsigned int bearer_id
;
2229 struct tipc_link
*link
;
2230 struct tipc_node
*node
;
2231 struct nlattr
*attrs
[TIPC_NLA_LINK_MAX
+ 1];
2232 struct net
*net
= sock_net(skb
->sk
);
2234 if (!info
->attrs
[TIPC_NLA_LINK
])
2237 err
= nla_parse_nested(attrs
, TIPC_NLA_LINK_MAX
,
2238 info
->attrs
[TIPC_NLA_LINK
],
2239 tipc_nl_link_policy
);
2243 if (!attrs
[TIPC_NLA_LINK_NAME
])
2246 link_name
= nla_data(attrs
[TIPC_NLA_LINK_NAME
]);
2248 if (strcmp(link_name
, tipc_bclink_name
) == 0) {
2249 err
= tipc_bclink_reset_stats(net
);
2255 node
= tipc_link_find_owner(net
, link_name
, &bearer_id
);
2259 tipc_node_lock(node
);
2261 link
= node
->links
[bearer_id
];
2263 tipc_node_unlock(node
);
2267 link_reset_statistics(link
);
2269 tipc_node_unlock(node
);