Commit | Line | Data |
---|---|---|
b97bf3fd PL |
1 | /* |
2 | * net/tipc/node.c: TIPC node management routines | |
3 | * | |
593a5f22 | 4 | * Copyright (c) 2000-2006, Ericsson AB |
b97bf3fd | 5 | * Copyright (c) 2005, Wind River Systems |
b97bf3fd PL |
6 | * All rights reserved. |
7 | * | |
9ea1fd3c | 8 | * Redistribution and use in source and binary forms, with or without |
b97bf3fd PL |
9 | * modification, are permitted provided that the following conditions are met: |
10 | * | |
9ea1fd3c PL |
11 | * 1. Redistributions of source code must retain the above copyright |
12 | * notice, this list of conditions and the following disclaimer. | |
13 | * 2. Redistributions in binary form must reproduce the above copyright | |
14 | * notice, this list of conditions and the following disclaimer in the | |
15 | * documentation and/or other materials provided with the distribution. | |
16 | * 3. Neither the names of the copyright holders nor the names of its | |
17 | * contributors may be used to endorse or promote products derived from | |
18 | * this software without specific prior written permission. | |
b97bf3fd | 19 | * |
9ea1fd3c PL |
20 | * Alternatively, this software may be distributed under the terms of the |
21 | * GNU General Public License ("GPL") version 2 as published by the Free | |
22 | * Software Foundation. | |
23 | * | |
24 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | |
25 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
26 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |
27 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | |
28 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | |
29 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | |
30 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS | |
31 | * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN | |
32 | * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | |
33 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | |
b97bf3fd PL |
34 | * POSSIBILITY OF SUCH DAMAGE. |
35 | */ | |
36 | ||
37 | #include "core.h" | |
38 | #include "config.h" | |
39 | #include "node.h" | |
40 | #include "cluster.h" | |
41 | #include "net.h" | |
42 | #include "addr.h" | |
43 | #include "node_subscr.h" | |
44 | #include "link.h" | |
45 | #include "port.h" | |
46 | #include "bearer.h" | |
47 | #include "name_distr.h" | |
48 | #include "net.h" | |
49 | ||
50 | void node_print(struct print_buf *buf, struct node *n_ptr, char *str); | |
51 | static void node_lost_contact(struct node *n_ptr); | |
52 | static void node_established_contact(struct node *n_ptr); | |
53 | ||
54 | struct node *nodes = NULL; /* sorted list of nodes within cluster */ | |
55 | ||
56 | u32 tipc_own_tag = 0; | |
57 | ||
58 | struct node *node_create(u32 addr) | |
59 | { | |
60 | struct cluster *c_ptr; | |
61 | struct node *n_ptr; | |
62 | struct node **curr_node; | |
63 | ||
64 | n_ptr = kmalloc(sizeof(*n_ptr),GFP_ATOMIC); | |
65 | if (n_ptr != NULL) { | |
66 | memset(n_ptr, 0, sizeof(*n_ptr)); | |
67 | n_ptr->addr = addr; | |
68 | n_ptr->lock = SPIN_LOCK_UNLOCKED; | |
69 | INIT_LIST_HEAD(&n_ptr->nsub); | |
70 | ||
71 | c_ptr = cluster_find(addr); | |
72 | if (c_ptr == NULL) | |
73 | c_ptr = cluster_create(addr); | |
74 | if (c_ptr != NULL) { | |
75 | n_ptr->owner = c_ptr; | |
76 | cluster_attach_node(c_ptr, n_ptr); | |
77 | n_ptr->last_router = -1; | |
78 | ||
79 | /* Insert node into ordered list */ | |
80 | for (curr_node = &nodes; *curr_node; | |
81 | curr_node = &(*curr_node)->next) { | |
82 | if (addr < (*curr_node)->addr) { | |
83 | n_ptr->next = *curr_node; | |
84 | break; | |
85 | } | |
86 | } | |
87 | (*curr_node) = n_ptr; | |
88 | } else { | |
89 | kfree(n_ptr); | |
90 | n_ptr = NULL; | |
91 | } | |
92 | } | |
93 | return n_ptr; | |
94 | } | |
95 | ||
96 | void node_delete(struct node *n_ptr) | |
97 | { | |
98 | if (!n_ptr) | |
99 | return; | |
100 | ||
101 | #if 0 | |
102 | /* Not needed because links are already deleted via bearer_stop() */ | |
103 | ||
104 | u32 l_num; | |
105 | ||
106 | for (l_num = 0; l_num < MAX_BEARERS; l_num++) { | |
107 | link_delete(n_ptr->links[l_num]); | |
108 | } | |
109 | #endif | |
110 | ||
111 | dbg("node %x deleted\n", n_ptr->addr); | |
112 | kfree(n_ptr); | |
113 | } | |
114 | ||
115 | ||
/**
 * node_link_up - handle addition of link
 * @n_ptr: node the new link leads to
 * @l_ptr: link that has just come up
 *
 * Link becomes active (alone or shared) or standby, depending on its priority.
 * active_links[0]/[1] hold the current active link(s); both slots point to
 * the same link when only one link is active.
 */

void node_link_up(struct node *n_ptr, struct link *l_ptr)
{
	struct link **active = &n_ptr->active_links[0];

	info("Established link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	/* First working link to this node: it fills both active slots */
	if (!active[0]) {
		dbg(" link %x into %x/%x\n", l_ptr, &active[0], &active[1]);
		active[0] = active[1] = l_ptr;
		node_established_contact(n_ptr);
		return;
	}
	/* Lower priority than current active link(s): keep it as standby */
	if (l_ptr->priority < active[0]->priority) {
		info("Link is standby\n");
		return;
	}
	/* NOTE(review): link_send_duplicate() presumably copies outstanding
	 * traffic onto the new link before it takes over -- confirm */
	link_send_duplicate(active[0], l_ptr);
	/* Equal priority: new link shares load (takes one active slot) */
	if (l_ptr->priority == active[0]->priority) {
		active[0] = l_ptr;
		return;
	}
	/* Higher priority: previous active link(s) demoted to standby */
	info("Link <%s> on network plane %c becomes standby\n",
	     active[0]->name, active[0]->b_ptr->net_plane);
	active[0] = active[1] = l_ptr;
}
148 | ||
149 | /** | |
150 | * node_select_active_links - select active link | |
151 | */ | |
152 | ||
153 | static void node_select_active_links(struct node *n_ptr) | |
154 | { | |
155 | struct link **active = &n_ptr->active_links[0]; | |
156 | u32 i; | |
157 | u32 highest_prio = 0; | |
158 | ||
159 | active[0] = active[1] = 0; | |
160 | ||
161 | for (i = 0; i < MAX_BEARERS; i++) { | |
162 | struct link *l_ptr = n_ptr->links[i]; | |
163 | ||
164 | if (!l_ptr || !link_is_up(l_ptr) || | |
165 | (l_ptr->priority < highest_prio)) | |
166 | continue; | |
167 | ||
168 | if (l_ptr->priority > highest_prio) { | |
169 | highest_prio = l_ptr->priority; | |
170 | active[0] = active[1] = l_ptr; | |
171 | } else { | |
172 | active[1] = l_ptr; | |
173 | } | |
174 | } | |
175 | } | |
176 | ||
/**
 * node_link_down - handle loss of link
 * @n_ptr: node the lost link led to
 * @l_ptr: link that has gone down
 */

void node_link_down(struct node *n_ptr, struct link *l_ptr)
{
	struct link **active;

	/* A standby link going down never affects the active pair */
	if (!link_is_active(l_ptr)) {
		info("Lost standby link <%s> on network plane %c\n",
		     l_ptr->name, l_ptr->b_ptr->net_plane);
		return;
	}
	info("Lost link <%s> on network plane %c\n",
	     l_ptr->name, l_ptr->b_ptr->net_plane);

	/* Replace the lost link with its partner in each slot; if both
	 * slots held the lost link, do a full reselection */
	active = &n_ptr->active_links[0];
	if (active[0] == l_ptr)
		active[0] = active[1];
	if (active[1] == l_ptr)
		active[1] = active[0];
	if (active[0] == l_ptr)
		node_select_active_links(n_ptr);
	/* Still reachable: migrate traffic; otherwise declare node lost */
	if (node_is_up(n_ptr))
		link_changeover(l_ptr);
	else
		node_lost_contact(n_ptr);
}
205 | ||
206 | int node_has_active_links(struct node *n_ptr) | |
207 | { | |
208 | return (n_ptr && | |
209 | ((n_ptr->active_links[0]) || (n_ptr->active_links[1]))); | |
210 | } | |
211 | ||
212 | int node_has_redundant_links(struct node *n_ptr) | |
213 | { | |
214 | return (node_has_active_links(n_ptr) && | |
215 | (n_ptr->active_links[0] != n_ptr->active_links[1])); | |
216 | } | |
217 | ||
218 | int node_has_active_routes(struct node *n_ptr) | |
219 | { | |
220 | return (n_ptr && (n_ptr->last_router >= 0)); | |
221 | } | |
222 | ||
/* A node is "up" when reachable directly (active link) or via a router */
int node_is_up(struct node *n_ptr)
{
	if (node_has_active_links(n_ptr))
		return 1;
	return node_has_active_routes(n_ptr);
}
227 | ||
228 | struct node *node_attach_link(struct link *l_ptr) | |
229 | { | |
230 | struct node *n_ptr = node_find(l_ptr->addr); | |
231 | ||
232 | if (!n_ptr) | |
233 | n_ptr = node_create(l_ptr->addr); | |
234 | if (n_ptr) { | |
235 | u32 bearer_id = l_ptr->b_ptr->identity; | |
236 | char addr_string[16]; | |
237 | ||
238 | assert(bearer_id < MAX_BEARERS); | |
239 | if (n_ptr->link_cnt >= 2) { | |
240 | char addr_string[16]; | |
241 | ||
242 | err("Attempt to create third link to %s\n", | |
243 | addr_string_fill(addr_string, n_ptr->addr)); | |
244 | return 0; | |
245 | } | |
246 | ||
247 | if (!n_ptr->links[bearer_id]) { | |
248 | n_ptr->links[bearer_id] = l_ptr; | |
249 | net.zones[tipc_zone(l_ptr->addr)]->links++; | |
250 | n_ptr->link_cnt++; | |
251 | return n_ptr; | |
252 | } | |
253 | err("Attempt to establish second link on <%s> to <%s> \n", | |
254 | l_ptr->b_ptr->publ.name, | |
255 | addr_string_fill(addr_string, l_ptr->addr)); | |
256 | } | |
257 | return 0; | |
258 | } | |
259 | ||
260 | void node_detach_link(struct node *n_ptr, struct link *l_ptr) | |
261 | { | |
262 | n_ptr->links[l_ptr->b_ptr->identity] = 0; | |
263 | net.zones[tipc_zone(l_ptr->addr)]->links--; | |
264 | n_ptr->link_cnt--; | |
265 | } | |
266 | ||
267 | /* | |
268 | * Routing table management - five cases to handle: | |
269 | * | |
270 | * 1: A link towards a zone/cluster external node comes up. | |
271 | * => Send a multicast message updating routing tables of all | |
272 | * system nodes within own cluster that the new destination | |
273 | * can be reached via this node. | |
274 | * (node.establishedContact()=>cluster.multicastNewRoute()) | |
275 | * | |
276 | * 2: A link towards a slave node comes up. | |
277 | * => Send a multicast message updating routing tables of all | |
278 | * system nodes within own cluster that the new destination | |
279 | * can be reached via this node. | |
280 | * (node.establishedContact()=>cluster.multicastNewRoute()) | |
281 | * => Send a message to the slave node about existence | |
282 | * of all system nodes within cluster: | |
283 | * (node.establishedContact()=>cluster.sendLocalRoutes()) | |
284 | * | |
285 | * 3: A new cluster local system node becomes available. | |
286 | * => Send message(s) to this particular node containing | |
287 | * information about all cluster external and slave | |
288 | * nodes which can be reached via this node. | |
289 | * (node.establishedContact()==>network.sendExternalRoutes()) | |
290 | * (node.establishedContact()==>network.sendSlaveRoutes()) | |
291 | * => Send messages to all directly connected slave nodes | |
292 | * containing information about the existence of the new node | |
293 | * (node.establishedContact()=>cluster.multicastNewRoute()) | |
294 | * | |
295 | * 4: The link towards a zone/cluster external node or slave | |
296 | * node goes down. | |
 *    => Send a multicast message updating routing tables of all
298 | * nodes within cluster that the new destination can not any | |
299 | * longer be reached via this node. | |
300 | * (node.lostAllLinks()=>cluster.bcastLostRoute()) | |
301 | * | |
302 | * 5: A cluster local system node becomes unavailable. | |
303 | * => Remove all references to this node from the local | |
304 | * routing tables. Note: This is a completely node | |
305 | * local operation. | |
306 | * (node.lostAllLinks()=>network.removeAsRouter()) | |
307 | * => Send messages to all directly connected slave nodes | |
308 | * containing information about loss of the node | |
309 | * (node.establishedContact()=>cluster.multicastLostRoute()) | |
310 | * | |
311 | */ | |
312 | ||
/**
 * node_established_contact - handle first contact with node
 *
 * Implements routing-table usage cases 1-3 described in the comment
 * block above.
 */

static void node_established_contact(struct node *n_ptr)
{
	struct cluster *c_ptr;

	dbg("node_established_contact:-> %x\n", n_ptr->addr);
	/* Only announce via name table if node was not already routable */
	if (!node_has_active_routes(n_ptr)) {
		k_signal((Handler)named_node_up, n_ptr->addr);
	}

	/* Synchronize broadcast acks */
	n_ptr->bclink.acked = bclink_get_last_sent();

	/* Slave nodes do not distribute routes */
	if (is_slave(tipc_own_addr))
		return;
	if (!in_own_cluster(n_ptr->addr)) {
		/* Usage case 1 (see above) */
		c_ptr = cluster_find(tipc_own_addr);
		if (!c_ptr)
			c_ptr = cluster_create(tipc_own_addr);
		if (c_ptr)
			cluster_bcast_new_route(c_ptr, n_ptr->addr, 1,
						tipc_max_nodes);
		return;
	}

	c_ptr = n_ptr->owner;
	if (is_slave(n_ptr->addr)) {
		/* Usage case 2 (see above) */
		cluster_bcast_new_route(c_ptr, n_ptr->addr, 1, tipc_max_nodes);
		cluster_send_local_routes(c_ptr, n_ptr->addr);
		return;
	}

	if (n_ptr->bclink.supported) {
		nmap_add(&cluster_bcast_nodes, n_ptr->addr);
		/* NOTE(review): tag appears to count broadcast-capable
		 * peers with lower addresses -- confirm semantics */
		if (n_ptr->addr < tipc_own_addr)
			tipc_own_tag++;
	}

	/* Case 3 (see above) */
	net_send_external_routes(n_ptr->addr);
	cluster_send_slave_routes(c_ptr, n_ptr->addr);
	cluster_bcast_new_route(c_ptr, n_ptr->addr, LOWEST_SLAVE,
				highest_allowed_slave);
}
358 | ||
/**
 * node_lost_contact - handle loss of last link/route to node
 *
 * Cleans up broadcast state, updates routing tables (usage cases 4-5
 * described above), aborts any link changeover in progress, and
 * notifies subscribers of the node's demise.
 */

static void node_lost_contact(struct node *n_ptr)
{
	struct cluster *c_ptr;
	struct node_subscr *ns, *tns;
	char addr_string[16];
	u32 i;

	/* Clean up broadcast reception remains */
	n_ptr->bclink.gap_after = n_ptr->bclink.gap_to = 0;
	while (n_ptr->bclink.deferred_head) {
		struct sk_buff* buf = n_ptr->bclink.deferred_head;
		n_ptr->bclink.deferred_head = buf->next;
		buf_discard(buf);
	}
	if (n_ptr->bclink.defragm) {
		buf_discard(n_ptr->bclink.defragm);
		n_ptr->bclink.defragm = NULL;
	}
	if (in_own_cluster(n_ptr->addr) && n_ptr->bclink.supported) {
		bclink_acknowledge(n_ptr, mod(n_ptr->bclink.acked + 10000));
	}

	/* Update routing tables */
	if (is_slave(tipc_own_addr)) {
		net_remove_as_router(n_ptr->addr);
	} else {
		if (!in_own_cluster(n_ptr->addr)) {
			/* Case 4 (see above) */
			c_ptr = cluster_find(tipc_own_addr);
			cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
						 tipc_max_nodes);
		} else {
			/* Case 5 (see above) */
			c_ptr = cluster_find(n_ptr->addr);
			if (is_slave(n_ptr->addr)) {
				cluster_bcast_lost_route(c_ptr, n_ptr->addr, 1,
							 tipc_max_nodes);
			} else {
				if (n_ptr->bclink.supported) {
					nmap_remove(&cluster_bcast_nodes,
						    n_ptr->addr);
					if (n_ptr->addr < tipc_own_addr)
						tipc_own_tag--;
				}
				net_remove_as_router(n_ptr->addr);
				cluster_bcast_lost_route(c_ptr, n_ptr->addr,
							 LOWEST_SLAVE,
							 highest_allowed_slave);
			}
		}
	}
	/* Node may still be reachable via a router: nothing more to do */
	if (node_has_active_routes(n_ptr))
		return;

	info("Lost contact with %s\n",
	     addr_string_fill(addr_string, n_ptr->addr));

	/* Abort link changeover */
	for (i = 0; i < MAX_BEARERS; i++) {
		struct link *l_ptr = n_ptr->links[i];
		if (!l_ptr)
			continue;
		l_ptr->reset_checkpoint = l_ptr->next_in_no;
		l_ptr->exp_msg_count = 0;
		link_reset_fragments(l_ptr);
	}

	/* Notify subscribers */
	list_for_each_entry_safe(ns, tns, &n_ptr->nsub, nodesub_list) {
		ns->node = 0;
		list_del_init(&ns->nodesub_list);
		k_signal((Handler)ns->handle_node_down,
			 (unsigned long)ns->usr_handle);
	}
}
434 | ||
/**
 * node_select_next_hop - find the next-hop node for a message
 * @addr: destination address
 * @selector: traffic selector used to spread load over equivalent routes
 *
 * Called when cluster local lookup has failed.
 *
 * Returns next-hop node, or 0 (NULL) if the destination is unreachable.
 */

struct node *node_select_next_hop(u32 addr, u32 selector)
{
	struct node *n_ptr;
	u32 router_addr;

	if (!addr_domain_valid(addr))
		return 0;

	/* Look for direct link to destination processor */
	n_ptr = node_find(addr);
	if (n_ptr && node_has_active_links(n_ptr))
		return n_ptr;

	/* Cluster local system nodes *must* have direct links */
	if (!is_slave(addr) && in_own_cluster(addr))
		return 0;

	/* Look for cluster local router with direct link to node */
	router_addr = node_select_router(n_ptr, selector);
	if (router_addr)
		return node_select(router_addr, selector);

	/* Slave nodes can only be accessed within own cluster via a
	   known router with direct link -- if no router was found, give up */
	if (is_slave(addr))
		return 0;

	/* Inter zone/cluster -- find any direct link to remote cluster */
	addr = tipc_addr(tipc_zone(addr), tipc_cluster(addr), 0);
	n_ptr = net_select_remote_node(addr, selector);
	if (n_ptr && node_has_active_links(n_ptr))
		return n_ptr;

	/* Last resort -- look for any router to anywhere in remote zone */
	router_addr = net_select_router(addr, selector);
	if (router_addr)
		return node_select(router_addr, selector);

	return 0;
}
481 | ||
/**
 * node_select_router - select router to reach specified node
 * @n_ptr: destination node (its 'routers' bitmap lists candidate routers)
 * @ref: traffic selector used to pick the starting scan position
 *
 * Uses a deterministic and fair algorithm for selecting router node:
 * the router bitmap (32 node numbers per word) is scanned circularly,
 * starting at a position derived from @ref so that different selectors
 * spread over different routers.
 */

u32 node_select_router(struct node *n_ptr, u32 ref)
{
	u32 ulim;
	u32 mask;
	u32 start;
	u32 r;

	if (!n_ptr)
		return 0;

	/* last_router < 0 means the router bitmap is empty */
	if (n_ptr->last_router < 0)
		return 0;
	/* Highest node number covered by the occupied bitmap words */
	ulim = ((n_ptr->last_router + 1) * 32) - 1;

	/* Start entry must be random */
	/* mask reduced until it fits inside the occupied bitmap range */
	mask = tipc_max_nodes;
	while (mask > ulim)
		mask >>= 1;
	start = ref & mask;
	r = start;

	/* Lookup upwards with wrap-around */
	do {
		if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
			break;
	} while (++r <= ulim);
	if (r > ulim) {
		/* Wrap: rescan from node 1 up to the starting point */
		r = 1;
		do {
			if (((n_ptr->routers[r / 32]) >> (r % 32)) & 1)
				break;
		} while (++r < start);
		assert(r != start);
	}
	assert(r && (r <= ulim));
	return tipc_addr(own_zone(), own_cluster(), r);
}
525 | ||
526 | void node_add_router(struct node *n_ptr, u32 router) | |
527 | { | |
528 | u32 r_num = tipc_node(router); | |
529 | ||
530 | n_ptr->routers[r_num / 32] = | |
531 | ((1 << (r_num % 32)) | n_ptr->routers[r_num / 32]); | |
532 | n_ptr->last_router = tipc_max_nodes / 32; | |
533 | while ((--n_ptr->last_router >= 0) && | |
534 | !n_ptr->routers[n_ptr->last_router]); | |
535 | } | |
536 | ||
537 | void node_remove_router(struct node *n_ptr, u32 router) | |
538 | { | |
539 | u32 r_num = tipc_node(router); | |
540 | ||
541 | if (n_ptr->last_router < 0) | |
542 | return; /* No routes */ | |
543 | ||
544 | n_ptr->routers[r_num / 32] = | |
545 | ((~(1 << (r_num % 32))) & (n_ptr->routers[r_num / 32])); | |
546 | n_ptr->last_router = tipc_max_nodes / 32; | |
547 | while ((--n_ptr->last_router >= 0) && | |
548 | !n_ptr->routers[n_ptr->last_router]); | |
549 | ||
550 | if (!node_is_up(n_ptr)) | |
551 | node_lost_contact(n_ptr); | |
552 | } | |
553 | ||
#if 0
/* Debug-only dump of a node's links and active-link pair
 * (currently compiled out; prototype remains near top of file) */
void node_print(struct print_buf *buf, struct node *n_ptr, char *str)
{
	u32 i;

	tipc_printf(buf, "\n\n%s", str);
	for (i = 0; i < MAX_BEARERS; i++) {
		if (!n_ptr->links[i])
			continue;
		tipc_printf(buf, "Links[%u]: %x, ", i, n_ptr->links[i]);
	}
	tipc_printf(buf, "Active links: [%x,%x]\n",
		    n_ptr->active_links[0], n_ptr->active_links[1]);
}
#endif
569 | ||
570 | u32 tipc_available_nodes(const u32 domain) | |
571 | { | |
572 | struct node *n_ptr; | |
573 | u32 cnt = 0; | |
574 | ||
575 | for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) { | |
576 | if (!in_scope(domain, n_ptr->addr)) | |
577 | continue; | |
578 | if (node_is_up(n_ptr)) | |
579 | cnt++; | |
580 | } | |
581 | return cnt; | |
582 | } | |
583 | ||
/**
 * node_get_nodes - build configuration reply listing nodes in a domain
 * @req_tlv_area: request TLV (network address defining domain of interest)
 * @req_tlv_space: size of request TLV area
 *
 * Returns reply buffer with one TIPC_TLV_NODE_INFO per node in scope,
 * or NULL if reply buffer allocation fails.
 */

struct sk_buff *node_get_nodes(const void *req_tlv_area, int req_tlv_space)
{
	u32 domain;
	struct sk_buff *buf;
	struct node *n_ptr;
	struct tipc_node_info node_info;

	if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR))
		return cfg_reply_error_string(TIPC_CFG_TLV_ERROR);

	/* Request payload is the domain address in network byte order */
	domain = *(u32 *)TLV_DATA(req_tlv_area);
	domain = ntohl(domain);
	if (!addr_domain_valid(domain))
		return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE
					      " (network address)");

	if (!nodes)
		return cfg_reply_none();

	/* For now, get space for all other nodes
	   (will need to modify this when slave nodes are supported) */

	buf = cfg_reply_alloc(TLV_SPACE(sizeof(node_info)) *
			      (tipc_max_nodes - 1));
	if (!buf)
		return NULL;

	/* Add TLVs for all nodes in scope */

	for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) {
		if (!in_scope(domain, n_ptr->addr))
			continue;
		node_info.addr = htonl(n_ptr->addr);
		node_info.up = htonl(node_is_up(n_ptr));
		cfg_append_tlv(buf, TIPC_TLV_NODE_INFO,
			       &node_info, sizeof(node_info));
	}

	return buf;
}
624 | ||
625 | struct sk_buff *node_get_links(const void *req_tlv_area, int req_tlv_space) | |
626 | { | |
627 | u32 domain; | |
628 | struct sk_buff *buf; | |
629 | struct node *n_ptr; | |
630 | struct tipc_link_info link_info; | |
631 | ||
632 | if (!TLV_CHECK(req_tlv_area, req_tlv_space, TIPC_TLV_NET_ADDR)) | |
633 | return cfg_reply_error_string(TIPC_CFG_TLV_ERROR); | |
634 | ||
635 | domain = *(u32 *)TLV_DATA(req_tlv_area); | |
636 | domain = ntohl(domain); | |
637 | if (!addr_domain_valid(domain)) | |
638 | return cfg_reply_error_string(TIPC_CFG_INVALID_VALUE | |
639 | " (network address)"); | |
640 | ||
641 | if (!nodes) | |
642 | return cfg_reply_none(); | |
643 | ||
644 | /* For now, get space for 2 links to all other nodes + bcast link | |
645 | (will need to modify this when slave nodes are supported */ | |
646 | ||
647 | buf = cfg_reply_alloc(TLV_SPACE(sizeof(link_info)) * | |
648 | (2 * (tipc_max_nodes - 1) + 1)); | |
649 | if (!buf) | |
650 | return NULL; | |
651 | ||
652 | /* Add TLV for broadcast link */ | |
653 | ||
654 | link_info.dest = tipc_own_addr & 0xfffff00; | |
655 | link_info.dest = htonl(link_info.dest); | |
656 | link_info.up = htonl(1); | |
657 | sprintf(link_info.str, bc_link_name); | |
658 | cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, &link_info, sizeof(link_info)); | |
659 | ||
660 | /* Add TLVs for any other links in scope */ | |
661 | ||
662 | for (n_ptr = nodes; n_ptr; n_ptr = n_ptr->next) { | |
663 | u32 i; | |
664 | ||
665 | if (!in_scope(domain, n_ptr->addr)) | |
666 | continue; | |
667 | for (i = 0; i < MAX_BEARERS; i++) { | |
668 | if (!n_ptr->links[i]) | |
669 | continue; | |
670 | link_info.dest = htonl(n_ptr->addr); | |
671 | link_info.up = htonl(link_is_up(n_ptr->links[i])); | |
672 | strcpy(link_info.str, n_ptr->links[i]->name); | |
673 | cfg_append_tlv(buf, TIPC_TLV_LINK_INFO, | |
674 | &link_info, sizeof(link_info)); | |
675 | } | |
676 | } | |
677 | ||
678 | return buf; | |
679 | } |