net/batman-adv/originator.c
1 /* Copyright (C) 2009-2014 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, see <http://www.gnu.org/licenses/>.
16 */
17
18 #include "main.h"
19 #include "distributed-arp-table.h"
20 #include "originator.h"
21 #include "hash.h"
22 #include "translation-table.h"
23 #include "routing.h"
24 #include "gateway_client.h"
25 #include "hard-interface.h"
26 #include "soft-interface.h"
27 #include "bridge_loop_avoidance.h"
28 #include "network-coding.h"
29 #include "fragmentation.h"
30 #include "multicast.h"
31
32 /* hash class keys */
33 static struct lock_class_key batadv_orig_hash_lock_class_key;
34
35 static void batadv_purge_orig(struct work_struct *work);
36
37 /* returns 1 if they are the same originator */
38 int batadv_compare_orig(const struct hlist_node *node, const void *data2)
39 {
40 const void *data1 = container_of(node, struct batadv_orig_node,
41 hash_entry);
42
43 return batadv_compare_eth(data1, data2);
44 }
45
46 /**
47 * batadv_orig_node_vlan_get - get an orig_node_vlan object
48 * @orig_node: the originator serving the VLAN
49 * @vid: the VLAN identifier
50 *
51 * Returns the vlan object identified by vid and belonging to orig_node or NULL
52 * if it does not exist.
53 */
54 struct batadv_orig_node_vlan *
55 batadv_orig_node_vlan_get(struct batadv_orig_node *orig_node,
56 unsigned short vid)
57 {
58 struct batadv_orig_node_vlan *vlan = NULL, *tmp;
59
60 rcu_read_lock();
61 list_for_each_entry_rcu(tmp, &orig_node->vlan_list, list) {
62 if (tmp->vid != vid)
63 continue;
64
65 if (!atomic_inc_not_zero(&tmp->refcount))
66 continue;
67
68 vlan = tmp;
69
70 break;
71 }
72 rcu_read_unlock();
73
74 return vlan;
75 }
76
77 /**
78 * batadv_orig_node_vlan_new - search and possibly create an orig_node_vlan
79 * object
80 * @orig_node: the originator serving the VLAN
81 * @vid: the VLAN identifier
82 *
83 * Returns NULL in case of failure or the vlan object identified by vid and
84 * belonging to orig_node otherwise. The object is created and added to the list
85 * if it does not exist.
86 *
87 * The object is returned with refcounter increased by 1.
88 */
89 struct batadv_orig_node_vlan *
90 batadv_orig_node_vlan_new(struct batadv_orig_node *orig_node,
91 unsigned short vid)
92 {
93 struct batadv_orig_node_vlan *vlan;
94
95 spin_lock_bh(&orig_node->vlan_list_lock);
96
97 /* first look if an object for this vid already exists */
98 vlan = batadv_orig_node_vlan_get(orig_node, vid);
99 if (vlan)
100 goto out;
101
102 vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
103 if (!vlan)
104 goto out;
105
106 atomic_set(&vlan->refcount, 2);
107 vlan->vid = vid;
108
109 list_add_rcu(&vlan->list, &orig_node->vlan_list);
110
111 out:
112 spin_unlock_bh(&orig_node->vlan_list_lock);
113
114 return vlan;
115 }
116
117 /**
118 * batadv_orig_node_vlan_free_ref - decrement the refcounter and possibly free
119 * the originator-vlan object
120 * @orig_vlan: the originator-vlan object to release
121 */
122 void batadv_orig_node_vlan_free_ref(struct batadv_orig_node_vlan *orig_vlan)
123 {
124 if (atomic_dec_and_test(&orig_vlan->refcount))
125 kfree_rcu(orig_vlan, rcu);
126 }
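
/* Illustrative sketch, not part of the original file: the refcount of 2 set in
 * batadv_orig_node_vlan_new() appears to cover one reference held by
 * orig_node->vlan_list and one handed to the caller, so the caller only drops
 * its own reference when done:
 *
 *	vlan = batadv_orig_node_vlan_new(orig_node, vid);
 *	if (!vlan)
 *		return -ENOMEM;
 *	... initialise or update the per-VLAN data ...
 *	batadv_orig_node_vlan_free_ref(vlan);
 */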
127
128 int batadv_originator_init(struct batadv_priv *bat_priv)
129 {
130 if (bat_priv->orig_hash)
131 return 0;
132
133 bat_priv->orig_hash = batadv_hash_new(1024);
134
135 if (!bat_priv->orig_hash)
136 goto err;
137
138 batadv_hash_set_lock_class(bat_priv->orig_hash,
139 &batadv_orig_hash_lock_class_key);
140
141 INIT_DELAYED_WORK(&bat_priv->orig_work, batadv_purge_orig);
142 queue_delayed_work(batadv_event_workqueue,
143 &bat_priv->orig_work,
144 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
145
146 return 0;
147
148 err:
149 return -ENOMEM;
150 }
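
/* Illustrative sketch, not part of the original file: batadv_originator_init()
 * is safe against repeated calls (it returns 0 early when orig_hash is already
 * set up) and is paired with batadv_originator_free() below, which cancels the
 * purge work and releases all originators. A mesh setup/teardown path would
 * roughly look like:
 *
 *	if (batadv_originator_init(bat_priv) < 0)
 *		goto err;
 *	...
 *	batadv_originator_free(bat_priv);
 */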
151
152 /**
153 * batadv_neigh_ifinfo_free_rcu - free the neigh_ifinfo object
154 * @rcu: rcu pointer of the neigh_ifinfo object
155 */
156 static void batadv_neigh_ifinfo_free_rcu(struct rcu_head *rcu)
157 {
158 struct batadv_neigh_ifinfo *neigh_ifinfo;
159
160 neigh_ifinfo = container_of(rcu, struct batadv_neigh_ifinfo, rcu);
161
162 if (neigh_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
163 batadv_hardif_free_ref_now(neigh_ifinfo->if_outgoing);
164
165 kfree(neigh_ifinfo);
166 }
167
168 /**
169 * batadv_neigh_ifinfo_free_ref_now - decrement the refcounter and possibly free
170 * the neigh_ifinfo (without rcu callback)
171 * @neigh_ifinfo: the neigh_ifinfo object to release
172 */
173 static void
174 batadv_neigh_ifinfo_free_ref_now(struct batadv_neigh_ifinfo *neigh_ifinfo)
175 {
176 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
177 batadv_neigh_ifinfo_free_rcu(&neigh_ifinfo->rcu);
178 }
179
180 /**
181 * batadv_neigh_ifinfo_free_ref - decrement the refcounter and possibly free
182 * the neigh_ifinfo
183 * @neigh_ifinfo: the neigh_ifinfo object to release
184 */
185 void batadv_neigh_ifinfo_free_ref(struct batadv_neigh_ifinfo *neigh_ifinfo)
186 {
187 if (atomic_dec_and_test(&neigh_ifinfo->refcount))
188 call_rcu(&neigh_ifinfo->rcu, batadv_neigh_ifinfo_free_rcu);
189 }
190
191 /**
192 * batadv_neigh_node_free_rcu - free the neigh_node
193 * @rcu: rcu pointer of the neigh_node
194 */
195 static void batadv_neigh_node_free_rcu(struct rcu_head *rcu)
196 {
197 struct hlist_node *node_tmp;
198 struct batadv_neigh_node *neigh_node;
199 struct batadv_neigh_ifinfo *neigh_ifinfo;
200
201 neigh_node = container_of(rcu, struct batadv_neigh_node, rcu);
202
203 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
204 &neigh_node->ifinfo_list, list) {
205 batadv_neigh_ifinfo_free_ref_now(neigh_ifinfo);
206 }
207 batadv_hardif_free_ref_now(neigh_node->if_incoming);
208
209 kfree(neigh_node);
210 }
211
212 /**
213 * batadv_neigh_node_free_ref_now - decrement the neighbor's refcounter
214 * and possibly free it (without rcu callback)
215 * @neigh_node: neighbor node to free
216 */
217 static void
218 batadv_neigh_node_free_ref_now(struct batadv_neigh_node *neigh_node)
219 {
220 if (atomic_dec_and_test(&neigh_node->refcount))
221 batadv_neigh_node_free_rcu(&neigh_node->rcu);
222 }
223
224 /**
225 * batadv_neigh_node_free_ref - decrement the neighbor's refcounter
226 * and possibly free it
227 * @neigh_node: neighbor node to free
228 */
229 void batadv_neigh_node_free_ref(struct batadv_neigh_node *neigh_node)
230 {
231 if (atomic_dec_and_test(&neigh_node->refcount))
232 call_rcu(&neigh_node->rcu, batadv_neigh_node_free_rcu);
233 }
234
235 /**
236 * batadv_orig_router_get - router to the originator depending on iface
237 * @orig_node: the orig node for the router
238 * @if_outgoing: the interface where the payload packet has been received or
239 * the OGM should be sent to
240 *
241 * Returns the neighbor which should be router for this orig_node/iface.
242 *
243 * The object is returned with refcounter increased by 1.
244 */
245 struct batadv_neigh_node *
246 batadv_orig_router_get(struct batadv_orig_node *orig_node,
247 const struct batadv_hard_iface *if_outgoing)
248 {
249 struct batadv_orig_ifinfo *orig_ifinfo;
250 struct batadv_neigh_node *router = NULL;
251
252 rcu_read_lock();
253 hlist_for_each_entry_rcu(orig_ifinfo, &orig_node->ifinfo_list, list) {
254 if (orig_ifinfo->if_outgoing != if_outgoing)
255 continue;
256
257 router = rcu_dereference(orig_ifinfo->router);
258 break;
259 }
260
261 if (router && !atomic_inc_not_zero(&router->refcount))
262 router = NULL;
263
264 rcu_read_unlock();
265 return router;
266 }
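
/* Illustrative sketch, not part of the original file: the neighbor returned by
 * batadv_orig_router_get() carries its own reference, so a caller typically
 * uses it outside the RCU section and releases it afterwards:
 *
 *	router = batadv_orig_router_get(orig_node, BATADV_IF_DEFAULT);
 *	if (!router)
 *		goto out;
 *	... forward the packet via router->if_incoming ...
 *	batadv_neigh_node_free_ref(router);
 */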
267
268 /**
269 * batadv_orig_ifinfo_get - find the ifinfo from an orig_node
270 * @orig_node: the orig node to be queried
271 * @if_outgoing: the interface for which the ifinfo should be acquired
272 *
273 * Returns the requested orig_ifinfo or NULL if not found.
274 *
275 * The object is returned with refcounter increased by 1.
276 */
277 struct batadv_orig_ifinfo *
278 batadv_orig_ifinfo_get(struct batadv_orig_node *orig_node,
279 struct batadv_hard_iface *if_outgoing)
280 {
281 struct batadv_orig_ifinfo *tmp, *orig_ifinfo = NULL;
282
283 rcu_read_lock();
284 hlist_for_each_entry_rcu(tmp, &orig_node->ifinfo_list,
285 list) {
286 if (tmp->if_outgoing != if_outgoing)
287 continue;
288
289 if (!atomic_inc_not_zero(&tmp->refcount))
290 continue;
291
292 orig_ifinfo = tmp;
293 break;
294 }
295 rcu_read_unlock();
296
297 return orig_ifinfo;
298 }
299
300 /**
301 * batadv_orig_ifinfo_new - search and possibly create an orig_ifinfo object
302 * @orig_node: the orig node to be queried
303 * @if_outgoing: the interface for which the ifinfo should be acquired
304 *
305 * Returns NULL in case of failure or the orig_ifinfo object for the if_outgoing
306 * interface otherwise. The object is created and added to the list
307 * if it does not exist.
308 *
309 * The object is returned with refcounter increased by 1.
310 */
311 struct batadv_orig_ifinfo *
312 batadv_orig_ifinfo_new(struct batadv_orig_node *orig_node,
313 struct batadv_hard_iface *if_outgoing)
314 {
315 struct batadv_orig_ifinfo *orig_ifinfo = NULL;
316 unsigned long reset_time;
317
318 spin_lock_bh(&orig_node->neigh_list_lock);
319
320 orig_ifinfo = batadv_orig_ifinfo_get(orig_node, if_outgoing);
321 if (orig_ifinfo)
322 goto out;
323
324 orig_ifinfo = kzalloc(sizeof(*orig_ifinfo), GFP_ATOMIC);
325 if (!orig_ifinfo)
326 goto out;
327
328 if (if_outgoing != BATADV_IF_DEFAULT &&
329 !atomic_inc_not_zero(&if_outgoing->refcount)) {
330 kfree(orig_ifinfo);
331 orig_ifinfo = NULL;
332 goto out;
333 }
334
335 reset_time = jiffies - 1;
336 reset_time -= msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
337 orig_ifinfo->batman_seqno_reset = reset_time;
338 orig_ifinfo->if_outgoing = if_outgoing;
339 INIT_HLIST_NODE(&orig_ifinfo->list);
340 atomic_set(&orig_ifinfo->refcount, 2);
341 hlist_add_head_rcu(&orig_ifinfo->list,
342 &orig_node->ifinfo_list);
343 out:
344 spin_unlock_bh(&orig_node->neigh_list_lock);
345 return orig_ifinfo;
346 }
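
/* Illustrative sketch, not part of the original file: like the other *_new()
 * helpers in this file, batadv_orig_ifinfo_new() leaves one reference on the
 * ifinfo_list and hands one to the caller, so a lookup-or-create user would
 * do:
 *
 *	orig_ifinfo = batadv_orig_ifinfo_new(orig_node, if_outgoing);
 *	if (!orig_ifinfo)
 *		return;
 *	... update the per-interface routing state ...
 *	batadv_orig_ifinfo_free_ref(orig_ifinfo);
 */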
347
348 /**
349 * batadv_neigh_ifinfo_get - find the ifinfo from a neigh_node
350 * @neigh: the neigh node to be queried
351 * @if_outgoing: the interface for which the ifinfo should be acquired
352 *
353 * The object is returned with refcounter increased by 1.
354 *
355 * Returns the requested neigh_ifinfo or NULL if not found
356 */
357 struct batadv_neigh_ifinfo *
358 batadv_neigh_ifinfo_get(struct batadv_neigh_node *neigh,
359 struct batadv_hard_iface *if_outgoing)
360 {
361 struct batadv_neigh_ifinfo *neigh_ifinfo = NULL,
362 *tmp_neigh_ifinfo;
363
364 rcu_read_lock();
365 hlist_for_each_entry_rcu(tmp_neigh_ifinfo, &neigh->ifinfo_list,
366 list) {
367 if (tmp_neigh_ifinfo->if_outgoing != if_outgoing)
368 continue;
369
370 if (!atomic_inc_not_zero(&tmp_neigh_ifinfo->refcount))
371 continue;
372
373 neigh_ifinfo = tmp_neigh_ifinfo;
374 break;
375 }
376 rcu_read_unlock();
377
378 return neigh_ifinfo;
379 }
380
381 /**
382 * batadv_neigh_ifinfo_new - search and possibly create a neigh_ifinfo object
383 * @neigh: the neigh node to be queried
384 * @if_outgoing: the interface for which the ifinfo should be acquired
385 *
386 * Returns NULL in case of failure or the neigh_ifinfo object for the
387 * if_outgoing interface otherwise. The object is created and added to the list
388 * if it does not exist.
389 *
390 * The object is returned with refcounter increased by 1.
391 */
392 struct batadv_neigh_ifinfo *
393 batadv_neigh_ifinfo_new(struct batadv_neigh_node *neigh,
394 struct batadv_hard_iface *if_outgoing)
395 {
396 struct batadv_neigh_ifinfo *neigh_ifinfo;
397
398 spin_lock_bh(&neigh->ifinfo_lock);
399
400 neigh_ifinfo = batadv_neigh_ifinfo_get(neigh, if_outgoing);
401 if (neigh_ifinfo)
402 goto out;
403
404 neigh_ifinfo = kzalloc(sizeof(*neigh_ifinfo), GFP_ATOMIC);
405 if (!neigh_ifinfo)
406 goto out;
407
408 if (if_outgoing && !atomic_inc_not_zero(&if_outgoing->refcount)) {
409 kfree(neigh_ifinfo);
410 neigh_ifinfo = NULL;
411 goto out;
412 }
413
414 INIT_HLIST_NODE(&neigh_ifinfo->list);
415 atomic_set(&neigh_ifinfo->refcount, 2);
416 neigh_ifinfo->if_outgoing = if_outgoing;
417
418 hlist_add_head_rcu(&neigh_ifinfo->list, &neigh->ifinfo_list);
419
420 out:
421 spin_unlock_bh(&neigh->ifinfo_lock);
422
423 return neigh_ifinfo;
424 }
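
/* Illustrative sketch, not part of the original file: metric updates would
 * typically fetch (or create) the entry for one outgoing interface and drop
 * the reference once the update is done; passing BATADV_IF_DEFAULT selects the
 * default entry, which batadv_purge_neigh_ifinfo() below never removes:
 *
 *	neigh_ifinfo = batadv_neigh_ifinfo_new(neigh, if_outgoing);
 *	if (!neigh_ifinfo)
 *		return;
 *	... update the metric stored in neigh_ifinfo ...
 *	batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
 */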
425
426 /**
427 * batadv_neigh_node_new - create and init a new neigh_node object
428 * @hard_iface: the interface the neighbour is connected to
429 * @neigh_addr: the mac address of the neighbour interface
430 * @orig_node: originator object representing the neighbour
431 *
432 * Allocates a new neigh_node object and initialises all the generic fields.
433 * Returns the new object or NULL on failure.
434 */
435 struct batadv_neigh_node *
436 batadv_neigh_node_new(struct batadv_hard_iface *hard_iface,
437 const uint8_t *neigh_addr,
438 struct batadv_orig_node *orig_node)
439 {
440 struct batadv_neigh_node *neigh_node;
441
442 neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC);
443 if (!neigh_node)
444 goto out;
445
446 INIT_HLIST_NODE(&neigh_node->list);
447 INIT_HLIST_HEAD(&neigh_node->ifinfo_list);
448 spin_lock_init(&neigh_node->ifinfo_lock);
449
450 ether_addr_copy(neigh_node->addr, neigh_addr);
451 neigh_node->if_incoming = hard_iface;
452 neigh_node->orig_node = orig_node;
453
454 /* extra reference for return */
455 atomic_set(&neigh_node->refcount, 2);
456
457 out:
458 return neigh_node;
459 }
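
/* Illustrative sketch, not part of the original file: batadv_neigh_node_new()
 * only allocates and initialises the object; linking it into
 * orig_node->neigh_list (under orig_node->neigh_list_lock) is presumably left
 * to the routing algorithm, roughly:
 *
 *	neigh_node = batadv_neigh_node_new(hard_iface, neigh_addr, orig_node);
 *	if (!neigh_node)
 *		goto out;
 *	spin_lock_bh(&orig_node->neigh_list_lock);
 *	hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list);
 *	spin_unlock_bh(&orig_node->neigh_list_lock);
 *
 * Callers also appear to be responsible for taking their own reference on
 * hard_iface first, since batadv_neigh_node_free_rcu() above drops one.
 */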
460
461 /**
462 * batadv_neigh_node_get - retrieve a neighbour from the list
463 * @orig_node: originator which the neighbour belongs to
464 * @hard_iface: the interface this neighbour is connected to
465 * @addr: the address of the neighbour
466 *
467 * Looks for and possibly returns a neighbour from this originator's neighbour
468 * list which is connected through the provided hard interface.
469 * Returns NULL if the neighbour is not found.
470 */
471 struct batadv_neigh_node *
472 batadv_neigh_node_get(const struct batadv_orig_node *orig_node,
473 const struct batadv_hard_iface *hard_iface,
474 const uint8_t *addr)
475 {
476 struct batadv_neigh_node *tmp_neigh_node, *res = NULL;
477
478 rcu_read_lock();
479 hlist_for_each_entry_rcu(tmp_neigh_node, &orig_node->neigh_list, list) {
480 if (!batadv_compare_eth(tmp_neigh_node->addr, addr))
481 continue;
482
483 if (tmp_neigh_node->if_incoming != hard_iface)
484 continue;
485
486 if (!atomic_inc_not_zero(&tmp_neigh_node->refcount))
487 continue;
488
489 res = tmp_neigh_node;
490 break;
491 }
492 rcu_read_unlock();
493
494 return res;
495 }
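
/* Illustrative sketch, not part of the original file: a lookup-or-create
 * pattern built from the two helpers above, with the reference from either
 * path released by the caller when done:
 *
 *	neigh_node = batadv_neigh_node_get(orig_node, hard_iface, addr);
 *	if (!neigh_node)
 *		neigh_node = batadv_neigh_node_new(hard_iface, addr, orig_node);
 *	if (!neigh_node)
 *		goto out;
 *	...
 *	batadv_neigh_node_free_ref(neigh_node);
 */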
496
497 /**
498 * batadv_orig_ifinfo_free_rcu - free the orig_ifinfo object
499 * @rcu: rcu pointer of the orig_ifinfo object
500 */
501 static void batadv_orig_ifinfo_free_rcu(struct rcu_head *rcu)
502 {
503 struct batadv_orig_ifinfo *orig_ifinfo;
504 struct batadv_neigh_node *router;
505
506 orig_ifinfo = container_of(rcu, struct batadv_orig_ifinfo, rcu);
507
508 if (orig_ifinfo->if_outgoing != BATADV_IF_DEFAULT)
509 batadv_hardif_free_ref_now(orig_ifinfo->if_outgoing);
510
511 /* this is the last reference to this object */
512 router = rcu_dereference_protected(orig_ifinfo->router, true);
513 if (router)
514 batadv_neigh_node_free_ref_now(router);
515 kfree(orig_ifinfo);
516 }
517
518 /**
519 * batadv_orig_ifinfo_free_ref_now - decrement the refcounter and possibly free
520 * the orig_ifinfo (without rcu callback)
521 * @orig_ifinfo: the orig_ifinfo object to release
522 */
523 static void
524 batadv_orig_ifinfo_free_ref_now(struct batadv_orig_ifinfo *orig_ifinfo)
525 {
526 if (atomic_dec_and_test(&orig_ifinfo->refcount))
527 batadv_orig_ifinfo_free_rcu(&orig_ifinfo->rcu);
528 }
529
530 /**
531 * batadv_orig_ifinfo_free_ref - decrement the refcounter and possibly free
532 * the orig_ifinfo
533 * @orig_ifinfo: the orig_ifinfo object to release
534 */
535 void batadv_orig_ifinfo_free_ref(struct batadv_orig_ifinfo *orig_ifinfo)
536 {
537 if (atomic_dec_and_test(&orig_ifinfo->refcount))
538 call_rcu(&orig_ifinfo->rcu, batadv_orig_ifinfo_free_rcu);
539 }
540
541 static void batadv_orig_node_free_rcu(struct rcu_head *rcu)
542 {
543 struct hlist_node *node_tmp;
544 struct batadv_neigh_node *neigh_node;
545 struct batadv_orig_node *orig_node;
546 struct batadv_orig_ifinfo *orig_ifinfo;
547
548 orig_node = container_of(rcu, struct batadv_orig_node, rcu);
549
550 spin_lock_bh(&orig_node->neigh_list_lock);
551
552 /* for all neighbors towards this originator ... */
553 hlist_for_each_entry_safe(neigh_node, node_tmp,
554 &orig_node->neigh_list, list) {
555 hlist_del_rcu(&neigh_node->list);
556 batadv_neigh_node_free_ref_now(neigh_node);
557 }
558
559 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
560 &orig_node->ifinfo_list, list) {
561 hlist_del_rcu(&orig_ifinfo->list);
562 batadv_orig_ifinfo_free_ref_now(orig_ifinfo);
563 }
564 spin_unlock_bh(&orig_node->neigh_list_lock);
565
566 batadv_mcast_purge_orig(orig_node);
567
568 /* Free nc_nodes */
569 batadv_nc_purge_orig(orig_node->bat_priv, orig_node, NULL);
570
571 batadv_frag_purge_orig(orig_node, NULL);
572
573 if (orig_node->bat_priv->bat_algo_ops->bat_orig_free)
574 orig_node->bat_priv->bat_algo_ops->bat_orig_free(orig_node);
575
576 kfree(orig_node->tt_buff);
577 kfree(orig_node);
578 }
579
580 /**
581 * batadv_orig_node_free_ref - decrement the orig node refcounter and possibly
582 * schedule an rcu callback for freeing it
583 * @orig_node: the orig node to free
584 */
585 void batadv_orig_node_free_ref(struct batadv_orig_node *orig_node)
586 {
587 if (atomic_dec_and_test(&orig_node->refcount))
588 call_rcu(&orig_node->rcu, batadv_orig_node_free_rcu);
589 }
590
591 /**
592 * batadv_orig_node_free_ref_now - decrement the orig node refcounter and
593 * possibly free it (without rcu callback)
594 * @orig_node: the orig node to free
595 */
596 void batadv_orig_node_free_ref_now(struct batadv_orig_node *orig_node)
597 {
598 if (atomic_dec_and_test(&orig_node->refcount))
599 batadv_orig_node_free_rcu(&orig_node->rcu);
600 }
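
/* Illustrative note, not part of the original file: the *_free_ref() helpers
 * defer the final kfree() through call_rcu(), while the *_free_ref_now()
 * variants free immediately. In this file the _now variants are only used from
 * inside other RCU callbacks such as batadv_orig_node_free_rcu() above, where
 * no RCU reader can still reach the embedded objects, so an immediate free is
 * sufficient. Ordinary code paths stick to the deferred form:
 *
 *	batadv_orig_node_free_ref(orig_node);
 */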
601
602 void batadv_originator_free(struct batadv_priv *bat_priv)
603 {
604 struct batadv_hashtable *hash = bat_priv->orig_hash;
605 struct hlist_node *node_tmp;
606 struct hlist_head *head;
607 spinlock_t *list_lock; /* spinlock to protect write access */
608 struct batadv_orig_node *orig_node;
609 uint32_t i;
610
611 if (!hash)
612 return;
613
614 cancel_delayed_work_sync(&bat_priv->orig_work);
615
616 bat_priv->orig_hash = NULL;
617
618 for (i = 0; i < hash->size; i++) {
619 head = &hash->table[i];
620 list_lock = &hash->list_locks[i];
621
622 spin_lock_bh(list_lock);
623 hlist_for_each_entry_safe(orig_node, node_tmp,
624 head, hash_entry) {
625 hlist_del_rcu(&orig_node->hash_entry);
626 batadv_orig_node_free_ref(orig_node);
627 }
628 spin_unlock_bh(list_lock);
629 }
630
631 batadv_hash_destroy(hash);
632 }
633
634 /**
635 * batadv_orig_node_new - creates a new orig_node
636 * @bat_priv: the bat priv with all the soft interface information
637 * @addr: the mac address of the originator
638 *
639 * Creates a new originator object and initialises all the generic fields.
640 * The new object is not added to the originator list.
641 * Returns the newly created object or NULL on failure.
642 */
643 struct batadv_orig_node *batadv_orig_node_new(struct batadv_priv *bat_priv,
644 const uint8_t *addr)
645 {
646 struct batadv_orig_node *orig_node;
647 struct batadv_orig_node_vlan *vlan;
648 unsigned long reset_time;
649 int i;
650
651 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
652 "Creating new originator: %pM\n", addr);
653
654 orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC);
655 if (!orig_node)
656 return NULL;
657
658 INIT_HLIST_HEAD(&orig_node->neigh_list);
659 INIT_LIST_HEAD(&orig_node->vlan_list);
660 INIT_HLIST_HEAD(&orig_node->ifinfo_list);
661 spin_lock_init(&orig_node->bcast_seqno_lock);
662 spin_lock_init(&orig_node->neigh_list_lock);
663 spin_lock_init(&orig_node->tt_buff_lock);
664 spin_lock_init(&orig_node->tt_lock);
665 spin_lock_init(&orig_node->vlan_list_lock);
666
667 batadv_nc_init_orig(orig_node);
668
669 /* extra reference for return */
670 atomic_set(&orig_node->refcount, 2);
671
672 orig_node->bat_priv = bat_priv;
673 ether_addr_copy(orig_node->orig, addr);
674 batadv_dat_init_orig_node_addr(orig_node);
675 atomic_set(&orig_node->last_ttvn, 0);
676 orig_node->tt_buff = NULL;
677 orig_node->tt_buff_len = 0;
678 orig_node->last_seen = jiffies;
679 reset_time = jiffies - 1 - msecs_to_jiffies(BATADV_RESET_PROTECTION_MS);
680 orig_node->bcast_seqno_reset = reset_time;
681 #ifdef CONFIG_BATMAN_ADV_MCAST
682 orig_node->mcast_flags = BATADV_NO_FLAGS;
683 #endif
684
685 /* create a vlan object for the "untagged" LAN */
686 vlan = batadv_orig_node_vlan_new(orig_node, BATADV_NO_FLAGS);
687 if (!vlan)
688 goto free_orig_node;
689 /* batadv_orig_node_vlan_new() increases the refcounter.
690 * Immediately release vlan since it is not needed anymore in this
691 * context
692 */
693 batadv_orig_node_vlan_free_ref(vlan);
694
695 for (i = 0; i < BATADV_FRAG_BUFFER_COUNT; i++) {
696 INIT_HLIST_HEAD(&orig_node->fragments[i].head);
697 spin_lock_init(&orig_node->fragments[i].lock);
698 orig_node->fragments[i].size = 0;
699 }
700
701 return orig_node;
702 free_orig_node:
703 kfree(orig_node);
704 return NULL;
705 }
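
/* Illustrative sketch, not part of the original file: since the new orig_node
 * is not added to the originator list, callers are presumably expected to
 * insert it into bat_priv->orig_hash themselves (the helper names below are
 * the ones declared in hash.h and originator.h, assumed here, not shown in
 * this file):
 *
 *	orig_node = batadv_orig_node_new(bat_priv, addr);
 *	if (!orig_node)
 *		return NULL;
 *	hash_added = batadv_hash_add(bat_priv->orig_hash, batadv_compare_orig,
 *				     batadv_choose_orig, orig_node,
 *				     &orig_node->hash_entry);
 *	if (hash_added != 0)
 *		... drop the references again and bail out ...
 */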
706
707 /**
708 * batadv_purge_neigh_ifinfo - purge obsolete ifinfo entries from neighbor
709 * @bat_priv: the bat priv with all the soft interface information
710 * @neigh: neigh node which is to be checked
711 */
712 static void
713 batadv_purge_neigh_ifinfo(struct batadv_priv *bat_priv,
714 struct batadv_neigh_node *neigh)
715 {
716 struct batadv_neigh_ifinfo *neigh_ifinfo;
717 struct batadv_hard_iface *if_outgoing;
718 struct hlist_node *node_tmp;
719
720 spin_lock_bh(&neigh->ifinfo_lock);
721
722 /* for all ifinfo objects for this neighbor */
723 hlist_for_each_entry_safe(neigh_ifinfo, node_tmp,
724 &neigh->ifinfo_list, list) {
725 if_outgoing = neigh_ifinfo->if_outgoing;
726
727 /* always keep the default interface */
728 if (if_outgoing == BATADV_IF_DEFAULT)
729 continue;
730
731 /* don't purge if the interface is not (going) down */
732 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
733 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
734 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
735 continue;
736
737 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
738 "neighbor/ifinfo purge: neighbor %pM, iface: %s\n",
739 neigh->addr, if_outgoing->net_dev->name);
740
741 hlist_del_rcu(&neigh_ifinfo->list);
742 batadv_neigh_ifinfo_free_ref(neigh_ifinfo);
743 }
744
745 spin_unlock_bh(&neigh->ifinfo_lock);
746 }
747
748 /**
749 * batadv_purge_orig_ifinfo - purge obsolete ifinfo entries from originator
750 * @bat_priv: the bat priv with all the soft interface information
751 * @orig_node: orig node which is to be checked
752 *
753 * Returns true if any ifinfo entry was purged, false otherwise.
754 */
755 static bool
756 batadv_purge_orig_ifinfo(struct batadv_priv *bat_priv,
757 struct batadv_orig_node *orig_node)
758 {
759 struct batadv_orig_ifinfo *orig_ifinfo;
760 struct batadv_hard_iface *if_outgoing;
761 struct hlist_node *node_tmp;
762 bool ifinfo_purged = false;
763
764 spin_lock_bh(&orig_node->neigh_list_lock);
765
766 /* for all ifinfo objects for this originator */
767 hlist_for_each_entry_safe(orig_ifinfo, node_tmp,
768 &orig_node->ifinfo_list, list) {
769 if_outgoing = orig_ifinfo->if_outgoing;
770
771 /* always keep the default interface */
772 if (if_outgoing == BATADV_IF_DEFAULT)
773 continue;
774
775 /* don't purge if the interface is not (going) down */
776 if ((if_outgoing->if_status != BATADV_IF_INACTIVE) &&
777 (if_outgoing->if_status != BATADV_IF_NOT_IN_USE) &&
778 (if_outgoing->if_status != BATADV_IF_TO_BE_REMOVED))
779 continue;
780
781 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
782 "router/ifinfo purge: originator %pM, iface: %s\n",
783 orig_node->orig, if_outgoing->net_dev->name);
784
785 ifinfo_purged = true;
786
787 hlist_del_rcu(&orig_ifinfo->list);
788 batadv_orig_ifinfo_free_ref(orig_ifinfo);
789 if (orig_node->last_bonding_candidate == orig_ifinfo) {
790 orig_node->last_bonding_candidate = NULL;
791 batadv_orig_ifinfo_free_ref(orig_ifinfo);
792 }
793 }
794
795 spin_unlock_bh(&orig_node->neigh_list_lock);
796
797 return ifinfo_purged;
798 }
799
800
801 /**
802 * batadv_purge_orig_neighbors - purges neighbors from originator
803 * @bat_priv: the bat priv with all the soft interface information
804 * @orig_node: orig node which is to be checked
805 *
806 * Returns true if any neighbor was purged, false otherwise
807 */
808 static bool
809 batadv_purge_orig_neighbors(struct batadv_priv *bat_priv,
810 struct batadv_orig_node *orig_node)
811 {
812 struct hlist_node *node_tmp;
813 struct batadv_neigh_node *neigh_node;
814 bool neigh_purged = false;
815 unsigned long last_seen;
816 struct batadv_hard_iface *if_incoming;
817
818 spin_lock_bh(&orig_node->neigh_list_lock);
819
820 /* for all neighbors towards this originator ... */
821 hlist_for_each_entry_safe(neigh_node, node_tmp,
822 &orig_node->neigh_list, list) {
823 last_seen = neigh_node->last_seen;
824 if_incoming = neigh_node->if_incoming;
825
826 if ((batadv_has_timed_out(last_seen, BATADV_PURGE_TIMEOUT)) ||
827 (if_incoming->if_status == BATADV_IF_INACTIVE) ||
828 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
829 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED)) {
830 if ((if_incoming->if_status == BATADV_IF_INACTIVE) ||
831 (if_incoming->if_status == BATADV_IF_NOT_IN_USE) ||
832 (if_incoming->if_status == BATADV_IF_TO_BE_REMOVED))
833 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
834 "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n",
835 orig_node->orig, neigh_node->addr,
836 if_incoming->net_dev->name);
837 else
838 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
839 "neighbor timeout: originator %pM, neighbor: %pM, last_seen: %u\n",
840 orig_node->orig, neigh_node->addr,
841 jiffies_to_msecs(last_seen));
842
843 neigh_purged = true;
844
845 hlist_del_rcu(&neigh_node->list);
846 batadv_neigh_node_free_ref(neigh_node);
847 } else {
848 /* only necessary if the neighbor is not deleted as a
849 * whole, but some interface has been removed.
850 */
851 batadv_purge_neigh_ifinfo(bat_priv, neigh_node);
852 }
853 }
854
855 spin_unlock_bh(&orig_node->neigh_list_lock);
856 return neigh_purged;
857 }
858
859 /**
860 * batadv_find_best_neighbor - finds the best neighbor after purging
861 * @bat_priv: the bat priv with all the soft interface information
862 * @orig_node: orig node which is to be checked
863 * @if_outgoing: the interface for which the metric should be compared
864 *
865 * Returns the current best neighbor, with refcount increased.
866 */
867 static struct batadv_neigh_node *
868 batadv_find_best_neighbor(struct batadv_priv *bat_priv,
869 struct batadv_orig_node *orig_node,
870 struct batadv_hard_iface *if_outgoing)
871 {
872 struct batadv_neigh_node *best = NULL, *neigh;
873 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
874
875 rcu_read_lock();
876 hlist_for_each_entry_rcu(neigh, &orig_node->neigh_list, list) {
877 if (best && (bao->bat_neigh_cmp(neigh, if_outgoing,
878 best, if_outgoing) <= 0))
879 continue;
880
881 if (!atomic_inc_not_zero(&neigh->refcount))
882 continue;
883
884 if (best)
885 batadv_neigh_node_free_ref(best);
886
887 best = neigh;
888 }
889 rcu_read_unlock();
890
891 return best;
892 }
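
/* Illustrative note, not part of the original file: judging from the loop
 * above, bao->bat_neigh_cmp() follows the usual comparator convention, i.e. a
 * result > 0 means the first neighbor offers the better metric towards
 * if_outgoing, so batadv_find_best_neighbor() keeps the maximum while holding
 * a reference on the current best candidate only. A routing algorithm would
 * plug in something along these lines (my_metric is a made-up helper):
 *
 *	static int my_algo_neigh_cmp(struct batadv_neigh_node *neigh1,
 *				     struct batadv_hard_iface *if_outgoing1,
 *				     struct batadv_neigh_node *neigh2,
 *				     struct batadv_hard_iface *if_outgoing2)
 *	{
 *		return my_metric(neigh1, if_outgoing1) -
 *		       my_metric(neigh2, if_outgoing2);
 *	}
 */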
893
894 /**
895 * batadv_purge_orig_node - purges obsolete information from an orig_node
896 * @bat_priv: the bat priv with all the soft interface information
897 * @orig_node: orig node which is to be checked
898 *
899 * This function checks if the orig_node or substructures of it have become
900 * obsolete, and purges this information if that's the case.
901 *
902 * Returns true if the orig_node is to be removed, false otherwise.
903 */
904 static bool batadv_purge_orig_node(struct batadv_priv *bat_priv,
905 struct batadv_orig_node *orig_node)
906 {
907 struct batadv_neigh_node *best_neigh_node;
908 struct batadv_hard_iface *hard_iface;
909 bool changed_ifinfo, changed_neigh;
910
911 if (batadv_has_timed_out(orig_node->last_seen,
912 2 * BATADV_PURGE_TIMEOUT)) {
913 batadv_dbg(BATADV_DBG_BATMAN, bat_priv,
914 "Originator timeout: originator %pM, last_seen %u\n",
915 orig_node->orig,
916 jiffies_to_msecs(orig_node->last_seen));
917 return true;
918 }
919 changed_ifinfo = batadv_purge_orig_ifinfo(bat_priv, orig_node);
920 changed_neigh = batadv_purge_orig_neighbors(bat_priv, orig_node);
921
922 if (!changed_ifinfo && !changed_neigh)
923 return false;
924
925 /* first for the default interface (BATADV_IF_DEFAULT) ... */
926 best_neigh_node = batadv_find_best_neighbor(bat_priv, orig_node,
927 BATADV_IF_DEFAULT);
928 batadv_update_route(bat_priv, orig_node, BATADV_IF_DEFAULT,
929 best_neigh_node);
930 if (best_neigh_node)
931 batadv_neigh_node_free_ref(best_neigh_node);
932
933 /* ... then for all other interfaces. */
934 rcu_read_lock();
935 list_for_each_entry_rcu(hard_iface, &batadv_hardif_list, list) {
936 if (hard_iface->if_status != BATADV_IF_ACTIVE)
937 continue;
938
939 if (hard_iface->soft_iface != bat_priv->soft_iface)
940 continue;
941
942 best_neigh_node = batadv_find_best_neighbor(bat_priv,
943 orig_node,
944 hard_iface);
945 batadv_update_route(bat_priv, orig_node, hard_iface,
946 best_neigh_node);
947 if (best_neigh_node)
948 batadv_neigh_node_free_ref(best_neigh_node);
949 }
950 rcu_read_unlock();
951
952 return false;
953 }
954
955 static void _batadv_purge_orig(struct batadv_priv *bat_priv)
956 {
957 struct batadv_hashtable *hash = bat_priv->orig_hash;
958 struct hlist_node *node_tmp;
959 struct hlist_head *head;
960 spinlock_t *list_lock; /* spinlock to protect write access */
961 struct batadv_orig_node *orig_node;
962 uint32_t i;
963
964 if (!hash)
965 return;
966
967 /* for all origins... */
968 for (i = 0; i < hash->size; i++) {
969 head = &hash->table[i];
970 list_lock = &hash->list_locks[i];
971
972 spin_lock_bh(list_lock);
973 hlist_for_each_entry_safe(orig_node, node_tmp,
974 head, hash_entry) {
975 if (batadv_purge_orig_node(bat_priv, orig_node)) {
976 batadv_gw_node_delete(bat_priv, orig_node);
977 hlist_del_rcu(&orig_node->hash_entry);
978 batadv_tt_global_del_orig(orig_node->bat_priv,
979 orig_node, -1,
980 "originator timed out");
981 batadv_orig_node_free_ref(orig_node);
982 continue;
983 }
984
985 batadv_frag_purge_orig(orig_node,
986 batadv_frag_check_entry);
987 }
988 spin_unlock_bh(list_lock);
989 }
990
991 batadv_gw_node_purge(bat_priv);
992 batadv_gw_election(bat_priv);
993 }
994
995 static void batadv_purge_orig(struct work_struct *work)
996 {
997 struct delayed_work *delayed_work;
998 struct batadv_priv *bat_priv;
999
1000 delayed_work = container_of(work, struct delayed_work, work);
1001 bat_priv = container_of(delayed_work, struct batadv_priv, orig_work);
1002 _batadv_purge_orig(bat_priv);
1003 queue_delayed_work(batadv_event_workqueue,
1004 &bat_priv->orig_work,
1005 msecs_to_jiffies(BATADV_ORIG_WORK_PERIOD));
1006 }
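
/* Illustrative sketch, not part of the original file: batadv_purge_orig() uses
 * the common self-rearming delayed work pattern, where the handler requeues
 * itself at the end of each run. A minimal standalone version of the same
 * pattern (my_priv, my_work_fn and do_periodic_cleanup are made-up names):
 *
 *	static void my_work_fn(struct work_struct *work)
 *	{
 *		struct delayed_work *dwork = to_delayed_work(work);
 *		struct my_priv *priv = container_of(dwork, struct my_priv,
 *						    work);
 *
 *		do_periodic_cleanup(priv);
 *		queue_delayed_work(system_wq, dwork, msecs_to_jiffies(1000));
 *	}
 */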
1007
1008 void batadv_purge_orig_ref(struct batadv_priv *bat_priv)
1009 {
1010 _batadv_purge_orig(bat_priv);
1011 }
1012
1013 int batadv_orig_seq_print_text(struct seq_file *seq, void *offset)
1014 {
1015 struct net_device *net_dev = (struct net_device *)seq->private;
1016 struct batadv_priv *bat_priv = netdev_priv(net_dev);
1017 struct batadv_hard_iface *primary_if;
1018
1019 primary_if = batadv_seq_print_text_primary_if_get(seq);
1020 if (!primary_if)
1021 return 0;
1022
1023 seq_printf(seq, "[B.A.T.M.A.N. adv %s, MainIF/MAC: %s/%pM (%s %s)]\n",
1024 BATADV_SOURCE_VERSION, primary_if->net_dev->name,
1025 primary_if->net_dev->dev_addr, net_dev->name,
1026 bat_priv->bat_algo_ops->name);
1027
1028 batadv_hardif_free_ref(primary_if);
1029
1030 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1031 seq_puts(seq,
1032 "No printing function for this routing protocol\n");
1033 return 0;
1034 }
1035
1036 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq,
1037 BATADV_IF_DEFAULT);
1038
1039 return 0;
1040 }
1041
1042 /**
1043 * batadv_orig_hardif_seq_print_text - writes originator infos for a specific
1044 * outgoing interface
1045 * @seq: debugfs table seq_file struct
1046 * @offset: not used
1047 *
1048 * Returns 0
1049 */
1050 int batadv_orig_hardif_seq_print_text(struct seq_file *seq, void *offset)
1051 {
1052 struct net_device *net_dev = (struct net_device *)seq->private;
1053 struct batadv_hard_iface *hard_iface;
1054 struct batadv_priv *bat_priv;
1055
1056 hard_iface = batadv_hardif_get_by_netdev(net_dev);
1057
1058 if (!hard_iface || !hard_iface->soft_iface) {
1059 seq_puts(seq, "Interface not known to B.A.T.M.A.N.\n");
1060 goto out;
1061 }
1062
1063 bat_priv = netdev_priv(hard_iface->soft_iface);
1064 if (!bat_priv->bat_algo_ops->bat_orig_print) {
1065 seq_puts(seq,
1066 "No printing function for this routing protocol\n");
1067 goto out;
1068 }
1069
1070 if (hard_iface->if_status != BATADV_IF_ACTIVE) {
1071 seq_puts(seq, "Interface not active\n");
1072 goto out;
1073 }
1074
1075 seq_printf(seq, "[B.A.T.M.A.N. adv %s, IF/MAC: %s/%pM (%s %s)]\n",
1076 BATADV_SOURCE_VERSION, hard_iface->net_dev->name,
1077 hard_iface->net_dev->dev_addr,
1078 hard_iface->soft_iface->name, bat_priv->bat_algo_ops->name);
1079
1080 bat_priv->bat_algo_ops->bat_orig_print(bat_priv, seq, hard_iface);
1081
1082 out:
1083 if (hard_iface)
1084 batadv_hardif_free_ref(hard_iface);
1085 return 0;
1086 }
1087
1088 int batadv_orig_hash_add_if(struct batadv_hard_iface *hard_iface,
1089 int max_if_num)
1090 {
1091 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1092 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1093 struct batadv_hashtable *hash = bat_priv->orig_hash;
1094 struct hlist_head *head;
1095 struct batadv_orig_node *orig_node;
1096 uint32_t i;
1097 int ret;
1098
1099 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1100 * if_num
1101 */
1102 for (i = 0; i < hash->size; i++) {
1103 head = &hash->table[i];
1104
1105 rcu_read_lock();
1106 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1107 ret = 0;
1108 if (bao->bat_orig_add_if)
1109 ret = bao->bat_orig_add_if(orig_node,
1110 max_if_num);
1111 if (ret == -ENOMEM)
1112 goto err;
1113 }
1114 rcu_read_unlock();
1115 }
1116
1117 return 0;
1118
1119 err:
1120 rcu_read_unlock();
1121 return -ENOMEM;
1122 }
1123
1124 int batadv_orig_hash_del_if(struct batadv_hard_iface *hard_iface,
1125 int max_if_num)
1126 {
1127 struct batadv_priv *bat_priv = netdev_priv(hard_iface->soft_iface);
1128 struct batadv_hashtable *hash = bat_priv->orig_hash;
1129 struct hlist_head *head;
1130 struct batadv_hard_iface *hard_iface_tmp;
1131 struct batadv_orig_node *orig_node;
1132 struct batadv_algo_ops *bao = bat_priv->bat_algo_ops;
1133 uint32_t i;
1134 int ret;
1135
1136 /* resize all orig nodes because orig_node->bcast_own(_sum) depend on
1137 * if_num
1138 */
1139 for (i = 0; i < hash->size; i++) {
1140 head = &hash->table[i];
1141
1142 rcu_read_lock();
1143 hlist_for_each_entry_rcu(orig_node, head, hash_entry) {
1144 ret = 0;
1145 if (bao->bat_orig_del_if)
1146 ret = bao->bat_orig_del_if(orig_node,
1147 max_if_num,
1148 hard_iface->if_num);
1149 if (ret == -ENOMEM)
1150 goto err;
1151 }
1152 rcu_read_unlock();
1153 }
1154
1155 /* renumber remaining batman interfaces _inside_ of orig_hash_lock */
1156 rcu_read_lock();
1157 list_for_each_entry_rcu(hard_iface_tmp, &batadv_hardif_list, list) {
1158 if (hard_iface_tmp->if_status == BATADV_IF_NOT_IN_USE)
1159 continue;
1160
1161 if (hard_iface == hard_iface_tmp)
1162 continue;
1163
1164 if (hard_iface->soft_iface != hard_iface_tmp->soft_iface)
1165 continue;
1166
1167 if (hard_iface_tmp->if_num > hard_iface->if_num)
1168 hard_iface_tmp->if_num--;
1169 }
1170 rcu_read_unlock();
1171
1172 hard_iface->if_num = -1;
1173 return 0;
1174
1175 err:
1176 rcu_read_unlock();
1177 return -ENOMEM;
1178 }