net/batman-adv/vis.c
/*
 * Copyright (C) 2008-2011 B.A.T.M.A.N. contributors:
 *
 * Simon Wunderlich
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA
 *
 */

#include "main.h"
#include "send.h"
#include "translation-table.h"
#include "vis.h"
#include "soft-interface.h"
#include "hard-interface.h"
#include "hash.h"
#include "originator.h"

#define MAX_VIS_PACKET_SIZE 1000

/* Returns the smallest signed integer in two's complement with the same
 * width as x, i.e. the value with only the sign bit set, as an unsigned
 * value */
#define smallest_signed_int(x) (1u << (7u + 8u * (sizeof(x) - 1u)))

/* Checks if a sequence number x is a predecessor/successor of y.
 * The two macros handle overflows/underflows and can correctly check for a
 * predecessor/successor unless the variable sequence number has grown by
 * more than 2**(bitwidth(x)-1)-1.
 * This means that for a uint8_t with the maximum value 255, it would think:
 *  - when adding nothing - it is neither a predecessor nor a successor
 *  - before adding more than 127 to the starting value - it is a predecessor,
 *  - when adding 128 - it is neither a predecessor nor a successor,
 *  - after adding more than 127 to the starting value - it is a successor */
#define seq_before(x, y) ({typeof(x) _dummy = (x - y); \
			_dummy > smallest_signed_int(_dummy); })
#define seq_after(x, y) seq_before(y, x)
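
/* Worked example for uint8_t sequence numbers (wrap-around at 256):
 * seq_before(250, 5) is true, because (uint8_t)(250 - 5) == 245 > 128,
 * i.e. 250 is treated as older than the post-wrap value 5, and
 * seq_after(5, 250) is therefore true as well; seq_before(5, 250) is
 * false since (uint8_t)(5 - 250) == 11. For the 8-bit case
 * smallest_signed_int() evaluates to 1u << 7 == 128 (0x80). */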

static void start_vis_timer(struct bat_priv *bat_priv);

/* free the info */
static void free_info(struct kref *ref)
{
	struct vis_info *info = container_of(ref, struct vis_info, refcount);
	struct bat_priv *bat_priv = info->bat_priv;
	struct recvlist_node *entry, *tmp;

	list_del_init(&info->send_list);
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry_safe(entry, tmp, &info->recv_list, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	spin_unlock_bh(&bat_priv->vis_list_lock);
	kfree_skb(info->skb_packet);
	kfree(info);
}

/* Compare two vis packets, used by the hashing algorithm */
static int vis_info_cmp(struct hlist_node *node, void *data2)
{
	struct vis_info *d1, *d2;
	struct vis_packet *p1, *p2;

	d1 = container_of(node, struct vis_info, hash_entry);
	d2 = data2;
	p1 = (struct vis_packet *)d1->skb_packet->data;
	p2 = (struct vis_packet *)d2->skb_packet->data;
	return compare_eth(p1->vis_orig, p2->vis_orig);
}

/* hash function to choose an entry in a hash table of given size */
/* hash algorithm from http://en.wikipedia.org/wiki/Hash_table */
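/* (this is the Jenkins one-at-a-time hash, keyed on the 6-byte originator
 * address and reduced modulo the table size) */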
static int vis_info_choose(void *data, int size)
{
	struct vis_info *vis_info = data;
	struct vis_packet *packet;
	unsigned char *key;
	uint32_t hash = 0;
	size_t i;

	packet = (struct vis_packet *)vis_info->skb_packet->data;
	key = packet->vis_orig;
	for (i = 0; i < ETH_ALEN; i++) {
		hash += key[i];
		hash += (hash << 10);
		hash ^= (hash >> 6);
	}

	hash += (hash << 3);
	hash ^= (hash >> 11);
	hash += (hash << 15);

	return hash % size;
}

static struct vis_info *vis_hash_find(struct bat_priv *bat_priv,
				      void *data)
{
	struct hashtable_t *hash = bat_priv->vis_hash;
	struct hlist_head *head;
	struct hlist_node *node;
	struct vis_info *vis_info, *vis_info_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = vis_info_choose(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) {
		if (!vis_info_cmp(node, data))
			continue;

		vis_info_tmp = vis_info;
		break;
	}
	rcu_read_unlock();

	return vis_info_tmp;
}

/* insert an interface into the list of interfaces of one originator, if it
 * does not already exist in the list */
static void vis_data_insert_interface(const uint8_t *interface,
				      struct hlist_head *if_list,
				      bool primary)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;

	hlist_for_each_entry(entry, pos, if_list, list) {
		if (compare_eth(entry->addr, (void *)interface))
			return;
	}

	/* it's a new address, add it to the list */
	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return;
	memcpy(entry->addr, interface, ETH_ALEN);
	entry->primary = primary;
	hlist_add_head(&entry->list, if_list);
}

static ssize_t vis_data_read_prim_sec(char *buff, struct hlist_head *if_list)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;
	size_t len = 0;

	hlist_for_each_entry(entry, pos, if_list, list) {
		if (entry->primary)
			len += sprintf(buff + len, "PRIMARY, ");
		else
			len += sprintf(buff + len, "SEC %pM, ", entry->addr);
	}

	return len;
}

static size_t vis_data_count_prim_sec(struct hlist_head *if_list)
{
	struct if_list_entry *entry;
	struct hlist_node *pos;
	size_t count = 0;

	hlist_for_each_entry(entry, pos, if_list, list) {
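		/* sizes of the strings emitted by vis_data_read_prim_sec():
		 * "PRIMARY, " is 9 bytes, "SEC " plus a 17-byte MAC plus
		 * ", " is 23 bytes */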
		if (entry->primary)
			count += 9;
		else
			count += 23;
	}

	return count;
}

/* read an entry */
static ssize_t vis_data_read_entry(char *buff, struct vis_info_entry *entry,
				   uint8_t *src, bool primary)
{
	/* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */
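	/* i.e. the longer of "HNA " + MAC + ", " and "TQ " + MAC + " " +
	 * up to three TQ digits + ", "; vis_seq_print_text() relies on this
	 * 26-byte bound when sizing its output buffer */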
	if (primary && entry->quality == 0)
		return sprintf(buff, "HNA %pM, ", entry->dest);
	else if (compare_eth(entry->src, src))
		return sprintf(buff, "TQ %pM %d, ", entry->dest,
			       entry->quality);

	return 0;
}

int vis_seq_print_text(struct seq_file *seq, void *offset)
{
	struct hlist_node *node;
	struct hlist_head *head;
	struct vis_info *info;
	struct vis_packet *packet;
	struct vis_info_entry *entries;
	struct net_device *net_dev = (struct net_device *)seq->private;
	struct bat_priv *bat_priv = netdev_priv(net_dev);
	struct hashtable_t *hash = bat_priv->vis_hash;
	HLIST_HEAD(vis_if_list);
	struct if_list_entry *entry;
	struct hlist_node *pos, *n;
	int i, j;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	size_t buff_pos, buf_size;
	char *buff;
	int compare;

	if ((!bat_priv->primary_if) ||
	    (vis_server == VIS_TYPE_CLIENT_UPDATE))
		return 0;

	buf_size = 1;
	/* Estimate length */
	spin_lock_bh(&bat_priv->vis_hash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
			packet = (struct vis_packet *)info->skb_packet->data;
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				 compare_eth(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
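				/* worst case per interface line: 17-byte MAC
				 * plus ',' (18 bytes) and 26 bytes for each
				 * vis entry, see vis_data_read_entry() */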
				buf_size += 18 + 26 * packet->entries;

				/* add primary/secondary records */
				if (compare_eth(entry->addr, packet->vis_orig))
					buf_size +=
					  vis_data_count_prim_sec(&vis_if_list);

				buf_size += 1;
			}

			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
		rcu_read_unlock();
	}

	buff = kmalloc(buf_size, GFP_ATOMIC);
	if (!buff) {
		spin_unlock_bh(&bat_priv->vis_hash_lock);
		return -ENOMEM;
	}
	buff[0] = '\0';
	buff_pos = 0;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(info, node, head, hash_entry) {
			packet = (struct vis_packet *)info->skb_packet->data;
			entries = (struct vis_info_entry *)
				((char *)packet + sizeof(struct vis_packet));

			for (j = 0; j < packet->entries; j++) {
				if (entries[j].quality == 0)
					continue;
				compare =
				 compare_eth(entries[j].src, packet->vis_orig);
				vis_data_insert_interface(entries[j].src,
							  &vis_if_list,
							  compare);
			}

			hlist_for_each_entry(entry, pos, &vis_if_list, list) {
				buff_pos += sprintf(buff + buff_pos, "%pM,",
						    entry->addr);

				for (j = 0; j < packet->entries; j++)
					buff_pos += vis_data_read_entry(
							buff + buff_pos,
							&entries[j],
							entry->addr,
							entry->primary);

				/* add primary/secondary records */
				if (compare_eth(entry->addr, packet->vis_orig))
					buff_pos +=
					 vis_data_read_prim_sec(buff + buff_pos,
								&vis_if_list);

				buff_pos += sprintf(buff + buff_pos, "\n");
			}

			hlist_for_each_entry_safe(entry, pos, n, &vis_if_list,
						  list) {
				hlist_del(&entry->list);
				kfree(entry);
			}
		}
		rcu_read_unlock();
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);

	seq_printf(seq, "%s", buff);
	kfree(buff);

	return 0;
}

/* add the info packet to the send list, if it was not
 * already linked in. */
static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info)
{
	if (list_empty(&info->send_list)) {
		kref_get(&info->refcount);
		list_add_tail(&info->send_list, &bat_priv->vis_send_list);
	}
}

/* delete the info packet from the send list, if it was
 * linked in. */
static void send_list_del(struct vis_info *info)
{
	if (!list_empty(&info->send_list)) {
		list_del_init(&info->send_list);
		kref_put(&info->refcount, free_info);
	}
}

/* tries to add one entry to the receive list. */
static void recv_list_add(struct bat_priv *bat_priv,
			  struct list_head *recv_list, char *mac)
{
	struct recvlist_node *entry;

	entry = kmalloc(sizeof(struct recvlist_node), GFP_ATOMIC);
	if (!entry)
		return;

	memcpy(entry->mac, mac, ETH_ALEN);
	spin_lock_bh(&bat_priv->vis_list_lock);
	list_add_tail(&entry->list, recv_list);
	spin_unlock_bh(&bat_priv->vis_list_lock);
}

/* returns 1 if this mac is in the recv_list */
static int recv_list_is_in(struct bat_priv *bat_priv,
			   struct list_head *recv_list, char *mac)
{
	struct recvlist_node *entry;

	spin_lock_bh(&bat_priv->vis_list_lock);
	list_for_each_entry(entry, recv_list, list) {
		if (compare_eth(entry->mac, mac)) {
			spin_unlock_bh(&bat_priv->vis_list_lock);
			return 1;
		}
	}
	spin_unlock_bh(&bat_priv->vis_list_lock);
	return 0;
}

/* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old,
 * broken...). The vis hash must be locked by the caller. is_new is set when
 * the packet is newer than old entries in the hash. */
static struct vis_info *add_packet(struct bat_priv *bat_priv,
				   struct vis_packet *vis_packet,
				   int vis_info_len, int *is_new,
				   int make_broadcast)
{
	struct vis_info *info, *old_info;
	struct vis_packet *search_packet, *old_packet;
	struct vis_info search_elem;
	struct vis_packet *packet;
	int hash_added;

	*is_new = 0;
	/* sanity check */
	if (!bat_priv->vis_hash)
		return NULL;

	/* see if the packet is already in vis_hash */
	search_elem.skb_packet = dev_alloc_skb(sizeof(struct vis_packet));
	if (!search_elem.skb_packet)
		return NULL;
	search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet,
						     sizeof(struct vis_packet));

	memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN);
	old_info = vis_hash_find(bat_priv, &search_elem);
	kfree_skb(search_elem.skb_packet);

	if (old_info) {
		old_packet = (struct vis_packet *)old_info->skb_packet->data;
		if (!seq_after(ntohl(vis_packet->seqno),
			       ntohl(old_packet->seqno))) {
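			/* seqnos are stored in network byte order; byte order
			 * does not matter for the equality test below */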
			if (old_packet->seqno == vis_packet->seqno) {
				recv_list_add(bat_priv, &old_info->recv_list,
					      vis_packet->sender_orig);
				return old_info;
			} else {
				/* newer packet is already in hash. */
				return NULL;
			}
		}
		/* remove old entry */
		hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			    old_info);
		send_list_del(old_info);
		kref_put(&old_info->refcount, free_info);
	}

	info = kmalloc(sizeof(struct vis_info), GFP_ATOMIC);
	if (!info)
		return NULL;

	info->skb_packet = dev_alloc_skb(sizeof(struct vis_packet) +
					 vis_info_len + sizeof(struct ethhdr));
	if (!info->skb_packet) {
		kfree(info);
		return NULL;
	}
	skb_reserve(info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(info->skb_packet,
					      sizeof(struct vis_packet) +
					      vis_info_len);

	kref_init(&info->refcount);
	INIT_LIST_HEAD(&info->send_list);
	INIT_LIST_HEAD(&info->recv_list);
	info->first_seen = jiffies;
	info->bat_priv = bat_priv;
	memcpy(packet, vis_packet, sizeof(struct vis_packet) + vis_info_len);

	/* initialize and add new packet. */
	*is_new = 1;

	/* Make it a broadcast packet, if required */
	if (make_broadcast)
		memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);

	/* repair if entries is longer than packet. */
	if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len)
		packet->entries = vis_info_len / sizeof(struct vis_info_entry);

	recv_list_add(bat_priv, &info->recv_list, packet->sender_orig);

	/* try to add it */
	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      info, &info->hash_entry);
	if (hash_added < 0) {
		/* did not work (for some reason) */
		kref_put(&info->refcount, free_info);
		info = NULL;
	}

	return info;
}

/* handle the server sync packet, forward if needed. */
void receive_server_sync_packet(struct bat_priv *bat_priv,
				struct vis_packet *vis_packet,
				int vis_info_len)
{
	struct vis_info *info;
	int is_new, make_broadcast;
	int vis_server = atomic_read(&bat_priv->vis_mode);

	make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, make_broadcast);
	if (!info)
		goto end;

	/* only if we are a sync server ourselves and the packet is newer than
	 * the one in the hash. */
	if (vis_server == VIS_TYPE_SERVER_SYNC && is_new)
		send_list_add(bat_priv, info);
end:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* handle an incoming client update packet and schedule forward if needed. */
void receive_client_update_packet(struct bat_priv *bat_priv,
				  struct vis_packet *vis_packet,
				  int vis_info_len)
{
	struct vis_info *info;
	struct vis_packet *packet;
	int is_new;
	int vis_server = atomic_read(&bat_priv->vis_mode);
	int are_target = 0;

	/* clients shall not broadcast. */
	if (is_broadcast_ether_addr(vis_packet->target_orig))
		return;

	/* Are we the target for this VIS packet? */
	if (vis_server == VIS_TYPE_SERVER_SYNC &&
	    is_my_mac(vis_packet->target_orig))
		are_target = 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	info = add_packet(bat_priv, vis_packet, vis_info_len,
			  &is_new, are_target);

	if (!info)
		goto end;
	/* note that outdated packets will be dropped at this point. */

	packet = (struct vis_packet *)info->skb_packet->data;

	/* send only if we're the target server or ... */
	if (are_target && is_new) {
		packet->vis_type = VIS_TYPE_SERVER_SYNC;	/* upgrade! */
		send_list_add(bat_priv, info);

		/* ... we're not the recipient (and thus need to forward). */
	} else if (!is_my_mac(packet->target_orig)) {
		send_list_add(bat_priv, info);
	}

end:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* Walk the originators and find the VIS server with the best tq. Set the
 * packet's target address to its address and return the best_tq.
 *
 * Must be called with the originator hash locked */
static int find_best_vis_server(struct bat_priv *bat_priv,
				struct vis_info *info)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	int best_tq = -1, i;

	packet = (struct vis_packet *)info->skb_packet->data;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			if ((orig_node) && (orig_node->router) &&
			    (orig_node->flags & VIS_SERVER) &&
			    (orig_node->router->tq_avg > best_tq)) {
				best_tq = orig_node->router->tq_avg;
				memcpy(packet->target_orig, orig_node->orig,
				       ETH_ALEN);
			}
		}
		rcu_read_unlock();
	}

	return best_tq;
}

/* Return true if the vis packet is full. */
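/* (assuming the on-wire struct vis_info_entry is packed to 13 bytes - two
 * 6-byte MAC addresses plus one quality byte - the 1000-byte
 * MAX_VIS_PACKET_SIZE caps a packet at 76 entries) */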
static bool vis_packet_full(struct vis_info *info)
{
	struct vis_packet *packet;
	packet = (struct vis_packet *)info->skb_packet->data;

	if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry)
	    < packet->entries + 1)
		return true;
	return false;
}

/* generates a packet of own vis data,
 * returns 0 on success, -1 if no packet could be generated */
static int generate_vis_packet(struct bat_priv *bat_priv)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct neigh_node *neigh_node;
	struct vis_info *info = (struct vis_info *)bat_priv->my_vis_info;
	struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data;
	struct vis_info_entry *entry;
	struct hna_local_entry *hna_local_entry;
	int best_tq = -1, i;

	info->first_seen = jiffies;
	packet->vis_type = atomic_read(&bat_priv->vis_mode);

	memcpy(packet->target_orig, broadcast_addr, ETH_ALEN);
	packet->ttl = TTL;
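	/* the seqno is kept in network byte order inside the stored packet,
	 * hence the ntohl/htonl round trip for the increment */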
	packet->seqno = htonl(ntohl(packet->seqno) + 1);
	packet->entries = 0;
	skb_trim(info->skb_packet, sizeof(struct vis_packet));

	if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) {
		best_tq = find_best_vis_server(bat_priv, info);

		if (best_tq < 0)
			return -1;
	}

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			neigh_node = orig_node->router;

			if (!neigh_node)
				continue;

			if (!compare_eth(neigh_node->addr, orig_node->orig))
				continue;

			if (neigh_node->if_incoming->if_status != IF_ACTIVE)
				continue;

			if (neigh_node->tq_avg < 1)
				continue;

			/* fill one entry into buffer. */
			entry = (struct vis_info_entry *)
				skb_put(info->skb_packet, sizeof(*entry));
			memcpy(entry->src,
			       neigh_node->if_incoming->net_dev->dev_addr,
			       ETH_ALEN);
			memcpy(entry->dest, orig_node->orig, ETH_ALEN);
			entry->quality = neigh_node->tq_avg;
			packet->entries++;

			if (vis_packet_full(info))
				goto unlock;
		}
		rcu_read_unlock();
	}

	hash = bat_priv->hna_local_hash;

	spin_lock_bh(&bat_priv->hna_lhash_lock);
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry(hna_local_entry, node, head, hash_entry) {
			entry = (struct vis_info_entry *)
				skb_put(info->skb_packet,
					sizeof(*entry));
			memset(entry->src, 0, ETH_ALEN);
			memcpy(entry->dest, hna_local_entry->addr, ETH_ALEN);
			entry->quality = 0; /* 0 means HNA */
			packet->entries++;

			if (vis_packet_full(info)) {
				spin_unlock_bh(&bat_priv->hna_lhash_lock);
				return 0;
			}
		}
	}

	spin_unlock_bh(&bat_priv->hna_lhash_lock);
	return 0;

unlock:
	rcu_read_unlock();
	return 0;
}

/* free old vis packets. Must be called with the vis_hash_lock
 * held */
static void purge_vis_packets(struct bat_priv *bat_priv)
{
	int i;
	struct hashtable_t *hash = bat_priv->vis_hash;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	struct vis_info *info;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		hlist_for_each_entry_safe(info, node, node_tmp,
					  head, hash_entry) {
			/* never purge own data. */
			if (info == bat_priv->my_vis_info)
				continue;

			if (time_after(jiffies,
				       info->first_seen + VIS_TIMEOUT * HZ)) {
				hlist_del(node);
				send_list_del(info);
				kref_put(&info->refcount, free_info);
			}
		}
	}
}

static void broadcast_vis_packet(struct bat_priv *bat_priv,
				 struct vis_info *info)
{
	struct hashtable_t *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	struct orig_node *orig_node;
	struct vis_packet *packet;
	struct sk_buff *skb;
	struct hard_iface *hard_iface;
	uint8_t dstaddr[ETH_ALEN];
	int i;


	packet = (struct vis_packet *)info->skb_packet->data;

	/* send to all routers in range. */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			/* if it's a vis server and reachable, send it. */
			if ((!orig_node) || (!orig_node->router))
				continue;
			if (!(orig_node->flags & VIS_SERVER))
				continue;
			/* don't send it if we already received the packet from
			 * this node. */
			if (recv_list_is_in(bat_priv, &info->recv_list,
					    orig_node->orig))
				continue;

			memcpy(packet->target_orig, orig_node->orig, ETH_ALEN);
			hard_iface = orig_node->router->if_incoming;
			memcpy(dstaddr, orig_node->router->addr, ETH_ALEN);

			skb = skb_clone(info->skb_packet, GFP_ATOMIC);
			if (skb)
				send_skb_packet(skb, hard_iface, dstaddr);

		}
		rcu_read_unlock();
	}
}

static void unicast_vis_packet(struct bat_priv *bat_priv,
			       struct vis_info *info)
{
	struct orig_node *orig_node;
	struct neigh_node *neigh_node = NULL;
	struct sk_buff *skb;
	struct vis_packet *packet;

	packet = (struct vis_packet *)info->skb_packet->data;

	rcu_read_lock();
	orig_node = orig_hash_find(bat_priv, packet->target_orig);

	if (!orig_node)
		goto unlock;

	neigh_node = orig_node->router;

	if (!neigh_node)
		goto unlock;

	if (!atomic_inc_not_zero(&neigh_node->refcount)) {
		neigh_node = NULL;
		goto unlock;
	}

	rcu_read_unlock();

	skb = skb_clone(info->skb_packet, GFP_ATOMIC);
	if (skb)
		send_skb_packet(skb, neigh_node->if_incoming,
				neigh_node->addr);

	goto out;

unlock:
	rcu_read_unlock();
out:
	if (neigh_node)
		neigh_node_free_ref(neigh_node);
	if (orig_node)
		orig_node_free_ref(orig_node);
	return;
}

/* only send one vis packet. called from send_vis_packets() */
static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info)
{
	struct vis_packet *packet;

	packet = (struct vis_packet *)info->skb_packet->data;
	if (packet->ttl < 2) {
		pr_debug("Error - can't send vis packet: ttl exceeded\n");
		return;
	}

	memcpy(packet->sender_orig, bat_priv->primary_if->net_dev->dev_addr,
	       ETH_ALEN);
	packet->ttl--;

	if (is_broadcast_ether_addr(packet->target_orig))
		broadcast_vis_packet(bat_priv, info);
	else
		unicast_vis_packet(bat_priv, info);
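	/* put the TTL back afterwards: info->skb_packet stays cached in the
	 * vis hash and is reused for later transmissions */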
	packet->ttl++; /* restore TTL */
}

/* called from timer; send (and maybe generate) vis packet. */
static void send_vis_packets(struct work_struct *work)
{
	struct delayed_work *delayed_work =
		container_of(work, struct delayed_work, work);
	struct bat_priv *bat_priv =
		container_of(delayed_work, struct bat_priv, vis_work);
	struct vis_info *info;

	spin_lock_bh(&bat_priv->vis_hash_lock);
	purge_vis_packets(bat_priv);

	if (generate_vis_packet(bat_priv) == 0) {
		/* schedule if generation was successful */
		send_list_add(bat_priv, bat_priv->my_vis_info);
	}

	while (!list_empty(&bat_priv->vis_send_list)) {
		info = list_first_entry(&bat_priv->vis_send_list,
					typeof(*info), send_list);

		kref_get(&info->refcount);
		spin_unlock_bh(&bat_priv->vis_hash_lock);

		if (bat_priv->primary_if)
			send_vis_packet(bat_priv, info);

		spin_lock_bh(&bat_priv->vis_hash_lock);
		send_list_del(info);
		kref_put(&info->refcount, free_info);
	}
	spin_unlock_bh(&bat_priv->vis_hash_lock);
	start_vis_timer(bat_priv);
}

/* init the vis server. this may only be called when if_list is already
 * initialized (e.g. bat0 is initialized, interfaces have been added) */
int vis_init(struct bat_priv *bat_priv)
{
	struct vis_packet *packet;
	int hash_added;

	if (bat_priv->vis_hash)
		return 1;

	spin_lock_bh(&bat_priv->vis_hash_lock);

	bat_priv->vis_hash = hash_new(256);
	if (!bat_priv->vis_hash) {
		pr_err("Can't initialize vis_hash\n");
		goto err;
	}

	bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC);
	if (!bat_priv->my_vis_info) {
		pr_err("Can't initialize vis packet\n");
		goto err;
	}

	bat_priv->my_vis_info->skb_packet = dev_alloc_skb(
						sizeof(struct vis_packet) +
						MAX_VIS_PACKET_SIZE +
						sizeof(struct ethhdr));
	if (!bat_priv->my_vis_info->skb_packet)
		goto free_info;

	skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr));
	packet = (struct vis_packet *)skb_put(
					bat_priv->my_vis_info->skb_packet,
					sizeof(struct vis_packet));

	/* prefill the vis info */
	bat_priv->my_vis_info->first_seen = jiffies -
						msecs_to_jiffies(VIS_INTERVAL);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list);
	INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list);
	kref_init(&bat_priv->my_vis_info->refcount);
	bat_priv->my_vis_info->bat_priv = bat_priv;
	packet->version = COMPAT_VERSION;
	packet->packet_type = BAT_VIS;
	packet->ttl = TTL;
	packet->seqno = 0;
	packet->entries = 0;

	INIT_LIST_HEAD(&bat_priv->vis_send_list);

	hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose,
			      bat_priv->my_vis_info,
			      &bat_priv->my_vis_info->hash_entry);
	if (hash_added < 0) {
		pr_err("Can't add own vis packet into hash\n");
		/* not in hash, need to remove it manually. */
		kref_put(&bat_priv->my_vis_info->refcount, free_info);
		goto err;
	}

	spin_unlock_bh(&bat_priv->vis_hash_lock);
	start_vis_timer(bat_priv);
	return 1;

free_info:
	kfree(bat_priv->my_vis_info);
	bat_priv->my_vis_info = NULL;
err:
	spin_unlock_bh(&bat_priv->vis_hash_lock);
	vis_quit(bat_priv);
	return 0;
}

/* Decrease the reference count on a hash item info */
static void free_info_ref(struct hlist_node *node, void *arg)
{
	struct vis_info *info;

	info = container_of(node, struct vis_info, hash_entry);
	send_list_del(info);
	kref_put(&info->refcount, free_info);
}

/* shutdown vis-server */
void vis_quit(struct bat_priv *bat_priv)
{
	if (!bat_priv->vis_hash)
		return;

	cancel_delayed_work_sync(&bat_priv->vis_work);

	spin_lock_bh(&bat_priv->vis_hash_lock);
	/* properly remove, kill timers ... */
	hash_delete(bat_priv->vis_hash, free_info_ref, NULL);
	bat_priv->vis_hash = NULL;
	bat_priv->my_vis_info = NULL;
	spin_unlock_bh(&bat_priv->vis_hash_lock);
}

/* schedule packets for (re)transmission */
static void start_vis_timer(struct bat_priv *bat_priv)
{
	INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets);
	queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work,
			   msecs_to_jiffies(VIS_INTERVAL));
}