batman-adv: Reduce accumulated length of simple statements
[deliverable/linux.git] / net / batman-adv / translation-table.c
1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
2 *
3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
17 * 02110-1301, USA
18 */
19
20 #include "main.h"
21 #include "translation-table.h"
22 #include "soft-interface.h"
23 #include "hard-interface.h"
24 #include "send.h"
25 #include "hash.h"
26 #include "originator.h"
27 #include "routing.h"
28 #include "bridge_loop_avoidance.h"
29
30 #include <linux/crc16.h>
31
32 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
33 struct batadv_orig_node *orig_node);
34 static void batadv_tt_purge(struct work_struct *work);
35 static void
36 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry);
37
38 /* returns 1 if they are the same mac addr */
39 static int batadv_compare_tt(const struct hlist_node *node, const void *data2)
40 {
41 const void *data1 = container_of(node, struct batadv_tt_common_entry,
42 hash_entry);
43
44 return (memcmp(data1, data2, ETH_ALEN) == 0 ? 1 : 0);
45 }
46
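/* arm the delayed work that runs batadv_tt_purge() five seconds from now */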
47 static void batadv_tt_start_timer(struct batadv_priv *bat_priv)
48 {
49 INIT_DELAYED_WORK(&bat_priv->tt.work, batadv_tt_purge);
50 queue_delayed_work(batadv_event_workqueue, &bat_priv->tt.work,
51 msecs_to_jiffies(5000));
52 }
53
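/* look up a client address in the given translation table hash.
 * Returns the matching entry with its refcount increased, NULL if not found.
 */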
54 static struct batadv_tt_common_entry *
55 batadv_tt_hash_find(struct batadv_hashtable *hash, const void *data)
56 {
57 struct hlist_head *head;
58 struct hlist_node *node;
59 struct batadv_tt_common_entry *tt_common_entry;
60 struct batadv_tt_common_entry *tt_common_entry_tmp = NULL;
61 uint32_t index;
62
63 if (!hash)
64 return NULL;
65
66 index = batadv_choose_orig(data, hash->size);
67 head = &hash->table[index];
68
69 rcu_read_lock();
70 hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) {
71 if (!batadv_compare_eth(tt_common_entry, data))
72 continue;
73
74 if (!atomic_inc_not_zero(&tt_common_entry->refcount))
75 continue;
76
77 tt_common_entry_tmp = tt_common_entry;
78 break;
79 }
80 rcu_read_unlock();
81
82 return tt_common_entry_tmp;
83 }
84
85 static struct batadv_tt_local_entry *
86 batadv_tt_local_hash_find(struct batadv_priv *bat_priv, const void *data)
87 {
88 struct batadv_tt_common_entry *tt_common_entry;
89 struct batadv_tt_local_entry *tt_local_entry = NULL;
90
91 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.local_hash, data);
92 if (tt_common_entry)
93 tt_local_entry = container_of(tt_common_entry,
94 struct batadv_tt_local_entry,
95 common);
96 return tt_local_entry;
97 }
98
99 static struct batadv_tt_global_entry *
100 batadv_tt_global_hash_find(struct batadv_priv *bat_priv, const void *data)
101 {
102 struct batadv_tt_common_entry *tt_common_entry;
103 struct batadv_tt_global_entry *tt_global_entry = NULL;
104
105 tt_common_entry = batadv_tt_hash_find(bat_priv->tt.global_hash, data);
106 if (tt_common_entry)
107 tt_global_entry = container_of(tt_common_entry,
108 struct batadv_tt_global_entry,
109 common);
110 return tt_global_entry;
111
112 }
113
114 static void
115 batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry *tt_local_entry)
116 {
117 if (atomic_dec_and_test(&tt_local_entry->common.refcount))
118 kfree_rcu(tt_local_entry, common.rcu);
119 }
120
121 static void batadv_tt_global_entry_free_rcu(struct rcu_head *rcu)
122 {
123 struct batadv_tt_common_entry *tt_common_entry;
124 struct batadv_tt_global_entry *tt_global_entry;
125
126 tt_common_entry = container_of(rcu, struct batadv_tt_common_entry, rcu);
127 tt_global_entry = container_of(tt_common_entry,
128 struct batadv_tt_global_entry, common);
129
130 kfree(tt_global_entry);
131 }
132
133 static void
134 batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry *tt_global_entry)
135 {
136 if (atomic_dec_and_test(&tt_global_entry->common.refcount)) {
137 batadv_tt_global_del_orig_list(tt_global_entry);
138 call_rcu(&tt_global_entry->common.rcu,
139 batadv_tt_global_entry_free_rcu);
140 }
141 }
142
143 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head *rcu)
144 {
145 struct batadv_tt_orig_list_entry *orig_entry;
146
147 orig_entry = container_of(rcu, struct batadv_tt_orig_list_entry, rcu);
148 batadv_orig_node_free_ref(orig_entry->orig_node);
149 kfree(orig_entry);
150 }
151
152 static void
153 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry *orig_entry)
154 {
155 if (!atomic_dec_and_test(&orig_entry->refcount))
156 return;
157 /* to avoid race conditions, immediately decrease the tt counter */
158 atomic_dec(&orig_entry->orig_node->tt_size);
159 call_rcu(&orig_entry->rcu, batadv_tt_orig_list_entry_free_rcu);
160 }
161
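/* record a local client change (add or delete) to be announced with the next
 * OGM; an opposite event already queued for the same address cancels out
 * instead of being queued twice.
 */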
162 static void batadv_tt_local_event(struct batadv_priv *bat_priv,
163 const uint8_t *addr, uint8_t flags)
164 {
165 struct batadv_tt_change_node *tt_change_node, *entry, *safe;
166 bool event_removed = false;
167 bool del_op_requested, del_op_entry;
168
169 tt_change_node = kmalloc(sizeof(*tt_change_node), GFP_ATOMIC);
170
171 if (!tt_change_node)
172 return;
173
174 tt_change_node->change.flags = flags;
175 memcpy(tt_change_node->change.addr, addr, ETH_ALEN);
176
177 del_op_requested = flags & BATADV_TT_CLIENT_DEL;
178
179 /* check for ADD+DEL or DEL+ADD events */
180 spin_lock_bh(&bat_priv->tt.changes_list_lock);
181 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
182 list) {
183 if (!batadv_compare_eth(entry->change.addr, addr))
184 continue;
185
186 /* DEL+ADD in the same orig interval have no effect and can be
187 * removed to avoid silly behaviour on the receiver side. The
188 * other way around (ADD+DEL) can happen in case of roaming of
189 * a client still in the NEW state. Roaming of NEW clients is
190 * now possible due to automatic recognition of "temporary"
191 * clients
192 */
193 del_op_entry = entry->change.flags & BATADV_TT_CLIENT_DEL;
194 if (!del_op_requested && del_op_entry)
195 goto del;
196 if (del_op_requested && !del_op_entry)
197 goto del;
198 continue;
199 del:
200 list_del(&entry->list);
201 kfree(entry);
202 kfree(tt_change_node);
203 event_removed = true;
204 goto unlock;
205 }
206
207 /* track the change in the OGM interval list */
208 list_add_tail(&tt_change_node->list, &bat_priv->tt.changes_list);
209
210 unlock:
211 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
212
213 if (event_removed)
214 atomic_dec(&bat_priv->tt.local_changes);
215 else
216 atomic_inc(&bat_priv->tt.local_changes);
217 }
218
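/* number of bytes needed to carry changes_num TT change entries */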
219 int batadv_tt_len(int changes_num)
220 {
221 return changes_num * sizeof(struct batadv_tt_change);
222 }
223
224 static int batadv_tt_local_init(struct batadv_priv *bat_priv)
225 {
226 if (bat_priv->tt.local_hash)
227 return 0;
228
229 bat_priv->tt.local_hash = batadv_hash_new(1024);
230
231 if (!bat_priv->tt.local_hash)
232 return -ENOMEM;
233
234 return 0;
235 }
236
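/* add a client to the local translation table (or refresh its last_seen
 * timestamp). If a matching global entry exists, the client is considered to
 * have roamed to us and the announcing originators are notified.
 */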
237 void batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
238 int ifindex)
239 {
240 struct batadv_priv *bat_priv = netdev_priv(soft_iface);
241 struct batadv_tt_local_entry *tt_local_entry = NULL;
242 struct batadv_tt_global_entry *tt_global_entry = NULL;
243 struct hlist_head *head;
244 struct hlist_node *node;
245 struct batadv_tt_orig_list_entry *orig_entry;
246 int hash_added;
247
248 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
249
250 if (tt_local_entry) {
251 tt_local_entry->last_seen = jiffies;
252 /* possibly unset the BATADV_TT_CLIENT_PENDING flag */
253 tt_local_entry->common.flags &= ~BATADV_TT_CLIENT_PENDING;
254 goto out;
255 }
256
257 tt_local_entry = kmalloc(sizeof(*tt_local_entry), GFP_ATOMIC);
258 if (!tt_local_entry)
259 goto out;
260
261 batadv_dbg(BATADV_DBG_TT, bat_priv,
262 "Creating new local tt entry: %pM (ttvn: %d)\n", addr,
263 (uint8_t)atomic_read(&bat_priv->tt.vn));
264
265 memcpy(tt_local_entry->common.addr, addr, ETH_ALEN);
266 tt_local_entry->common.flags = BATADV_NO_FLAGS;
267 if (batadv_is_wifi_iface(ifindex))
268 tt_local_entry->common.flags |= BATADV_TT_CLIENT_WIFI;
269 atomic_set(&tt_local_entry->common.refcount, 2);
270 tt_local_entry->last_seen = jiffies;
271
272 /* the batman interface mac address should never be purged */
273 if (batadv_compare_eth(addr, soft_iface->dev_addr))
274 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NOPURGE;
275
276 /* The local entry has to be marked as NEW to avoid sending it in
277 * a full table response going out before the next ttvn increment
278 * (consistency check)
279 */
280 tt_local_entry->common.flags |= BATADV_TT_CLIENT_NEW;
281
282 hash_added = batadv_hash_add(bat_priv->tt.local_hash, batadv_compare_tt,
283 batadv_choose_orig,
284 &tt_local_entry->common,
285 &tt_local_entry->common.hash_entry);
286
287 if (unlikely(hash_added != 0)) {
288 /* remove the reference for the hash */
289 batadv_tt_local_entry_free_ref(tt_local_entry);
290 goto out;
291 }
292
293 batadv_tt_local_event(bat_priv, addr, tt_local_entry->common.flags);
294
295 /* remove address from global hash if present */
296 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
297
298 /* Check whether this is a roaming event */
299 if (tt_global_entry) {
300 /* These nodes are probably going to update their tt table */
301 head = &tt_global_entry->orig_list;
302 rcu_read_lock();
303 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
304 orig_entry->orig_node->tt_poss_change = true;
305
306 batadv_send_roam_adv(bat_priv,
307 tt_global_entry->common.addr,
308 orig_entry->orig_node);
309 }
310 rcu_read_unlock();
311 /* The global entry has to be marked as ROAMING and
312 * has to be kept for consistency purpose
313 */
314 tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
315 tt_global_entry->roam_at = jiffies;
316 }
317 out:
318 if (tt_local_entry)
319 batadv_tt_local_entry_free_ref(tt_local_entry);
320 if (tt_global_entry)
321 batadv_tt_global_entry_free_ref(tt_global_entry);
322 }
323
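/* replace *packet_buff with a buffer of new_packet_len bytes, preserving the
 * first min_packet_len bytes; the old buffer is kept if the allocation fails
 */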
324 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff,
325 int *packet_buff_len,
326 int min_packet_len,
327 int new_packet_len)
328 {
329 unsigned char *new_buff;
330
331 new_buff = kmalloc(new_packet_len, GFP_ATOMIC);
332
333 /* keep old buffer if kmalloc should fail */
334 if (new_buff) {
335 memcpy(new_buff, *packet_buff, min_packet_len);
336 kfree(*packet_buff);
337 *packet_buff = new_buff;
338 *packet_buff_len = new_packet_len;
339 }
340 }
341
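/* resize the packet buffer so the pending local TT changes fit behind the
 * first min_packet_len bytes, unless the result would exceed the primary
 * interface MTU
 */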
342 static void batadv_tt_prepare_packet_buff(struct batadv_priv *bat_priv,
343 unsigned char **packet_buff,
344 int *packet_buff_len,
345 int min_packet_len)
346 {
347 struct batadv_hard_iface *primary_if;
348 int req_len;
349
350 primary_if = batadv_primary_if_get_selected(bat_priv);
351
352 req_len = min_packet_len;
353 req_len += batadv_tt_len(atomic_read(&bat_priv->tt.local_changes));
354
355 /* if we have too many changes for one packet don't send any
356 * and wait for the tt table request which will be fragmented
357 */
358 if ((!primary_if) || (req_len > primary_if->soft_iface->mtu))
359 req_len = min_packet_len;
360
361 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
362 min_packet_len, req_len);
363
364 if (primary_if)
365 batadv_hardif_free_ref(primary_if);
366 }
367
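/* copy as many queued local TT changes as fit into the packet buffer, keep a
 * copy as tt.last_changeset for later TT_REQUESTs and return the number of
 * changes written
 */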
368 static int batadv_tt_changes_fill_buff(struct batadv_priv *bat_priv,
369 unsigned char **packet_buff,
370 int *packet_buff_len,
371 int min_packet_len)
372 {
373 struct batadv_tt_change_node *entry, *safe;
374 int count = 0, tot_changes = 0, new_len;
375 unsigned char *tt_buff;
376
377 batadv_tt_prepare_packet_buff(bat_priv, packet_buff,
378 packet_buff_len, min_packet_len);
379
380 new_len = *packet_buff_len - min_packet_len;
381 tt_buff = *packet_buff + min_packet_len;
382
383 if (new_len > 0)
384 tot_changes = new_len / batadv_tt_len(1);
385
386 spin_lock_bh(&bat_priv->tt.changes_list_lock);
387 atomic_set(&bat_priv->tt.local_changes, 0);
388
389 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
390 list) {
391 if (count < tot_changes) {
392 memcpy(tt_buff + batadv_tt_len(count),
393 &entry->change, sizeof(struct batadv_tt_change));
394 count++;
395 }
396 list_del(&entry->list);
397 kfree(entry);
398 }
399 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
400
401 /* Keep the buffer for possible tt_request */
402 spin_lock_bh(&bat_priv->tt.last_changeset_lock);
403 kfree(bat_priv->tt.last_changeset);
404 bat_priv->tt.last_changeset_len = 0;
405 bat_priv->tt.last_changeset = NULL;
406 /* check whether this new OGM has no changes due to size problems */
407 if (new_len > 0) {
408 /* if kmalloc() fails we will reply with the full table
409 * instead of providing the diff
410 */
411 bat_priv->tt.last_changeset = kmalloc(new_len, GFP_ATOMIC);
412 if (bat_priv->tt.last_changeset) {
413 memcpy(bat_priv->tt.last_changeset, tt_buff, new_len);
414 bat_priv->tt.last_changeset_len = new_len;
415 }
416 }
417 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
418
419 return count;
420 }
421
422 int batadv_tt_local_seq_print_text(struct seq_file *seq, void *offset)
423 {
424 struct net_device *net_dev = (struct net_device *)seq->private;
425 struct batadv_priv *bat_priv = netdev_priv(net_dev);
426 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
427 struct batadv_tt_common_entry *tt_common_entry;
428 struct batadv_hard_iface *primary_if;
429 struct hlist_node *node;
430 struct hlist_head *head;
431 uint32_t i;
432 int ret = 0;
433
434 primary_if = batadv_primary_if_get_selected(bat_priv);
435 if (!primary_if) {
436 ret = seq_printf(seq,
437 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
438 net_dev->name);
439 goto out;
440 }
441
442 if (primary_if->if_status != BATADV_IF_ACTIVE) {
443 ret = seq_printf(seq,
444 "BATMAN mesh %s disabled - primary interface not active\n",
445 net_dev->name);
446 goto out;
447 }
448
449 seq_printf(seq,
450 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
451 net_dev->name, (uint8_t)atomic_read(&bat_priv->tt.vn));
452
453 for (i = 0; i < hash->size; i++) {
454 head = &hash->table[i];
455
456 rcu_read_lock();
457 hlist_for_each_entry_rcu(tt_common_entry, node,
458 head, hash_entry) {
459 seq_printf(seq, " * %pM [%c%c%c%c%c]\n",
460 tt_common_entry->addr,
461 (tt_common_entry->flags &
462 BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
463 (tt_common_entry->flags &
464 BATADV_TT_CLIENT_NOPURGE ? 'P' : '.'),
465 (tt_common_entry->flags &
466 BATADV_TT_CLIENT_NEW ? 'N' : '.'),
467 (tt_common_entry->flags &
468 BATADV_TT_CLIENT_PENDING ? 'X' : '.'),
469 (tt_common_entry->flags &
470 BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
471 }
472 rcu_read_unlock();
473 }
474 out:
475 if (primary_if)
476 batadv_hardif_free_ref(primary_if);
477 return ret;
478 }
479
480 static void
481 batadv_tt_local_set_pending(struct batadv_priv *bat_priv,
482 struct batadv_tt_local_entry *tt_local_entry,
483 uint16_t flags, const char *message)
484 {
485 batadv_tt_local_event(bat_priv, tt_local_entry->common.addr,
486 tt_local_entry->common.flags | flags);
487
488 /* The local client has to be marked as "pending to be removed" but has
489 * to be kept in the table in order to send it in a full table
490 * response issued before the next ttvn increment (consistency check)
491 */
492 tt_local_entry->common.flags |= BATADV_TT_CLIENT_PENDING;
493
494 batadv_dbg(BATADV_DBG_TT, bat_priv,
495 "Local tt entry (%pM) pending to be removed: %s\n",
496 tt_local_entry->common.addr, message);
497 }
498
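/* mark a local client as pending deletion, optionally flagging the removal as
 * caused by roaming
 */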
499 void batadv_tt_local_remove(struct batadv_priv *bat_priv, const uint8_t *addr,
500 const char *message, bool roaming)
501 {
502 struct batadv_tt_local_entry *tt_local_entry = NULL;
503 uint16_t flags;
504
505 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
506 if (!tt_local_entry)
507 goto out;
508
509 flags = BATADV_TT_CLIENT_DEL;
510 if (roaming)
511 flags |= BATADV_TT_CLIENT_ROAM;
512
513 batadv_tt_local_set_pending(bat_priv, tt_local_entry, flags, message);
514 out:
515 if (tt_local_entry)
516 batadv_tt_local_entry_free_ref(tt_local_entry);
517 }
518
519 static void batadv_tt_local_purge_list(struct batadv_priv *bat_priv,
520 struct hlist_head *head)
521 {
522 struct batadv_tt_local_entry *tt_local_entry;
523 struct batadv_tt_common_entry *tt_common_entry;
524 struct hlist_node *node, *node_tmp;
525
526 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
527 hash_entry) {
528 tt_local_entry = container_of(tt_common_entry,
529 struct batadv_tt_local_entry,
530 common);
531 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_NOPURGE)
532 continue;
533
534 /* entry already marked for deletion */
535 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
536 continue;
537
538 if (!batadv_has_timed_out(tt_local_entry->last_seen,
539 BATADV_TT_LOCAL_TIMEOUT))
540 continue;
541
542 batadv_tt_local_set_pending(bat_priv, tt_local_entry,
543 BATADV_TT_CLIENT_DEL, "timed out");
544 }
545 }
546
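/* mark local clients that have not been seen for BATADV_TT_LOCAL_TIMEOUT as
 * pending deletion
 */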
547 static void batadv_tt_local_purge(struct batadv_priv *bat_priv)
548 {
549 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
550 struct hlist_head *head;
551 spinlock_t *list_lock; /* protects write access to the hash lists */
552 uint32_t i;
553
554 for (i = 0; i < hash->size; i++) {
555 head = &hash->table[i];
556 list_lock = &hash->list_locks[i];
557
558 spin_lock_bh(list_lock);
559 batadv_tt_local_purge_list(bat_priv, head);
560 spin_unlock_bh(list_lock);
561 }
562
563 }
564
565 static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
566 {
567 struct batadv_hashtable *hash;
568 spinlock_t *list_lock; /* protects write access to the hash lists */
569 struct batadv_tt_common_entry *tt_common_entry;
570 struct batadv_tt_local_entry *tt_local;
571 struct hlist_node *node, *node_tmp;
572 struct hlist_head *head;
573 uint32_t i;
574
575 if (!bat_priv->tt.local_hash)
576 return;
577
578 hash = bat_priv->tt.local_hash;
579
580 for (i = 0; i < hash->size; i++) {
581 head = &hash->table[i];
582 list_lock = &hash->list_locks[i];
583
584 spin_lock_bh(list_lock);
585 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
586 head, hash_entry) {
587 hlist_del_rcu(node);
588 tt_local = container_of(tt_common_entry,
589 struct batadv_tt_local_entry,
590 common);
591 batadv_tt_local_entry_free_ref(tt_local);
592 }
593 spin_unlock_bh(list_lock);
594 }
595
596 batadv_hash_destroy(hash);
597
598 bat_priv->tt.local_hash = NULL;
599 }
600
601 static int batadv_tt_global_init(struct batadv_priv *bat_priv)
602 {
603 if (bat_priv->tt.global_hash)
604 return 0;
605
606 bat_priv->tt.global_hash = batadv_hash_new(1024);
607
608 if (!bat_priv->tt.global_hash)
609 return -ENOMEM;
610
611 return 0;
612 }
613
614 static void batadv_tt_changes_list_free(struct batadv_priv *bat_priv)
615 {
616 struct batadv_tt_change_node *entry, *safe;
617
618 spin_lock_bh(&bat_priv->tt.changes_list_lock);
619
620 list_for_each_entry_safe(entry, safe, &bat_priv->tt.changes_list,
621 list) {
622 list_del(&entry->list);
623 kfree(entry);
624 }
625
626 atomic_set(&bat_priv->tt.local_changes, 0);
627 spin_unlock_bh(&bat_priv->tt.changes_list_lock);
628 }
629
630 /* retrieves the orig_tt_list_entry belonging to orig_node from the
631 * batadv_tt_global_entry list
632 *
633 * returns it with an increased refcounter, NULL if not found
634 */
635 static struct batadv_tt_orig_list_entry *
636 batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry *entry,
637 const struct batadv_orig_node *orig_node)
638 {
639 struct batadv_tt_orig_list_entry *tmp_orig_entry, *orig_entry = NULL;
640 const struct hlist_head *head;
641 struct hlist_node *node;
642
643 rcu_read_lock();
644 head = &entry->orig_list;
645 hlist_for_each_entry_rcu(tmp_orig_entry, node, head, list) {
646 if (tmp_orig_entry->orig_node != orig_node)
647 continue;
648 if (!atomic_inc_not_zero(&tmp_orig_entry->refcount))
649 continue;
650
651 orig_entry = tmp_orig_entry;
652 break;
653 }
654 rcu_read_unlock();
655
656 return orig_entry;
657 }
658
659 /* find out if an orig_node is already in the list of a tt_global_entry.
660 * returns true if found, false otherwise
661 */
662 static bool
663 batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry *entry,
664 const struct batadv_orig_node *orig_node)
665 {
666 struct batadv_tt_orig_list_entry *orig_entry;
667 bool found = false;
668
669 orig_entry = batadv_tt_global_orig_entry_find(entry, orig_node);
670 if (orig_entry) {
671 found = true;
672 batadv_tt_orig_list_entry_free_ref(orig_entry);
673 }
674
675 return found;
676 }
677
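/* attach orig_node to the originator list of a global entry, unless it is
 * already listed there
 */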
678 static void
679 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry *tt_global,
680 struct batadv_orig_node *orig_node, int ttvn)
681 {
682 struct batadv_tt_orig_list_entry *orig_entry;
683
684 orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
685 if (orig_entry)
686 goto out;
687
688 orig_entry = kzalloc(sizeof(*orig_entry), GFP_ATOMIC);
689 if (!orig_entry)
690 goto out;
691
692 INIT_HLIST_NODE(&orig_entry->list);
693 atomic_inc(&orig_node->refcount);
694 atomic_inc(&orig_node->tt_size);
695 orig_entry->orig_node = orig_node;
696 orig_entry->ttvn = ttvn;
697 atomic_set(&orig_entry->refcount, 2);
698
699 spin_lock_bh(&tt_global->list_lock);
700 hlist_add_head_rcu(&orig_entry->list,
701 &tt_global->orig_list);
702 spin_unlock_bh(&tt_global->list_lock);
703 out:
704 if (orig_entry)
705 batadv_tt_orig_list_entry_free_ref(orig_entry);
706 }
707
708 /* caller must hold orig_node refcount */
709 int batadv_tt_global_add(struct batadv_priv *bat_priv,
710 struct batadv_orig_node *orig_node,
711 const unsigned char *tt_addr, uint8_t flags,
712 uint8_t ttvn)
713 {
714 struct batadv_tt_global_entry *tt_global_entry = NULL;
715 int ret = 0;
716 int hash_added;
717 struct batadv_tt_common_entry *common;
718
719 tt_global_entry = batadv_tt_global_hash_find(bat_priv, tt_addr);
720
721 if (!tt_global_entry) {
722 tt_global_entry = kzalloc(sizeof(*tt_global_entry), GFP_ATOMIC);
723 if (!tt_global_entry)
724 goto out;
725
726 common = &tt_global_entry->common;
727 memcpy(common->addr, tt_addr, ETH_ALEN);
728
729 common->flags = flags;
730 tt_global_entry->roam_at = 0;
731 atomic_set(&common->refcount, 2);
732
733 INIT_HLIST_HEAD(&tt_global_entry->orig_list);
734 spin_lock_init(&tt_global_entry->list_lock);
735
736 hash_added = batadv_hash_add(bat_priv->tt.global_hash,
737 batadv_compare_tt,
738 batadv_choose_orig, common,
739 &common->hash_entry);
740
741 if (unlikely(hash_added != 0)) {
742 /* remove the reference for the hash */
743 batadv_tt_global_entry_free_ref(tt_global_entry);
744 goto out_remove;
745 }
746 } else {
747 /* there is already a global entry, use this one. */
748
749 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
750 * one originator left in the list and we previously received a
751 * delete + roaming change for this originator.
752 *
753 * We should first delete the old originator before adding the
754 * new one.
755 */
756 if (tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM) {
757 batadv_tt_global_del_orig_list(tt_global_entry);
758 tt_global_entry->common.flags &= ~BATADV_TT_CLIENT_ROAM;
759 tt_global_entry->roam_at = 0;
760 }
761
762 }
763 /* add the new orig_entry (if needed) */
764 batadv_tt_global_orig_entry_add(tt_global_entry, orig_node, ttvn);
765
766 batadv_dbg(BATADV_DBG_TT, bat_priv,
767 "Creating new global tt entry: %pM (via %pM)\n",
768 tt_global_entry->common.addr, orig_node->orig);
769
770 out_remove:
771 /* remove address from local hash if present */
772 batadv_tt_local_remove(bat_priv, tt_global_entry->common.addr,
773 "global tt received",
774 flags & BATADV_TT_CLIENT_ROAM);
775 ret = 1;
776 out:
777 if (tt_global_entry)
778 batadv_tt_global_entry_free_ref(tt_global_entry);
779 return ret;
780 }
781
782 /* print all orig nodes that announce the address for this global entry.
783 * it is assumed that the caller holds rcu_read_lock();
784 */
785 static void
786 batadv_tt_global_print_entry(struct batadv_tt_global_entry *tt_global_entry,
787 struct seq_file *seq)
788 {
789 struct hlist_head *head;
790 struct hlist_node *node;
791 struct batadv_tt_orig_list_entry *orig_entry;
792 struct batadv_tt_common_entry *tt_common_entry;
793 uint16_t flags;
794 uint8_t last_ttvn;
795
796 tt_common_entry = &tt_global_entry->common;
797
798 head = &tt_global_entry->orig_list;
799
800 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
801 flags = tt_common_entry->flags;
802 last_ttvn = atomic_read(&orig_entry->orig_node->last_ttvn);
803 seq_printf(seq, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
804 tt_global_entry->common.addr, orig_entry->ttvn,
805 orig_entry->orig_node->orig, last_ttvn,
806 (flags & BATADV_TT_CLIENT_ROAM ? 'R' : '.'),
807 (flags & BATADV_TT_CLIENT_WIFI ? 'W' : '.'));
808 }
809 }
810
811 int batadv_tt_global_seq_print_text(struct seq_file *seq, void *offset)
812 {
813 struct net_device *net_dev = (struct net_device *)seq->private;
814 struct batadv_priv *bat_priv = netdev_priv(net_dev);
815 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
816 struct batadv_tt_common_entry *tt_common_entry;
817 struct batadv_tt_global_entry *tt_global;
818 struct batadv_hard_iface *primary_if;
819 struct hlist_node *node;
820 struct hlist_head *head;
821 uint32_t i;
822 int ret = 0;
823
824 primary_if = batadv_primary_if_get_selected(bat_priv);
825 if (!primary_if) {
826 ret = seq_printf(seq,
827 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
828 net_dev->name);
829 goto out;
830 }
831
832 if (primary_if->if_status != BATADV_IF_ACTIVE) {
833 ret = seq_printf(seq,
834 "BATMAN mesh %s disabled - primary interface not active\n",
835 net_dev->name);
836 goto out;
837 }
838
839 seq_printf(seq,
840 "Globally announced TT entries received via the mesh %s\n",
841 net_dev->name);
842 seq_printf(seq, " %-13s %s %-15s %s %s\n",
843 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
844
845 for (i = 0; i < hash->size; i++) {
846 head = &hash->table[i];
847
848 rcu_read_lock();
849 hlist_for_each_entry_rcu(tt_common_entry, node,
850 head, hash_entry) {
851 tt_global = container_of(tt_common_entry,
852 struct batadv_tt_global_entry,
853 common);
854 batadv_tt_global_print_entry(tt_global, seq);
855 }
856 rcu_read_unlock();
857 }
858 out:
859 if (primary_if)
860 batadv_hardif_free_ref(primary_if);
861 return ret;
862 }
863
864 /* deletes the orig list of a tt_global_entry */
865 static void
866 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry *tt_global_entry)
867 {
868 struct hlist_head *head;
869 struct hlist_node *node, *safe;
870 struct batadv_tt_orig_list_entry *orig_entry;
871
872 spin_lock_bh(&tt_global_entry->list_lock);
873 head = &tt_global_entry->orig_list;
874 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
875 hlist_del_rcu(node);
876 batadv_tt_orig_list_entry_free_ref(orig_entry);
877 }
878 spin_unlock_bh(&tt_global_entry->list_lock);
879
880 }
881
882 static void
883 batadv_tt_global_del_orig_entry(struct batadv_priv *bat_priv,
884 struct batadv_tt_global_entry *tt_global_entry,
885 struct batadv_orig_node *orig_node,
886 const char *message)
887 {
888 struct hlist_head *head;
889 struct hlist_node *node, *safe;
890 struct batadv_tt_orig_list_entry *orig_entry;
891
892 spin_lock_bh(&tt_global_entry->list_lock);
893 head = &tt_global_entry->orig_list;
894 hlist_for_each_entry_safe(orig_entry, node, safe, head, list) {
895 if (orig_entry->orig_node == orig_node) {
896 batadv_dbg(BATADV_DBG_TT, bat_priv,
897 "Deleting %pM from global tt entry %pM: %s\n",
898 orig_node->orig,
899 tt_global_entry->common.addr, message);
900 hlist_del_rcu(node);
901 batadv_tt_orig_list_entry_free_ref(orig_entry);
902 }
903 }
904 spin_unlock_bh(&tt_global_entry->list_lock);
905 }
906
907 static void
908 batadv_tt_global_del_struct(struct batadv_priv *bat_priv,
909 struct batadv_tt_global_entry *tt_global_entry,
910 const char *message)
911 {
912 batadv_dbg(BATADV_DBG_TT, bat_priv,
913 "Deleting global tt entry %pM: %s\n",
914 tt_global_entry->common.addr, message);
915
916 batadv_hash_remove(bat_priv->tt.global_hash, batadv_compare_tt,
917 batadv_choose_orig, tt_global_entry->common.addr);
918 batadv_tt_global_entry_free_ref(tt_global_entry);
919
920 }
921
922 /* If the client is to be deleted, we check if it is the last originator entry
923 * within the tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
924 * timer, otherwise we simply remove the originator scheduled for deletion.
925 */
926 static void
927 batadv_tt_global_del_roaming(struct batadv_priv *bat_priv,
928 struct batadv_tt_global_entry *tt_global_entry,
929 struct batadv_orig_node *orig_node,
930 const char *message)
931 {
932 bool last_entry = true;
933 struct hlist_head *head;
934 struct hlist_node *node;
935 struct batadv_tt_orig_list_entry *orig_entry;
936
937 /* no local entry exists, case 1:
938 * Check if this is the last one or if other entries exist.
939 */
940
941 rcu_read_lock();
942 head = &tt_global_entry->orig_list;
943 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
944 if (orig_entry->orig_node != orig_node) {
945 last_entry = false;
946 break;
947 }
948 }
949 rcu_read_unlock();
950
951 if (last_entry) {
952 /* it's the last one, mark for roaming. */
953 tt_global_entry->common.flags |= BATADV_TT_CLIENT_ROAM;
954 tt_global_entry->roam_at = jiffies;
955 } else
956 /* there is another entry, we can simply delete this
957 * one and can still use the other one.
958 */
959 batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
960 orig_node, message);
961 }
962
963
964
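/* remove an address announced by orig_node from the global table, applying
 * the roaming rules described below
 */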
965 static void batadv_tt_global_del(struct batadv_priv *bat_priv,
966 struct batadv_orig_node *orig_node,
967 const unsigned char *addr,
968 const char *message, bool roaming)
969 {
970 struct batadv_tt_global_entry *tt_global_entry = NULL;
971 struct batadv_tt_local_entry *local_entry = NULL;
972
973 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
974 if (!tt_global_entry)
975 goto out;
976
977 if (!roaming) {
978 batadv_tt_global_del_orig_entry(bat_priv, tt_global_entry,
979 orig_node, message);
980
981 if (hlist_empty(&tt_global_entry->orig_list))
982 batadv_tt_global_del_struct(bat_priv, tt_global_entry,
983 message);
984
985 goto out;
986 }
987
988 /* if we are deleting a global entry due to a roam
989 * event, there are two possibilities:
990 * 1) the client roamed from node A to node B => if there
991 * is only one originator left for this client, we mark
992 * it with BATADV_TT_CLIENT_ROAM, we start a timer and we
993 * wait for node B to claim it. In case of timeout
994 * the entry is purged.
995 *
996 * If there are other originators left, we directly delete
997 * the originator.
998 * 2) the client roamed to us => we can directly delete
999 * the global entry, since it is useless now.
1000 */
1001 local_entry = batadv_tt_local_hash_find(bat_priv,
1002 tt_global_entry->common.addr);
1003 if (local_entry) {
1004 /* local entry exists, case 2: client roamed to us. */
1005 batadv_tt_global_del_orig_list(tt_global_entry);
1006 batadv_tt_global_del_struct(bat_priv, tt_global_entry, message);
1007 } else
1008 /* no local entry exists, case 1: check for roaming */
1009 batadv_tt_global_del_roaming(bat_priv, tt_global_entry,
1010 orig_node, message);
1011
1012
1013 out:
1014 if (tt_global_entry)
1015 batadv_tt_global_entry_free_ref(tt_global_entry);
1016 if (local_entry)
1017 batadv_tt_local_entry_free_ref(local_entry);
1018 }
1019
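/* remove orig_node from the originator lists of all global entries and delete
 * the entries that are no longer announced by anyone
 */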
1020 void batadv_tt_global_del_orig(struct batadv_priv *bat_priv,
1021 struct batadv_orig_node *orig_node,
1022 const char *message)
1023 {
1024 struct batadv_tt_global_entry *tt_global;
1025 struct batadv_tt_common_entry *tt_common_entry;
1026 uint32_t i;
1027 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1028 struct hlist_node *node, *safe;
1029 struct hlist_head *head;
1030 spinlock_t *list_lock; /* protects write access to the hash lists */
1031
1032 if (!hash)
1033 return;
1034
1035 for (i = 0; i < hash->size; i++) {
1036 head = &hash->table[i];
1037 list_lock = &hash->list_locks[i];
1038
1039 spin_lock_bh(list_lock);
1040 hlist_for_each_entry_safe(tt_common_entry, node, safe,
1041 head, hash_entry) {
1042 tt_global = container_of(tt_common_entry,
1043 struct batadv_tt_global_entry,
1044 common);
1045
1046 batadv_tt_global_del_orig_entry(bat_priv, tt_global,
1047 orig_node, message);
1048
1049 if (hlist_empty(&tt_global->orig_list)) {
1050 batadv_dbg(BATADV_DBG_TT, bat_priv,
1051 "Deleting global tt entry %pM: %s\n",
1052 tt_global->common.addr, message);
1053 hlist_del_rcu(node);
1054 batadv_tt_global_entry_free_ref(tt_global);
1055 }
1056 }
1057 spin_unlock_bh(list_lock);
1058 }
1059 orig_node->tt_initialised = false;
1060 }
1061
1062 static void batadv_tt_global_roam_purge_list(struct batadv_priv *bat_priv,
1063 struct hlist_head *head)
1064 {
1065 struct batadv_tt_common_entry *tt_common_entry;
1066 struct batadv_tt_global_entry *tt_global_entry;
1067 struct hlist_node *node, *node_tmp;
1068
1069 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp, head,
1070 hash_entry) {
1071 tt_global_entry = container_of(tt_common_entry,
1072 struct batadv_tt_global_entry,
1073 common);
1074 if (!(tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM))
1075 continue;
1076 if (!batadv_has_timed_out(tt_global_entry->roam_at,
1077 BATADV_TT_CLIENT_ROAM_TIMEOUT))
1078 continue;
1079
1080 batadv_dbg(BATADV_DBG_TT, bat_priv,
1081 "Deleting global tt entry (%pM): Roaming timeout\n",
1082 tt_global_entry->common.addr);
1083
1084 hlist_del_rcu(node);
1085 batadv_tt_global_entry_free_ref(tt_global_entry);
1086 }
1087 }
1088
1089 static void batadv_tt_global_roam_purge(struct batadv_priv *bat_priv)
1090 {
1091 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1092 struct hlist_head *head;
1093 spinlock_t *list_lock; /* protects write access to the hash lists */
1094 uint32_t i;
1095
1096 for (i = 0; i < hash->size; i++) {
1097 head = &hash->table[i];
1098 list_lock = &hash->list_locks[i];
1099
1100 spin_lock_bh(list_lock);
1101 batadv_tt_global_roam_purge_list(bat_priv, head);
1102 spin_unlock_bh(list_lock);
1103 }
1104
1105 }
1106
1107 static void batadv_tt_global_table_free(struct batadv_priv *bat_priv)
1108 {
1109 struct batadv_hashtable *hash;
1110 spinlock_t *list_lock; /* protects write access to the hash lists */
1111 struct batadv_tt_common_entry *tt_common_entry;
1112 struct batadv_tt_global_entry *tt_global;
1113 struct hlist_node *node, *node_tmp;
1114 struct hlist_head *head;
1115 uint32_t i;
1116
1117 if (!bat_priv->tt.global_hash)
1118 return;
1119
1120 hash = bat_priv->tt.global_hash;
1121
1122 for (i = 0; i < hash->size; i++) {
1123 head = &hash->table[i];
1124 list_lock = &hash->list_locks[i];
1125
1126 spin_lock_bh(list_lock);
1127 hlist_for_each_entry_safe(tt_common_entry, node, node_tmp,
1128 head, hash_entry) {
1129 hlist_del_rcu(node);
1130 tt_global = container_of(tt_common_entry,
1131 struct batadv_tt_global_entry,
1132 common);
1133 batadv_tt_global_entry_free_ref(tt_global);
1134 }
1135 spin_unlock_bh(list_lock);
1136 }
1137
1138 batadv_hash_destroy(hash);
1139
1140 bat_priv->tt.global_hash = NULL;
1141 }
1142
1143 static bool
1144 _batadv_is_ap_isolated(struct batadv_tt_local_entry *tt_local_entry,
1145 struct batadv_tt_global_entry *tt_global_entry)
1146 {
1147 bool ret = false;
1148
1149 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_WIFI &&
1150 tt_global_entry->common.flags & BATADV_TT_CLIENT_WIFI)
1151 ret = true;
1152
1153 return ret;
1154 }
1155
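/* find the originator with the best TQ that announces addr. Returns the
 * orig_node with an increased refcount, or NULL if the destination is unknown
 * or AP isolation forbids traffic between src and addr.
 */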
1156 struct batadv_orig_node *batadv_transtable_search(struct batadv_priv *bat_priv,
1157 const uint8_t *src,
1158 const uint8_t *addr)
1159 {
1160 struct batadv_tt_local_entry *tt_local_entry = NULL;
1161 struct batadv_tt_global_entry *tt_global_entry = NULL;
1162 struct batadv_orig_node *orig_node = NULL;
1163 struct batadv_neigh_node *router = NULL;
1164 struct hlist_head *head;
1165 struct hlist_node *node;
1166 struct batadv_tt_orig_list_entry *orig_entry;
1167 int best_tq;
1168
1169 if (src && atomic_read(&bat_priv->ap_isolation)) {
1170 tt_local_entry = batadv_tt_local_hash_find(bat_priv, src);
1171 if (!tt_local_entry)
1172 goto out;
1173 }
1174
1175 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
1176 if (!tt_global_entry)
1177 goto out;
1178
1179 /* check whether the clients should not communicate due to AP
1180 * isolation
1181 */
1182 if (tt_local_entry &&
1183 _batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
1184 goto out;
1185
1186 best_tq = 0;
1187
1188 rcu_read_lock();
1189 head = &tt_global_entry->orig_list;
1190 hlist_for_each_entry_rcu(orig_entry, node, head, list) {
1191 router = batadv_orig_node_get_router(orig_entry->orig_node);
1192 if (!router)
1193 continue;
1194
1195 if (router->tq_avg > best_tq) {
1196 orig_node = orig_entry->orig_node;
1197 best_tq = router->tq_avg;
1198 }
1199 batadv_neigh_node_free_ref(router);
1200 }
1201 /* found anything? */
1202 if (orig_node && !atomic_inc_not_zero(&orig_node->refcount))
1203 orig_node = NULL;
1204 rcu_read_unlock();
1205 out:
1206 if (tt_global_entry)
1207 batadv_tt_global_entry_free_ref(tt_global_entry);
1208 if (tt_local_entry)
1209 batadv_tt_local_entry_free_ref(tt_local_entry);
1210
1211 return orig_node;
1212 }
1213
1214 /* Calculates the checksum of the local table of a given orig_node, based on the global entries announced by it */
1215 static uint16_t batadv_tt_global_crc(struct batadv_priv *bat_priv,
1216 struct batadv_orig_node *orig_node)
1217 {
1218 uint16_t total = 0, total_one;
1219 struct batadv_hashtable *hash = bat_priv->tt.global_hash;
1220 struct batadv_tt_common_entry *tt_common;
1221 struct batadv_tt_global_entry *tt_global;
1222 struct hlist_node *node;
1223 struct hlist_head *head;
1224 uint32_t i;
1225 int j;
1226
1227 for (i = 0; i < hash->size; i++) {
1228 head = &hash->table[i];
1229
1230 rcu_read_lock();
1231 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1232 tt_global = container_of(tt_common,
1233 struct batadv_tt_global_entry,
1234 common);
1235 /* Roaming clients are in the global table for
1236 * consistency only. They don't have to be
1237 * taken into account while computing the
1238 * global crc
1239 */
1240 if (tt_common->flags & BATADV_TT_CLIENT_ROAM)
1241 continue;
1242
1243 /* find out if this global entry is announced by this
1244 * originator
1245 */
1246 if (!batadv_tt_global_entry_has_orig(tt_global,
1247 orig_node))
1248 continue;
1249
1250 total_one = 0;
1251 for (j = 0; j < ETH_ALEN; j++)
1252 total_one = crc16_byte(total_one,
1253 tt_common->addr[j]);
1254 total ^= total_one;
1255 }
1256 rcu_read_unlock();
1257 }
1258
1259 return total;
1260 }
1261
1262 /* Calculates the checksum of the local table */
1263 static uint16_t batadv_tt_local_crc(struct batadv_priv *bat_priv)
1264 {
1265 uint16_t total = 0, total_one;
1266 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
1267 struct batadv_tt_common_entry *tt_common;
1268 struct hlist_node *node;
1269 struct hlist_head *head;
1270 uint32_t i;
1271 int j;
1272
1273 for (i = 0; i < hash->size; i++) {
1274 head = &hash->table[i];
1275
1276 rcu_read_lock();
1277 hlist_for_each_entry_rcu(tt_common, node, head, hash_entry) {
1278 /* clients not yet committed must not be taken into
1279 * account while computing the CRC
1280 */
1281 if (tt_common->flags & BATADV_TT_CLIENT_NEW)
1282 continue;
1283 total_one = 0;
1284 for (j = 0; j < ETH_ALEN; j++)
1285 total_one = crc16_byte(total_one,
1286 tt_common->addr[j]);
1287 total ^= total_one;
1288 }
1289 rcu_read_unlock();
1290 }
1291
1292 return total;
1293 }
1294
1295 static void batadv_tt_req_list_free(struct batadv_priv *bat_priv)
1296 {
1297 struct batadv_tt_req_node *node, *safe;
1298
1299 spin_lock_bh(&bat_priv->tt.req_list_lock);
1300
1301 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1302 list_del(&node->list);
1303 kfree(node);
1304 }
1305
1306 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1307 }
1308
1309 static void batadv_tt_save_orig_buffer(struct batadv_priv *bat_priv,
1310 struct batadv_orig_node *orig_node,
1311 const unsigned char *tt_buff,
1312 uint8_t tt_num_changes)
1313 {
1314 uint16_t tt_buff_len = batadv_tt_len(tt_num_changes);
1315
1316 /* Replace the old buffer only if I received something in the
1317 * last OGM (the OGM could carry no changes)
1318 */
1319 spin_lock_bh(&orig_node->tt_buff_lock);
1320 if (tt_buff_len > 0) {
1321 kfree(orig_node->tt_buff);
1322 orig_node->tt_buff_len = 0;
1323 orig_node->tt_buff = kmalloc(tt_buff_len, GFP_ATOMIC);
1324 if (orig_node->tt_buff) {
1325 memcpy(orig_node->tt_buff, tt_buff, tt_buff_len);
1326 orig_node->tt_buff_len = tt_buff_len;
1327 }
1328 }
1329 spin_unlock_bh(&orig_node->tt_buff_lock);
1330 }
1331
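/* drop pending TT requests that were not answered within
 * BATADV_TT_REQUEST_TIMEOUT
 */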
1332 static void batadv_tt_req_purge(struct batadv_priv *bat_priv)
1333 {
1334 struct batadv_tt_req_node *node, *safe;
1335
1336 spin_lock_bh(&bat_priv->tt.req_list_lock);
1337 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1338 if (batadv_has_timed_out(node->issued_at,
1339 BATADV_TT_REQUEST_TIMEOUT)) {
1340 list_del(&node->list);
1341 kfree(node);
1342 }
1343 }
1344 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1345 }
1346
1347 /* returns the pointer to the new tt_req_node struct if no request
1348 * has already been issued for this orig_node, NULL otherwise
1349 */
1350 static struct batadv_tt_req_node *
1351 batadv_new_tt_req_node(struct batadv_priv *bat_priv,
1352 struct batadv_orig_node *orig_node)
1353 {
1354 struct batadv_tt_req_node *tt_req_node_tmp, *tt_req_node = NULL;
1355
1356 spin_lock_bh(&bat_priv->tt.req_list_lock);
1357 list_for_each_entry(tt_req_node_tmp, &bat_priv->tt.req_list, list) {
1358 if (batadv_compare_eth(tt_req_node_tmp, orig_node) &&
1359 !batadv_has_timed_out(tt_req_node_tmp->issued_at,
1360 BATADV_TT_REQUEST_TIMEOUT))
1361 goto unlock;
1362 }
1363
1364 tt_req_node = kmalloc(sizeof(*tt_req_node), GFP_ATOMIC);
1365 if (!tt_req_node)
1366 goto unlock;
1367
1368 memcpy(tt_req_node->addr, orig_node->orig, ETH_ALEN);
1369 tt_req_node->issued_at = jiffies;
1370
1371 list_add(&tt_req_node->list, &bat_priv->tt.req_list);
1372 unlock:
1373 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1374 return tt_req_node;
1375 }
1376
1377 /* data_ptr is useless here, but has to be kept to respect the prototype */
1378 static int batadv_tt_local_valid_entry(const void *entry_ptr,
1379 const void *data_ptr)
1380 {
1381 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
1382
1383 if (tt_common_entry->flags & BATADV_TT_CLIENT_NEW)
1384 return 0;
1385 return 1;
1386 }
1387
1388 static int batadv_tt_global_valid(const void *entry_ptr,
1389 const void *data_ptr)
1390 {
1391 const struct batadv_tt_common_entry *tt_common_entry = entry_ptr;
1392 const struct batadv_tt_global_entry *tt_global_entry;
1393 const struct batadv_orig_node *orig_node = data_ptr;
1394
1395 if (tt_common_entry->flags & BATADV_TT_CLIENT_ROAM)
1396 return 0;
1397
1398 tt_global_entry = container_of(tt_common_entry,
1399 struct batadv_tt_global_entry,
1400 common);
1401
1402 return batadv_tt_global_entry_has_orig(tt_global_entry, orig_node);
1403 }
1404
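/* allocate a TT_RESPONSE skb and fill it with the entries of the given hash
 * that pass valid_cb, truncated so the packet still fits the primary
 * interface MTU
 */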
1405 static struct sk_buff *
1406 batadv_tt_response_fill_table(uint16_t tt_len, uint8_t ttvn,
1407 struct batadv_hashtable *hash,
1408 struct batadv_hard_iface *primary_if,
1409 int (*valid_cb)(const void *, const void *),
1410 void *cb_data)
1411 {
1412 struct batadv_tt_common_entry *tt_common_entry;
1413 struct batadv_tt_query_packet *tt_response;
1414 struct batadv_tt_change *tt_change;
1415 struct hlist_node *node;
1416 struct hlist_head *head;
1417 struct sk_buff *skb = NULL;
1418 uint16_t tt_tot, tt_count;
1419 ssize_t tt_query_size = sizeof(struct batadv_tt_query_packet);
1420 uint32_t i;
1421 size_t len;
1422
1423 if (tt_query_size + tt_len > primary_if->soft_iface->mtu) {
1424 tt_len = primary_if->soft_iface->mtu - tt_query_size;
1425 tt_len -= tt_len % sizeof(struct batadv_tt_change);
1426 }
1427 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1428
1429 len = tt_query_size + tt_len;
1430 skb = dev_alloc_skb(len + ETH_HLEN);
1431 if (!skb)
1432 goto out;
1433
1434 skb_reserve(skb, ETH_HLEN);
1435 tt_response = (struct batadv_tt_query_packet *)skb_put(skb, len);
1436 tt_response->ttvn = ttvn;
1437
1438 tt_change = (struct batadv_tt_change *)(skb->data + tt_query_size);
1439 tt_count = 0;
1440
1441 rcu_read_lock();
1442 for (i = 0; i < hash->size; i++) {
1443 head = &hash->table[i];
1444
1445 hlist_for_each_entry_rcu(tt_common_entry, node,
1446 head, hash_entry) {
1447 if (tt_count == tt_tot)
1448 break;
1449
1450 if ((valid_cb) && (!valid_cb(tt_common_entry, cb_data)))
1451 continue;
1452
1453 memcpy(tt_change->addr, tt_common_entry->addr,
1454 ETH_ALEN);
1455 tt_change->flags = BATADV_NO_FLAGS;
1456
1457 tt_count++;
1458 tt_change++;
1459 }
1460 }
1461 rcu_read_unlock();
1462
1463 /* store in the message the number of entries we have successfully
1464 * copied
1465 */
1466 tt_response->tt_data = htons(tt_count);
1467
1468 out:
1469 return skb;
1470 }
1471
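/* send a TT_REQUEST to dst_orig_node unless one is already pending for it;
 * returns 0 on success, 1 otherwise
 */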
1472 static int batadv_send_tt_request(struct batadv_priv *bat_priv,
1473 struct batadv_orig_node *dst_orig_node,
1474 uint8_t ttvn, uint16_t tt_crc,
1475 bool full_table)
1476 {
1477 struct sk_buff *skb = NULL;
1478 struct batadv_tt_query_packet *tt_request;
1479 struct batadv_neigh_node *neigh_node = NULL;
1480 struct batadv_hard_iface *primary_if;
1481 struct batadv_tt_req_node *tt_req_node = NULL;
1482 int ret = 1;
1483 size_t tt_req_len;
1484
1485 primary_if = batadv_primary_if_get_selected(bat_priv);
1486 if (!primary_if)
1487 goto out;
1488
1489 /* The new tt_req will be issued only if I'm not waiting for a
1490 * reply from the same orig_node yet
1491 */
1492 tt_req_node = batadv_new_tt_req_node(bat_priv, dst_orig_node);
1493 if (!tt_req_node)
1494 goto out;
1495
1496 skb = dev_alloc_skb(sizeof(*tt_request) + ETH_HLEN);
1497 if (!skb)
1498 goto out;
1499
1500 skb_reserve(skb, ETH_HLEN);
1501
1502 tt_req_len = sizeof(*tt_request);
1503 tt_request = (struct batadv_tt_query_packet *)skb_put(skb, tt_req_len);
1504
1505 tt_request->header.packet_type = BATADV_TT_QUERY;
1506 tt_request->header.version = BATADV_COMPAT_VERSION;
1507 memcpy(tt_request->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1508 memcpy(tt_request->dst, dst_orig_node->orig, ETH_ALEN);
1509 tt_request->header.ttl = BATADV_TTL;
1510 tt_request->ttvn = ttvn;
1511 tt_request->tt_data = htons(tt_crc);
1512 tt_request->flags = BATADV_TT_REQUEST;
1513
1514 if (full_table)
1515 tt_request->flags |= BATADV_TT_FULL_TABLE;
1516
1517 neigh_node = batadv_orig_node_get_router(dst_orig_node);
1518 if (!neigh_node)
1519 goto out;
1520
1521 batadv_dbg(BATADV_DBG_TT, bat_priv,
1522 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1523 dst_orig_node->orig, neigh_node->addr,
1524 (full_table ? 'F' : '.'));
1525
1526 batadv_inc_counter(bat_priv, BATADV_CNT_TT_REQUEST_TX);
1527
1528 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1529 ret = 0;
1530
1531 out:
1532 if (neigh_node)
1533 batadv_neigh_node_free_ref(neigh_node);
1534 if (primary_if)
1535 batadv_hardif_free_ref(primary_if);
1536 if (ret)
1537 kfree_skb(skb);
1538 if (ret && tt_req_node) {
1539 spin_lock_bh(&bat_priv->tt.req_list_lock);
1540 list_del(&tt_req_node->list);
1541 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1542 kfree(tt_req_node);
1543 }
1544 return ret;
1545 }
1546
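/* answer a TT_REQUEST on behalf of another node, using the table data we hold
 * for that node; returns true if a response was sent
 */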
1547 static bool
1548 batadv_send_other_tt_response(struct batadv_priv *bat_priv,
1549 struct batadv_tt_query_packet *tt_request)
1550 {
1551 struct batadv_orig_node *req_dst_orig_node = NULL;
1552 struct batadv_orig_node *res_dst_orig_node = NULL;
1553 struct batadv_neigh_node *neigh_node = NULL;
1554 struct batadv_hard_iface *primary_if = NULL;
1555 uint8_t orig_ttvn, req_ttvn, ttvn;
1556 int ret = false;
1557 unsigned char *tt_buff;
1558 bool full_table;
1559 uint16_t tt_len, tt_tot;
1560 struct sk_buff *skb = NULL;
1561 struct batadv_tt_query_packet *tt_response;
1562 uint8_t *packet_pos;
1563 size_t len;
1564
1565 batadv_dbg(BATADV_DBG_TT, bat_priv,
1566 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1567 tt_request->src, tt_request->ttvn, tt_request->dst,
1568 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1569
1570 /* Let's get the orig node of the REAL destination */
1571 req_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->dst);
1572 if (!req_dst_orig_node)
1573 goto out;
1574
1575 res_dst_orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1576 if (!res_dst_orig_node)
1577 goto out;
1578
1579 neigh_node = batadv_orig_node_get_router(res_dst_orig_node);
1580 if (!neigh_node)
1581 goto out;
1582
1583 primary_if = batadv_primary_if_get_selected(bat_priv);
1584 if (!primary_if)
1585 goto out;
1586
1587 orig_ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1588 req_ttvn = tt_request->ttvn;
1589
1590 /* I don't have the requested data */
1591 if (orig_ttvn != req_ttvn ||
1592 tt_request->tt_data != htons(req_dst_orig_node->tt_crc))
1593 goto out;
1594
1595 /* If the full table has been explicitly requested */
1596 if (tt_request->flags & BATADV_TT_FULL_TABLE ||
1597 !req_dst_orig_node->tt_buff)
1598 full_table = true;
1599 else
1600 full_table = false;
1601
1602 /* In this version, fragmentation is not implemented, so
1603 * I'll send only one packet with as many TT entries as I can
1604 */
1605 if (!full_table) {
1606 spin_lock_bh(&req_dst_orig_node->tt_buff_lock);
1607 tt_len = req_dst_orig_node->tt_buff_len;
1608 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1609
1610 len = sizeof(*tt_response) + tt_len;
1611 skb = dev_alloc_skb(len + ETH_HLEN);
1612 if (!skb)
1613 goto unlock;
1614
1615 skb_reserve(skb, ETH_HLEN);
1616 packet_pos = skb_put(skb, len);
1617 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1618 tt_response->ttvn = req_ttvn;
1619 tt_response->tt_data = htons(tt_tot);
1620
1621 tt_buff = skb->data + sizeof(*tt_response);
1622 /* Copy the last orig_node's OGM buffer */
1623 memcpy(tt_buff, req_dst_orig_node->tt_buff,
1624 req_dst_orig_node->tt_buff_len);
1625
1626 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1627 } else {
1628 tt_len = (uint16_t)atomic_read(&req_dst_orig_node->tt_size);
1629 tt_len *= sizeof(struct batadv_tt_change);
1630 ttvn = (uint8_t)atomic_read(&req_dst_orig_node->last_ttvn);
1631
1632 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1633 bat_priv->tt.global_hash,
1634 primary_if,
1635 batadv_tt_global_valid,
1636 req_dst_orig_node);
1637 if (!skb)
1638 goto out;
1639
1640 tt_response = (struct batadv_tt_query_packet *)skb->data;
1641 }
1642
1643 tt_response->header.packet_type = BATADV_TT_QUERY;
1644 tt_response->header.version = BATADV_COMPAT_VERSION;
1645 tt_response->header.ttl = BATADV_TTL;
1646 memcpy(tt_response->src, req_dst_orig_node->orig, ETH_ALEN);
1647 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1648 tt_response->flags = BATADV_TT_RESPONSE;
1649
1650 if (full_table)
1651 tt_response->flags |= BATADV_TT_FULL_TABLE;
1652
1653 batadv_dbg(BATADV_DBG_TT, bat_priv,
1654 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1655 res_dst_orig_node->orig, neigh_node->addr,
1656 req_dst_orig_node->orig, req_ttvn);
1657
1658 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1659
1660 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1661 ret = true;
1662 goto out;
1663
1664 unlock:
1665 spin_unlock_bh(&req_dst_orig_node->tt_buff_lock);
1666
1667 out:
1668 if (res_dst_orig_node)
1669 batadv_orig_node_free_ref(res_dst_orig_node);
1670 if (req_dst_orig_node)
1671 batadv_orig_node_free_ref(req_dst_orig_node);
1672 if (neigh_node)
1673 batadv_neigh_node_free_ref(neigh_node);
1674 if (primary_if)
1675 batadv_hardif_free_ref(primary_if);
1676 if (!ret)
1677 kfree_skb(skb);
1678 return ret;
1679
1680 }
1681
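/* answer a TT_REQUEST directed to this node with either the last changeset or
 * the full local table
 */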
1682 static bool
1683 batadv_send_my_tt_response(struct batadv_priv *bat_priv,
1684 struct batadv_tt_query_packet *tt_request)
1685 {
1686 struct batadv_orig_node *orig_node = NULL;
1687 struct batadv_neigh_node *neigh_node = NULL;
1688 struct batadv_hard_iface *primary_if = NULL;
1689 uint8_t my_ttvn, req_ttvn, ttvn;
1690 int ret = false;
1691 unsigned char *tt_buff;
1692 bool full_table;
1693 uint16_t tt_len, tt_tot;
1694 struct sk_buff *skb = NULL;
1695 struct batadv_tt_query_packet *tt_response;
1696 uint8_t *packet_pos;
1697 size_t len;
1698
1699 batadv_dbg(BATADV_DBG_TT, bat_priv,
1700 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1701 tt_request->src, tt_request->ttvn,
1702 (tt_request->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1703
1704
1705 my_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1706 req_ttvn = tt_request->ttvn;
1707
1708 orig_node = batadv_orig_hash_find(bat_priv, tt_request->src);
1709 if (!orig_node)
1710 goto out;
1711
1712 neigh_node = batadv_orig_node_get_router(orig_node);
1713 if (!neigh_node)
1714 goto out;
1715
1716 primary_if = batadv_primary_if_get_selected(bat_priv);
1717 if (!primary_if)
1718 goto out;
1719
1720 /* If the full table has been explicitly requested or the gap
1721 * is too big, send the whole local translation table
1722 */
1723 if (tt_request->flags & BATADV_TT_FULL_TABLE || my_ttvn != req_ttvn ||
1724 !bat_priv->tt.last_changeset)
1725 full_table = true;
1726 else
1727 full_table = false;
1728
1729 /* In this version, fragmentation is not implemented, so
1730 * I'll send only one packet with as many TT entries as I can
1731 */
1732 if (!full_table) {
1733 spin_lock_bh(&bat_priv->tt.last_changeset_lock);
1734 tt_len = bat_priv->tt.last_changeset_len;
1735 tt_tot = tt_len / sizeof(struct batadv_tt_change);
1736
1737 len = sizeof(*tt_response) + tt_len;
1738 skb = dev_alloc_skb(len + ETH_HLEN);
1739 if (!skb)
1740 goto unlock;
1741
1742 skb_reserve(skb, ETH_HLEN);
1743 packet_pos = skb_put(skb, len);
1744 tt_response = (struct batadv_tt_query_packet *)packet_pos;
1745 tt_response->ttvn = req_ttvn;
1746 tt_response->tt_data = htons(tt_tot);
1747
1748 tt_buff = skb->data + sizeof(*tt_response);
1749 memcpy(tt_buff, bat_priv->tt.last_changeset,
1750 bat_priv->tt.last_changeset_len);
1751 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1752 } else {
1753 tt_len = (uint16_t)atomic_read(&bat_priv->tt.local_entry_num);
1754 tt_len *= sizeof(struct batadv_tt_change);
1755 ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn);
1756
1757 skb = batadv_tt_response_fill_table(tt_len, ttvn,
1758 bat_priv->tt.local_hash,
1759 primary_if,
1760 batadv_tt_local_valid_entry,
1761 NULL);
1762 if (!skb)
1763 goto out;
1764
1765 tt_response = (struct batadv_tt_query_packet *)skb->data;
1766 }
1767
1768 tt_response->header.packet_type = BATADV_TT_QUERY;
1769 tt_response->header.version = BATADV_COMPAT_VERSION;
1770 tt_response->header.ttl = BATADV_TTL;
1771 memcpy(tt_response->src, primary_if->net_dev->dev_addr, ETH_ALEN);
1772 memcpy(tt_response->dst, tt_request->src, ETH_ALEN);
1773 tt_response->flags = BATADV_TT_RESPONSE;
1774
1775 if (full_table)
1776 tt_response->flags |= BATADV_TT_FULL_TABLE;
1777
1778 batadv_dbg(BATADV_DBG_TT, bat_priv,
1779 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1780 orig_node->orig, neigh_node->addr,
1781 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1782
1783 batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_TX);
1784
1785 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
1786 ret = true;
1787 goto out;
1788
1789 unlock:
1790 spin_unlock_bh(&bat_priv->tt.last_changeset_lock);
1791 out:
1792 if (orig_node)
1793 batadv_orig_node_free_ref(orig_node);
1794 if (neigh_node)
1795 batadv_neigh_node_free_ref(neigh_node);
1796 if (primary_if)
1797 batadv_hardif_free_ref(primary_if);
1798 if (!ret)
1799 kfree_skb(skb);
1800 /* This packet was for me, so it doesn't need to be re-routed */
1801 return true;
1802 }
1803
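/* entry point for incoming TT_REQUESTs: dispatch to the local or to the
 * proxied response path
 */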
1804 bool batadv_send_tt_response(struct batadv_priv *bat_priv,
1805 struct batadv_tt_query_packet *tt_request)
1806 {
1807 if (batadv_is_my_mac(tt_request->dst)) {
1808 /* don't answer backbone gws! */
1809 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))
1810 return true;
1811
1812 return batadv_send_my_tt_response(bat_priv, tt_request);
1813 } else {
1814 return batadv_send_other_tt_response(bat_priv, tt_request);
1815 }
1816 }
1817
1818 static void _batadv_tt_update_changes(struct batadv_priv *bat_priv,
1819 struct batadv_orig_node *orig_node,
1820 struct batadv_tt_change *tt_change,
1821 uint16_t tt_num_changes, uint8_t ttvn)
1822 {
1823 int i;
1824 int roams;
1825
1826 for (i = 0; i < tt_num_changes; i++) {
1827 if ((tt_change + i)->flags & BATADV_TT_CLIENT_DEL) {
1828 roams = (tt_change + i)->flags & BATADV_TT_CLIENT_ROAM;
1829 batadv_tt_global_del(bat_priv, orig_node,
1830 (tt_change + i)->addr,
1831 "tt removed by changes",
1832 roams);
1833 } else {
1834 if (!batadv_tt_global_add(bat_priv, orig_node,
1835 (tt_change + i)->addr,
1836 (tt_change + i)->flags, ttvn))
1837 				/* In case of a problem while storing a
1838 				 * global_entry, stop the update procedure
1839 				 * without committing the ttvn change. This
1840 				 * avoids sending corrupted data in a
1841 				 * tt_request
1842 				 */
1843 return;
1844 }
1845 }
1846 orig_node->tt_initialised = true;
1847 }
1848
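/* replace the whole global table of the originator that sent a full
 * TT_RESPONSE: purge its old entries, apply the received ones, drop any
 * buffered changes and update last_ttvn
 */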
1849 static void batadv_tt_fill_gtable(struct batadv_priv *bat_priv,
1850 struct batadv_tt_query_packet *tt_response)
1851 {
1852 struct batadv_orig_node *orig_node = NULL;
1853
1854 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1855 if (!orig_node)
1856 goto out;
1857
1858 	/* Purge the old table first. */
1859 batadv_tt_global_del_orig(bat_priv, orig_node, "Received full table");
1860
1861 _batadv_tt_update_changes(bat_priv, orig_node,
1862 (struct batadv_tt_change *)(tt_response + 1),
1863 ntohs(tt_response->tt_data),
1864 tt_response->ttvn);
1865
1866 spin_lock_bh(&orig_node->tt_buff_lock);
1867 kfree(orig_node->tt_buff);
1868 orig_node->tt_buff_len = 0;
1869 orig_node->tt_buff = NULL;
1870 spin_unlock_bh(&orig_node->tt_buff_lock);
1871
1872 atomic_set(&orig_node->last_ttvn, tt_response->ttvn);
1873
1874 out:
1875 if (orig_node)
1876 batadv_orig_node_free_ref(orig_node);
1877 }
1878
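/* apply a partial set of TT changes to the global table, save the change
 * buffer in the orig_node via batadv_tt_save_orig_buffer() and update the
 * originator's last_ttvn
 */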
1879 static void batadv_tt_update_changes(struct batadv_priv *bat_priv,
1880 struct batadv_orig_node *orig_node,
1881 uint16_t tt_num_changes, uint8_t ttvn,
1882 struct batadv_tt_change *tt_change)
1883 {
1884 _batadv_tt_update_changes(bat_priv, orig_node, tt_change,
1885 tt_num_changes, ttvn);
1886
1887 batadv_tt_save_orig_buffer(bat_priv, orig_node,
1888 (unsigned char *)tt_change, tt_num_changes);
1889 atomic_set(&orig_node->last_ttvn, ttvn);
1890 }
1891
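/* returns true if addr is a client announced by this node and not marked as
 * logically deleted (BATADV_TT_CLIENT_PENDING)
 */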
1892 bool batadv_is_my_client(struct batadv_priv *bat_priv, const uint8_t *addr)
1893 {
1894 struct batadv_tt_local_entry *tt_local_entry = NULL;
1895 bool ret = false;
1896
1897 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr);
1898 if (!tt_local_entry)
1899 goto out;
1900 	/* Check if the client has been logically deleted (but is kept for
1901 	 * consistency purposes)
1902 	 */
1903 if (tt_local_entry->common.flags & BATADV_TT_CLIENT_PENDING)
1904 goto out;
1905 ret = true;
1906 out:
1907 if (tt_local_entry)
1908 batadv_tt_local_entry_free_ref(tt_local_entry);
1909 return ret;
1910 }
1911
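/* process an incoming TT_RESPONSE: apply either the full table or the diff,
 * remove the matching entry from the pending request list, recompute the
 * CRC of the originator's global table and clear tt_poss_change since the
 * tables are in sync again
 */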
1912 void batadv_handle_tt_response(struct batadv_priv *bat_priv,
1913 struct batadv_tt_query_packet *tt_response)
1914 {
1915 struct batadv_tt_req_node *node, *safe;
1916 struct batadv_orig_node *orig_node = NULL;
1917 struct batadv_tt_change *tt_change;
1918
1919 batadv_dbg(BATADV_DBG_TT, bat_priv,
1920 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1921 tt_response->src, tt_response->ttvn,
1922 ntohs(tt_response->tt_data),
1923 (tt_response->flags & BATADV_TT_FULL_TABLE ? 'F' : '.'));
1924
1925 	/* we should never have asked a backbone gw */
1926 if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_response->src))
1927 goto out;
1928
1929 orig_node = batadv_orig_hash_find(bat_priv, tt_response->src);
1930 if (!orig_node)
1931 goto out;
1932
1933 if (tt_response->flags & BATADV_TT_FULL_TABLE) {
1934 batadv_tt_fill_gtable(bat_priv, tt_response);
1935 } else {
1936 tt_change = (struct batadv_tt_change *)(tt_response + 1);
1937 batadv_tt_update_changes(bat_priv, orig_node,
1938 ntohs(tt_response->tt_data),
1939 tt_response->ttvn, tt_change);
1940 }
1941
1942 /* Delete the tt_req_node from pending tt_requests list */
1943 spin_lock_bh(&bat_priv->tt.req_list_lock);
1944 list_for_each_entry_safe(node, safe, &bat_priv->tt.req_list, list) {
1945 if (!batadv_compare_eth(node->addr, tt_response->src))
1946 continue;
1947 list_del(&node->list);
1948 kfree(node);
1949 }
1950 spin_unlock_bh(&bat_priv->tt.req_list_lock);
1951
1952 /* Recalculate the CRC for this orig_node and store it */
1953 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
1954 /* Roaming phase is over: tables are in sync again. I can
1955 * unset the flag
1956 */
1957 orig_node->tt_poss_change = false;
1958 out:
1959 if (orig_node)
1960 batadv_orig_node_free_ref(orig_node);
1961 }
1962
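/* initialise the local and global translation tables and start the periodic
 * purge timer; returns 1 on success, a negative value otherwise
 */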
1963 int batadv_tt_init(struct batadv_priv *bat_priv)
1964 {
1965 int ret;
1966
1967 ret = batadv_tt_local_init(bat_priv);
1968 if (ret < 0)
1969 return ret;
1970
1971 ret = batadv_tt_global_init(bat_priv);
1972 if (ret < 0)
1973 return ret;
1974
1975 batadv_tt_start_timer(bat_priv);
1976
1977 return 1;
1978 }
1979
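/* free all entries of the roaming list */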
1980 static void batadv_tt_roam_list_free(struct batadv_priv *bat_priv)
1981 {
1982 struct batadv_tt_roam_node *node, *safe;
1983
1984 spin_lock_bh(&bat_priv->tt.roam_list_lock);
1985
1986 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
1987 list_del(&node->list);
1988 kfree(node);
1989 }
1990
1991 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
1992 }
1993
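/* remove all roaming list entries older than BATADV_ROAMING_MAX_TIME */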
1994 static void batadv_tt_roam_purge(struct batadv_priv *bat_priv)
1995 {
1996 struct batadv_tt_roam_node *node, *safe;
1997
1998 spin_lock_bh(&bat_priv->tt.roam_list_lock);
1999 list_for_each_entry_safe(node, safe, &bat_priv->tt.roam_list, list) {
2000 if (!batadv_has_timed_out(node->first_time,
2001 BATADV_ROAMING_MAX_TIME))
2002 continue;
2003
2004 list_del(&node->list);
2005 kfree(node);
2006 }
2007 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2008 }
2009
2010 /* This function checks whether the client has already reached the
2011  * maximum number of possible roaming phases. If so, the ROAMING_ADV
2012  * will not be sent.
2013  *
2014  * returns true if the ROAMING_ADV can be sent, false otherwise
2015  */
2016 static bool batadv_tt_check_roam_count(struct batadv_priv *bat_priv,
2017 uint8_t *client)
2018 {
2019 struct batadv_tt_roam_node *tt_roam_node;
2020 bool ret = false;
2021
2022 spin_lock_bh(&bat_priv->tt.roam_list_lock);
2023 	/* Check whether the client is already in the roaming list and, if so,
2024 	 * whether its roaming budget for the current window is exhausted
2025 	 */
2026 list_for_each_entry(tt_roam_node, &bat_priv->tt.roam_list, list) {
2027 if (!batadv_compare_eth(tt_roam_node->addr, client))
2028 continue;
2029
2030 if (batadv_has_timed_out(tt_roam_node->first_time,
2031 BATADV_ROAMING_MAX_TIME))
2032 continue;
2033
2034 if (!batadv_atomic_dec_not_zero(&tt_roam_node->counter))
2035 /* Sorry, you roamed too many times! */
2036 goto unlock;
2037 ret = true;
2038 break;
2039 }
2040
2041 if (!ret) {
2042 tt_roam_node = kmalloc(sizeof(*tt_roam_node), GFP_ATOMIC);
2043 if (!tt_roam_node)
2044 goto unlock;
2045
2046 tt_roam_node->first_time = jiffies;
2047 atomic_set(&tt_roam_node->counter,
2048 BATADV_ROAMING_MAX_COUNT - 1);
2049 memcpy(tt_roam_node->addr, client, ETH_ALEN);
2050
2051 list_add(&tt_roam_node->list, &bat_priv->tt.roam_list);
2052 ret = true;
2053 }
2054
2055 unlock:
2056 spin_unlock_bh(&bat_priv->tt.roam_list_lock);
2057 return ret;
2058 }
2059
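/* send a ROAMING_ADV packet announcing client to orig_node, unless the
 * client has already roamed too many times (see
 * batadv_tt_check_roam_count())
 */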
2060 static void batadv_send_roam_adv(struct batadv_priv *bat_priv, uint8_t *client,
2061 struct batadv_orig_node *orig_node)
2062 {
2063 struct batadv_neigh_node *neigh_node = NULL;
2064 struct sk_buff *skb = NULL;
2065 struct batadv_roam_adv_packet *roam_adv_packet;
2066 int ret = 1;
2067 struct batadv_hard_iface *primary_if;
2068 size_t len = sizeof(*roam_adv_packet);
2069
2070 /* before going on we have to check whether the client has
2071 * already roamed to us too many times
2072 */
2073 if (!batadv_tt_check_roam_count(bat_priv, client))
2074 goto out;
2075
2076 skb = dev_alloc_skb(sizeof(*roam_adv_packet) + ETH_HLEN);
2077 if (!skb)
2078 goto out;
2079
2080 skb_reserve(skb, ETH_HLEN);
2081
2082 roam_adv_packet = (struct batadv_roam_adv_packet *)skb_put(skb, len);
2083
2084 roam_adv_packet->header.packet_type = BATADV_ROAM_ADV;
2085 roam_adv_packet->header.version = BATADV_COMPAT_VERSION;
2086 roam_adv_packet->header.ttl = BATADV_TTL;
2087 roam_adv_packet->reserved = 0;
2088 primary_if = batadv_primary_if_get_selected(bat_priv);
2089 if (!primary_if)
2090 goto out;
2091 memcpy(roam_adv_packet->src, primary_if->net_dev->dev_addr, ETH_ALEN);
2092 batadv_hardif_free_ref(primary_if);
2093 memcpy(roam_adv_packet->dst, orig_node->orig, ETH_ALEN);
2094 memcpy(roam_adv_packet->client, client, ETH_ALEN);
2095
2096 neigh_node = batadv_orig_node_get_router(orig_node);
2097 if (!neigh_node)
2098 goto out;
2099
2100 batadv_dbg(BATADV_DBG_TT, bat_priv,
2101 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
2102 orig_node->orig, client, neigh_node->addr);
2103
2104 batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_TX);
2105
2106 batadv_send_skb_packet(skb, neigh_node->if_incoming, neigh_node->addr);
2107 ret = 0;
2108
2109 out:
2110 if (neigh_node)
2111 batadv_neigh_node_free_ref(neigh_node);
2112 if (ret)
2113 kfree_skb(skb);
2114 return;
2115 }
2116
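/* periodic worker: purge timed out local entries, global entries marked as
 * roaming, pending TT requests and the roaming list, then re-arm the timer
 */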
2117 static void batadv_tt_purge(struct work_struct *work)
2118 {
2119 struct delayed_work *delayed_work;
2120 struct batadv_priv_tt *priv_tt;
2121 struct batadv_priv *bat_priv;
2122
2123 delayed_work = container_of(work, struct delayed_work, work);
2124 priv_tt = container_of(delayed_work, struct batadv_priv_tt, work);
2125 bat_priv = container_of(priv_tt, struct batadv_priv, tt);
2126
2127 batadv_tt_local_purge(bat_priv);
2128 batadv_tt_global_roam_purge(bat_priv);
2129 batadv_tt_req_purge(bat_priv);
2130 batadv_tt_roam_purge(bat_priv);
2131
2132 batadv_tt_start_timer(bat_priv);
2133 }
2134
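/* shut down the translation table subsystem: cancel the purge worker and
 * free the local and global tables, all pending lists and the last local
 * changeset
 */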
2135 void batadv_tt_free(struct batadv_priv *bat_priv)
2136 {
2137 cancel_delayed_work_sync(&bat_priv->tt.work);
2138
2139 batadv_tt_local_table_free(bat_priv);
2140 batadv_tt_global_table_free(bat_priv);
2141 batadv_tt_req_list_free(bat_priv);
2142 batadv_tt_changes_list_free(bat_priv);
2143 batadv_tt_roam_list_free(bat_priv);
2144
2145 kfree(bat_priv->tt.last_changeset);
2146 }
2147
2148 /* This function enables or disables the specified flags for all the entries
2149  * in the given hash table and returns the number of modified entries
2150  */
2151 static uint16_t batadv_tt_set_flags(struct batadv_hashtable *hash,
2152 uint16_t flags, bool enable)
2153 {
2154 uint32_t i;
2155 uint16_t changed_num = 0;
2156 struct hlist_head *head;
2157 struct hlist_node *node;
2158 struct batadv_tt_common_entry *tt_common_entry;
2159
2160 if (!hash)
2161 goto out;
2162
2163 for (i = 0; i < hash->size; i++) {
2164 head = &hash->table[i];
2165
2166 rcu_read_lock();
2167 hlist_for_each_entry_rcu(tt_common_entry, node,
2168 head, hash_entry) {
2169 if (enable) {
2170 if ((tt_common_entry->flags & flags) == flags)
2171 continue;
2172 tt_common_entry->flags |= flags;
2173 } else {
2174 if (!(tt_common_entry->flags & flags))
2175 continue;
2176 tt_common_entry->flags &= ~flags;
2177 }
2178 changed_num++;
2179 }
2180 rcu_read_unlock();
2181 }
2182 out:
2183 return changed_num;
2184 }
2185
2186 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
2187 static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
2188 {
2189 struct batadv_hashtable *hash = bat_priv->tt.local_hash;
2190 struct batadv_tt_common_entry *tt_common;
2191 struct batadv_tt_local_entry *tt_local;
2192 struct hlist_node *node, *node_tmp;
2193 struct hlist_head *head;
2194 spinlock_t *list_lock; /* protects write access to the hash lists */
2195 uint32_t i;
2196
2197 if (!hash)
2198 return;
2199
2200 for (i = 0; i < hash->size; i++) {
2201 head = &hash->table[i];
2202 list_lock = &hash->list_locks[i];
2203
2204 spin_lock_bh(list_lock);
2205 hlist_for_each_entry_safe(tt_common, node, node_tmp, head,
2206 hash_entry) {
2207 if (!(tt_common->flags & BATADV_TT_CLIENT_PENDING))
2208 continue;
2209
2210 batadv_dbg(BATADV_DBG_TT, bat_priv,
2211 "Deleting local tt entry (%pM): pending\n",
2212 tt_common->addr);
2213
2214 atomic_dec(&bat_priv->tt.local_entry_num);
2215 hlist_del_rcu(node);
2216 tt_local = container_of(tt_common,
2217 struct batadv_tt_local_entry,
2218 common);
2219 batadv_tt_local_entry_free_ref(tt_local);
2220 }
2221 spin_unlock_bh(list_lock);
2222 }
2223
2224 }
2225
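/* commit the pending local TT changes: clear the NEW flag on all entries,
 * purge the ones marked as PENDING, recompute the local CRC, increment the
 * ttvn and fill packet_buff with the changes; returns -ENOENT if no change
 * is pending
 */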
2226 static int batadv_tt_commit_changes(struct batadv_priv *bat_priv,
2227 unsigned char **packet_buff,
2228 int *packet_buff_len, int packet_min_len)
2229 {
2230 uint16_t changed_num = 0;
2231
2232 if (atomic_read(&bat_priv->tt.local_changes) < 1)
2233 return -ENOENT;
2234
2235 changed_num = batadv_tt_set_flags(bat_priv->tt.local_hash,
2236 BATADV_TT_CLIENT_NEW, false);
2237
2238 /* all reset entries have to be counted as local entries */
2239 atomic_add(changed_num, &bat_priv->tt.local_entry_num);
2240 batadv_tt_local_purge_pending_clients(bat_priv);
2241 bat_priv->tt.local_crc = batadv_tt_local_crc(bat_priv);
2242
2243 /* Increment the TTVN only once per OGM interval */
2244 atomic_inc(&bat_priv->tt.vn);
2245 batadv_dbg(BATADV_DBG_TT, bat_priv,
2246 "Local changes committed, updating to ttvn %u\n",
2247 (uint8_t)atomic_read(&bat_priv->tt.vn));
2248 bat_priv->tt.poss_change = false;
2249
2250 /* reset the sending counter */
2251 atomic_set(&bat_priv->tt.ogm_append_cnt, BATADV_TT_OGM_APPEND_MAX);
2252
2253 return batadv_tt_changes_fill_buff(bat_priv, packet_buff,
2254 packet_buff_len, packet_min_len);
2255 }
2256
2257 /* when calling this function, (hard_iface == primary_if) has to be true */
2258 int batadv_tt_append_diff(struct batadv_priv *bat_priv,
2259 unsigned char **packet_buff, int *packet_buff_len,
2260 int packet_min_len)
2261 {
2262 int tt_num_changes;
2263
2264 /* if at least one change happened */
2265 tt_num_changes = batadv_tt_commit_changes(bat_priv, packet_buff,
2266 packet_buff_len,
2267 packet_min_len);
2268
2269 /* if the changes have been sent often enough */
2270 if ((tt_num_changes < 0) &&
2271 (!batadv_atomic_dec_not_zero(&bat_priv->tt.ogm_append_cnt))) {
2272 batadv_tt_realloc_packet_buff(packet_buff, packet_buff_len,
2273 packet_min_len, packet_min_len);
2274 tt_num_changes = 0;
2275 }
2276
2277 return tt_num_changes;
2278 }
2279
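/* returns true if AP isolation is enabled and traffic between src (looked up
 * in the global table) and dst (looked up in the local table) has to be
 * isolated
 */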
2280 bool batadv_is_ap_isolated(struct batadv_priv *bat_priv, uint8_t *src,
2281 uint8_t *dst)
2282 {
2283 struct batadv_tt_local_entry *tt_local_entry = NULL;
2284 struct batadv_tt_global_entry *tt_global_entry = NULL;
2285 bool ret = false;
2286
2287 if (!atomic_read(&bat_priv->ap_isolation))
2288 goto out;
2289
2290 tt_local_entry = batadv_tt_local_hash_find(bat_priv, dst);
2291 if (!tt_local_entry)
2292 goto out;
2293
2294 tt_global_entry = batadv_tt_global_hash_find(bat_priv, src);
2295 if (!tt_global_entry)
2296 goto out;
2297
2298 if (!_batadv_is_ap_isolated(tt_local_entry, tt_global_entry))
2299 goto out;
2300
2301 ret = true;
2302
2303 out:
2304 if (tt_global_entry)
2305 batadv_tt_global_entry_free_ref(tt_global_entry);
2306 if (tt_local_entry)
2307 batadv_tt_local_entry_free_ref(tt_local_entry);
2308 return ret;
2309 }
2310
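/* evaluate the TT metadata (ttvn, crc and attached changes) carried by an
 * OGM from orig_node: apply the attached diff when the ttvn increased by
 * exactly one, verify the resulting CRC, and fall back to a TT_REQUEST
 * whenever the tables turn out to be out of sync
 */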
2311 void batadv_tt_update_orig(struct batadv_priv *bat_priv,
2312 struct batadv_orig_node *orig_node,
2313 const unsigned char *tt_buff, uint8_t tt_num_changes,
2314 uint8_t ttvn, uint16_t tt_crc)
2315 {
2316 uint8_t orig_ttvn = (uint8_t)atomic_read(&orig_node->last_ttvn);
2317 bool full_table = true;
2318 struct batadv_tt_change *tt_change;
2319
2320 	/* don't care about a backbone gateway's updates. */
2321 if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig))
2322 return;
2323
2324 /* orig table not initialised AND first diff is in the OGM OR the ttvn
2325 * increased by one -> we can apply the attached changes
2326 */
2327 if ((!orig_node->tt_initialised && ttvn == 1) ||
2328 ttvn - orig_ttvn == 1) {
2329 		/* the OGM may not have contained the changes because of their
2330 		 * size or because they have already been sent
2331 		 * BATADV_TT_OGM_APPEND_MAX times.
2332 		 * In this case, send a tt request
2333 		 */
2334 if (!tt_num_changes) {
2335 full_table = false;
2336 goto request_table;
2337 }
2338
2339 tt_change = (struct batadv_tt_change *)tt_buff;
2340 batadv_tt_update_changes(bat_priv, orig_node, tt_num_changes,
2341 ttvn, tt_change);
2342
2343 /* Even if we received the precomputed crc with the OGM, we
2344 * prefer to recompute it to spot any possible inconsistency
2345 * in the global table
2346 */
2347 orig_node->tt_crc = batadv_tt_global_crc(bat_priv, orig_node);
2348
2349 /* The ttvn alone is not enough to guarantee consistency
2350 * because a single value could represent different states
2351 * (due to the wrap around). Thus a node has to check whether
2352 * the resulting table (after applying the changes) is still
2353 * consistent or not. E.g. a node could disconnect while its
2354 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2355 * checking the CRC value is mandatory to detect the
2356 * inconsistency
2357 */
2358 if (orig_node->tt_crc != tt_crc)
2359 goto request_table;
2360
2361 /* Roaming phase is over: tables are in sync again. I can
2362 * unset the flag
2363 */
2364 orig_node->tt_poss_change = false;
2365 } else {
2366 /* if we missed more than one change or our tables are not
2367 * in sync anymore -> request fresh tt data
2368 */
2369 if (!orig_node->tt_initialised || ttvn != orig_ttvn ||
2370 orig_node->tt_crc != tt_crc) {
2371 request_table:
2372 batadv_dbg(BATADV_DBG_TT, bat_priv,
2373 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2374 orig_node->orig, ttvn, orig_ttvn, tt_crc,
2375 orig_node->tt_crc, tt_num_changes);
2376 batadv_send_tt_request(bat_priv, orig_node, ttvn,
2377 tt_crc, full_table);
2378 return;
2379 }
2380 }
2381 }
2382
2383 /* returns true if we know that the client has moved from its old
2384  * originator to another one. The entry is still kept for consistency
2385  * purposes
2386  */
2387 bool batadv_tt_global_client_is_roaming(struct batadv_priv *bat_priv,
2388 uint8_t *addr)
2389 {
2390 struct batadv_tt_global_entry *tt_global_entry;
2391 bool ret = false;
2392
2393 tt_global_entry = batadv_tt_global_hash_find(bat_priv, addr);
2394 if (!tt_global_entry)
2395 goto out;
2396
2397 ret = tt_global_entry->common.flags & BATADV_TT_CLIENT_ROAM;
2398 batadv_tt_global_entry_free_ref(tt_global_entry);
2399 out:
2400 return ret;
2401 }