1 /* Copyright (C) 2007-2012 B.A.T.M.A.N. contributors:
3 * Marek Lindner, Simon Wunderlich, Antonio Quartulli
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of version 2 of the GNU General Public
7 * License as published by the Free Software Foundation.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12 * General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 #include "translation-table.h"
22 #include "soft-interface.h"
23 #include "hard-interface.h"
26 #include "originator.h"
28 #include "bridge_loop_avoidance.h"
30 #include <linux/crc16.h>
32 static void batadv_send_roam_adv(struct batadv_priv
*bat_priv
, uint8_t *client
,
33 struct batadv_orig_node
*orig_node
);
34 static void batadv_tt_purge(struct work_struct
*work
);
36 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry
*tt_global_entry
);
38 /* returns 1 if they are the same mac addr */
39 static int batadv_compare_tt(const struct hlist_node
*node
, const void *data2
)
41 const void *data1
= container_of(node
, struct batadv_tt_common_entry
,
44 return (memcmp(data1
, data2
, ETH_ALEN
) == 0 ? 1 : 0);
47 static void batadv_tt_start_timer(struct batadv_priv
*bat_priv
)
49 INIT_DELAYED_WORK(&bat_priv
->tt
.work
, batadv_tt_purge
);
50 queue_delayed_work(batadv_event_workqueue
, &bat_priv
->tt
.work
,
51 msecs_to_jiffies(5000));
54 static struct batadv_tt_common_entry
*
55 batadv_tt_hash_find(struct batadv_hashtable
*hash
, const void *data
)
57 struct hlist_head
*head
;
58 struct hlist_node
*node
;
59 struct batadv_tt_common_entry
*tt_common_entry
;
60 struct batadv_tt_common_entry
*tt_common_entry_tmp
= NULL
;
66 index
= batadv_choose_orig(data
, hash
->size
);
67 head
= &hash
->table
[index
];
70 hlist_for_each_entry_rcu(tt_common_entry
, node
, head
, hash_entry
) {
71 if (!batadv_compare_eth(tt_common_entry
, data
))
74 if (!atomic_inc_not_zero(&tt_common_entry
->refcount
))
77 tt_common_entry_tmp
= tt_common_entry
;
82 return tt_common_entry_tmp
;
85 static struct batadv_tt_local_entry
*
86 batadv_tt_local_hash_find(struct batadv_priv
*bat_priv
, const void *data
)
88 struct batadv_tt_common_entry
*tt_common_entry
;
89 struct batadv_tt_local_entry
*tt_local_entry
= NULL
;
91 tt_common_entry
= batadv_tt_hash_find(bat_priv
->tt
.local_hash
, data
);
93 tt_local_entry
= container_of(tt_common_entry
,
94 struct batadv_tt_local_entry
,
96 return tt_local_entry
;
99 static struct batadv_tt_global_entry
*
100 batadv_tt_global_hash_find(struct batadv_priv
*bat_priv
, const void *data
)
102 struct batadv_tt_common_entry
*tt_common_entry
;
103 struct batadv_tt_global_entry
*tt_global_entry
= NULL
;
105 tt_common_entry
= batadv_tt_hash_find(bat_priv
->tt
.global_hash
, data
);
107 tt_global_entry
= container_of(tt_common_entry
,
108 struct batadv_tt_global_entry
,
110 return tt_global_entry
;
115 batadv_tt_local_entry_free_ref(struct batadv_tt_local_entry
*tt_local_entry
)
117 if (atomic_dec_and_test(&tt_local_entry
->common
.refcount
))
118 kfree_rcu(tt_local_entry
, common
.rcu
);
121 static void batadv_tt_global_entry_free_rcu(struct rcu_head
*rcu
)
123 struct batadv_tt_common_entry
*tt_common_entry
;
124 struct batadv_tt_global_entry
*tt_global_entry
;
126 tt_common_entry
= container_of(rcu
, struct batadv_tt_common_entry
, rcu
);
127 tt_global_entry
= container_of(tt_common_entry
,
128 struct batadv_tt_global_entry
, common
);
130 kfree(tt_global_entry
);
134 batadv_tt_global_entry_free_ref(struct batadv_tt_global_entry
*tt_global_entry
)
136 if (atomic_dec_and_test(&tt_global_entry
->common
.refcount
)) {
137 batadv_tt_global_del_orig_list(tt_global_entry
);
138 call_rcu(&tt_global_entry
->common
.rcu
,
139 batadv_tt_global_entry_free_rcu
);
143 static void batadv_tt_orig_list_entry_free_rcu(struct rcu_head
*rcu
)
145 struct batadv_tt_orig_list_entry
*orig_entry
;
147 orig_entry
= container_of(rcu
, struct batadv_tt_orig_list_entry
, rcu
);
148 batadv_orig_node_free_ref(orig_entry
->orig_node
);
153 batadv_tt_orig_list_entry_free_ref(struct batadv_tt_orig_list_entry
*orig_entry
)
155 if (!atomic_dec_and_test(&orig_entry
->refcount
))
157 /* to avoid race conditions, immediately decrease the tt counter */
158 atomic_dec(&orig_entry
->orig_node
->tt_size
);
159 call_rcu(&orig_entry
->rcu
, batadv_tt_orig_list_entry_free_rcu
);
162 static void batadv_tt_local_event(struct batadv_priv
*bat_priv
,
163 const uint8_t *addr
, uint8_t flags
)
165 struct batadv_tt_change_node
*tt_change_node
, *entry
, *safe
;
166 bool event_removed
= false;
167 bool del_op_requested
, del_op_entry
;
169 tt_change_node
= kmalloc(sizeof(*tt_change_node
), GFP_ATOMIC
);
174 tt_change_node
->change
.flags
= flags
;
175 memcpy(tt_change_node
->change
.addr
, addr
, ETH_ALEN
);
177 del_op_requested
= flags
& BATADV_TT_CLIENT_DEL
;
179 /* check for ADD+DEL or DEL+ADD events */
180 spin_lock_bh(&bat_priv
->tt
.changes_list_lock
);
181 list_for_each_entry_safe(entry
, safe
, &bat_priv
->tt
.changes_list
,
183 if (!batadv_compare_eth(entry
->change
.addr
, addr
))
186 /* DEL+ADD in the same orig interval have no effect and can be
187 * removed to avoid silly behaviour on the receiver side. The
188 * other way around (ADD+DEL) can happen in case of roaming of
189 * a client still in the NEW state. Roaming of NEW clients is
190 * now possible due to automatically recognition of "temporary"
193 del_op_entry
= entry
->change
.flags
& BATADV_TT_CLIENT_DEL
;
194 if (!del_op_requested
&& del_op_entry
)
196 if (del_op_requested
&& !del_op_entry
)
200 list_del(&entry
->list
);
202 kfree(tt_change_node
);
203 event_removed
= true;
207 /* track the change in the OGMinterval list */
208 list_add_tail(&tt_change_node
->list
, &bat_priv
->tt
.changes_list
);
211 spin_unlock_bh(&bat_priv
->tt
.changes_list_lock
);
214 atomic_dec(&bat_priv
->tt
.local_changes
);
216 atomic_inc(&bat_priv
->tt
.local_changes
);
219 int batadv_tt_len(int changes_num
)
221 return changes_num
* sizeof(struct batadv_tt_change
);
224 static int batadv_tt_local_init(struct batadv_priv
*bat_priv
)
226 if (bat_priv
->tt
.local_hash
)
229 bat_priv
->tt
.local_hash
= batadv_hash_new(1024);
231 if (!bat_priv
->tt
.local_hash
)
237 void batadv_tt_local_add(struct net_device
*soft_iface
, const uint8_t *addr
,
240 struct batadv_priv
*bat_priv
= netdev_priv(soft_iface
);
241 struct batadv_tt_local_entry
*tt_local_entry
= NULL
;
242 struct batadv_tt_global_entry
*tt_global_entry
= NULL
;
243 struct hlist_head
*head
;
244 struct hlist_node
*node
;
245 struct batadv_tt_orig_list_entry
*orig_entry
;
248 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, addr
);
250 if (tt_local_entry
) {
251 tt_local_entry
->last_seen
= jiffies
;
252 /* possibly unset the BATADV_TT_CLIENT_PENDING flag */
253 tt_local_entry
->common
.flags
&= ~BATADV_TT_CLIENT_PENDING
;
257 tt_local_entry
= kmalloc(sizeof(*tt_local_entry
), GFP_ATOMIC
);
261 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
262 "Creating new local tt entry: %pM (ttvn: %d)\n", addr
,
263 (uint8_t)atomic_read(&bat_priv
->tt
.vn
));
265 memcpy(tt_local_entry
->common
.addr
, addr
, ETH_ALEN
);
266 tt_local_entry
->common
.flags
= BATADV_NO_FLAGS
;
267 if (batadv_is_wifi_iface(ifindex
))
268 tt_local_entry
->common
.flags
|= BATADV_TT_CLIENT_WIFI
;
269 atomic_set(&tt_local_entry
->common
.refcount
, 2);
270 tt_local_entry
->last_seen
= jiffies
;
272 /* the batman interface mac address should never be purged */
273 if (batadv_compare_eth(addr
, soft_iface
->dev_addr
))
274 tt_local_entry
->common
.flags
|= BATADV_TT_CLIENT_NOPURGE
;
276 /* The local entry has to be marked as NEW to avoid to send it in
277 * a full table response going out before the next ttvn increment
278 * (consistency check)
280 tt_local_entry
->common
.flags
|= BATADV_TT_CLIENT_NEW
;
282 hash_added
= batadv_hash_add(bat_priv
->tt
.local_hash
, batadv_compare_tt
,
284 &tt_local_entry
->common
,
285 &tt_local_entry
->common
.hash_entry
);
287 if (unlikely(hash_added
!= 0)) {
288 /* remove the reference for the hash */
289 batadv_tt_local_entry_free_ref(tt_local_entry
);
293 batadv_tt_local_event(bat_priv
, addr
, tt_local_entry
->common
.flags
);
295 /* remove address from global hash if present */
296 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
298 /* Check whether it is a roaming! */
299 if (tt_global_entry
) {
300 /* These node are probably going to update their tt table */
301 head
= &tt_global_entry
->orig_list
;
303 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
304 orig_entry
->orig_node
->tt_poss_change
= true;
306 batadv_send_roam_adv(bat_priv
,
307 tt_global_entry
->common
.addr
,
308 orig_entry
->orig_node
);
311 /* The global entry has to be marked as ROAMING and
312 * has to be kept for consistency purpose
314 tt_global_entry
->common
.flags
|= BATADV_TT_CLIENT_ROAM
;
315 tt_global_entry
->roam_at
= jiffies
;
319 batadv_tt_local_entry_free_ref(tt_local_entry
);
321 batadv_tt_global_entry_free_ref(tt_global_entry
);
324 static void batadv_tt_realloc_packet_buff(unsigned char **packet_buff
,
325 int *packet_buff_len
,
329 unsigned char *new_buff
;
331 new_buff
= kmalloc(new_packet_len
, GFP_ATOMIC
);
333 /* keep old buffer if kmalloc should fail */
335 memcpy(new_buff
, *packet_buff
, min_packet_len
);
337 *packet_buff
= new_buff
;
338 *packet_buff_len
= new_packet_len
;
342 static void batadv_tt_prepare_packet_buff(struct batadv_priv
*bat_priv
,
343 unsigned char **packet_buff
,
344 int *packet_buff_len
,
347 struct batadv_hard_iface
*primary_if
;
350 primary_if
= batadv_primary_if_get_selected(bat_priv
);
352 req_len
= min_packet_len
;
353 req_len
+= batadv_tt_len(atomic_read(&bat_priv
->tt
.local_changes
));
355 /* if we have too many changes for one packet don't send any
356 * and wait for the tt table request which will be fragmented
358 if ((!primary_if
) || (req_len
> primary_if
->soft_iface
->mtu
))
359 req_len
= min_packet_len
;
361 batadv_tt_realloc_packet_buff(packet_buff
, packet_buff_len
,
362 min_packet_len
, req_len
);
365 batadv_hardif_free_ref(primary_if
);
368 static int batadv_tt_changes_fill_buff(struct batadv_priv
*bat_priv
,
369 unsigned char **packet_buff
,
370 int *packet_buff_len
,
373 struct batadv_tt_change_node
*entry
, *safe
;
374 int count
= 0, tot_changes
= 0, new_len
;
375 unsigned char *tt_buff
;
377 batadv_tt_prepare_packet_buff(bat_priv
, packet_buff
,
378 packet_buff_len
, min_packet_len
);
380 new_len
= *packet_buff_len
- min_packet_len
;
381 tt_buff
= *packet_buff
+ min_packet_len
;
384 tot_changes
= new_len
/ batadv_tt_len(1);
386 spin_lock_bh(&bat_priv
->tt
.changes_list_lock
);
387 atomic_set(&bat_priv
->tt
.local_changes
, 0);
389 list_for_each_entry_safe(entry
, safe
, &bat_priv
->tt
.changes_list
,
391 if (count
< tot_changes
) {
392 memcpy(tt_buff
+ batadv_tt_len(count
),
393 &entry
->change
, sizeof(struct batadv_tt_change
));
396 list_del(&entry
->list
);
399 spin_unlock_bh(&bat_priv
->tt
.changes_list_lock
);
401 /* Keep the buffer for possible tt_request */
402 spin_lock_bh(&bat_priv
->tt
.last_changeset_lock
);
403 kfree(bat_priv
->tt
.last_changeset
);
404 bat_priv
->tt
.last_changeset_len
= 0;
405 bat_priv
->tt
.last_changeset
= NULL
;
406 /* check whether this new OGM has no changes due to size problems */
408 /* if kmalloc() fails we will reply with the full table
409 * instead of providing the diff
411 bat_priv
->tt
.last_changeset
= kmalloc(new_len
, GFP_ATOMIC
);
412 if (bat_priv
->tt
.last_changeset
) {
413 memcpy(bat_priv
->tt
.last_changeset
, tt_buff
, new_len
);
414 bat_priv
->tt
.last_changeset_len
= new_len
;
417 spin_unlock_bh(&bat_priv
->tt
.last_changeset_lock
);
422 int batadv_tt_local_seq_print_text(struct seq_file
*seq
, void *offset
)
424 struct net_device
*net_dev
= (struct net_device
*)seq
->private;
425 struct batadv_priv
*bat_priv
= netdev_priv(net_dev
);
426 struct batadv_hashtable
*hash
= bat_priv
->tt
.local_hash
;
427 struct batadv_tt_common_entry
*tt_common_entry
;
428 struct batadv_hard_iface
*primary_if
;
429 struct hlist_node
*node
;
430 struct hlist_head
*head
;
434 primary_if
= batadv_primary_if_get_selected(bat_priv
);
436 ret
= seq_printf(seq
,
437 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
442 if (primary_if
->if_status
!= BATADV_IF_ACTIVE
) {
443 ret
= seq_printf(seq
,
444 "BATMAN mesh %s disabled - primary interface not active\n",
450 "Locally retrieved addresses (from %s) announced via TT (TTVN: %u):\n",
451 net_dev
->name
, (uint8_t)atomic_read(&bat_priv
->tt
.vn
));
453 for (i
= 0; i
< hash
->size
; i
++) {
454 head
= &hash
->table
[i
];
457 hlist_for_each_entry_rcu(tt_common_entry
, node
,
459 seq_printf(seq
, " * %pM [%c%c%c%c%c]\n",
460 tt_common_entry
->addr
,
461 (tt_common_entry
->flags
&
462 BATADV_TT_CLIENT_ROAM
? 'R' : '.'),
463 (tt_common_entry
->flags
&
464 BATADV_TT_CLIENT_NOPURGE
? 'P' : '.'),
465 (tt_common_entry
->flags
&
466 BATADV_TT_CLIENT_NEW
? 'N' : '.'),
467 (tt_common_entry
->flags
&
468 BATADV_TT_CLIENT_PENDING
? 'X' : '.'),
469 (tt_common_entry
->flags
&
470 BATADV_TT_CLIENT_WIFI
? 'W' : '.'));
476 batadv_hardif_free_ref(primary_if
);
481 batadv_tt_local_set_pending(struct batadv_priv
*bat_priv
,
482 struct batadv_tt_local_entry
*tt_local_entry
,
483 uint16_t flags
, const char *message
)
485 batadv_tt_local_event(bat_priv
, tt_local_entry
->common
.addr
,
486 tt_local_entry
->common
.flags
| flags
);
488 /* The local client has to be marked as "pending to be removed" but has
489 * to be kept in the table in order to send it in a full table
490 * response issued before the net ttvn increment (consistency check)
492 tt_local_entry
->common
.flags
|= BATADV_TT_CLIENT_PENDING
;
494 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
495 "Local tt entry (%pM) pending to be removed: %s\n",
496 tt_local_entry
->common
.addr
, message
);
499 void batadv_tt_local_remove(struct batadv_priv
*bat_priv
, const uint8_t *addr
,
500 const char *message
, bool roaming
)
502 struct batadv_tt_local_entry
*tt_local_entry
= NULL
;
505 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, addr
);
509 flags
= BATADV_TT_CLIENT_DEL
;
511 flags
|= BATADV_TT_CLIENT_ROAM
;
513 batadv_tt_local_set_pending(bat_priv
, tt_local_entry
, flags
, message
);
516 batadv_tt_local_entry_free_ref(tt_local_entry
);
519 static void batadv_tt_local_purge_list(struct batadv_priv
*bat_priv
,
520 struct hlist_head
*head
)
522 struct batadv_tt_local_entry
*tt_local_entry
;
523 struct batadv_tt_common_entry
*tt_common_entry
;
524 struct hlist_node
*node
, *node_tmp
;
526 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
, head
,
528 tt_local_entry
= container_of(tt_common_entry
,
529 struct batadv_tt_local_entry
,
531 if (tt_local_entry
->common
.flags
& BATADV_TT_CLIENT_NOPURGE
)
534 /* entry already marked for deletion */
535 if (tt_local_entry
->common
.flags
& BATADV_TT_CLIENT_PENDING
)
538 if (!batadv_has_timed_out(tt_local_entry
->last_seen
,
539 BATADV_TT_LOCAL_TIMEOUT
))
542 batadv_tt_local_set_pending(bat_priv
, tt_local_entry
,
543 BATADV_TT_CLIENT_DEL
, "timed out");
547 static void batadv_tt_local_purge(struct batadv_priv
*bat_priv
)
549 struct batadv_hashtable
*hash
= bat_priv
->tt
.local_hash
;
550 struct hlist_head
*head
;
551 spinlock_t
*list_lock
; /* protects write access to the hash lists */
554 for (i
= 0; i
< hash
->size
; i
++) {
555 head
= &hash
->table
[i
];
556 list_lock
= &hash
->list_locks
[i
];
558 spin_lock_bh(list_lock
);
559 batadv_tt_local_purge_list(bat_priv
, head
);
560 spin_unlock_bh(list_lock
);
565 static void batadv_tt_local_table_free(struct batadv_priv
*bat_priv
)
567 struct batadv_hashtable
*hash
;
568 spinlock_t
*list_lock
; /* protects write access to the hash lists */
569 struct batadv_tt_common_entry
*tt_common_entry
;
570 struct batadv_tt_local_entry
*tt_local
;
571 struct hlist_node
*node
, *node_tmp
;
572 struct hlist_head
*head
;
575 if (!bat_priv
->tt
.local_hash
)
578 hash
= bat_priv
->tt
.local_hash
;
580 for (i
= 0; i
< hash
->size
; i
++) {
581 head
= &hash
->table
[i
];
582 list_lock
= &hash
->list_locks
[i
];
584 spin_lock_bh(list_lock
);
585 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
,
588 tt_local
= container_of(tt_common_entry
,
589 struct batadv_tt_local_entry
,
591 batadv_tt_local_entry_free_ref(tt_local
);
593 spin_unlock_bh(list_lock
);
596 batadv_hash_destroy(hash
);
598 bat_priv
->tt
.local_hash
= NULL
;
601 static int batadv_tt_global_init(struct batadv_priv
*bat_priv
)
603 if (bat_priv
->tt
.global_hash
)
606 bat_priv
->tt
.global_hash
= batadv_hash_new(1024);
608 if (!bat_priv
->tt
.global_hash
)
614 static void batadv_tt_changes_list_free(struct batadv_priv
*bat_priv
)
616 struct batadv_tt_change_node
*entry
, *safe
;
618 spin_lock_bh(&bat_priv
->tt
.changes_list_lock
);
620 list_for_each_entry_safe(entry
, safe
, &bat_priv
->tt
.changes_list
,
622 list_del(&entry
->list
);
626 atomic_set(&bat_priv
->tt
.local_changes
, 0);
627 spin_unlock_bh(&bat_priv
->tt
.changes_list_lock
);
630 /* retrieves the orig_tt_list_entry belonging to orig_node from the
631 * batadv_tt_global_entry list
633 * returns it with an increased refcounter, NULL if not found
635 static struct batadv_tt_orig_list_entry
*
636 batadv_tt_global_orig_entry_find(const struct batadv_tt_global_entry
*entry
,
637 const struct batadv_orig_node
*orig_node
)
639 struct batadv_tt_orig_list_entry
*tmp_orig_entry
, *orig_entry
= NULL
;
640 const struct hlist_head
*head
;
641 struct hlist_node
*node
;
644 head
= &entry
->orig_list
;
645 hlist_for_each_entry_rcu(tmp_orig_entry
, node
, head
, list
) {
646 if (tmp_orig_entry
->orig_node
!= orig_node
)
648 if (!atomic_inc_not_zero(&tmp_orig_entry
->refcount
))
651 orig_entry
= tmp_orig_entry
;
659 /* find out if an orig_node is already in the list of a tt_global_entry.
660 * returns true if found, false otherwise
663 batadv_tt_global_entry_has_orig(const struct batadv_tt_global_entry
*entry
,
664 const struct batadv_orig_node
*orig_node
)
666 struct batadv_tt_orig_list_entry
*orig_entry
;
669 orig_entry
= batadv_tt_global_orig_entry_find(entry
, orig_node
);
672 batadv_tt_orig_list_entry_free_ref(orig_entry
);
679 batadv_tt_global_orig_entry_add(struct batadv_tt_global_entry
*tt_global
,
680 struct batadv_orig_node
*orig_node
, int ttvn
)
682 struct batadv_tt_orig_list_entry
*orig_entry
;
684 orig_entry
= batadv_tt_global_orig_entry_find(tt_global
, orig_node
);
688 orig_entry
= kzalloc(sizeof(*orig_entry
), GFP_ATOMIC
);
692 INIT_HLIST_NODE(&orig_entry
->list
);
693 atomic_inc(&orig_node
->refcount
);
694 atomic_inc(&orig_node
->tt_size
);
695 orig_entry
->orig_node
= orig_node
;
696 orig_entry
->ttvn
= ttvn
;
697 atomic_set(&orig_entry
->refcount
, 2);
699 spin_lock_bh(&tt_global
->list_lock
);
700 hlist_add_head_rcu(&orig_entry
->list
,
701 &tt_global
->orig_list
);
702 spin_unlock_bh(&tt_global
->list_lock
);
705 batadv_tt_orig_list_entry_free_ref(orig_entry
);
708 /* caller must hold orig_node refcount */
709 int batadv_tt_global_add(struct batadv_priv
*bat_priv
,
710 struct batadv_orig_node
*orig_node
,
711 const unsigned char *tt_addr
, uint8_t flags
,
714 struct batadv_tt_global_entry
*tt_global_entry
= NULL
;
717 struct batadv_tt_common_entry
*common
;
719 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, tt_addr
);
721 if (!tt_global_entry
) {
722 tt_global_entry
= kzalloc(sizeof(*tt_global_entry
), GFP_ATOMIC
);
723 if (!tt_global_entry
)
726 common
= &tt_global_entry
->common
;
727 memcpy(common
->addr
, tt_addr
, ETH_ALEN
);
729 common
->flags
= flags
;
730 tt_global_entry
->roam_at
= 0;
731 atomic_set(&common
->refcount
, 2);
733 INIT_HLIST_HEAD(&tt_global_entry
->orig_list
);
734 spin_lock_init(&tt_global_entry
->list_lock
);
736 hash_added
= batadv_hash_add(bat_priv
->tt
.global_hash
,
738 batadv_choose_orig
, common
,
739 &common
->hash_entry
);
741 if (unlikely(hash_added
!= 0)) {
742 /* remove the reference for the hash */
743 batadv_tt_global_entry_free_ref(tt_global_entry
);
747 /* there is already a global entry, use this one. */
749 /* If there is the BATADV_TT_CLIENT_ROAM flag set, there is only
750 * one originator left in the list and we previously received a
751 * delete + roaming change for this originator.
753 * We should first delete the old originator before adding the
756 if (tt_global_entry
->common
.flags
& BATADV_TT_CLIENT_ROAM
) {
757 batadv_tt_global_del_orig_list(tt_global_entry
);
758 tt_global_entry
->common
.flags
&= ~BATADV_TT_CLIENT_ROAM
;
759 tt_global_entry
->roam_at
= 0;
763 /* add the new orig_entry (if needed) */
764 batadv_tt_global_orig_entry_add(tt_global_entry
, orig_node
, ttvn
);
766 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
767 "Creating new global tt entry: %pM (via %pM)\n",
768 tt_global_entry
->common
.addr
, orig_node
->orig
);
771 /* remove address from local hash if present */
772 batadv_tt_local_remove(bat_priv
, tt_global_entry
->common
.addr
,
773 "global tt received",
774 flags
& BATADV_TT_CLIENT_ROAM
);
778 batadv_tt_global_entry_free_ref(tt_global_entry
);
782 /* print all orig nodes who announce the address for this global entry.
783 * it is assumed that the caller holds rcu_read_lock();
786 batadv_tt_global_print_entry(struct batadv_tt_global_entry
*tt_global_entry
,
787 struct seq_file
*seq
)
789 struct hlist_head
*head
;
790 struct hlist_node
*node
;
791 struct batadv_tt_orig_list_entry
*orig_entry
;
792 struct batadv_tt_common_entry
*tt_common_entry
;
796 tt_common_entry
= &tt_global_entry
->common
;
798 head
= &tt_global_entry
->orig_list
;
800 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
801 flags
= tt_common_entry
->flags
;
802 last_ttvn
= atomic_read(&orig_entry
->orig_node
->last_ttvn
);
803 seq_printf(seq
, " * %pM (%3u) via %pM (%3u) [%c%c]\n",
804 tt_global_entry
->common
.addr
, orig_entry
->ttvn
,
805 orig_entry
->orig_node
->orig
, last_ttvn
,
806 (flags
& BATADV_TT_CLIENT_ROAM
? 'R' : '.'),
807 (flags
& BATADV_TT_CLIENT_WIFI
? 'W' : '.'));
811 int batadv_tt_global_seq_print_text(struct seq_file
*seq
, void *offset
)
813 struct net_device
*net_dev
= (struct net_device
*)seq
->private;
814 struct batadv_priv
*bat_priv
= netdev_priv(net_dev
);
815 struct batadv_hashtable
*hash
= bat_priv
->tt
.global_hash
;
816 struct batadv_tt_common_entry
*tt_common_entry
;
817 struct batadv_tt_global_entry
*tt_global
;
818 struct batadv_hard_iface
*primary_if
;
819 struct hlist_node
*node
;
820 struct hlist_head
*head
;
824 primary_if
= batadv_primary_if_get_selected(bat_priv
);
826 ret
= seq_printf(seq
,
827 "BATMAN mesh %s disabled - please specify interfaces to enable it\n",
832 if (primary_if
->if_status
!= BATADV_IF_ACTIVE
) {
833 ret
= seq_printf(seq
,
834 "BATMAN mesh %s disabled - primary interface not active\n",
840 "Globally announced TT entries received via the mesh %s\n",
842 seq_printf(seq
, " %-13s %s %-15s %s %s\n",
843 "Client", "(TTVN)", "Originator", "(Curr TTVN)", "Flags");
845 for (i
= 0; i
< hash
->size
; i
++) {
846 head
= &hash
->table
[i
];
849 hlist_for_each_entry_rcu(tt_common_entry
, node
,
851 tt_global
= container_of(tt_common_entry
,
852 struct batadv_tt_global_entry
,
854 batadv_tt_global_print_entry(tt_global
, seq
);
860 batadv_hardif_free_ref(primary_if
);
864 /* deletes the orig list of a tt_global_entry */
866 batadv_tt_global_del_orig_list(struct batadv_tt_global_entry
*tt_global_entry
)
868 struct hlist_head
*head
;
869 struct hlist_node
*node
, *safe
;
870 struct batadv_tt_orig_list_entry
*orig_entry
;
872 spin_lock_bh(&tt_global_entry
->list_lock
);
873 head
= &tt_global_entry
->orig_list
;
874 hlist_for_each_entry_safe(orig_entry
, node
, safe
, head
, list
) {
876 batadv_tt_orig_list_entry_free_ref(orig_entry
);
878 spin_unlock_bh(&tt_global_entry
->list_lock
);
883 batadv_tt_global_del_orig_entry(struct batadv_priv
*bat_priv
,
884 struct batadv_tt_global_entry
*tt_global_entry
,
885 struct batadv_orig_node
*orig_node
,
888 struct hlist_head
*head
;
889 struct hlist_node
*node
, *safe
;
890 struct batadv_tt_orig_list_entry
*orig_entry
;
892 spin_lock_bh(&tt_global_entry
->list_lock
);
893 head
= &tt_global_entry
->orig_list
;
894 hlist_for_each_entry_safe(orig_entry
, node
, safe
, head
, list
) {
895 if (orig_entry
->orig_node
== orig_node
) {
896 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
897 "Deleting %pM from global tt entry %pM: %s\n",
899 tt_global_entry
->common
.addr
, message
);
901 batadv_tt_orig_list_entry_free_ref(orig_entry
);
904 spin_unlock_bh(&tt_global_entry
->list_lock
);
908 batadv_tt_global_del_struct(struct batadv_priv
*bat_priv
,
909 struct batadv_tt_global_entry
*tt_global_entry
,
912 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
913 "Deleting global tt entry %pM: %s\n",
914 tt_global_entry
->common
.addr
, message
);
916 batadv_hash_remove(bat_priv
->tt
.global_hash
, batadv_compare_tt
,
917 batadv_choose_orig
, tt_global_entry
->common
.addr
);
918 batadv_tt_global_entry_free_ref(tt_global_entry
);
922 /* If the client is to be deleted, we check if it is the last origantor entry
923 * within tt_global entry. If yes, we set the BATADV_TT_CLIENT_ROAM flag and the
924 * timer, otherwise we simply remove the originator scheduled for deletion.
927 batadv_tt_global_del_roaming(struct batadv_priv
*bat_priv
,
928 struct batadv_tt_global_entry
*tt_global_entry
,
929 struct batadv_orig_node
*orig_node
,
932 bool last_entry
= true;
933 struct hlist_head
*head
;
934 struct hlist_node
*node
;
935 struct batadv_tt_orig_list_entry
*orig_entry
;
937 /* no local entry exists, case 1:
938 * Check if this is the last one or if other entries exist.
942 head
= &tt_global_entry
->orig_list
;
943 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
944 if (orig_entry
->orig_node
!= orig_node
) {
952 /* its the last one, mark for roaming. */
953 tt_global_entry
->common
.flags
|= BATADV_TT_CLIENT_ROAM
;
954 tt_global_entry
->roam_at
= jiffies
;
956 /* there is another entry, we can simply delete this
957 * one and can still use the other one.
959 batadv_tt_global_del_orig_entry(bat_priv
, tt_global_entry
,
965 static void batadv_tt_global_del(struct batadv_priv
*bat_priv
,
966 struct batadv_orig_node
*orig_node
,
967 const unsigned char *addr
,
968 const char *message
, bool roaming
)
970 struct batadv_tt_global_entry
*tt_global_entry
= NULL
;
971 struct batadv_tt_local_entry
*local_entry
= NULL
;
973 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
974 if (!tt_global_entry
)
978 batadv_tt_global_del_orig_entry(bat_priv
, tt_global_entry
,
981 if (hlist_empty(&tt_global_entry
->orig_list
))
982 batadv_tt_global_del_struct(bat_priv
, tt_global_entry
,
988 /* if we are deleting a global entry due to a roam
989 * event, there are two possibilities:
990 * 1) the client roamed from node A to node B => if there
991 * is only one originator left for this client, we mark
992 * it with BATADV_TT_CLIENT_ROAM, we start a timer and we
993 * wait for node B to claim it. In case of timeout
994 * the entry is purged.
996 * If there are other originators left, we directly delete
998 * 2) the client roamed to us => we can directly delete
999 * the global entry, since it is useless now.
1001 local_entry
= batadv_tt_local_hash_find(bat_priv
,
1002 tt_global_entry
->common
.addr
);
1004 /* local entry exists, case 2: client roamed to us. */
1005 batadv_tt_global_del_orig_list(tt_global_entry
);
1006 batadv_tt_global_del_struct(bat_priv
, tt_global_entry
, message
);
1008 /* no local entry exists, case 1: check for roaming */
1009 batadv_tt_global_del_roaming(bat_priv
, tt_global_entry
,
1010 orig_node
, message
);
1014 if (tt_global_entry
)
1015 batadv_tt_global_entry_free_ref(tt_global_entry
);
1017 batadv_tt_local_entry_free_ref(local_entry
);
1020 void batadv_tt_global_del_orig(struct batadv_priv
*bat_priv
,
1021 struct batadv_orig_node
*orig_node
,
1022 const char *message
)
1024 struct batadv_tt_global_entry
*tt_global
;
1025 struct batadv_tt_common_entry
*tt_common_entry
;
1027 struct batadv_hashtable
*hash
= bat_priv
->tt
.global_hash
;
1028 struct hlist_node
*node
, *safe
;
1029 struct hlist_head
*head
;
1030 spinlock_t
*list_lock
; /* protects write access to the hash lists */
1035 for (i
= 0; i
< hash
->size
; i
++) {
1036 head
= &hash
->table
[i
];
1037 list_lock
= &hash
->list_locks
[i
];
1039 spin_lock_bh(list_lock
);
1040 hlist_for_each_entry_safe(tt_common_entry
, node
, safe
,
1042 tt_global
= container_of(tt_common_entry
,
1043 struct batadv_tt_global_entry
,
1046 batadv_tt_global_del_orig_entry(bat_priv
, tt_global
,
1047 orig_node
, message
);
1049 if (hlist_empty(&tt_global
->orig_list
)) {
1050 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1051 "Deleting global tt entry %pM: %s\n",
1052 tt_global
->common
.addr
, message
);
1053 hlist_del_rcu(node
);
1054 batadv_tt_global_entry_free_ref(tt_global
);
1057 spin_unlock_bh(list_lock
);
1059 orig_node
->tt_initialised
= false;
1062 static void batadv_tt_global_roam_purge_list(struct batadv_priv
*bat_priv
,
1063 struct hlist_head
*head
)
1065 struct batadv_tt_common_entry
*tt_common_entry
;
1066 struct batadv_tt_global_entry
*tt_global_entry
;
1067 struct hlist_node
*node
, *node_tmp
;
1069 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
, head
,
1071 tt_global_entry
= container_of(tt_common_entry
,
1072 struct batadv_tt_global_entry
,
1074 if (!(tt_global_entry
->common
.flags
& BATADV_TT_CLIENT_ROAM
))
1076 if (!batadv_has_timed_out(tt_global_entry
->roam_at
,
1077 BATADV_TT_CLIENT_ROAM_TIMEOUT
))
1080 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1081 "Deleting global tt entry (%pM): Roaming timeout\n",
1082 tt_global_entry
->common
.addr
);
1084 hlist_del_rcu(node
);
1085 batadv_tt_global_entry_free_ref(tt_global_entry
);
1089 static void batadv_tt_global_roam_purge(struct batadv_priv
*bat_priv
)
1091 struct batadv_hashtable
*hash
= bat_priv
->tt
.global_hash
;
1092 struct hlist_head
*head
;
1093 spinlock_t
*list_lock
; /* protects write access to the hash lists */
1096 for (i
= 0; i
< hash
->size
; i
++) {
1097 head
= &hash
->table
[i
];
1098 list_lock
= &hash
->list_locks
[i
];
1100 spin_lock_bh(list_lock
);
1101 batadv_tt_global_roam_purge_list(bat_priv
, head
);
1102 spin_unlock_bh(list_lock
);
1107 static void batadv_tt_global_table_free(struct batadv_priv
*bat_priv
)
1109 struct batadv_hashtable
*hash
;
1110 spinlock_t
*list_lock
; /* protects write access to the hash lists */
1111 struct batadv_tt_common_entry
*tt_common_entry
;
1112 struct batadv_tt_global_entry
*tt_global
;
1113 struct hlist_node
*node
, *node_tmp
;
1114 struct hlist_head
*head
;
1117 if (!bat_priv
->tt
.global_hash
)
1120 hash
= bat_priv
->tt
.global_hash
;
1122 for (i
= 0; i
< hash
->size
; i
++) {
1123 head
= &hash
->table
[i
];
1124 list_lock
= &hash
->list_locks
[i
];
1126 spin_lock_bh(list_lock
);
1127 hlist_for_each_entry_safe(tt_common_entry
, node
, node_tmp
,
1129 hlist_del_rcu(node
);
1130 tt_global
= container_of(tt_common_entry
,
1131 struct batadv_tt_global_entry
,
1133 batadv_tt_global_entry_free_ref(tt_global
);
1135 spin_unlock_bh(list_lock
);
1138 batadv_hash_destroy(hash
);
1140 bat_priv
->tt
.global_hash
= NULL
;
1144 _batadv_is_ap_isolated(struct batadv_tt_local_entry
*tt_local_entry
,
1145 struct batadv_tt_global_entry
*tt_global_entry
)
1149 if (tt_local_entry
->common
.flags
& BATADV_TT_CLIENT_WIFI
&&
1150 tt_global_entry
->common
.flags
& BATADV_TT_CLIENT_WIFI
)
1156 struct batadv_orig_node
*batadv_transtable_search(struct batadv_priv
*bat_priv
,
1158 const uint8_t *addr
)
1160 struct batadv_tt_local_entry
*tt_local_entry
= NULL
;
1161 struct batadv_tt_global_entry
*tt_global_entry
= NULL
;
1162 struct batadv_orig_node
*orig_node
= NULL
;
1163 struct batadv_neigh_node
*router
= NULL
;
1164 struct hlist_head
*head
;
1165 struct hlist_node
*node
;
1166 struct batadv_tt_orig_list_entry
*orig_entry
;
1169 if (src
&& atomic_read(&bat_priv
->ap_isolation
)) {
1170 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, src
);
1171 if (!tt_local_entry
)
1175 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
1176 if (!tt_global_entry
)
1179 /* check whether the clients should not communicate due to AP
1182 if (tt_local_entry
&&
1183 _batadv_is_ap_isolated(tt_local_entry
, tt_global_entry
))
1189 head
= &tt_global_entry
->orig_list
;
1190 hlist_for_each_entry_rcu(orig_entry
, node
, head
, list
) {
1191 router
= batadv_orig_node_get_router(orig_entry
->orig_node
);
1195 if (router
->tq_avg
> best_tq
) {
1196 orig_node
= orig_entry
->orig_node
;
1197 best_tq
= router
->tq_avg
;
1199 batadv_neigh_node_free_ref(router
);
1201 /* found anything? */
1202 if (orig_node
&& !atomic_inc_not_zero(&orig_node
->refcount
))
1206 if (tt_global_entry
)
1207 batadv_tt_global_entry_free_ref(tt_global_entry
);
1209 batadv_tt_local_entry_free_ref(tt_local_entry
);
1214 /* Calculates the checksum of the local table of a given orig_node */
1215 static uint16_t batadv_tt_global_crc(struct batadv_priv
*bat_priv
,
1216 struct batadv_orig_node
*orig_node
)
1218 uint16_t total
= 0, total_one
;
1219 struct batadv_hashtable
*hash
= bat_priv
->tt
.global_hash
;
1220 struct batadv_tt_common_entry
*tt_common
;
1221 struct batadv_tt_global_entry
*tt_global
;
1222 struct hlist_node
*node
;
1223 struct hlist_head
*head
;
1227 for (i
= 0; i
< hash
->size
; i
++) {
1228 head
= &hash
->table
[i
];
1231 hlist_for_each_entry_rcu(tt_common
, node
, head
, hash_entry
) {
1232 tt_global
= container_of(tt_common
,
1233 struct batadv_tt_global_entry
,
1235 /* Roaming clients are in the global table for
1236 * consistency only. They don't have to be
1237 * taken into account while computing the
1240 if (tt_common
->flags
& BATADV_TT_CLIENT_ROAM
)
1243 /* find out if this global entry is announced by this
1246 if (!batadv_tt_global_entry_has_orig(tt_global
,
1251 for (j
= 0; j
< ETH_ALEN
; j
++)
1252 total_one
= crc16_byte(total_one
,
1253 tt_common
->addr
[j
]);
1262 /* Calculates the checksum of the local table */
1263 static uint16_t batadv_tt_local_crc(struct batadv_priv
*bat_priv
)
1265 uint16_t total
= 0, total_one
;
1266 struct batadv_hashtable
*hash
= bat_priv
->tt
.local_hash
;
1267 struct batadv_tt_common_entry
*tt_common
;
1268 struct hlist_node
*node
;
1269 struct hlist_head
*head
;
1273 for (i
= 0; i
< hash
->size
; i
++) {
1274 head
= &hash
->table
[i
];
1277 hlist_for_each_entry_rcu(tt_common
, node
, head
, hash_entry
) {
1278 /* not yet committed clients have not to be taken into
1279 * account while computing the CRC
1281 if (tt_common
->flags
& BATADV_TT_CLIENT_NEW
)
1284 for (j
= 0; j
< ETH_ALEN
; j
++)
1285 total_one
= crc16_byte(total_one
,
1286 tt_common
->addr
[j
]);
1295 static void batadv_tt_req_list_free(struct batadv_priv
*bat_priv
)
1297 struct batadv_tt_req_node
*node
, *safe
;
1299 spin_lock_bh(&bat_priv
->tt
.req_list_lock
);
1301 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt
.req_list
, list
) {
1302 list_del(&node
->list
);
1306 spin_unlock_bh(&bat_priv
->tt
.req_list_lock
);
1309 static void batadv_tt_save_orig_buffer(struct batadv_priv
*bat_priv
,
1310 struct batadv_orig_node
*orig_node
,
1311 const unsigned char *tt_buff
,
1312 uint8_t tt_num_changes
)
1314 uint16_t tt_buff_len
= batadv_tt_len(tt_num_changes
);
1316 /* Replace the old buffer only if I received something in the
1317 * last OGM (the OGM could carry no changes)
1319 spin_lock_bh(&orig_node
->tt_buff_lock
);
1320 if (tt_buff_len
> 0) {
1321 kfree(orig_node
->tt_buff
);
1322 orig_node
->tt_buff_len
= 0;
1323 orig_node
->tt_buff
= kmalloc(tt_buff_len
, GFP_ATOMIC
);
1324 if (orig_node
->tt_buff
) {
1325 memcpy(orig_node
->tt_buff
, tt_buff
, tt_buff_len
);
1326 orig_node
->tt_buff_len
= tt_buff_len
;
1329 spin_unlock_bh(&orig_node
->tt_buff_lock
);
1332 static void batadv_tt_req_purge(struct batadv_priv
*bat_priv
)
1334 struct batadv_tt_req_node
*node
, *safe
;
1336 spin_lock_bh(&bat_priv
->tt
.req_list_lock
);
1337 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt
.req_list
, list
) {
1338 if (batadv_has_timed_out(node
->issued_at
,
1339 BATADV_TT_REQUEST_TIMEOUT
)) {
1340 list_del(&node
->list
);
1344 spin_unlock_bh(&bat_priv
->tt
.req_list_lock
);
1347 /* returns the pointer to the new tt_req_node struct if no request
1348 * has already been issued for this orig_node, NULL otherwise
1350 static struct batadv_tt_req_node
*
1351 batadv_new_tt_req_node(struct batadv_priv
*bat_priv
,
1352 struct batadv_orig_node
*orig_node
)
1354 struct batadv_tt_req_node
*tt_req_node_tmp
, *tt_req_node
= NULL
;
1356 spin_lock_bh(&bat_priv
->tt
.req_list_lock
);
1357 list_for_each_entry(tt_req_node_tmp
, &bat_priv
->tt
.req_list
, list
) {
1358 if (batadv_compare_eth(tt_req_node_tmp
, orig_node
) &&
1359 !batadv_has_timed_out(tt_req_node_tmp
->issued_at
,
1360 BATADV_TT_REQUEST_TIMEOUT
))
1364 tt_req_node
= kmalloc(sizeof(*tt_req_node
), GFP_ATOMIC
);
1368 memcpy(tt_req_node
->addr
, orig_node
->orig
, ETH_ALEN
);
1369 tt_req_node
->issued_at
= jiffies
;
1371 list_add(&tt_req_node
->list
, &bat_priv
->tt
.req_list
);
1373 spin_unlock_bh(&bat_priv
->tt
.req_list_lock
);
1377 /* data_ptr is useless here, but has to be kept to respect the prototype */
1378 static int batadv_tt_local_valid_entry(const void *entry_ptr
,
1379 const void *data_ptr
)
1381 const struct batadv_tt_common_entry
*tt_common_entry
= entry_ptr
;
1383 if (tt_common_entry
->flags
& BATADV_TT_CLIENT_NEW
)
1388 static int batadv_tt_global_valid(const void *entry_ptr
,
1389 const void *data_ptr
)
1391 const struct batadv_tt_common_entry
*tt_common_entry
= entry_ptr
;
1392 const struct batadv_tt_global_entry
*tt_global_entry
;
1393 const struct batadv_orig_node
*orig_node
= data_ptr
;
1395 if (tt_common_entry
->flags
& BATADV_TT_CLIENT_ROAM
)
1398 tt_global_entry
= container_of(tt_common_entry
,
1399 struct batadv_tt_global_entry
,
1402 return batadv_tt_global_entry_has_orig(tt_global_entry
, orig_node
);
1405 static struct sk_buff
*
1406 batadv_tt_response_fill_table(uint16_t tt_len
, uint8_t ttvn
,
1407 struct batadv_hashtable
*hash
,
1408 struct batadv_hard_iface
*primary_if
,
1409 int (*valid_cb
)(const void *, const void *),
1412 struct batadv_tt_common_entry
*tt_common_entry
;
1413 struct batadv_tt_query_packet
*tt_response
;
1414 struct batadv_tt_change
*tt_change
;
1415 struct hlist_node
*node
;
1416 struct hlist_head
*head
;
1417 struct sk_buff
*skb
= NULL
;
1418 uint16_t tt_tot
, tt_count
;
1419 ssize_t tt_query_size
= sizeof(struct batadv_tt_query_packet
);
1423 if (tt_query_size
+ tt_len
> primary_if
->soft_iface
->mtu
) {
1424 tt_len
= primary_if
->soft_iface
->mtu
- tt_query_size
;
1425 tt_len
-= tt_len
% sizeof(struct batadv_tt_change
);
1427 tt_tot
= tt_len
/ sizeof(struct batadv_tt_change
);
1429 len
= tt_query_size
+ tt_len
;
1430 skb
= dev_alloc_skb(len
+ ETH_HLEN
);
1434 skb_reserve(skb
, ETH_HLEN
);
1435 tt_response
= (struct batadv_tt_query_packet
*)skb_put(skb
, len
);
1436 tt_response
->ttvn
= ttvn
;
1438 tt_change
= (struct batadv_tt_change
*)(skb
->data
+ tt_query_size
);
1442 for (i
= 0; i
< hash
->size
; i
++) {
1443 head
= &hash
->table
[i
];
1445 hlist_for_each_entry_rcu(tt_common_entry
, node
,
1447 if (tt_count
== tt_tot
)
1450 if ((valid_cb
) && (!valid_cb(tt_common_entry
, cb_data
)))
1453 memcpy(tt_change
->addr
, tt_common_entry
->addr
,
1455 tt_change
->flags
= BATADV_NO_FLAGS
;
1463 /* store in the message the number of entries we have successfully
1466 tt_response
->tt_data
= htons(tt_count
);
1472 static int batadv_send_tt_request(struct batadv_priv
*bat_priv
,
1473 struct batadv_orig_node
*dst_orig_node
,
1474 uint8_t ttvn
, uint16_t tt_crc
,
1477 struct sk_buff
*skb
= NULL
;
1478 struct batadv_tt_query_packet
*tt_request
;
1479 struct batadv_neigh_node
*neigh_node
= NULL
;
1480 struct batadv_hard_iface
*primary_if
;
1481 struct batadv_tt_req_node
*tt_req_node
= NULL
;
1485 primary_if
= batadv_primary_if_get_selected(bat_priv
);
1489 /* The new tt_req will be issued only if I'm not waiting for a
1490 * reply from the same orig_node yet
1492 tt_req_node
= batadv_new_tt_req_node(bat_priv
, dst_orig_node
);
1496 skb
= dev_alloc_skb(sizeof(*tt_request
) + ETH_HLEN
);
1500 skb_reserve(skb
, ETH_HLEN
);
1502 tt_req_len
= sizeof(*tt_request
);
1503 tt_request
= (struct batadv_tt_query_packet
*)skb_put(skb
, tt_req_len
);
1505 tt_request
->header
.packet_type
= BATADV_TT_QUERY
;
1506 tt_request
->header
.version
= BATADV_COMPAT_VERSION
;
1507 memcpy(tt_request
->src
, primary_if
->net_dev
->dev_addr
, ETH_ALEN
);
1508 memcpy(tt_request
->dst
, dst_orig_node
->orig
, ETH_ALEN
);
1509 tt_request
->header
.ttl
= BATADV_TTL
;
1510 tt_request
->ttvn
= ttvn
;
1511 tt_request
->tt_data
= htons(tt_crc
);
1512 tt_request
->flags
= BATADV_TT_REQUEST
;
1515 tt_request
->flags
|= BATADV_TT_FULL_TABLE
;
1517 neigh_node
= batadv_orig_node_get_router(dst_orig_node
);
1521 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1522 "Sending TT_REQUEST to %pM via %pM [%c]\n",
1523 dst_orig_node
->orig
, neigh_node
->addr
,
1524 (full_table
? 'F' : '.'));
1526 batadv_inc_counter(bat_priv
, BATADV_CNT_TT_REQUEST_TX
);
1528 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
1533 batadv_neigh_node_free_ref(neigh_node
);
1535 batadv_hardif_free_ref(primary_if
);
1538 if (ret
&& tt_req_node
) {
1539 spin_lock_bh(&bat_priv
->tt
.req_list_lock
);
1540 list_del(&tt_req_node
->list
);
1541 spin_unlock_bh(&bat_priv
->tt
.req_list_lock
);
1548 batadv_send_other_tt_response(struct batadv_priv
*bat_priv
,
1549 struct batadv_tt_query_packet
*tt_request
)
1551 struct batadv_orig_node
*req_dst_orig_node
= NULL
;
1552 struct batadv_orig_node
*res_dst_orig_node
= NULL
;
1553 struct batadv_neigh_node
*neigh_node
= NULL
;
1554 struct batadv_hard_iface
*primary_if
= NULL
;
1555 uint8_t orig_ttvn
, req_ttvn
, ttvn
;
1557 unsigned char *tt_buff
;
1559 uint16_t tt_len
, tt_tot
;
1560 struct sk_buff
*skb
= NULL
;
1561 struct batadv_tt_query_packet
*tt_response
;
1562 uint8_t *packet_pos
;
1565 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1566 "Received TT_REQUEST from %pM for ttvn: %u (%pM) [%c]\n",
1567 tt_request
->src
, tt_request
->ttvn
, tt_request
->dst
,
1568 (tt_request
->flags
& BATADV_TT_FULL_TABLE
? 'F' : '.'));
1570 /* Let's get the orig node of the REAL destination */
1571 req_dst_orig_node
= batadv_orig_hash_find(bat_priv
, tt_request
->dst
);
1572 if (!req_dst_orig_node
)
1575 res_dst_orig_node
= batadv_orig_hash_find(bat_priv
, tt_request
->src
);
1576 if (!res_dst_orig_node
)
1579 neigh_node
= batadv_orig_node_get_router(res_dst_orig_node
);
1583 primary_if
= batadv_primary_if_get_selected(bat_priv
);
1587 orig_ttvn
= (uint8_t)atomic_read(&req_dst_orig_node
->last_ttvn
);
1588 req_ttvn
= tt_request
->ttvn
;
1590 /* I don't have the requested data */
1591 if (orig_ttvn
!= req_ttvn
||
1592 tt_request
->tt_data
!= htons(req_dst_orig_node
->tt_crc
))
1595 /* If the full table has been explicitly requested */
1596 if (tt_request
->flags
& BATADV_TT_FULL_TABLE
||
1597 !req_dst_orig_node
->tt_buff
)
1602 /* In this version, fragmentation is not implemented, then
1603 * I'll send only one packet with as much TT entries as I can
1606 spin_lock_bh(&req_dst_orig_node
->tt_buff_lock
);
1607 tt_len
= req_dst_orig_node
->tt_buff_len
;
1608 tt_tot
= tt_len
/ sizeof(struct batadv_tt_change
);
1610 len
= sizeof(*tt_response
) + tt_len
;
1611 skb
= dev_alloc_skb(len
+ ETH_HLEN
);
1615 skb_reserve(skb
, ETH_HLEN
);
1616 packet_pos
= skb_put(skb
, len
);
1617 tt_response
= (struct batadv_tt_query_packet
*)packet_pos
;
1618 tt_response
->ttvn
= req_ttvn
;
1619 tt_response
->tt_data
= htons(tt_tot
);
1621 tt_buff
= skb
->data
+ sizeof(*tt_response
);
1622 /* Copy the last orig_node's OGM buffer */
1623 memcpy(tt_buff
, req_dst_orig_node
->tt_buff
,
1624 req_dst_orig_node
->tt_buff_len
);
1626 spin_unlock_bh(&req_dst_orig_node
->tt_buff_lock
);
1628 tt_len
= (uint16_t)atomic_read(&req_dst_orig_node
->tt_size
);
1629 tt_len
*= sizeof(struct batadv_tt_change
);
1630 ttvn
= (uint8_t)atomic_read(&req_dst_orig_node
->last_ttvn
);
1632 skb
= batadv_tt_response_fill_table(tt_len
, ttvn
,
1633 bat_priv
->tt
.global_hash
,
1635 batadv_tt_global_valid
,
1640 tt_response
= (struct batadv_tt_query_packet
*)skb
->data
;
1643 tt_response
->header
.packet_type
= BATADV_TT_QUERY
;
1644 tt_response
->header
.version
= BATADV_COMPAT_VERSION
;
1645 tt_response
->header
.ttl
= BATADV_TTL
;
1646 memcpy(tt_response
->src
, req_dst_orig_node
->orig
, ETH_ALEN
);
1647 memcpy(tt_response
->dst
, tt_request
->src
, ETH_ALEN
);
1648 tt_response
->flags
= BATADV_TT_RESPONSE
;
1651 tt_response
->flags
|= BATADV_TT_FULL_TABLE
;
1653 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1654 "Sending TT_RESPONSE %pM via %pM for %pM (ttvn: %u)\n",
1655 res_dst_orig_node
->orig
, neigh_node
->addr
,
1656 req_dst_orig_node
->orig
, req_ttvn
);
1658 batadv_inc_counter(bat_priv
, BATADV_CNT_TT_RESPONSE_TX
);
1660 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
1665 spin_unlock_bh(&req_dst_orig_node
->tt_buff_lock
);
1668 if (res_dst_orig_node
)
1669 batadv_orig_node_free_ref(res_dst_orig_node
);
1670 if (req_dst_orig_node
)
1671 batadv_orig_node_free_ref(req_dst_orig_node
);
1673 batadv_neigh_node_free_ref(neigh_node
);
1675 batadv_hardif_free_ref(primary_if
);
1683 batadv_send_my_tt_response(struct batadv_priv
*bat_priv
,
1684 struct batadv_tt_query_packet
*tt_request
)
1686 struct batadv_orig_node
*orig_node
= NULL
;
1687 struct batadv_neigh_node
*neigh_node
= NULL
;
1688 struct batadv_hard_iface
*primary_if
= NULL
;
1689 uint8_t my_ttvn
, req_ttvn
, ttvn
;
1691 unsigned char *tt_buff
;
1693 uint16_t tt_len
, tt_tot
;
1694 struct sk_buff
*skb
= NULL
;
1695 struct batadv_tt_query_packet
*tt_response
;
1696 uint8_t *packet_pos
;
1699 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1700 "Received TT_REQUEST from %pM for ttvn: %u (me) [%c]\n",
1701 tt_request
->src
, tt_request
->ttvn
,
1702 (tt_request
->flags
& BATADV_TT_FULL_TABLE
? 'F' : '.'));
1705 my_ttvn
= (uint8_t)atomic_read(&bat_priv
->tt
.vn
);
1706 req_ttvn
= tt_request
->ttvn
;
1708 orig_node
= batadv_orig_hash_find(bat_priv
, tt_request
->src
);
1712 neigh_node
= batadv_orig_node_get_router(orig_node
);
1716 primary_if
= batadv_primary_if_get_selected(bat_priv
);
1720 /* If the full table has been explicitly requested or the gap
1721 * is too big send the whole local translation table
1723 if (tt_request
->flags
& BATADV_TT_FULL_TABLE
|| my_ttvn
!= req_ttvn
||
1724 !bat_priv
->tt
.last_changeset
)
1729 /* In this version, fragmentation is not implemented, then
1730 * I'll send only one packet with as much TT entries as I can
1733 spin_lock_bh(&bat_priv
->tt
.last_changeset_lock
);
1734 tt_len
= bat_priv
->tt
.last_changeset_len
;
1735 tt_tot
= tt_len
/ sizeof(struct batadv_tt_change
);
1737 len
= sizeof(*tt_response
) + tt_len
;
1738 skb
= dev_alloc_skb(len
+ ETH_HLEN
);
1742 skb_reserve(skb
, ETH_HLEN
);
1743 packet_pos
= skb_put(skb
, len
);
1744 tt_response
= (struct batadv_tt_query_packet
*)packet_pos
;
1745 tt_response
->ttvn
= req_ttvn
;
1746 tt_response
->tt_data
= htons(tt_tot
);
1748 tt_buff
= skb
->data
+ sizeof(*tt_response
);
1749 memcpy(tt_buff
, bat_priv
->tt
.last_changeset
,
1750 bat_priv
->tt
.last_changeset_len
);
1751 spin_unlock_bh(&bat_priv
->tt
.last_changeset_lock
);
1753 tt_len
= (uint16_t)atomic_read(&bat_priv
->tt
.local_entry_num
);
1754 tt_len
*= sizeof(struct batadv_tt_change
);
1755 ttvn
= (uint8_t)atomic_read(&bat_priv
->tt
.vn
);
1757 skb
= batadv_tt_response_fill_table(tt_len
, ttvn
,
1758 bat_priv
->tt
.local_hash
,
1760 batadv_tt_local_valid_entry
,
1765 tt_response
= (struct batadv_tt_query_packet
*)skb
->data
;
1768 tt_response
->header
.packet_type
= BATADV_TT_QUERY
;
1769 tt_response
->header
.version
= BATADV_COMPAT_VERSION
;
1770 tt_response
->header
.ttl
= BATADV_TTL
;
1771 memcpy(tt_response
->src
, primary_if
->net_dev
->dev_addr
, ETH_ALEN
);
1772 memcpy(tt_response
->dst
, tt_request
->src
, ETH_ALEN
);
1773 tt_response
->flags
= BATADV_TT_RESPONSE
;
1776 tt_response
->flags
|= BATADV_TT_FULL_TABLE
;
1778 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1779 "Sending TT_RESPONSE to %pM via %pM [%c]\n",
1780 orig_node
->orig
, neigh_node
->addr
,
1781 (tt_response
->flags
& BATADV_TT_FULL_TABLE
? 'F' : '.'));
1783 batadv_inc_counter(bat_priv
, BATADV_CNT_TT_RESPONSE_TX
);
1785 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
1790 spin_unlock_bh(&bat_priv
->tt
.last_changeset_lock
);
1793 batadv_orig_node_free_ref(orig_node
);
1795 batadv_neigh_node_free_ref(neigh_node
);
1797 batadv_hardif_free_ref(primary_if
);
1800 /* This packet was for me, so it doesn't need to be re-routed */
1804 bool batadv_send_tt_response(struct batadv_priv
*bat_priv
,
1805 struct batadv_tt_query_packet
*tt_request
)
1807 if (batadv_is_my_mac(tt_request
->dst
)) {
1808 /* don't answer backbone gws! */
1809 if (batadv_bla_is_backbone_gw_orig(bat_priv
, tt_request
->src
))
1812 return batadv_send_my_tt_response(bat_priv
, tt_request
);
1814 return batadv_send_other_tt_response(bat_priv
, tt_request
);
1818 static void _batadv_tt_update_changes(struct batadv_priv
*bat_priv
,
1819 struct batadv_orig_node
*orig_node
,
1820 struct batadv_tt_change
*tt_change
,
1821 uint16_t tt_num_changes
, uint8_t ttvn
)
1826 for (i
= 0; i
< tt_num_changes
; i
++) {
1827 if ((tt_change
+ i
)->flags
& BATADV_TT_CLIENT_DEL
) {
1828 roams
= (tt_change
+ i
)->flags
& BATADV_TT_CLIENT_ROAM
;
1829 batadv_tt_global_del(bat_priv
, orig_node
,
1830 (tt_change
+ i
)->addr
,
1831 "tt removed by changes",
1834 if (!batadv_tt_global_add(bat_priv
, orig_node
,
1835 (tt_change
+ i
)->addr
,
1836 (tt_change
+ i
)->flags
, ttvn
))
1837 /* In case of problem while storing a
1838 * global_entry, we stop the updating
1839 * procedure without committing the
1840 * ttvn change. This will avoid to send
1841 * corrupted data on tt_request
1846 orig_node
->tt_initialised
= true;
1849 static void batadv_tt_fill_gtable(struct batadv_priv
*bat_priv
,
1850 struct batadv_tt_query_packet
*tt_response
)
1852 struct batadv_orig_node
*orig_node
= NULL
;
1854 orig_node
= batadv_orig_hash_find(bat_priv
, tt_response
->src
);
1858 /* Purge the old table first.. */
1859 batadv_tt_global_del_orig(bat_priv
, orig_node
, "Received full table");
1861 _batadv_tt_update_changes(bat_priv
, orig_node
,
1862 (struct batadv_tt_change
*)(tt_response
+ 1),
1863 ntohs(tt_response
->tt_data
),
1866 spin_lock_bh(&orig_node
->tt_buff_lock
);
1867 kfree(orig_node
->tt_buff
);
1868 orig_node
->tt_buff_len
= 0;
1869 orig_node
->tt_buff
= NULL
;
1870 spin_unlock_bh(&orig_node
->tt_buff_lock
);
1872 atomic_set(&orig_node
->last_ttvn
, tt_response
->ttvn
);
1876 batadv_orig_node_free_ref(orig_node
);
1879 static void batadv_tt_update_changes(struct batadv_priv
*bat_priv
,
1880 struct batadv_orig_node
*orig_node
,
1881 uint16_t tt_num_changes
, uint8_t ttvn
,
1882 struct batadv_tt_change
*tt_change
)
1884 _batadv_tt_update_changes(bat_priv
, orig_node
, tt_change
,
1885 tt_num_changes
, ttvn
);
1887 batadv_tt_save_orig_buffer(bat_priv
, orig_node
,
1888 (unsigned char *)tt_change
, tt_num_changes
);
1889 atomic_set(&orig_node
->last_ttvn
, ttvn
);
1892 bool batadv_is_my_client(struct batadv_priv
*bat_priv
, const uint8_t *addr
)
1894 struct batadv_tt_local_entry
*tt_local_entry
= NULL
;
1897 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, addr
);
1898 if (!tt_local_entry
)
1900 /* Check if the client has been logically deleted (but is kept for
1901 * consistency purpose)
1903 if (tt_local_entry
->common
.flags
& BATADV_TT_CLIENT_PENDING
)
1908 batadv_tt_local_entry_free_ref(tt_local_entry
);
1912 void batadv_handle_tt_response(struct batadv_priv
*bat_priv
,
1913 struct batadv_tt_query_packet
*tt_response
)
1915 struct batadv_tt_req_node
*node
, *safe
;
1916 struct batadv_orig_node
*orig_node
= NULL
;
1917 struct batadv_tt_change
*tt_change
;
1919 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
1920 "Received TT_RESPONSE from %pM for ttvn %d t_size: %d [%c]\n",
1921 tt_response
->src
, tt_response
->ttvn
,
1922 ntohs(tt_response
->tt_data
),
1923 (tt_response
->flags
& BATADV_TT_FULL_TABLE
? 'F' : '.'));
1925 /* we should have never asked a backbone gw */
1926 if (batadv_bla_is_backbone_gw_orig(bat_priv
, tt_response
->src
))
1929 orig_node
= batadv_orig_hash_find(bat_priv
, tt_response
->src
);
1933 if (tt_response
->flags
& BATADV_TT_FULL_TABLE
) {
1934 batadv_tt_fill_gtable(bat_priv
, tt_response
);
1936 tt_change
= (struct batadv_tt_change
*)(tt_response
+ 1);
1937 batadv_tt_update_changes(bat_priv
, orig_node
,
1938 ntohs(tt_response
->tt_data
),
1939 tt_response
->ttvn
, tt_change
);
1942 /* Delete the tt_req_node from pending tt_requests list */
1943 spin_lock_bh(&bat_priv
->tt
.req_list_lock
);
1944 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt
.req_list
, list
) {
1945 if (!batadv_compare_eth(node
->addr
, tt_response
->src
))
1947 list_del(&node
->list
);
1950 spin_unlock_bh(&bat_priv
->tt
.req_list_lock
);
1952 /* Recalculate the CRC for this orig_node and store it */
1953 orig_node
->tt_crc
= batadv_tt_global_crc(bat_priv
, orig_node
);
1954 /* Roaming phase is over: tables are in sync again. I can
1957 orig_node
->tt_poss_change
= false;
1960 batadv_orig_node_free_ref(orig_node
);
/* Initialise local and global translation tables and arm the periodic
 * purge timer. Returns a negative errno on failure, positive on
 * success (kept as-is: callers test for < 0).
 */
int batadv_tt_init(struct batadv_priv *bat_priv)
{
	int ret;

	ret = batadv_tt_local_init(bat_priv);
	if (ret < 0)
		return ret;

	ret = batadv_tt_global_init(bat_priv);
	if (ret < 0)
		return ret;

	batadv_tt_start_timer(bat_priv);

	return 1;
}
1980 static void batadv_tt_roam_list_free(struct batadv_priv
*bat_priv
)
1982 struct batadv_tt_roam_node
*node
, *safe
;
1984 spin_lock_bh(&bat_priv
->tt
.roam_list_lock
);
1986 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt
.roam_list
, list
) {
1987 list_del(&node
->list
);
1991 spin_unlock_bh(&bat_priv
->tt
.roam_list_lock
);
1994 static void batadv_tt_roam_purge(struct batadv_priv
*bat_priv
)
1996 struct batadv_tt_roam_node
*node
, *safe
;
1998 spin_lock_bh(&bat_priv
->tt
.roam_list_lock
);
1999 list_for_each_entry_safe(node
, safe
, &bat_priv
->tt
.roam_list
, list
) {
2000 if (!batadv_has_timed_out(node
->first_time
,
2001 BATADV_ROAMING_MAX_TIME
))
2004 list_del(&node
->list
);
2007 spin_unlock_bh(&bat_priv
->tt
.roam_list_lock
);
2010 /* This function checks whether the client already reached the
2011 * maximum number of possible roaming phases. In this case the ROAMING_ADV
2014 * returns true if the ROAMING_ADV can be sent, false otherwise
2016 static bool batadv_tt_check_roam_count(struct batadv_priv
*bat_priv
,
2019 struct batadv_tt_roam_node
*tt_roam_node
;
2022 spin_lock_bh(&bat_priv
->tt
.roam_list_lock
);
2023 /* The new tt_req will be issued only if I'm not waiting for a
2024 * reply from the same orig_node yet
2026 list_for_each_entry(tt_roam_node
, &bat_priv
->tt
.roam_list
, list
) {
2027 if (!batadv_compare_eth(tt_roam_node
->addr
, client
))
2030 if (batadv_has_timed_out(tt_roam_node
->first_time
,
2031 BATADV_ROAMING_MAX_TIME
))
2034 if (!batadv_atomic_dec_not_zero(&tt_roam_node
->counter
))
2035 /* Sorry, you roamed too many times! */
2042 tt_roam_node
= kmalloc(sizeof(*tt_roam_node
), GFP_ATOMIC
);
2046 tt_roam_node
->first_time
= jiffies
;
2047 atomic_set(&tt_roam_node
->counter
,
2048 BATADV_ROAMING_MAX_COUNT
- 1);
2049 memcpy(tt_roam_node
->addr
, client
, ETH_ALEN
);
2051 list_add(&tt_roam_node
->list
, &bat_priv
->tt
.roam_list
);
2056 spin_unlock_bh(&bat_priv
->tt
.roam_list_lock
);
2060 static void batadv_send_roam_adv(struct batadv_priv
*bat_priv
, uint8_t *client
,
2061 struct batadv_orig_node
*orig_node
)
2063 struct batadv_neigh_node
*neigh_node
= NULL
;
2064 struct sk_buff
*skb
= NULL
;
2065 struct batadv_roam_adv_packet
*roam_adv_packet
;
2067 struct batadv_hard_iface
*primary_if
;
2068 size_t len
= sizeof(*roam_adv_packet
);
2070 /* before going on we have to check whether the client has
2071 * already roamed to us too many times
2073 if (!batadv_tt_check_roam_count(bat_priv
, client
))
2076 skb
= dev_alloc_skb(sizeof(*roam_adv_packet
) + ETH_HLEN
);
2080 skb_reserve(skb
, ETH_HLEN
);
2082 roam_adv_packet
= (struct batadv_roam_adv_packet
*)skb_put(skb
, len
);
2084 roam_adv_packet
->header
.packet_type
= BATADV_ROAM_ADV
;
2085 roam_adv_packet
->header
.version
= BATADV_COMPAT_VERSION
;
2086 roam_adv_packet
->header
.ttl
= BATADV_TTL
;
2087 roam_adv_packet
->reserved
= 0;
2088 primary_if
= batadv_primary_if_get_selected(bat_priv
);
2091 memcpy(roam_adv_packet
->src
, primary_if
->net_dev
->dev_addr
, ETH_ALEN
);
2092 batadv_hardif_free_ref(primary_if
);
2093 memcpy(roam_adv_packet
->dst
, orig_node
->orig
, ETH_ALEN
);
2094 memcpy(roam_adv_packet
->client
, client
, ETH_ALEN
);
2096 neigh_node
= batadv_orig_node_get_router(orig_node
);
2100 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
2101 "Sending ROAMING_ADV to %pM (client %pM) via %pM\n",
2102 orig_node
->orig
, client
, neigh_node
->addr
);
2104 batadv_inc_counter(bat_priv
, BATADV_CNT_TT_ROAM_ADV_TX
);
2106 batadv_send_skb_packet(skb
, neigh_node
->if_incoming
, neigh_node
->addr
);
2111 batadv_neigh_node_free_ref(neigh_node
);
2117 static void batadv_tt_purge(struct work_struct
*work
)
2119 struct delayed_work
*delayed_work
;
2120 struct batadv_priv_tt
*priv_tt
;
2121 struct batadv_priv
*bat_priv
;
2123 delayed_work
= container_of(work
, struct delayed_work
, work
);
2124 priv_tt
= container_of(delayed_work
, struct batadv_priv_tt
, work
);
2125 bat_priv
= container_of(priv_tt
, struct batadv_priv
, tt
);
2127 batadv_tt_local_purge(bat_priv
);
2128 batadv_tt_global_roam_purge(bat_priv
);
2129 batadv_tt_req_purge(bat_priv
);
2130 batadv_tt_roam_purge(bat_priv
);
2132 batadv_tt_start_timer(bat_priv
);
2135 void batadv_tt_free(struct batadv_priv
*bat_priv
)
2137 cancel_delayed_work_sync(&bat_priv
->tt
.work
);
2139 batadv_tt_local_table_free(bat_priv
);
2140 batadv_tt_global_table_free(bat_priv
);
2141 batadv_tt_req_list_free(bat_priv
);
2142 batadv_tt_changes_list_free(bat_priv
);
2143 batadv_tt_roam_list_free(bat_priv
);
2145 kfree(bat_priv
->tt
.last_changeset
);
2148 /* This function will enable or disable the specified flags for all the entries
2149 * in the given hash table and returns the number of modified entries
2151 static uint16_t batadv_tt_set_flags(struct batadv_hashtable
*hash
,
2152 uint16_t flags
, bool enable
)
2155 uint16_t changed_num
= 0;
2156 struct hlist_head
*head
;
2157 struct hlist_node
*node
;
2158 struct batadv_tt_common_entry
*tt_common_entry
;
2163 for (i
= 0; i
< hash
->size
; i
++) {
2164 head
= &hash
->table
[i
];
2167 hlist_for_each_entry_rcu(tt_common_entry
, node
,
2170 if ((tt_common_entry
->flags
& flags
) == flags
)
2172 tt_common_entry
->flags
|= flags
;
2174 if (!(tt_common_entry
->flags
& flags
))
2176 tt_common_entry
->flags
&= ~flags
;
2186 /* Purge out all the tt local entries marked with BATADV_TT_CLIENT_PENDING */
2187 static void batadv_tt_local_purge_pending_clients(struct batadv_priv
*bat_priv
)
2189 struct batadv_hashtable
*hash
= bat_priv
->tt
.local_hash
;
2190 struct batadv_tt_common_entry
*tt_common
;
2191 struct batadv_tt_local_entry
*tt_local
;
2192 struct hlist_node
*node
, *node_tmp
;
2193 struct hlist_head
*head
;
2194 spinlock_t
*list_lock
; /* protects write access to the hash lists */
2200 for (i
= 0; i
< hash
->size
; i
++) {
2201 head
= &hash
->table
[i
];
2202 list_lock
= &hash
->list_locks
[i
];
2204 spin_lock_bh(list_lock
);
2205 hlist_for_each_entry_safe(tt_common
, node
, node_tmp
, head
,
2207 if (!(tt_common
->flags
& BATADV_TT_CLIENT_PENDING
))
2210 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
2211 "Deleting local tt entry (%pM): pending\n",
2214 atomic_dec(&bat_priv
->tt
.local_entry_num
);
2215 hlist_del_rcu(node
);
2216 tt_local
= container_of(tt_common
,
2217 struct batadv_tt_local_entry
,
2219 batadv_tt_local_entry_free_ref(tt_local
);
2221 spin_unlock_bh(list_lock
);
2226 static int batadv_tt_commit_changes(struct batadv_priv
*bat_priv
,
2227 unsigned char **packet_buff
,
2228 int *packet_buff_len
, int packet_min_len
)
2230 uint16_t changed_num
= 0;
2232 if (atomic_read(&bat_priv
->tt
.local_changes
) < 1)
2235 changed_num
= batadv_tt_set_flags(bat_priv
->tt
.local_hash
,
2236 BATADV_TT_CLIENT_NEW
, false);
2238 /* all reset entries have to be counted as local entries */
2239 atomic_add(changed_num
, &bat_priv
->tt
.local_entry_num
);
2240 batadv_tt_local_purge_pending_clients(bat_priv
);
2241 bat_priv
->tt
.local_crc
= batadv_tt_local_crc(bat_priv
);
2243 /* Increment the TTVN only once per OGM interval */
2244 atomic_inc(&bat_priv
->tt
.vn
);
2245 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
2246 "Local changes committed, updating to ttvn %u\n",
2247 (uint8_t)atomic_read(&bat_priv
->tt
.vn
));
2248 bat_priv
->tt
.poss_change
= false;
2250 /* reset the sending counter */
2251 atomic_set(&bat_priv
->tt
.ogm_append_cnt
, BATADV_TT_OGM_APPEND_MAX
);
2253 return batadv_tt_changes_fill_buff(bat_priv
, packet_buff
,
2254 packet_buff_len
, packet_min_len
);
2257 /* when calling this function (hard_iface == primary_if) has to be true */
2258 int batadv_tt_append_diff(struct batadv_priv
*bat_priv
,
2259 unsigned char **packet_buff
, int *packet_buff_len
,
2264 /* if at least one change happened */
2265 tt_num_changes
= batadv_tt_commit_changes(bat_priv
, packet_buff
,
2269 /* if the changes have been sent often enough */
2270 if ((tt_num_changes
< 0) &&
2271 (!batadv_atomic_dec_not_zero(&bat_priv
->tt
.ogm_append_cnt
))) {
2272 batadv_tt_realloc_packet_buff(packet_buff
, packet_buff_len
,
2273 packet_min_len
, packet_min_len
);
2277 return tt_num_changes
;
2280 bool batadv_is_ap_isolated(struct batadv_priv
*bat_priv
, uint8_t *src
,
2283 struct batadv_tt_local_entry
*tt_local_entry
= NULL
;
2284 struct batadv_tt_global_entry
*tt_global_entry
= NULL
;
2287 if (!atomic_read(&bat_priv
->ap_isolation
))
2290 tt_local_entry
= batadv_tt_local_hash_find(bat_priv
, dst
);
2291 if (!tt_local_entry
)
2294 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, src
);
2295 if (!tt_global_entry
)
2298 if (!_batadv_is_ap_isolated(tt_local_entry
, tt_global_entry
))
2304 if (tt_global_entry
)
2305 batadv_tt_global_entry_free_ref(tt_global_entry
);
2307 batadv_tt_local_entry_free_ref(tt_local_entry
);
2311 void batadv_tt_update_orig(struct batadv_priv
*bat_priv
,
2312 struct batadv_orig_node
*orig_node
,
2313 const unsigned char *tt_buff
, uint8_t tt_num_changes
,
2314 uint8_t ttvn
, uint16_t tt_crc
)
2316 uint8_t orig_ttvn
= (uint8_t)atomic_read(&orig_node
->last_ttvn
);
2317 bool full_table
= true;
2318 struct batadv_tt_change
*tt_change
;
2320 /* don't care about a backbone gateways updates. */
2321 if (batadv_bla_is_backbone_gw_orig(bat_priv
, orig_node
->orig
))
2324 /* orig table not initialised AND first diff is in the OGM OR the ttvn
2325 * increased by one -> we can apply the attached changes
2327 if ((!orig_node
->tt_initialised
&& ttvn
== 1) ||
2328 ttvn
- orig_ttvn
== 1) {
2329 /* the OGM could not contain the changes due to their size or
2330 * because they have already been sent BATADV_TT_OGM_APPEND_MAX
2332 * In this case send a tt request
2334 if (!tt_num_changes
) {
2339 tt_change
= (struct batadv_tt_change
*)tt_buff
;
2340 batadv_tt_update_changes(bat_priv
, orig_node
, tt_num_changes
,
2343 /* Even if we received the precomputed crc with the OGM, we
2344 * prefer to recompute it to spot any possible inconsistency
2345 * in the global table
2347 orig_node
->tt_crc
= batadv_tt_global_crc(bat_priv
, orig_node
);
2349 /* The ttvn alone is not enough to guarantee consistency
2350 * because a single value could represent different states
2351 * (due to the wrap around). Thus a node has to check whether
2352 * the resulting table (after applying the changes) is still
2353 * consistent or not. E.g. a node could disconnect while its
2354 * ttvn is X and reconnect on ttvn = X + TTVN_MAX: in this case
2355 * checking the CRC value is mandatory to detect the
2358 if (orig_node
->tt_crc
!= tt_crc
)
2361 /* Roaming phase is over: tables are in sync again. I can
2364 orig_node
->tt_poss_change
= false;
2366 /* if we missed more than one change or our tables are not
2367 * in sync anymore -> request fresh tt data
2369 if (!orig_node
->tt_initialised
|| ttvn
!= orig_ttvn
||
2370 orig_node
->tt_crc
!= tt_crc
) {
2372 batadv_dbg(BATADV_DBG_TT
, bat_priv
,
2373 "TT inconsistency for %pM. Need to retrieve the correct information (ttvn: %u last_ttvn: %u crc: %u last_crc: %u num_changes: %u)\n",
2374 orig_node
->orig
, ttvn
, orig_ttvn
, tt_crc
,
2375 orig_node
->tt_crc
, tt_num_changes
);
2376 batadv_send_tt_request(bat_priv
, orig_node
, ttvn
,
2377 tt_crc
, full_table
);
2383 /* returns true whether we know that the client has moved from its old
2384 * originator to another one. This entry is kept is still kept for consistency
2387 bool batadv_tt_global_client_is_roaming(struct batadv_priv
*bat_priv
,
2390 struct batadv_tt_global_entry
*tt_global_entry
;
2393 tt_global_entry
= batadv_tt_global_hash_find(bat_priv
, addr
);
2394 if (!tt_global_entry
)
2397 ret
= tt_global_entry
->common
.flags
& BATADV_TT_CLIENT_ROAM
;
2398 batadv_tt_global_entry_free_ref(tt_global_entry
);