mac80211: clean up mesh code
net/mac80211/mesh_pathtbl.c
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author:     Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER	2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN		2
static inline bool mpath_expired(struct mesh_path *mpath)
{
	return (mpath->flags & MESH_PATH_ACTIVE) &&
	       time_after(jiffies, mpath->exp_time) &&
	       !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;

/* This lock will have the grow table function as writer and add / delete
 * nodes as readers. RCU provides sufficient protection only when reading
 * the table (i.e. doing lookups). Adding or removing nodes requires we
 * take the read lock or we risk operating on an old table. The write lock
 * is only needed when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
	return rcu_dereference_protected(mesh_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
	return rcu_dereference_protected(mpp_paths,
		lockdep_is_held(&pathtbl_resize_lock));
}

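/*
 * Illustrative locking sketch (comment only; it mirrors what
 * mesh_path_add() and mesh_mpath_table_grow() below actually do):
 * adding or deleting a node takes the resize lock's read side plus the
 * per-bucket spinlock, while growing the table takes the write side:
 *
 *	read_lock_bh(&pathtbl_resize_lock);
 *	tbl = resize_dereference_mesh_paths();
 *	spin_lock(&tbl->hashwlock[hash_idx]);
 *	... add or remove an mpath_node ...
 *	spin_unlock(&tbl->hashwlock[hash_idx]);
 *	read_unlock_bh(&pathtbl_resize_lock);
 */
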
/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, p, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, p, &tbl->hash_buckets[i], list)

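/*
 * Correct usage therefore dereferences the table into a local variable
 * first, as the callers later in this file do:
 *
 *	struct mesh_table *tbl;
 *	struct mpath_node *node;
 *	struct hlist_node *p;
 *	int i;
 *
 *	rcu_read_lock();
 *	tbl = rcu_dereference(mesh_paths);
 *	for_each_mesh_entry(tbl, p, node, i)
 *		... use node->mpath ...
 *	rcu_read_unlock();
 */
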
static struct mesh_table *mesh_table_alloc(int size_order)
{
	int i;
	struct mesh_table *newtbl;

	newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
	if (!newtbl)
		return NULL;

	newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
			(1 << size_order), GFP_ATOMIC);

	if (!newtbl->hash_buckets) {
		kfree(newtbl);
		return NULL;
	}

	newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
			(1 << size_order), GFP_ATOMIC);
	if (!newtbl->hashwlock) {
		kfree(newtbl->hash_buckets);
		kfree(newtbl);
		return NULL;
	}

	newtbl->size_order = size_order;
	newtbl->hash_mask = (1 << size_order) - 1;
	atomic_set(&newtbl->entries, 0);
	get_random_bytes(&newtbl->hash_rnd,
			 sizeof(newtbl->hash_rnd));
	for (i = 0; i <= newtbl->hash_mask; i++)
		spin_lock_init(&newtbl->hashwlock[i]);
	spin_lock_init(&newtbl->gates_lock);

	return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, p, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< oldtbl->mean_chain_len * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	newtbl->free_node = oldtbl->free_node;
	newtbl->mean_chain_len = oldtbl->mean_chain_len;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
			   struct mesh_table *tbl)
{
	/* Use last four bytes of hw addr and interface index as hash index */
	return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
			    tbl->hash_rnd) & tbl->hash_mask;
}

/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	BUG_ON(gate_mpath == from_mpath);
	BUG_ON(!gate_mpath->next_hop);

	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_node *n;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
	hlist_for_each_entry_rcu(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within an RCU read section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

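/*
 * Caller sketch (illustrative only, not part of this file): the lookup
 * and any use of the returned mpath must stay inside one RCU read
 * section:
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (mpath && (mpath->flags & MESH_PATH_ACTIVE))
 *		next_hop = rcu_dereference(mpath->next_hop);
 *	rcu_read_unlock();
 */
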
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within an RCU read section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(mesh_paths);
	struct mpath_node *node;
	struct hlist_node *p;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, p, node, i) {
		if (sdata && node->mpath->sdata != sdata)
			continue;
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath as a mesh gate to our path table
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	struct hlist_node *n;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);

	hlist_for_each_entry_rcu(gate, n, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *p, *q;

	hlist_for_each_entry_safe(gate, p, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
	return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 on success
 *
 * State: the initial state of the new path is set to 0
 */
int mesh_path_add(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
		return -ENOSPC;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	eth_broadcast_addr(new_mpath->rann_snd_addr);
	new_mpath->is_root = false;
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	new_mpath->timer.data = (unsigned long) new_mpath;
	new_mpath->timer.function = mesh_path_timer;
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);
	init_timer(&new_mpath->timer);

	tbl = resize_dereference_mesh_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	mesh_paths_generation++;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	atomic_dec(&sdata->u.mesh.mpaths);
	return err;
}

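/*
 * Typical caller flow (a hedged sketch; the real logic lives in the mesh
 * TX/HWMP code, e.g. mesh_nexthop_resolve()): create the path on demand,
 * look it up again, and kick off discovery if it is not active yet.
 * GFP_ATOMIC allocations above make this safe under rcu_read_lock():
 *
 *	rcu_read_lock();
 *	mpath = mesh_path_lookup(sdata, dst);
 *	if (!mpath) {
 *		mesh_path_add(sdata, dst);
 *		mpath = mesh_path_lookup(sdata, dst);
 *	}
 *	if (mpath && !(mpath->flags & MESH_PATH_ACTIVE))
 *		... queue the frame and start path discovery ...
 *	rcu_read_unlock();
 */
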
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths();
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	err = -ENOMEM;
	new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	read_lock_bh(&pathtbl_resize_lock);
	memcpy(new_mpath->dst, dst, ETH_ALEN);
	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_mpath->sdata = sdata;
	new_mpath->flags = 0;
	skb_queue_head_init(&new_mpath->frame_queue);
	new_node->mpath = new_mpath;
	init_timer(&new_mpath->timer);
	new_mpath->exp_time = jiffies;
	spin_lock_init(&new_mpath->state_lock);

	tbl = resize_dereference_mpp_paths();

	hash_idx = mesh_table_hash(dst, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    tbl->mean_chain_len * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;
	__le16 reason = cpu_to_le16(WLAN_REASON_MESH_PATH_DEST_UNREACHABLE);

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
					   sdata->u.mesh.mshcfg.element_ttl,
					   mpath->dst, cpu_to_le32(mpath->sn),
					   reason, bcast);
		}
	}
	rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	del_timer_sync(&node->mpath->timer);
	atomic_dec(&sdata->u.mesh.mpaths);
	kfree(node->mpath);
	kfree(node);
}

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath;

	mpath = node->mpath;
	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in an RCU read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (rcu_dereference(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, p, node, i) {
		mpath = node->mpath;
		if (mpath->sdata != sdata)
			continue;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths();
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&pathtbl_resize_lock);
	rcu_read_unlock();
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	struct hlist_node *n;
	int hash_idx;
	int err = 0;

	read_lock_bh(&pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths();
	hash_idx = mesh_table_hash(addr, sdata, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, n, bucket, list) {
		mpath = node->mpath;
		if (mpath->sdata == sdata &&
		    ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	mesh_paths_generation++;
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&pathtbl_resize_lock);
	return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
	if (mpath->flags & MESH_PATH_ACTIVE)
		ieee80211_add_pending_skbs(mpath->sdata->local,
					   &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
	struct ieee80211_sub_if_data *sdata = mpath->sdata;
	struct hlist_node *n;
	struct mesh_table *tbl;
	struct mesh_path *from_mpath = mpath;
	struct mpath_node *gate = NULL;
	bool copy = false;
	struct hlist_head *known_gates;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	known_gates = tbl->known_gates;
	rcu_read_unlock();

	if (!known_gates)
		return -EHOSTUNREACH;

	hlist_for_each_entry_rcu(gate, n, known_gates, list) {
		if (gate->mpath->sdata != sdata)
			continue;

		if (gate->mpath->flags & MESH_PATH_ACTIVE) {
			mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
			mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
			from_mpath = gate->mpath;
			copy = true;
		} else {
			mpath_dbg(sdata,
				  "Not forwarding %p (flags %#x)\n",
				  gate->mpath, gate->mpath->flags);
		}
	}

	hlist_for_each_entry_rcu(gate, n, known_gates, list)
		if (gate->mpath->sdata == sdata) {
			mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
			mesh_path_tx_pending(gate->mpath);
		}

	return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

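/*
 * Worked example of the chain above (illustrative): with active gates
 * G1, G2 and G3, the failed mpath's frames are first moved to G1's
 * queue, then copied from G1 to G2 and from G2 to G3 ("copy" stays true
 * after the first transfer), and the second pass finally flushes every
 * gate's pending queue via mesh_path_tx_pending().
 */
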
/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
			     struct sk_buff *skb)
{
	kfree_skb(skb);
	sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
	struct sk_buff *skb;

	while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
		mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must not be called holding mpath->state_lock;
 * it takes the lock itself.
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
	spin_lock_bh(&mpath->state_lock);
	mesh_path_assign_nexthop(mpath, next_hop);
	mpath->sn = 0xffff;
	mpath->metric = 0;
	mpath->hop_count = 0;
	mpath->exp_time = 0;
	mpath->flags |= MESH_PATH_FIXED;
	mesh_path_activate(mpath);
	spin_unlock_bh(&mpath->state_lock);
	mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
	struct mesh_path *mpath;
	struct mpath_node *node = hlist_entry(p, struct mpath_node, list);

	mpath = node->mpath;
	hlist_del_rcu(p);
	if (free_leafs) {
		del_timer_sync(&mpath->timer);
		kfree(mpath);
	}
	kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
	struct mesh_path *mpath;
	struct mpath_node *node, *new_node;
	u32 hash_idx;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (new_node == NULL)
		return -ENOMEM;

	node = hlist_entry(p, struct mpath_node, list);
	mpath = node->mpath;
	new_node->mpath = mpath;
	hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
	hlist_add_head(&new_node->list,
		       &newtbl->hash_buckets[hash_idx]);
	return 0;
}

int mesh_pathtbl_init(void)
{
	struct mesh_table *tbl_path, *tbl_mpp;
	int ret;

	tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_path)
		return -ENOMEM;
	tbl_path->free_node = &mesh_path_node_free;
	tbl_path->copy_node = &mesh_path_node_copy;
	tbl_path->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_path->known_gates) {
		ret = -ENOMEM;
		goto free_path;
	}
	INIT_HLIST_HEAD(tbl_path->known_gates);

	tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
	if (!tbl_mpp) {
		ret = -ENOMEM;
		goto free_path;
	}
	tbl_mpp->free_node = &mesh_path_node_free;
	tbl_mpp->copy_node = &mesh_path_node_copy;
	tbl_mpp->mean_chain_len = MEAN_CHAIN_LEN;
	tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
	if (!tbl_mpp->known_gates) {
		ret = -ENOMEM;
		goto free_mpp;
	}
	INIT_HLIST_HEAD(tbl_mpp->known_gates);

	/* Need no locking since this is during init */
	RCU_INIT_POINTER(mesh_paths, tbl_path);
	RCU_INIT_POINTER(mpp_paths, tbl_mpp);

	return 0;

free_mpp:
	mesh_table_free(tbl_mpp, true);
free_path:
	mesh_table_free(tbl_path, true);
	return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_node *p;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(mesh_paths);
	for_each_mesh_entry(tbl, p, node, i) {
		if (node->mpath->sdata != sdata)
			continue;
		mpath = node->mpath;
		if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
		    (!(mpath->flags & MESH_PATH_FIXED)) &&
		    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
			mesh_path_del(mpath->sdata, mpath->dst);
	}
	rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
	/* no need for locking during exit path */
	mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
	mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}