mac80211: mesh: factor out common mesh path allocation code
[deliverable/linux.git] / net / mac80211 / mesh_pathtbl.c
CommitLineData
eb2b9311 1/*
264d9b7d 2 * Copyright (c) 2008, 2009 open80211s Ltd.
eb2b9311
LCC
3 * Author: Luis Carlos Cobo <luisca@cozybit.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/etherdevice.h>
11#include <linux/list.h>
eb2b9311 12#include <linux/random.h>
5a0e3ad6 13#include <linux/slab.h>
eb2b9311
LCC
14#include <linux/spinlock.h>
15#include <linux/string.h>
16#include <net/mac80211.h>
4777be41 17#include "wme.h"
eb2b9311
LCC
18#include "ieee80211_i.h"
19#include "mesh.h"
20
21/* There will be initially 2^INIT_PATHS_SIZE_ORDER buckets */
22#define INIT_PATHS_SIZE_ORDER 2
23
24/* Keep the mean chain length below this constant */
25#define MEAN_CHAIN_LEN 2
26
bf7cd94d
JB
27static inline bool mpath_expired(struct mesh_path *mpath)
28{
29 return (mpath->flags & MESH_PATH_ACTIVE) &&
30 time_after(jiffies, mpath->exp_time) &&
31 !(mpath->flags & MESH_PATH_FIXED);
32}
eb2b9311
LCC
33
/* Hash bucket entry: one node per mesh_path reference held by a table. */
struct mpath_node {
	struct hlist_node list;
	struct rcu_head rcu;
	/* This indirection allows two different tables to point to the same
	 * mesh_path structure, useful when resizing
	 */
	struct mesh_path *mpath;
};
42
/*
 * Dereference an RCU-protected path table while the caller holds
 * sdata->u.mesh.pathtbl_resize_lock (either read or write side);
 * lockdep_is_held() documents and checks that requirement.
 */
static inline struct mesh_table *resize_dereference_paths(
	struct ieee80211_sub_if_data *sdata,
	struct mesh_table __rcu *table)
{
	return rcu_dereference_protected(table,
		lockdep_is_held(&sdata->u.mesh.pathtbl_resize_lock));
}
50
/* Resize-locked accessor for the per-interface mesh path table. */
static inline struct mesh_table *resize_dereference_mesh_paths(
	struct ieee80211_sub_if_data *sdata)
{
	return resize_dereference_paths(sdata, sdata->u.mesh.mesh_paths);
}
56
/* Resize-locked accessor for the per-interface mesh proxy path table. */
static inline struct mesh_table *resize_dereference_mpp_paths(
	struct ieee80211_sub_if_data *sdata)
{
	return resize_dereference_paths(sdata, sdata->u.mesh.mpp_paths);
}
62
/*
 * Iterate every mpath_node in every hash bucket of @tbl; must run inside
 * an RCU read-side section (uses hlist_for_each_entry_rcu).
 *
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
	for (i = 0; i <= tbl->hash_mask; i++) \
		hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
349eb8cf
JB
72
73
6b86bd62
JB
74static struct mesh_table *mesh_table_alloc(int size_order)
75{
76 int i;
77 struct mesh_table *newtbl;
78
d676ff49 79 newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
6b86bd62
JB
80 if (!newtbl)
81 return NULL;
82
83 newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
d676ff49 84 (1 << size_order), GFP_ATOMIC);
6b86bd62
JB
85
86 if (!newtbl->hash_buckets) {
87 kfree(newtbl);
88 return NULL;
89 }
90
91 newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
d676ff49 92 (1 << size_order), GFP_ATOMIC);
6b86bd62
JB
93 if (!newtbl->hashwlock) {
94 kfree(newtbl->hash_buckets);
95 kfree(newtbl);
96 return NULL;
97 }
98
99 newtbl->size_order = size_order;
100 newtbl->hash_mask = (1 << size_order) - 1;
101 atomic_set(&newtbl->entries, 0);
102 get_random_bytes(&newtbl->hash_rnd,
103 sizeof(newtbl->hash_rnd));
104 for (i = 0; i <= newtbl->hash_mask; i++)
105 spin_lock_init(&newtbl->hashwlock[i]);
5ee68e5b 106 spin_lock_init(&newtbl->gates_lock);
6b86bd62
JB
107
108 return newtbl;
109}
110
/* Free the table's arrays and the table itself; entries must already be
 * gone (or intentionally leaked by the caller). */
static void __mesh_table_free(struct mesh_table *tbl)
{
	kfree(tbl->hash_buckets);
	kfree(tbl->hashwlock);
	kfree(tbl);
}
117
/*
 * mesh_table_free - empty and free a path table.
 * @tbl: table to destroy
 * @free_leafs: also free the mesh_path leaves (passed to tbl->free_node)
 *	and the known-gates list; false when the leaves live on in a
 *	replacement table after a resize.
 */
static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
	struct hlist_head *mesh_hash;
	struct hlist_node *p, *q;
	struct mpath_node *gate;
	int i;

	mesh_hash = tbl->hash_buckets;
	for (i = 0; i <= tbl->hash_mask; i++) {
		/* per-bucket lock excludes concurrent writers on this chain */
		spin_lock_bh(&tbl->hashwlock[i]);
		hlist_for_each_safe(p, q, &mesh_hash[i]) {
			tbl->free_node(p, free_leafs);
			atomic_dec(&tbl->entries);
		}
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
	if (free_leafs) {
		spin_lock_bh(&tbl->gates_lock);
		hlist_for_each_entry_safe(gate, q,
					  tbl->known_gates, list) {
			hlist_del(&gate->list);
			kfree(gate);
		}
		kfree(tbl->known_gates);
		spin_unlock_bh(&tbl->gates_lock);
	}

	__mesh_table_free(tbl);
}
147
/*
 * mesh_table_grow - copy all entries of @oldtbl into the larger @newtbl.
 *
 * Returns 0 on success, -EAGAIN if the old table is not actually full
 * enough to justify growing, or -ENOMEM if copying a node failed (in
 * which case everything already copied into @newtbl is released again).
 *
 * The caller must prevent concurrent modification of @oldtbl (holds the
 * pathtbl resize lock for writing).
 */
static int mesh_table_grow(struct mesh_table *oldtbl,
			   struct mesh_table *newtbl)
{
	struct hlist_head *oldhash;
	struct hlist_node *p, *q;
	int i;

	if (atomic_read(&oldtbl->entries)
			< MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
		return -EAGAIN;

	/* the new table inherits the callbacks and the gate list */
	newtbl->free_node = oldtbl->free_node;
	newtbl->copy_node = oldtbl->copy_node;
	newtbl->known_gates = oldtbl->known_gates;
	atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

	oldhash = oldtbl->hash_buckets;
	for (i = 0; i <= oldtbl->hash_mask; i++)
		hlist_for_each(p, &oldhash[i])
			if (oldtbl->copy_node(p, newtbl) < 0)
				goto errcopy;

	return 0;

errcopy:
	/* undo the partial copy; leaves (0) stay owned by the old table */
	for (i = 0; i <= newtbl->hash_mask; i++) {
		hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
			oldtbl->free_node(p, 0);
	}
	return -ENOMEM;
}
179
44395481 180static u32 mesh_table_hash(const u8 *addr, struct mesh_table *tbl)
6b86bd62 181{
44395481
BC
182 /* Use last four bytes of hw addr as hash index */
183 return jhash_1word(*(u32 *)(addr+2), tbl->hash_rnd) & tbl->hash_mask;
6b86bd62 184}
f5ea9120 185
eb2b9311
LCC
186
/**
 *
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Besides publishing the new next hop, rewrites the RA/TA of every frame
 * already queued on the path so it is addressed to the new hop.
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	unsigned long flags;

	rcu_assign_pointer(mpath->next_hop, sta);

	/* take the queue lock directly so the whole walk is atomic with
	 * respect to enqueue/dequeue on frame_queue */
	spin_lock_irqsave(&mpath->frame_queue.lock, flags);
	skb_queue_walk(&mpath->frame_queue, skb) {
		hdr = (struct ieee80211_hdr *) skb->data;
		memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
		memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
		ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
	}

	spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}
214
/*
 * prepare_for_gate - rewrite a queued frame so it can be sent via a gate.
 * @skb: frame to rewrite (headers modified in place / grown)
 * @dst_addr: final destination to place in addr3
 * @gate_mpath: active path to the gate; its next_hop becomes addr1
 *
 * If the mesh header has no Address Extension yet, one is inserted
 * (AE_A5_A6) preserving the original DA/SA in eaddr1/eaddr2.
 */
static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
			     struct mesh_path *gate_mpath)
{
	struct ieee80211_hdr *hdr;
	struct ieee80211s_hdr *mshdr;
	int mesh_hdrlen, hdrlen;
	char *next_hop;

	hdr = (struct ieee80211_hdr *) skb->data;
	hdrlen = ieee80211_hdrlen(hdr->frame_control);
	mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

	if (!(mshdr->flags & MESH_FLAGS_AE)) {
		/* size of the fixed part of the mesh header */
		mesh_hdrlen = 6;

		/* make room for the two extended addresses */
		skb_push(skb, 2 * ETH_ALEN);
		memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

		hdr = (struct ieee80211_hdr *) skb->data;

		/* we preserve the previous mesh header and only add
		 * the new addresses */
		mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
		mshdr->flags = MESH_FLAGS_AE_A5_A6;
		memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
		memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
	}

	/* update next hop */
	hdr = (struct ieee80211_hdr *) skb->data;
	rcu_read_lock();
	next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
	memcpy(hdr->addr1, next_hop, ETH_ALEN);
	rcu_read_unlock();
	memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
	memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}
254
/**
 *
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
				    struct mesh_path *from_mpath,
				    bool copy)
{
	struct sk_buff *skb, *fskb, *tmp;
	struct sk_buff_head failq;
	unsigned long flags;

	if (WARN_ON(gate_mpath == from_mpath))
		return;
	if (WARN_ON(!gate_mpath->next_hop))
		return;

	/* drain the failed queue onto a private list first so the walk
	 * below needs no queue lock */
	__skb_queue_head_init(&failq);

	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice_init(&from_mpath->frame_queue, &failq);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

	skb_queue_walk_safe(&failq, fskb, tmp) {
		if (skb_queue_len(&gate_mpath->frame_queue) >=
		    MESH_FRAME_QUEUE_LEN) {
			mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
			break;
		}

		/* always work on a copy; the original is only consumed
		 * when moving (see below) */
		skb = skb_copy(fskb, GFP_ATOMIC);
		if (WARN_ON(!skb))
			break;

		prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
		skb_queue_tail(&gate_mpath->frame_queue, skb);

		if (copy)
			continue;

		__skb_unlink(fskb, &failq);
		kfree_skb(fskb);
	}

	mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
		  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

	if (!copy)
		return;

	/* copy mode: put the originals back on the source queue */
	spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
	skb_queue_splice(&failq, &from_mpath->frame_queue);
	spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}
323
eb2b9311 324
/*
 * mpath_lookup - find @dst in @tbl; common helper for both path tables.
 *
 * Clears MESH_PATH_ACTIVE on an expired entry before returning it, so
 * callers see a consistent state. Must run under rcu_read_lock().
 * NOTE(review): @sdata is currently unused here (tables became
 * per-interface); kept for interface stability.
 */
static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
				      struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct hlist_head *bucket;
	struct mpath_node *node;

	bucket = &tbl->hash_buckets[mesh_table_hash(dst, tbl)];
	hlist_for_each_entry_rcu(node, bucket, list) {
		mpath = node->mpath;
		if (ether_addr_equal(dst, mpath->dst)) {
			if (mpath_expired(mpath)) {
				spin_lock_bh(&mpath->state_lock);
				mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&mpath->state_lock);
			}
			return mpath;
		}
	}
	return NULL;
}
346
/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(sdata->u.mesh.mesh_paths), dst,
			    sdata);
}
79617dee 362
/* Like mesh_path_lookup() but for the proxy (MPP) path table.
 * Locking: must be called within a read rcu section. */
struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
	return mpath_lookup(rcu_dereference(sdata->u.mesh.mpp_paths), dst,
			    sdata);
}
369
370
/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Note: iteration order is hash order, so @idx is only stable while the
 * table is unchanged (callers use mesh_paths_generation to detect that).
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
	struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mesh_paths);
	struct mpath_node *node;
	int i;
	int j = 0;

	for_each_mesh_entry(tbl, node, i) {
		if (j++ == idx) {
			if (mpath_expired(node->mpath)) {
				spin_lock_bh(&node->mpath->state_lock);
				node->mpath->flags &= ~MESH_PATH_ACTIVE;
				spin_unlock_bh(&node->mpath->state_lock);
			}
			return node->mpath;
		}
	}

	return NULL;
}
401
a2db2ed3
HR
402/**
403 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
404 * @idx: index
405 * @sdata: local subif, or NULL for all entries
406 *
407 * Returns: pointer to the proxy path structure, or NULL if not found.
408 *
409 * Locking: must be called within a read rcu section.
410 */
411struct mesh_path *
412mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
413{
2bdaf386 414 struct mesh_table *tbl = rcu_dereference(sdata->u.mesh.mpp_paths);
a2db2ed3
HR
415 struct mpath_node *node;
416 int i;
417 int j = 0;
418
419 for_each_mesh_entry(tbl, node, i) {
a2db2ed3
HR
420 if (j++ == idx)
421 return node->mpath;
422 }
423
424 return NULL;
425}
426
/**
 * mesh_path_add_gate - add the given mpath to a mesh gate to our path table
 * @mpath: gate path to add to table
 *
 * Returns 0 on success, -EEXIST if the path is already recorded as a
 * gate, or -ENOMEM on allocation failure.
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
	struct mesh_table *tbl;
	struct mpath_node *gate, *new_gate;
	int err;

	rcu_read_lock();
	tbl = rcu_dereference(mpath->sdata->u.mesh.mesh_paths);

	/* duplicate check is done under RCU only; gates_lock protects the
	 * actual list insertion below */
	hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
		if (gate->mpath == mpath) {
			err = -EEXIST;
			goto err_rcu;
		}

	new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_gate) {
		err = -ENOMEM;
		goto err_rcu;
	}

	mpath->is_gate = true;
	mpath->sdata->u.mesh.num_gates++;
	new_gate->mpath = mpath;
	spin_lock_bh(&tbl->gates_lock);
	hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
	spin_unlock_bh(&tbl->gates_lock);
	mpath_dbg(mpath->sdata,
		  "Mesh path: Recorded new gate: %pM. %d known gates\n",
		  mpath->dst, mpath->sdata->u.mesh.num_gates);
	err = 0;
err_rcu:
	rcu_read_unlock();
	return err;
}
466
/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
	struct mpath_node *gate;
	struct hlist_node *q;

	hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
		if (gate->mpath != mpath)
			continue;
		spin_lock_bh(&tbl->gates_lock);
		hlist_del_rcu(&gate->list);
		/* node may still be referenced by RCU readers */
		kfree_rcu(gate, rcu);
		spin_unlock_bh(&tbl->gates_lock);
		mpath->sdata->u.mesh.num_gates--;
		mpath->is_gate = false;
		mpath_dbg(mpath->sdata,
			  "Mesh path: Deleted gate: %pM. %d known gates\n",
			  mpath->dst, mpath->sdata->u.mesh.num_gates);
		break;
	}
}
494
5ee68e5b
JC
495/**
496 * mesh_gate_num - number of gates known to this interface
497 * @sdata: subif data
498 */
499int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
500{
501 return sdata->u.mesh.num_gates;
502}
503
b15dc38b
BC
504static
505struct mesh_path *mesh_path_new(struct ieee80211_sub_if_data *sdata,
506 const u8 *dst, gfp_t gfp_flags)
507{
508 struct mesh_path *new_mpath;
509
510 new_mpath = kzalloc(sizeof(struct mesh_path), gfp_flags);
511 if (!new_mpath)
512 return NULL;
513
514 memcpy(new_mpath->dst, dst, ETH_ALEN);
515 eth_broadcast_addr(new_mpath->rann_snd_addr);
516 new_mpath->is_root = false;
517 new_mpath->sdata = sdata;
518 new_mpath->flags = 0;
519 skb_queue_head_init(&new_mpath->frame_queue);
520 new_mpath->timer.data = (unsigned long) new_mpath;
521 new_mpath->timer.function = mesh_path_timer;
522 new_mpath->exp_time = jiffies;
523 spin_lock_init(&new_mpath->state_lock);
524 init_timer(&new_mpath->timer);
525
526 return new_mpath;
527}
528
eb2b9311
LCC
529/**
530 * mesh_path_add - allocate and add a new path to the mesh path table
bf7cd94d 531 * @dst: destination address of the path (ETH_ALEN length)
f698d856 532 * @sdata: local subif
eb2b9311 533 *
af901ca1 534 * Returns: 0 on success
eb2b9311
LCC
535 *
536 * State: the initial state of the new path is set to 0
537 */
ae76eef0
BC
538struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
539 const u8 *dst)
eb2b9311 540{
18889231
JC
541 struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
542 struct ieee80211_local *local = sdata->local;
349eb8cf 543 struct mesh_table *tbl;
eb2b9311
LCC
544 struct mesh_path *mpath, *new_mpath;
545 struct mpath_node *node, *new_node;
546 struct hlist_head *bucket;
eb2b9311 547 int grow = 0;
ae76eef0 548 int err;
eb2b9311
LCC
549 u32 hash_idx;
550
b203ca39 551 if (ether_addr_equal(dst, sdata->vif.addr))
eb2b9311 552 /* never add ourselves as neighbours */
ae76eef0 553 return ERR_PTR(-ENOTSUPP);
eb2b9311
LCC
554
555 if (is_multicast_ether_addr(dst))
ae76eef0 556 return ERR_PTR(-ENOTSUPP);
eb2b9311 557
472dbc45 558 if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
ae76eef0
BC
559 return ERR_PTR(-ENOSPC);
560
2bdaf386
BC
561 read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
562 tbl = resize_dereference_mesh_paths(sdata);
ae76eef0 563
44395481 564 hash_idx = mesh_table_hash(dst, tbl);
ae76eef0
BC
565 bucket = &tbl->hash_buckets[hash_idx];
566
567 spin_lock(&tbl->hashwlock[hash_idx]);
568
569 hlist_for_each_entry(node, bucket, list) {
570 mpath = node->mpath;
2bdaf386 571 if (ether_addr_equal(dst, mpath->dst))
ae76eef0
BC
572 goto found;
573 }
eb2b9311 574
402d7752 575 err = -ENOMEM;
b15dc38b 576 new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
402d7752
PE
577 if (!new_mpath)
578 goto err_path_alloc;
579
18889231 580 new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
402d7752
PE
581 if (!new_node)
582 goto err_node_alloc;
f84e71a9 583
eb2b9311 584 new_node->mpath = new_mpath;
eb2b9311 585 hlist_add_head_rcu(&new_node->list, bucket);
349eb8cf 586 if (atomic_inc_return(&tbl->entries) >=
3f73fe9f 587 MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
eb2b9311
LCC
588 grow = 1;
589
2bdaf386 590 sdata->u.mesh.mesh_paths_generation++;
f5ea9120 591
402d7752 592 if (grow) {
18889231 593 set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
64592c8f 594 ieee80211_queue_work(&local->hw, &sdata->work);
eb2b9311 595 }
ae76eef0
BC
596 mpath = new_mpath;
597found:
f06c7885 598 spin_unlock(&tbl->hashwlock[hash_idx]);
2bdaf386 599 read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
ae76eef0
BC
600 return mpath;
601
402d7752
PE
602err_node_alloc:
603 kfree(new_mpath);
604err_path_alloc:
472dbc45 605 atomic_dec(&sdata->u.mesh.mpaths);
ae76eef0 606 spin_unlock(&tbl->hashwlock[hash_idx]);
2bdaf386 607 read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
ae76eef0 608 return ERR_PTR(err);
eb2b9311
LCC
609}
610
/* RCU callback: free a retired table after the grace period; the leaves
 * were copied into the replacement table, so free_leafs is false. */
static void mesh_table_free_rcu(struct rcu_head *rcu)
{
	struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

	mesh_table_free(tbl, false);
}
617
/* Double the mesh path table size; runs from the interface work item.
 * Takes the resize lock for writing, swaps in the new table via RCU and
 * frees the old one after a grace period. Failure to grow is silent --
 * the old table simply stays in use. */
void mesh_mpath_table_grow(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	oldtbl = resize_dereference_mesh_paths(sdata);
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(sdata->u.mesh.mesh_paths, newtbl);

	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
}
638
/* Same as mesh_mpath_table_grow() but for the proxy (MPP) path table. */
void mesh_mpp_table_grow(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *oldtbl, *newtbl;

	write_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	oldtbl = resize_dereference_mpp_paths(sdata);
	newtbl = mesh_table_alloc(oldtbl->size_order + 1);
	if (!newtbl)
		goto out;
	if (mesh_table_grow(oldtbl, newtbl) < 0) {
		__mesh_table_free(newtbl);
		goto out;
	}
	rcu_assign_pointer(sdata->u.mesh.mpp_paths, newtbl);
	call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

 out:
	write_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
}
eb2b9311 658
/*
 * mpp_path_add - add a proxy path (destination @dst reachable via mesh
 * proxy @mpp) to the MPP table.
 * @sdata: local subif
 * @dst: proxied destination address (ETH_ALEN)
 * @mpp: address of the mesh STA proxying for @dst
 *
 * Returns 0 on success, -ENOTSUPP for our own/multicast address,
 * -ENOMEM on allocation failure, -EEXIST if @dst is already present.
 * Unlike mesh_path_add(), MPP entries are not counted against
 * MESH_MAX_MPATHS.
 */
int mpp_path_add(struct ieee80211_sub_if_data *sdata,
		 const u8 *dst, const u8 *mpp)
{
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_local *local = sdata->local;
	struct mesh_table *tbl;
	struct mesh_path *mpath, *new_mpath;
	struct mpath_node *node, *new_node;
	struct hlist_head *bucket;
	int grow = 0;
	int err = 0;
	u32 hash_idx;

	if (ether_addr_equal(dst, sdata->vif.addr))
		/* never add ourselves as neighbours */
		return -ENOTSUPP;

	if (is_multicast_ether_addr(dst))
		return -ENOTSUPP;

	/* allocate before taking any locks */
	err = -ENOMEM;
	new_mpath = mesh_path_new(sdata, dst, GFP_ATOMIC);
	if (!new_mpath)
		goto err_path_alloc;

	new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
	if (!new_node)
		goto err_node_alloc;

	memcpy(new_mpath->mpp, mpp, ETH_ALEN);
	new_node->mpath = new_mpath;
	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	tbl = resize_dereference_mpp_paths(sdata);

	hash_idx = mesh_table_hash(dst, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);

	err = -EEXIST;
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (ether_addr_equal(dst, mpath->dst))
			goto err_exists;
	}

	hlist_add_head_rcu(&new_node->list, bucket);
	if (atomic_inc_return(&tbl->entries) >=
	    MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
		grow = 1;

	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);

	sdata->u.mesh.mpp_paths_generation++;

	if (grow) {
		set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
		ieee80211_queue_work(&local->hw, &sdata->work);
	}
	return 0;

err_exists:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	kfree(new_node);
err_node_alloc:
	kfree(new_mpath);
err_path_alloc:
	return err;
}
730
731
eb2b9311
LCC
/**
 * mesh_plink_broken - deactivates paths and sends perr when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
	struct mesh_table *tbl;
	static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	int i;

	rcu_read_lock();
	tbl = rcu_dereference(sdata->u.mesh.mesh_paths);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		/* only active, non-fixed paths through this peer */
		if (rcu_access_pointer(mpath->next_hop) == sta &&
		    mpath->flags & MESH_PATH_ACTIVE &&
		    !(mpath->flags & MESH_PATH_FIXED)) {
			spin_lock_bh(&mpath->state_lock);
			mpath->flags &= ~MESH_PATH_ACTIVE;
			/* bump the sequence number so the PERR supersedes
			 * the stale path information */
			++mpath->sn;
			spin_unlock_bh(&mpath->state_lock);
			mesh_path_error_tx(sdata,
				sdata->u.mesh.mshcfg.element_ttl,
				mpath->dst, mpath->sn,
				WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
		}
	}
	rcu_read_unlock();
}
768
/* RCU callback: final teardown of an unlinked path node.
 * NOTE(review): del_timer_sync() here runs in RCU-callback (softirq)
 * context -- confirm that mesh_path_timer never grabs locks that would
 * make a synchronous wait unsafe from this context. */
static void mesh_path_node_reclaim(struct rcu_head *rp)
{
	struct mpath_node *node = container_of(rp, struct mpath_node, rcu);

	del_timer_sync(&node->mpath->timer);
	kfree(node->mpath);
	kfree(node);
}
777
/* Unlink @node from @tbl and schedule its memory for RCU reclaim;
 * needs to be called with the corresponding hashwlock taken.
 * MESH_PATH_RESOLVING is set so concurrent users treat the path as
 * unusable while it is being torn down. */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
	struct mesh_path *mpath = node->mpath;
	struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

	spin_lock(&mpath->state_lock);
	mpath->flags |= MESH_PATH_RESOLVING;
	if (mpath->is_gate)
		mesh_gate_del(tbl, mpath);
	hlist_del_rcu(&node->list);
	call_rcu(&node->rcu, mesh_path_node_reclaim);
	spin_unlock(&mpath->state_lock);
	atomic_dec(&sdata->u.mesh.mpaths);
	atomic_dec(&tbl->entries);
}
794
eb2b9311
LCC
/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	/* resize read lock keeps the table pointer stable across the walk */
	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths(sdata);
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		if (rcu_access_pointer(mpath->next_hop) == sta) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	rcu_read_unlock();
}
828
/* Delete every proxy path whose proxying STA is @proxy; called before a
 * mesh path to @proxy itself is removed (see mesh_path_del). */
static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
			       const u8 *proxy)
{
	struct mesh_table *tbl;
	struct mesh_path *mpp;
	struct mpath_node *node;
	int i;

	rcu_read_lock();
	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	tbl = resize_dereference_mpp_paths(sdata);
	for_each_mesh_entry(tbl, node, i) {
		mpp = node->mpath;
		if (ether_addr_equal(mpp->mpp, proxy)) {
			spin_lock(&tbl->hashwlock[i]);
			__mesh_path_del(tbl, node);
			spin_unlock(&tbl->hashwlock[i]);
		}
	}
	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	rcu_read_unlock();
}
851
/* Remove every entry of @tbl (the table is per-@sdata, so this empties
 * it). Caller must hold rcu_read_lock() and the resize lock. */
static void table_flush_by_iface(struct mesh_table *tbl,
				 struct ieee80211_sub_if_data *sdata)
{
	struct mesh_path *mpath;
	struct mpath_node *node;
	int i;

	WARN_ON(!rcu_read_lock_held());
	for_each_mesh_entry(tbl, node, i) {
		mpath = node->mpath;
		spin_lock_bh(&tbl->hashwlock[i]);
		__mesh_path_del(tbl, node);
		spin_unlock_bh(&tbl->hashwlock[i]);
	}
}
867
/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths as well as mesh portal paths.
 *
 * @sdata: interface data to match
 *
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
	struct mesh_table *tbl;

	rcu_read_lock();
	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	tbl = resize_dereference_mesh_paths(sdata);
	table_flush_by_iface(tbl, sdata);
	tbl = resize_dereference_mpp_paths(sdata);
	table_flush_by_iface(tbl, sdata);
	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	rcu_read_unlock();
}
889
/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Caller must hold pathtbl_resize_lock (read side).
 *
 * Returns: 0 if successful, -ENXIO if @addr was not found
 */
static int table_path_del(struct mesh_table __rcu *rcu_tbl,
			  struct ieee80211_sub_if_data *sdata,
			  const u8 *addr)
{
	struct mesh_table *tbl;
	struct mesh_path *mpath;
	struct mpath_node *node;
	struct hlist_head *bucket;
	int hash_idx;
	int err = 0;

	tbl = resize_dereference_paths(sdata, rcu_tbl);
	hash_idx = mesh_table_hash(addr, tbl);
	bucket = &tbl->hash_buckets[hash_idx];

	spin_lock(&tbl->hashwlock[hash_idx]);
	hlist_for_each_entry(node, bucket, list) {
		mpath = node->mpath;
		if (ether_addr_equal(addr, mpath->dst)) {
			__mesh_path_del(tbl, node);
			goto enddel;
		}
	}

	err = -ENXIO;
enddel:
	spin_unlock(&tbl->hashwlock[hash_idx]);
	return err;
}
928
/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err = 0;

	/* flush relevant mpp entries first */
	mpp_flush_by_proxy(sdata, addr);

	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	err = table_path_del(sdata->u.mesh.mesh_paths, sdata, addr);
	/* bump generation so by-index iterators notice the change */
	sdata->u.mesh.mesh_paths_generation++;
	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);

	return err;
}
951
/**
 * mpp_path_del - delete a mesh proxy path from the table
 *
 * @addr: addr address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
	int err = 0;

	read_lock_bh(&sdata->u.mesh.pathtbl_resize_lock);
	err = table_path_del(sdata->u.mesh.mpp_paths, sdata, addr);
	sdata->u.mesh.mpp_paths_generation++;
	read_unlock_bh(&sdata->u.mesh.pathtbl_resize_lock);

	return err;
}
971
eb2b9311
LCC
972/**
973 * mesh_path_tx_pending - sends pending frames in a mesh path queue
974 *
975 * @mpath: mesh path to activate
976 *
977 * Locking: the state_lock of the mpath structure must NOT be held when calling
978 * this function.
979 */
980void mesh_path_tx_pending(struct mesh_path *mpath)
981{
249b405c
JC
982 if (mpath->flags & MESH_PATH_ACTIVE)
983 ieee80211_add_pending_skbs(mpath->sdata->local,
984 &mpath->frame_queue);
eb2b9311
LCC
985}
986
5ee68e5b
JC
987/**
988 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
989 *
990 * @mpath: mesh path whose queue will be emptied
991 *
992 * If there is only one gate, the frames are transferred from the failed mpath
993 * queue to that gate's queue. If there are more than one gates, the frames
994 * are copied from each gate to the next. After frames are copied, the
995 * mpath queues are emptied onto the transmission queue.
996 */
997int mesh_path_send_to_gates(struct mesh_path *mpath)
998{
999 struct ieee80211_sub_if_data *sdata = mpath->sdata;
5ee68e5b
JC
1000 struct mesh_table *tbl;
1001 struct mesh_path *from_mpath = mpath;
1002 struct mpath_node *gate = NULL;
1003 bool copy = false;
1004 struct hlist_head *known_gates;
1005
1006 rcu_read_lock();
2bdaf386 1007 tbl = rcu_dereference(sdata->u.mesh.mesh_paths);
5ee68e5b
JC
1008 known_gates = tbl->known_gates;
1009 rcu_read_unlock();
1010
1011 if (!known_gates)
1012 return -EHOSTUNREACH;
1013
b67bfe0d 1014 hlist_for_each_entry_rcu(gate, known_gates, list) {
5ee68e5b 1015 if (gate->mpath->flags & MESH_PATH_ACTIVE) {
bdcbd8e0 1016 mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
5ee68e5b
JC
1017 mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
1018 from_mpath = gate->mpath;
1019 copy = true;
1020 } else {
bdcbd8e0 1021 mpath_dbg(sdata,
d671b2a0
JB
1022 "Not forwarding to %pM (flags %#x)\n",
1023 gate->mpath->dst, gate->mpath->flags);
5ee68e5b
JC
1024 }
1025 }
1026
2bdaf386
BC
1027 hlist_for_each_entry_rcu(gate, known_gates, list) {
1028 mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
1029 mesh_path_tx_pending(gate->mpath);
1030 }
5ee68e5b
JC
1031
1032 return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
1033}
1034
eb2b9311
LCC
1035/**
1036 * mesh_path_discard_frame - discard a frame whose path could not be resolved
1037 *
1038 * @skb: frame to discard
f698d856 1039 * @sdata: network subif the frame was to be sent through
eb2b9311 1040 *
eb2b9311
LCC
1041 * Locking: the function must me called within a rcu_read_lock region
1042 */
bf7cd94d
JB
1043void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
1044 struct sk_buff *skb)
eb2b9311 1045{
eb2b9311 1046 kfree_skb(skb);
472dbc45 1047 sdata->u.mesh.mshstats.dropped_frames_no_route++;
eb2b9311
LCC
1048}
1049
1050/**
1051 * mesh_path_flush_pending - free the pending queue of a mesh path
1052 *
1053 * @mpath: mesh path whose queue has to be freed
1054 *
25985edc 1055 * Locking: the function must me called within a rcu_read_lock region
eb2b9311
LCC
1056 */
1057void mesh_path_flush_pending(struct mesh_path *mpath)
1058{
eb2b9311
LCC
1059 struct sk_buff *skb;
1060
00e3f25c 1061 while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
bf7cd94d 1062 mesh_path_discard_frame(mpath->sdata, skb);
eb2b9311
LCC
1063}
1064
1065/**
1066 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
1067 *
1068 * @mpath: the mesh path to modify
1069 * @next_hop: the next hop to force
1070 *
1071 * Locking: this function must be called holding mpath->state_lock
1072 */
1073void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
1074{
1075 spin_lock_bh(&mpath->state_lock);
1076 mesh_path_assign_nexthop(mpath, next_hop);
d19b3bf6 1077 mpath->sn = 0xffff;
eb2b9311
LCC
1078 mpath->metric = 0;
1079 mpath->hop_count = 0;
1080 mpath->exp_time = 0;
1081 mpath->flags |= MESH_PATH_FIXED;
1082 mesh_path_activate(mpath);
1083 spin_unlock_bh(&mpath->state_lock);
1084 mesh_path_tx_pending(mpath);
1085}
1086
1087static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
1088{
1089 struct mesh_path *mpath;
1090 struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
1091 mpath = node->mpath;
1092 hlist_del_rcu(p);
d0df9eec
JC
1093 if (free_leafs) {
1094 del_timer_sync(&mpath->timer);
eb2b9311 1095 kfree(mpath);
d0df9eec 1096 }
eb2b9311
LCC
1097 kfree(node);
1098}
1099
4caf86c6 1100static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
eb2b9311
LCC
1101{
1102 struct mesh_path *mpath;
1103 struct mpath_node *node, *new_node;
1104 u32 hash_idx;
1105
8566dc3f 1106 new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
00242c40
PE
1107 if (new_node == NULL)
1108 return -ENOMEM;
1109
eb2b9311
LCC
1110 node = hlist_entry(p, struct mpath_node, list);
1111 mpath = node->mpath;
eb2b9311 1112 new_node->mpath = mpath;
44395481 1113 hash_idx = mesh_table_hash(mpath->dst, newtbl);
eb2b9311
LCC
1114 hlist_add_head(&new_node->list,
1115 &newtbl->hash_buckets[hash_idx]);
4caf86c6 1116 return 0;
eb2b9311
LCC
1117}
1118
2bdaf386 1119int mesh_pathtbl_init(struct ieee80211_sub_if_data *sdata)
eb2b9311 1120{
349eb8cf 1121 struct mesh_table *tbl_path, *tbl_mpp;
4c5ade41 1122 int ret;
349eb8cf
JB
1123
1124 tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
1125 if (!tbl_path)
79617dee 1126 return -ENOMEM;
349eb8cf
JB
1127 tbl_path->free_node = &mesh_path_node_free;
1128 tbl_path->copy_node = &mesh_path_node_copy;
5ee68e5b 1129 tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
4c5ade41
DC
1130 if (!tbl_path->known_gates) {
1131 ret = -ENOMEM;
1132 goto free_path;
1133 }
5ee68e5b
JC
1134 INIT_HLIST_HEAD(tbl_path->known_gates);
1135
79617dee 1136
349eb8cf
JB
1137 tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
1138 if (!tbl_mpp) {
4c5ade41
DC
1139 ret = -ENOMEM;
1140 goto free_path;
79617dee 1141 }
349eb8cf
JB
1142 tbl_mpp->free_node = &mesh_path_node_free;
1143 tbl_mpp->copy_node = &mesh_path_node_copy;
5ee68e5b 1144 tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
4c5ade41
DC
1145 if (!tbl_mpp->known_gates) {
1146 ret = -ENOMEM;
1147 goto free_mpp;
1148 }
5ee68e5b 1149 INIT_HLIST_HEAD(tbl_mpp->known_gates);
349eb8cf 1150
2bdaf386
BC
1151 rwlock_init(&sdata->u.mesh.pathtbl_resize_lock);
1152
349eb8cf 1153 /* Need no locking since this is during init */
2bdaf386
BC
1154 RCU_INIT_POINTER(sdata->u.mesh.mesh_paths, tbl_path);
1155 RCU_INIT_POINTER(sdata->u.mesh.mpp_paths, tbl_mpp);
79617dee 1156
eb2b9311 1157 return 0;
4c5ade41
DC
1158
1159free_mpp:
1160 mesh_table_free(tbl_mpp, true);
1161free_path:
1162 mesh_table_free(tbl_path, true);
1163 return ret;
eb2b9311
LCC
1164}
1165
f698d856 1166void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
eb2b9311 1167{
349eb8cf 1168 struct mesh_table *tbl;
eb2b9311
LCC
1169 struct mesh_path *mpath;
1170 struct mpath_node *node;
eb2b9311
LCC
1171 int i;
1172
349eb8cf 1173 rcu_read_lock();
2bdaf386 1174 tbl = rcu_dereference(sdata->u.mesh.mesh_paths);
b67bfe0d 1175 for_each_mesh_entry(tbl, node, i) {
eb2b9311 1176 mpath = node->mpath;
eb2b9311
LCC
1177 if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
1178 (!(mpath->flags & MESH_PATH_FIXED)) &&
f5e50cd0 1179 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
2bdaf386 1180 mesh_path_del(sdata, mpath->dst);
eb2b9311 1181 }
ab1c7906 1182
2bdaf386 1183 tbl = rcu_dereference(sdata->u.mesh.mpp_paths);
ab1c7906 1184 for_each_mesh_entry(tbl, node, i) {
ab1c7906
HR
1185 mpath = node->mpath;
1186 if ((!(mpath->flags & MESH_PATH_FIXED)) &&
1187 time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
2bdaf386 1188 mpp_path_del(sdata, mpath->dst);
ab1c7906
HR
1189 }
1190
349eb8cf 1191 rcu_read_unlock();
eb2b9311
LCC
1192}
1193
2bdaf386 1194void mesh_pathtbl_unregister(struct ieee80211_sub_if_data *sdata)
eb2b9311 1195{
349eb8cf 1196 /* no need for locking during exit path */
2bdaf386
BC
1197 mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mesh_paths, 1),
1198 true);
1199 mesh_table_free(rcu_dereference_protected(sdata->u.mesh.mpp_paths, 1),
1200 true);
eb2b9311 1201}
This page took 0.784418 seconds and 5 git commands to generate.