net/mac80211/mesh_pathtbl.c
/*
 * Copyright (c) 2008, 2009 open80211s Ltd.
 * Author: Luis Carlos Cobo <luisca@cozybit.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/etherdevice.h>
#include <linux/list.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <net/mac80211.h>
#include "wme.h"
#include "ieee80211_i.h"
#include "mesh.h"

/* Initially there will be 2^INIT_PATHS_SIZE_ORDER buckets */
#define INIT_PATHS_SIZE_ORDER 2

/* Keep the mean chain length below this constant */
#define MEAN_CHAIN_LEN 2

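/*
 * Worked example of the resize trigger: with INIT_PATHS_SIZE_ORDER == 2 a
 * fresh table has 1 << 2 = 4 buckets (hash_mask == 3), so a grow is first
 * requested once the table holds MEAN_CHAIN_LEN * 4 = 8 entries; see the
 * checks in mesh_table_grow() and mesh_path_add() below.
 */
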
static inline bool mpath_expired(struct mesh_path *mpath)
{
        return (mpath->flags & MESH_PATH_ACTIVE) &&
               time_after(jiffies, mpath->exp_time) &&
               !(mpath->flags & MESH_PATH_FIXED);
}

struct mpath_node {
        struct hlist_node list;
        struct rcu_head rcu;
        /* This indirection allows two different tables to point to the same
         * mesh_path structure, useful when resizing
         */
        struct mesh_path *mpath;
};

static struct mesh_table __rcu *mesh_paths;
static struct mesh_table __rcu *mpp_paths; /* Store paths for MPP&MAP */

int mesh_paths_generation;
int mpp_paths_generation;

/* This lock will have the grow table function as writer and add / delete nodes
 * as readers. RCU provides sufficient protection only when reading the table
 * (i.e. doing lookups). Adding or removing nodes requires that we take the
 * read lock or we risk operating on an old table. The write lock is only
 * needed when modifying the number of buckets in a table.
 */
static DEFINE_RWLOCK(pathtbl_resize_lock);
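
/*
 * Illustrative locking pattern (this mirrors what mesh_path_add() and the
 * flush helpers below actually do; it is not additional API):
 *
 *      read_lock_bh(&pathtbl_resize_lock);
 *      tbl = resize_dereference_mesh_paths();
 *      ... add or remove nodes under tbl->hashwlock[idx] ...
 *      read_unlock_bh(&pathtbl_resize_lock);
 */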


static inline struct mesh_table *resize_dereference_paths(
        struct mesh_table __rcu *table)
{
        return rcu_dereference_protected(table,
                lockdep_is_held(&pathtbl_resize_lock));
}

static inline struct mesh_table *resize_dereference_mesh_paths(void)
{
        return resize_dereference_paths(mesh_paths);
}

static inline struct mesh_table *resize_dereference_mpp_paths(void)
{
        return resize_dereference_paths(mpp_paths);
}

/*
 * CAREFUL -- "tbl" must not be an expression,
 * in particular not an rcu_dereference(), since
 * it's used twice. So it is illegal to do
 * for_each_mesh_entry(rcu_dereference(...), ...)
 */
#define for_each_mesh_entry(tbl, node, i) \
        for (i = 0; i <= tbl->hash_mask; i++) \
                hlist_for_each_entry_rcu(node, &tbl->hash_buckets[i], list)
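
/*
 * Typical traversal, which must run under rcu_read_lock(); see
 * mesh_path_lookup_by_idx() below for a real user:
 *
 *      for_each_mesh_entry(tbl, node, i)
 *              ... inspect node->mpath ...
 */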


static struct mesh_table *mesh_table_alloc(int size_order)
{
        int i;
        struct mesh_table *newtbl;

        newtbl = kmalloc(sizeof(struct mesh_table), GFP_ATOMIC);
        if (!newtbl)
                return NULL;

        newtbl->hash_buckets = kzalloc(sizeof(struct hlist_head) *
                        (1 << size_order), GFP_ATOMIC);

        if (!newtbl->hash_buckets) {
                kfree(newtbl);
                return NULL;
        }

        newtbl->hashwlock = kmalloc(sizeof(spinlock_t) *
                        (1 << size_order), GFP_ATOMIC);
        if (!newtbl->hashwlock) {
                kfree(newtbl->hash_buckets);
                kfree(newtbl);
                return NULL;
        }

        newtbl->size_order = size_order;
        newtbl->hash_mask = (1 << size_order) - 1;
        atomic_set(&newtbl->entries, 0);
        get_random_bytes(&newtbl->hash_rnd,
                        sizeof(newtbl->hash_rnd));
        for (i = 0; i <= newtbl->hash_mask; i++)
                spin_lock_init(&newtbl->hashwlock[i]);
        spin_lock_init(&newtbl->gates_lock);

        return newtbl;
}

static void __mesh_table_free(struct mesh_table *tbl)
{
        kfree(tbl->hash_buckets);
        kfree(tbl->hashwlock);
        kfree(tbl);
}

static void mesh_table_free(struct mesh_table *tbl, bool free_leafs)
{
        struct hlist_head *mesh_hash;
        struct hlist_node *p, *q;
        struct mpath_node *gate;
        int i;

        mesh_hash = tbl->hash_buckets;
        for (i = 0; i <= tbl->hash_mask; i++) {
                spin_lock_bh(&tbl->hashwlock[i]);
                hlist_for_each_safe(p, q, &mesh_hash[i]) {
                        tbl->free_node(p, free_leafs);
                        atomic_dec(&tbl->entries);
                }
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
        /* known_gates may still be NULL if we are freeing a half-initialized
         * table from the mesh_pathtbl_init() error path */
        if (free_leafs && tbl->known_gates) {
                spin_lock_bh(&tbl->gates_lock);
                hlist_for_each_entry_safe(gate, q,
                                          tbl->known_gates, list) {
                        hlist_del(&gate->list);
                        kfree(gate);
                }
                kfree(tbl->known_gates);
                spin_unlock_bh(&tbl->gates_lock);
        }

        __mesh_table_free(tbl);
}

static int mesh_table_grow(struct mesh_table *oldtbl,
                           struct mesh_table *newtbl)
{
        struct hlist_head *oldhash;
        struct hlist_node *p, *q;
        int i;

        if (atomic_read(&oldtbl->entries)
                        < MEAN_CHAIN_LEN * (oldtbl->hash_mask + 1))
                return -EAGAIN;

        newtbl->free_node = oldtbl->free_node;
        newtbl->copy_node = oldtbl->copy_node;
        newtbl->known_gates = oldtbl->known_gates;
        atomic_set(&newtbl->entries, atomic_read(&oldtbl->entries));

        oldhash = oldtbl->hash_buckets;
        for (i = 0; i <= oldtbl->hash_mask; i++)
                hlist_for_each(p, &oldhash[i])
                        if (oldtbl->copy_node(p, newtbl) < 0)
                                goto errcopy;

        return 0;

errcopy:
        for (i = 0; i <= newtbl->hash_mask; i++) {
                hlist_for_each_safe(p, q, &newtbl->hash_buckets[i])
                        oldtbl->free_node(p, 0);
        }
        return -ENOMEM;
}
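
/*
 * Note: mesh_table_grow() only copies entries. The caller still owns newtbl
 * and must either publish it with rcu_assign_pointer() or free it when the
 * grow fails, as mesh_mpath_table_grow() below does.
 */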

static u32 mesh_table_hash(const u8 *addr, struct ieee80211_sub_if_data *sdata,
                           struct mesh_table *tbl)
{
        /* Use last four bytes of hw addr and interface index as hash index */
        return jhash_2words(*(u32 *)(addr+2), sdata->dev->ifindex,
                            tbl->hash_rnd) & tbl->hash_mask;
}
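
/*
 * Because tbl->hash_rnd is drawn fresh in mesh_table_alloc(), the bucket an
 * address hashes to is not stable across table grows; lookups must always go
 * through the current RCU-published table.
 */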


/**
 * mesh_path_assign_nexthop - update mesh path next hop
 *
 * @mpath: mesh path to update
 * @sta: next hop to assign
 *
 * Locking: mpath->state_lock must be held when calling this function
 */
void mesh_path_assign_nexthop(struct mesh_path *mpath, struct sta_info *sta)
{
        struct sk_buff *skb;
        struct ieee80211_hdr *hdr;
        unsigned long flags;

        rcu_assign_pointer(mpath->next_hop, sta);

        spin_lock_irqsave(&mpath->frame_queue.lock, flags);
        skb_queue_walk(&mpath->frame_queue, skb) {
                hdr = (struct ieee80211_hdr *) skb->data;
                memcpy(hdr->addr1, sta->sta.addr, ETH_ALEN);
                memcpy(hdr->addr2, mpath->sdata->vif.addr, ETH_ALEN);
                ieee80211_mps_set_frame_flags(sta->sdata, sta, hdr);
        }

        spin_unlock_irqrestore(&mpath->frame_queue.lock, flags);
}

static void prepare_for_gate(struct sk_buff *skb, char *dst_addr,
                             struct mesh_path *gate_mpath)
{
        struct ieee80211_hdr *hdr;
        struct ieee80211s_hdr *mshdr;
        int mesh_hdrlen, hdrlen;
        char *next_hop;

        hdr = (struct ieee80211_hdr *) skb->data;
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
        mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);

        if (!(mshdr->flags & MESH_FLAGS_AE)) {
                /* size of the fixed part of the mesh header */
                mesh_hdrlen = 6;

                /* make room for the two extended addresses */
                skb_push(skb, 2 * ETH_ALEN);
                memmove(skb->data, hdr, hdrlen + mesh_hdrlen);

                hdr = (struct ieee80211_hdr *) skb->data;

                /* we preserve the previous mesh header and only add
                 * the new addresses */
                mshdr = (struct ieee80211s_hdr *) (skb->data + hdrlen);
                mshdr->flags = MESH_FLAGS_AE_A5_A6;
                memcpy(mshdr->eaddr1, hdr->addr3, ETH_ALEN);
                memcpy(mshdr->eaddr2, hdr->addr4, ETH_ALEN);
        }

        /* update next hop */
        hdr = (struct ieee80211_hdr *) skb->data;
        rcu_read_lock();
        next_hop = rcu_dereference(gate_mpath->next_hop)->sta.addr;
        memcpy(hdr->addr1, next_hop, ETH_ALEN);
        rcu_read_unlock();
        memcpy(hdr->addr2, gate_mpath->sdata->vif.addr, ETH_ALEN);
        memcpy(hdr->addr3, dst_addr, ETH_ALEN);
}

/**
 * mesh_path_move_to_queue - Move or copy frames from one mpath queue to another
 *
 * This function is used to transfer or copy frames from an unresolved mpath to
 * a gate mpath. The function also adds the Address Extension field and
 * updates the next hop.
 *
 * If a frame already has an Address Extension field, only the next hop and
 * destination addresses are updated.
 *
 * The gate mpath must be an active mpath with a valid mpath->next_hop.
 *
 * @gate_mpath: An active mpath the frames will be sent to (i.e. the gate)
 * @from_mpath: The failed mpath
 * @copy: When true, copy all the frames to the new mpath queue. When false,
 * move them.
 */
static void mesh_path_move_to_queue(struct mesh_path *gate_mpath,
                                    struct mesh_path *from_mpath,
                                    bool copy)
{
        struct sk_buff *skb, *fskb, *tmp;
        struct sk_buff_head failq;
        unsigned long flags;

        if (WARN_ON(gate_mpath == from_mpath))
                return;
        if (WARN_ON(!gate_mpath->next_hop))
                return;

        __skb_queue_head_init(&failq);

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice_init(&from_mpath->frame_queue, &failq);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);

        skb_queue_walk_safe(&failq, fskb, tmp) {
                if (skb_queue_len(&gate_mpath->frame_queue) >=
                    MESH_FRAME_QUEUE_LEN) {
                        mpath_dbg(gate_mpath->sdata, "mpath queue full!\n");
                        break;
                }

                skb = skb_copy(fskb, GFP_ATOMIC);
                if (WARN_ON(!skb))
                        break;

                prepare_for_gate(skb, gate_mpath->dst, gate_mpath);
                skb_queue_tail(&gate_mpath->frame_queue, skb);

                if (copy)
                        continue;

                __skb_unlink(fskb, &failq);
                kfree_skb(fskb);
        }

        mpath_dbg(gate_mpath->sdata, "Mpath queue for gate %pM has %d frames\n",
                  gate_mpath->dst, skb_queue_len(&gate_mpath->frame_queue));

        if (!copy)
                return;

        spin_lock_irqsave(&from_mpath->frame_queue.lock, flags);
        skb_queue_splice(&failq, &from_mpath->frame_queue);
        spin_unlock_irqrestore(&from_mpath->frame_queue.lock, flags);
}


static struct mesh_path *mpath_lookup(struct mesh_table *tbl, const u8 *dst,
                                      struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct hlist_head *bucket;
        struct mpath_node *node;

        bucket = &tbl->hash_buckets[mesh_table_hash(dst, sdata, tbl)];
        hlist_for_each_entry_rcu(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst)) {
                        if (mpath_expired(mpath)) {
                                spin_lock_bh(&mpath->state_lock);
                                mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&mpath->state_lock);
                        }
                        return mpath;
                }
        }
        return NULL;
}

/**
 * mesh_path_lookup - look up a path in the mesh path table
 * @sdata: local subif
 * @dst: hardware address (ETH_ALEN length) of destination
 *
 * Returns: pointer to the mesh path structure, or NULL if not found
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
        return mpath_lookup(rcu_dereference(mesh_paths), dst, sdata);
}

struct mesh_path *
mpp_path_lookup(struct ieee80211_sub_if_data *sdata, const u8 *dst)
{
        return mpath_lookup(rcu_dereference(mpp_paths), dst, sdata);
}

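/*
 * Sketch of the expected caller pattern (illustrative only): the returned
 * mpath is protected by RCU alone, so it may only be used inside the
 * read-side critical section in which it was looked up:
 *
 *      rcu_read_lock();
 *      mpath = mesh_path_lookup(sdata, dst);
 *      if (mpath)
 *              ... use mpath ...
 *      rcu_read_unlock();
 */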

/**
 * mesh_path_lookup_by_idx - look up a path in the mesh path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the mesh path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mesh_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
        struct mesh_table *tbl = rcu_dereference(mesh_paths);
        struct mpath_node *node;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx) {
                        if (mpath_expired(node->mpath)) {
                                spin_lock_bh(&node->mpath->state_lock);
                                node->mpath->flags &= ~MESH_PATH_ACTIVE;
                                spin_unlock_bh(&node->mpath->state_lock);
                        }
                        return node->mpath;
                }
        }

        return NULL;
}

/**
 * mpp_path_lookup_by_idx - look up a path in the proxy path table by its index
 * @idx: index
 * @sdata: local subif, or NULL for all entries
 *
 * Returns: pointer to the proxy path structure, or NULL if not found.
 *
 * Locking: must be called within a read rcu section.
 */
struct mesh_path *
mpp_path_lookup_by_idx(struct ieee80211_sub_if_data *sdata, int idx)
{
        struct mesh_table *tbl = rcu_dereference(mpp_paths);
        struct mpath_node *node;
        int i;
        int j = 0;

        for_each_mesh_entry(tbl, node, i) {
                if (sdata && node->mpath->sdata != sdata)
                        continue;
                if (j++ == idx)
                        return node->mpath;
        }

        return NULL;
}

/**
 * mesh_path_add_gate - add the given mpath to our path table's list of gates
 * @mpath: gate path to add to table
 */
int mesh_path_add_gate(struct mesh_path *mpath)
{
        struct mesh_table *tbl;
        struct mpath_node *gate, *new_gate;
        int err;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);

        hlist_for_each_entry_rcu(gate, tbl->known_gates, list)
                if (gate->mpath == mpath) {
                        err = -EEXIST;
                        goto err_rcu;
                }

        new_gate = kzalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_gate) {
                err = -ENOMEM;
                goto err_rcu;
        }

        mpath->is_gate = true;
        mpath->sdata->u.mesh.num_gates++;
        new_gate->mpath = mpath;
        spin_lock_bh(&tbl->gates_lock);
        hlist_add_head_rcu(&new_gate->list, tbl->known_gates);
        spin_unlock_bh(&tbl->gates_lock);
        mpath_dbg(mpath->sdata,
                  "Mesh path: Recorded new gate: %pM. %d known gates\n",
                  mpath->dst, mpath->sdata->u.mesh.num_gates);
        err = 0;
err_rcu:
        rcu_read_unlock();
        return err;
}

/**
 * mesh_gate_del - remove a mesh gate from the list of known gates
 * @tbl: table which holds our list of known gates
 * @mpath: gate mpath
 *
 * Locking: must be called inside rcu_read_lock() section
 */
static void mesh_gate_del(struct mesh_table *tbl, struct mesh_path *mpath)
{
        struct mpath_node *gate;
        struct hlist_node *q;

        hlist_for_each_entry_safe(gate, q, tbl->known_gates, list) {
                if (gate->mpath != mpath)
                        continue;
                spin_lock_bh(&tbl->gates_lock);
                hlist_del_rcu(&gate->list);
                kfree_rcu(gate, rcu);
                spin_unlock_bh(&tbl->gates_lock);
                mpath->sdata->u.mesh.num_gates--;
                mpath->is_gate = false;
                mpath_dbg(mpath->sdata,
                          "Mesh path: Deleted gate: %pM. %d known gates\n",
                          mpath->dst, mpath->sdata->u.mesh.num_gates);
                break;
        }
}

/**
 * mesh_gate_num - number of gates known to this interface
 * @sdata: subif data
 */
int mesh_gate_num(struct ieee80211_sub_if_data *sdata)
{
        return sdata->u.mesh.num_gates;
}

/**
 * mesh_path_add - allocate and add a new path to the mesh path table
 * @dst: destination address of the path (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: the new (or already existing) mesh path on success, or an
 * ERR_PTR() on failure
 *
 * State: the initial state of the new path is set to 0
 */
struct mesh_path *mesh_path_add(struct ieee80211_sub_if_data *sdata,
                                const u8 *dst)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        int grow = 0;
        int err;
        u32 hash_idx;

        if (ether_addr_equal(dst, sdata->vif.addr))
                /* never add ourselves as neighbours */
                return ERR_PTR(-ENOTSUPP);

        if (is_multicast_ether_addr(dst))
                return ERR_PTR(-ENOTSUPP);

        if (atomic_add_unless(&sdata->u.mesh.mpaths, 1, MESH_MAX_MPATHS) == 0)
                return ERR_PTR(-ENOSPC);

        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        hlist_for_each_entry(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
                        goto found;
        }

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        memcpy(new_mpath->dst, dst, ETH_ALEN);
        eth_broadcast_addr(new_mpath->rann_snd_addr);
        new_mpath->is_root = false;
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        new_mpath->timer.data = (unsigned long) new_mpath;
        new_mpath->timer.function = mesh_path_timer;
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);
        init_timer(&new_mpath->timer);

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
                grow = 1;

        mesh_paths_generation++;

        if (grow) {
                set_bit(MESH_WORK_GROW_MPATH_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        mpath = new_mpath;
found:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return mpath;

err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        atomic_dec(&sdata->u.mesh.mpaths);
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        return ERR_PTR(err);
}
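
/*
 * Hypothetical caller sketch: mesh_path_add() returns either a valid mpath
 * (new or preexisting) or an ERR_PTR(), never NULL, so check with IS_ERR():
 *
 *      mpath = mesh_path_add(sdata, dst);
 *      if (IS_ERR(mpath))
 *              return PTR_ERR(mpath);
 */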

static void mesh_table_free_rcu(struct rcu_head *rcu)
{
        struct mesh_table *tbl = container_of(rcu, struct mesh_table, rcu_head);

        mesh_table_free(tbl, false);
}

void mesh_mpath_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mesh_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mesh_paths, newtbl);

        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
        write_unlock_bh(&pathtbl_resize_lock);
}

void mesh_mpp_table_grow(void)
{
        struct mesh_table *oldtbl, *newtbl;

        write_lock_bh(&pathtbl_resize_lock);
        oldtbl = resize_dereference_mpp_paths();
        newtbl = mesh_table_alloc(oldtbl->size_order + 1);
        if (!newtbl)
                goto out;
        if (mesh_table_grow(oldtbl, newtbl) < 0) {
                __mesh_table_free(newtbl);
                goto out;
        }
        rcu_assign_pointer(mpp_paths, newtbl);
        call_rcu(&oldtbl->rcu_head, mesh_table_free_rcu);

out:
        write_unlock_bh(&pathtbl_resize_lock);
}

int mpp_path_add(struct ieee80211_sub_if_data *sdata,
                 const u8 *dst, const u8 *mpp)
{
        struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
        struct ieee80211_local *local = sdata->local;
        struct mesh_table *tbl;
        struct mesh_path *mpath, *new_mpath;
        struct mpath_node *node, *new_node;
        struct hlist_head *bucket;
        int grow = 0;
        int err = 0;
        u32 hash_idx;

        if (ether_addr_equal(dst, sdata->vif.addr))
                /* never add ourselves as neighbours */
                return -ENOTSUPP;

        if (is_multicast_ether_addr(dst))
                return -ENOTSUPP;

        err = -ENOMEM;
        new_mpath = kzalloc(sizeof(struct mesh_path), GFP_ATOMIC);
        if (!new_mpath)
                goto err_path_alloc;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (!new_node)
                goto err_node_alloc;

        read_lock_bh(&pathtbl_resize_lock);
        memcpy(new_mpath->dst, dst, ETH_ALEN);
        memcpy(new_mpath->mpp, mpp, ETH_ALEN);
        new_mpath->sdata = sdata;
        new_mpath->flags = 0;
        skb_queue_head_init(&new_mpath->frame_queue);
        new_node->mpath = new_mpath;
        init_timer(&new_mpath->timer);
        new_mpath->exp_time = jiffies;
        spin_lock_init(&new_mpath->state_lock);

        tbl = resize_dereference_mpp_paths();

        hash_idx = mesh_table_hash(dst, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);

        err = -EEXIST;
        hlist_for_each_entry(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(dst, mpath->dst))
                        goto err_exists;
        }

        hlist_add_head_rcu(&new_node->list, bucket);
        if (atomic_inc_return(&tbl->entries) >=
            MEAN_CHAIN_LEN * (tbl->hash_mask + 1))
                grow = 1;

        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);

        mpp_paths_generation++;

        if (grow) {
                set_bit(MESH_WORK_GROW_MPP_TABLE, &ifmsh->wrkq_flags);
                ieee80211_queue_work(&local->hw, &sdata->work);
        }
        return 0;

err_exists:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        read_unlock_bh(&pathtbl_resize_lock);
        kfree(new_node);
err_node_alloc:
        kfree(new_mpath);
err_path_alloc:
        return err;
}


/**
 * mesh_plink_broken - deactivates paths and sends PERR when a link breaks
 *
 * @sta: broken peer link
 *
 * This function must be called from the rate control algorithm if enough
 * delivery errors suggest that a peer link is no longer usable.
 */
void mesh_plink_broken(struct sta_info *sta)
{
        struct mesh_table *tbl;
        static const u8 bcast[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct ieee80211_sub_if_data *sdata = sta->sdata;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, node, i) {
                mpath = node->mpath;
                if (rcu_access_pointer(mpath->next_hop) == sta &&
                    mpath->flags & MESH_PATH_ACTIVE &&
                    !(mpath->flags & MESH_PATH_FIXED)) {
                        spin_lock_bh(&mpath->state_lock);
                        mpath->flags &= ~MESH_PATH_ACTIVE;
                        ++mpath->sn;
                        spin_unlock_bh(&mpath->state_lock);
                        mesh_path_error_tx(sdata,
                                sdata->u.mesh.mshcfg.element_ttl,
                                mpath->dst, mpath->sn,
                                WLAN_REASON_MESH_PATH_DEST_UNREACHABLE, bcast);
                }
        }
        rcu_read_unlock();
}

static void mesh_path_node_reclaim(struct rcu_head *rp)
{
        struct mpath_node *node = container_of(rp, struct mpath_node, rcu);

        del_timer_sync(&node->mpath->timer);
        kfree(node->mpath);
        kfree(node);
}
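
/*
 * mesh_path_node_reclaim() runs from call_rcu() (see __mesh_path_del()
 * below), so the timer is stopped and the mpath freed only after a grace
 * period, when no RCU reader can still hold a reference to the node.
 */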

/* needs to be called with the corresponding hashwlock taken */
static void __mesh_path_del(struct mesh_table *tbl, struct mpath_node *node)
{
        struct mesh_path *mpath = node->mpath;
        struct ieee80211_sub_if_data *sdata = node->mpath->sdata;

        spin_lock(&mpath->state_lock);
        mpath->flags |= MESH_PATH_RESOLVING;
        if (mpath->is_gate)
                mesh_gate_del(tbl, mpath);
        hlist_del_rcu(&node->list);
        call_rcu(&node->rcu, mesh_path_node_reclaim);
        spin_unlock(&mpath->state_lock);
        atomic_dec(&sdata->u.mesh.mpaths);
        atomic_dec(&tbl->entries);
}

/**
 * mesh_path_flush_by_nexthop - Deletes mesh paths if their next hop matches
 *
 * @sta: mesh peer to match
 *
 * RCU notes: this function is called when a mesh plink transitions from
 * PLINK_ESTAB to any other state, since PLINK_ESTAB state is the only one that
 * allows path creation. This will happen before the sta can be freed (because
 * sta_info_destroy() calls this) so any reader in a rcu read block will be
 * protected against the plink disappearing.
 */
void mesh_path_flush_by_nexthop(struct sta_info *sta)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        int i;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        for_each_mesh_entry(tbl, node, i) {
                mpath = node->mpath;
                if (rcu_access_pointer(mpath->next_hop) == sta) {
                        spin_lock(&tbl->hashwlock[i]);
                        __mesh_path_del(tbl, node);
                        spin_unlock(&tbl->hashwlock[i]);
                }
        }
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

static void mpp_flush_by_proxy(struct ieee80211_sub_if_data *sdata,
                               const u8 *proxy)
{
        struct mesh_table *tbl;
        struct mesh_path *mpp;
        struct mpath_node *node;
        int i;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mpp_paths();
        for_each_mesh_entry(tbl, node, i) {
                mpp = node->mpath;
                if (ether_addr_equal(mpp->mpp, proxy)) {
                        spin_lock(&tbl->hashwlock[i]);
                        __mesh_path_del(tbl, node);
                        spin_unlock(&tbl->hashwlock[i]);
                }
        }
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

static void table_flush_by_iface(struct mesh_table *tbl,
                                 struct ieee80211_sub_if_data *sdata)
{
        struct mesh_path *mpath;
        struct mpath_node *node;
        int i;

        WARN_ON(!rcu_read_lock_held());
        for_each_mesh_entry(tbl, node, i) {
                mpath = node->mpath;
                if (mpath->sdata != sdata)
                        continue;
                spin_lock_bh(&tbl->hashwlock[i]);
                __mesh_path_del(tbl, node);
                spin_unlock_bh(&tbl->hashwlock[i]);
        }
}

/**
 * mesh_path_flush_by_iface - Deletes all mesh paths associated with a given iface
 *
 * This function deletes both mesh paths and mesh portal paths.
 *
 * @sdata: interface data to match
 */
void mesh_path_flush_by_iface(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;

        rcu_read_lock();
        read_lock_bh(&pathtbl_resize_lock);
        tbl = resize_dereference_mesh_paths();
        table_flush_by_iface(tbl, sdata);
        tbl = resize_dereference_mpp_paths();
        table_flush_by_iface(tbl, sdata);
        read_unlock_bh(&pathtbl_resize_lock);
        rcu_read_unlock();
}

/**
 * table_path_del - delete a path from the mesh or mpp table
 *
 * @rcu_tbl: mesh or mpp path table
 * @sdata: local subif
 * @addr: dst address (ETH_ALEN length)
 *
 * Returns: 0 if successful
 */
static int table_path_del(struct mesh_table __rcu *rcu_tbl,
                          struct ieee80211_sub_if_data *sdata,
                          const u8 *addr)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        struct hlist_head *bucket;
        int hash_idx;
        int err = 0;

        tbl = resize_dereference_paths(rcu_tbl);
        hash_idx = mesh_table_hash(addr, sdata, tbl);
        bucket = &tbl->hash_buckets[hash_idx];

        spin_lock(&tbl->hashwlock[hash_idx]);
        hlist_for_each_entry(node, bucket, list) {
                mpath = node->mpath;
                if (mpath->sdata == sdata &&
                    ether_addr_equal(addr, mpath->dst)) {
                        __mesh_path_del(tbl, node);
                        goto enddel;
                }
        }

        err = -ENXIO;
enddel:
        spin_unlock(&tbl->hashwlock[hash_idx]);
        return err;
}

/**
 * mesh_path_del - delete a mesh path from the table
 *
 * @addr: dst address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
int mesh_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
        int err = 0;

        /* flush relevant mpp entries first */
        mpp_flush_by_proxy(sdata, addr);

        read_lock_bh(&pathtbl_resize_lock);
        err = table_path_del(mesh_paths, sdata, addr);
        mesh_paths_generation++;
        read_unlock_bh(&pathtbl_resize_lock);

        return err;
}

/**
 * mpp_path_del - delete a mesh proxy path from the table
 *
 * @addr: proxied destination address (ETH_ALEN length)
 * @sdata: local subif
 *
 * Returns: 0 if successful
 */
static int mpp_path_del(struct ieee80211_sub_if_data *sdata, const u8 *addr)
{
        int err = 0;

        read_lock_bh(&pathtbl_resize_lock);
        err = table_path_del(mpp_paths, sdata, addr);
        mpp_paths_generation++;
        read_unlock_bh(&pathtbl_resize_lock);

        return err;
}

/**
 * mesh_path_tx_pending - sends pending frames in a mesh path queue
 *
 * @mpath: mesh path to activate
 *
 * Locking: the state_lock of the mpath structure must NOT be held when calling
 * this function.
 */
void mesh_path_tx_pending(struct mesh_path *mpath)
{
        if (mpath->flags & MESH_PATH_ACTIVE)
                ieee80211_add_pending_skbs(mpath->sdata->local,
                                           &mpath->frame_queue);
}

/**
 * mesh_path_send_to_gates - sends pending frames to all known mesh gates
 *
 * @mpath: mesh path whose queue will be emptied
 *
 * If there is only one gate, the frames are transferred from the failed mpath
 * queue to that gate's queue. If there is more than one gate, the frames
 * are copied from each gate to the next. After frames are copied, the
 * mpath queues are emptied onto the transmission queue.
 */
int mesh_path_send_to_gates(struct mesh_path *mpath)
{
        struct ieee80211_sub_if_data *sdata = mpath->sdata;
        struct mesh_table *tbl;
        struct mesh_path *from_mpath = mpath;
        struct mpath_node *gate = NULL;
        bool copy = false;
        struct hlist_head *known_gates;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        known_gates = tbl->known_gates;
        rcu_read_unlock();

        if (!known_gates)
                return -EHOSTUNREACH;

        hlist_for_each_entry_rcu(gate, known_gates, list) {
                if (gate->mpath->sdata != sdata)
                        continue;

                if (gate->mpath->flags & MESH_PATH_ACTIVE) {
                        mpath_dbg(sdata, "Forwarding to %pM\n", gate->mpath->dst);
                        mesh_path_move_to_queue(gate->mpath, from_mpath, copy);
                        from_mpath = gate->mpath;
                        copy = true;
                } else {
                        mpath_dbg(sdata,
                                  "Not forwarding to %pM (flags %#x)\n",
                                  gate->mpath->dst, gate->mpath->flags);
                }
        }

        hlist_for_each_entry_rcu(gate, known_gates, list)
                if (gate->mpath->sdata == sdata) {
                        mpath_dbg(sdata, "Sending to %pM\n", gate->mpath->dst);
                        mesh_path_tx_pending(gate->mpath);
                }

        return (from_mpath == mpath) ? -EHOSTUNREACH : 0;
}

/**
 * mesh_path_discard_frame - discard a frame whose path could not be resolved
 *
 * @skb: frame to discard
 * @sdata: network subif the frame was to be sent through
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_discard_frame(struct ieee80211_sub_if_data *sdata,
                             struct sk_buff *skb)
{
        kfree_skb(skb);
        sdata->u.mesh.mshstats.dropped_frames_no_route++;
}

/**
 * mesh_path_flush_pending - free the pending queue of a mesh path
 *
 * @mpath: mesh path whose queue has to be freed
 *
 * Locking: the function must be called within an rcu_read_lock region
 */
void mesh_path_flush_pending(struct mesh_path *mpath)
{
        struct sk_buff *skb;

        while ((skb = skb_dequeue(&mpath->frame_queue)) != NULL)
                mesh_path_discard_frame(mpath->sdata, skb);
}

/**
 * mesh_path_fix_nexthop - force a specific next hop for a mesh path
 *
 * @mpath: the mesh path to modify
 * @next_hop: the next hop to force
 *
 * Locking: this function must be called holding mpath->state_lock
 */
void mesh_path_fix_nexthop(struct mesh_path *mpath, struct sta_info *next_hop)
{
        spin_lock_bh(&mpath->state_lock);
        mesh_path_assign_nexthop(mpath, next_hop);
        mpath->sn = 0xffff;
        mpath->metric = 0;
        mpath->hop_count = 0;
        mpath->exp_time = 0;
        mpath->flags |= MESH_PATH_FIXED;
        mesh_path_activate(mpath);
        spin_unlock_bh(&mpath->state_lock);
        mesh_path_tx_pending(mpath);
}

static void mesh_path_node_free(struct hlist_node *p, bool free_leafs)
{
        struct mesh_path *mpath;
        struct mpath_node *node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        hlist_del_rcu(p);
        if (free_leafs) {
                del_timer_sync(&mpath->timer);
                kfree(mpath);
        }
        kfree(node);
}

static int mesh_path_node_copy(struct hlist_node *p, struct mesh_table *newtbl)
{
        struct mesh_path *mpath;
        struct mpath_node *node, *new_node;
        u32 hash_idx;

        new_node = kmalloc(sizeof(struct mpath_node), GFP_ATOMIC);
        if (new_node == NULL)
                return -ENOMEM;

        node = hlist_entry(p, struct mpath_node, list);
        mpath = node->mpath;
        new_node->mpath = mpath;
        hash_idx = mesh_table_hash(mpath->dst, mpath->sdata, newtbl);
        hlist_add_head(&new_node->list,
                       &newtbl->hash_buckets[hash_idx]);
        return 0;
}

int mesh_pathtbl_init(void)
{
        struct mesh_table *tbl_path, *tbl_mpp;
        int ret;

        tbl_path = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_path)
                return -ENOMEM;
        tbl_path->free_node = &mesh_path_node_free;
        tbl_path->copy_node = &mesh_path_node_copy;
        tbl_path->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_path->known_gates) {
                ret = -ENOMEM;
                goto free_path;
        }
        INIT_HLIST_HEAD(tbl_path->known_gates);

        tbl_mpp = mesh_table_alloc(INIT_PATHS_SIZE_ORDER);
        if (!tbl_mpp) {
                ret = -ENOMEM;
                goto free_path;
        }
        tbl_mpp->free_node = &mesh_path_node_free;
        tbl_mpp->copy_node = &mesh_path_node_copy;
        tbl_mpp->known_gates = kzalloc(sizeof(struct hlist_head), GFP_ATOMIC);
        if (!tbl_mpp->known_gates) {
                ret = -ENOMEM;
                goto free_mpp;
        }
        INIT_HLIST_HEAD(tbl_mpp->known_gates);

        /* Need no locking since this is during init */
        RCU_INIT_POINTER(mesh_paths, tbl_path);
        RCU_INIT_POINTER(mpp_paths, tbl_mpp);

        return 0;

free_mpp:
        mesh_table_free(tbl_mpp, true);
free_path:
        mesh_table_free(tbl_path, true);
        return ret;
}

void mesh_path_expire(struct ieee80211_sub_if_data *sdata)
{
        struct mesh_table *tbl;
        struct mesh_path *mpath;
        struct mpath_node *node;
        int i;

        rcu_read_lock();
        tbl = rcu_dereference(mesh_paths);
        for_each_mesh_entry(tbl, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                if ((!(mpath->flags & MESH_PATH_RESOLVING)) &&
                    (!(mpath->flags & MESH_PATH_FIXED)) &&
                    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mesh_path_del(mpath->sdata, mpath->dst);
        }

        tbl = rcu_dereference(mpp_paths);
        for_each_mesh_entry(tbl, node, i) {
                if (node->mpath->sdata != sdata)
                        continue;
                mpath = node->mpath;
                if ((!(mpath->flags & MESH_PATH_FIXED)) &&
                    time_after(jiffies, mpath->exp_time + MESH_PATH_EXPIRE))
                        mpp_path_del(mpath->sdata, mpath->dst);
        }

        rcu_read_unlock();
}

void mesh_pathtbl_unregister(void)
{
        /* no need for locking during exit path */
        mesh_table_free(rcu_dereference_protected(mesh_paths, 1), true);
        mesh_table_free(rcu_dereference_protected(mpp_paths, 1), true);
}