net/core/neighbour.c
1 /*
2 * Generic address resolution entity
3 *
4 * Authors:
5 * Pedro Roque <roque@di.fc.ul.pt>
6 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Fixes:
14 * Vitaly E. Lavrov releasing NULL neighbor in neigh_add.
15 * Harald Welte Add neighbour cache statistics like rtstat
16 */
17
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/socket.h>
22 #include <linux/netdevice.h>
23 #include <linux/proc_fs.h>
24 #ifdef CONFIG_SYSCTL
25 #include <linux/sysctl.h>
26 #endif
27 #include <linux/times.h>
28 #include <net/net_namespace.h>
29 #include <net/neighbour.h>
30 #include <net/dst.h>
31 #include <net/sock.h>
32 #include <net/netevent.h>
33 #include <net/netlink.h>
34 #include <linux/rtnetlink.h>
35 #include <linux/random.h>
36 #include <linux/string.h>
37 #include <linux/log2.h>
38
39 #define NEIGH_DEBUG 1
40
41 #define NEIGH_PRINTK(x...) printk(x)
42 #define NEIGH_NOPRINTK(x...) do { ; } while(0)
43 #define NEIGH_PRINTK0 NEIGH_PRINTK
44 #define NEIGH_PRINTK1 NEIGH_NOPRINTK
45 #define NEIGH_PRINTK2 NEIGH_NOPRINTK
46
47 #if NEIGH_DEBUG >= 1
48 #undef NEIGH_PRINTK1
49 #define NEIGH_PRINTK1 NEIGH_PRINTK
50 #endif
51 #if NEIGH_DEBUG >= 2
52 #undef NEIGH_PRINTK2
53 #define NEIGH_PRINTK2 NEIGH_PRINTK
54 #endif
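/*
 * A minimal example of how the levels above resolve with NEIGH_DEBUG == 1:
 * NEIGH_PRINTK1() expands to printk() and prints, while NEIGH_PRINTK2()
 * stays the empty NEIGH_NOPRINTK() and is compiled out.
 */
#if 0
static void example_debug_levels(struct neighbour *n)
{
	NEIGH_PRINTK1("neigh %p state %x\n", n, n->nud_state);	/* printed */
	NEIGH_PRINTK2("neigh %p state %x\n", n, n->nud_state);	/* no-op */
}
#endif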
55
56 #define PNEIGH_HASHMASK 0xF
57
58 static void neigh_timer_handler(unsigned long arg);
59 static void __neigh_notify(struct neighbour *n, int type, int flags);
60 static void neigh_update_notify(struct neighbour *neigh);
61 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
62
63 static struct neigh_table *neigh_tables;
64 #ifdef CONFIG_PROC_FS
65 static const struct file_operations neigh_stat_seq_fops;
66 #endif
67
68 /*
69 Neighbour hash table buckets are protected with rwlock tbl->lock.
70
71 - All scans/updates of the hash buckets MUST be done under this lock.
72 - NOTHING clever should be done under this lock: no callbacks
73 into protocol backends, no attempts to send anything to the network.
74 It will result in deadlocks if the backend/driver wants to use the
75 neighbour cache.
76 - If the entry requires some non-trivial actions, increase
77 its reference count and release the table lock.
78
79 Neighbour entries are protected:
80 - with a reference count.
81 - with the rwlock neigh->lock.
82
83 The reference count prevents destruction.
84
85 neigh->lock mainly serializes the ll address data and its validity state.
86 However, the same lock is used to protect other entry fields:
87 - timer
88 - resolution queue
89
90 Again, nothing clever shall be done under neigh->lock;
91 the most complicated procedure we allow is dev->hard_header.
92 It is assumed that dev->hard_header is simple and does
93 not make callbacks into the neighbour tables.
94
95 The last lock is neigh_tbl_lock. It is a pure SMP lock protecting
96 the list of neighbour tables. This list is used only in process context.
97 */
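/*
 * A minimal sketch of the rule above, assuming a hypothetical helper:
 * pin the entry under the table lock, drop the lock, and only then do
 * anything non-trivial (driver callbacks, transmits).
 */
#if 0
static void example_use_entry(struct neigh_table *tbl, unsigned int bucket)
{
	struct neighbour *n;

	read_lock_bh(&tbl->lock);
	n = tbl->hash_buckets[bucket];
	if (n)
		neigh_hold(n);		/* take a reference under the lock */
	read_unlock_bh(&tbl->lock);

	if (!n)
		return;
	/* Heavy work goes here, outside tbl->lock. */
	neigh_release(n);
}
#endif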
98
99 static DEFINE_RWLOCK(neigh_tbl_lock);
100
101 static int neigh_blackhole(struct sk_buff *skb)
102 {
103 kfree_skb(skb);
104 return -ENETDOWN;
105 }
106
107 static void neigh_cleanup_and_release(struct neighbour *neigh)
108 {
109 if (neigh->parms->neigh_cleanup)
110 neigh->parms->neigh_cleanup(neigh);
111
112 __neigh_notify(neigh, RTM_DELNEIGH, 0);
113 neigh_release(neigh);
114 }
115
116 /*
117 * It returns a value uniformly distributed in the interval
118 * (1/2)*base...(3/2)*base. It corresponds to the default IPv6 settings
119 * and is not overridable, because it is a really reasonable choice.
120 */
121
122 unsigned long neigh_rand_reach_time(unsigned long base)
123 {
124 return (base ? (net_random() % base) + (base >> 1) : 0);
125 }
126 EXPORT_SYMBOL(neigh_rand_reach_time);
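/*
 * A minimal worked example of the interval above, assuming HZ == 100 and
 * base == 30 * HZ: net_random() % base is in [0, 3000) and base >> 1 is
 * 1500, so the result lies in [1500, 4500), i.e. (1/2)*base ... (3/2)*base.
 */
#if 0
static unsigned long example_reach_time(void)
{
	unsigned long base = 30 * HZ;
	unsigned long t = neigh_rand_reach_time(base);

	/* Never below base/2 and never at or above 3*base/2. */
	BUG_ON(t < (base >> 1) || t >= base + (base >> 1));
	return t;
}
#endif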
127
128
129 static int neigh_forced_gc(struct neigh_table *tbl)
130 {
131 int shrunk = 0;
132 int i;
133
134 NEIGH_CACHE_STAT_INC(tbl, forced_gc_runs);
135
136 write_lock_bh(&tbl->lock);
137 for (i = 0; i <= tbl->hash_mask; i++) {
138 struct neighbour *n, **np;
139
140 np = &tbl->hash_buckets[i];
141 while ((n = *np) != NULL) {
142 /* A neighbour record may be discarded if:
143 * - nobody refers to it, and
144 * - it is not permanent.
145 */
146 write_lock(&n->lock);
147 if (atomic_read(&n->refcnt) == 1 &&
148 !(n->nud_state & NUD_PERMANENT)) {
149 *np = n->next;
150 n->dead = 1;
151 shrunk = 1;
152 write_unlock(&n->lock);
153 neigh_cleanup_and_release(n);
154 continue;
155 }
156 write_unlock(&n->lock);
157 np = &n->next;
158 }
159 }
160
161 tbl->last_flush = jiffies;
162
163 write_unlock_bh(&tbl->lock);
164
165 return shrunk;
166 }
167
168 static void neigh_add_timer(struct neighbour *n, unsigned long when)
169 {
170 neigh_hold(n);
171 if (unlikely(mod_timer(&n->timer, when))) {
172 printk("NEIGH: BUG, double timer add, state is %x\n",
173 n->nud_state);
174 dump_stack();
175 }
176 }
177
178 static int neigh_del_timer(struct neighbour *n)
179 {
180 if ((n->nud_state & NUD_IN_TIMER) &&
181 del_timer(&n->timer)) {
182 neigh_release(n);
183 return 1;
184 }
185 return 0;
186 }
187
188 static void pneigh_queue_purge(struct sk_buff_head *list)
189 {
190 struct sk_buff *skb;
191
192 while ((skb = skb_dequeue(list)) != NULL) {
193 dev_put(skb->dev);
194 kfree_skb(skb);
195 }
196 }
197
198 static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev)
199 {
200 int i;
201
202 for (i = 0; i <= tbl->hash_mask; i++) {
203 struct neighbour *n, **np = &tbl->hash_buckets[i];
204
205 while ((n = *np) != NULL) {
206 if (dev && n->dev != dev) {
207 np = &n->next;
208 continue;
209 }
210 *np = n->next;
211 write_lock(&n->lock);
212 neigh_del_timer(n);
213 n->dead = 1;
214
215 if (atomic_read(&n->refcnt) != 1) {
216 /* The most unpleasant situation.
217 We must destroy the neighbour entry,
218 but someone still uses it.
219
220 The destroy will be delayed until
221 the last user releases it, but
222 we must kill the timers etc. and move
223 it to a safe state.
224 */
225 skb_queue_purge(&n->arp_queue);
226 n->output = neigh_blackhole;
227 if (n->nud_state & NUD_VALID)
228 n->nud_state = NUD_NOARP;
229 else
230 n->nud_state = NUD_NONE;
231 NEIGH_PRINTK2("neigh %p is stray.\n", n);
232 }
233 write_unlock(&n->lock);
234 neigh_cleanup_and_release(n);
235 }
236 }
237 }
238
239 void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev)
240 {
241 write_lock_bh(&tbl->lock);
242 neigh_flush_dev(tbl, dev);
243 write_unlock_bh(&tbl->lock);
244 }
245 EXPORT_SYMBOL(neigh_changeaddr);
246
247 int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
248 {
249 write_lock_bh(&tbl->lock);
250 neigh_flush_dev(tbl, dev);
251 pneigh_ifdown(tbl, dev);
252 write_unlock_bh(&tbl->lock);
253
254 del_timer_sync(&tbl->proxy_timer);
255 pneigh_queue_purge(&tbl->proxy_queue);
256 return 0;
257 }
258 EXPORT_SYMBOL(neigh_ifdown);
259
260 static struct neighbour *neigh_alloc(struct neigh_table *tbl)
261 {
262 struct neighbour *n = NULL;
263 unsigned long now = jiffies;
264 int entries;
265
266 entries = atomic_inc_return(&tbl->entries) - 1;
267 if (entries >= tbl->gc_thresh3 ||
268 (entries >= tbl->gc_thresh2 &&
269 time_after(now, tbl->last_flush + 5 * HZ))) {
270 if (!neigh_forced_gc(tbl) &&
271 entries >= tbl->gc_thresh3)
272 goto out_entries;
273 }
274
275 n = kmem_cache_zalloc(tbl->kmem_cachep, GFP_ATOMIC);
276 if (!n)
277 goto out_entries;
278
279 skb_queue_head_init(&n->arp_queue);
280 rwlock_init(&n->lock);
281 n->updated = n->used = now;
282 n->nud_state = NUD_NONE;
283 n->output = neigh_blackhole;
284 n->parms = neigh_parms_clone(&tbl->parms);
285 setup_timer(&n->timer, neigh_timer_handler, (unsigned long)n);
286
287 NEIGH_CACHE_STAT_INC(tbl, allocs);
288 n->tbl = tbl;
289 atomic_set(&n->refcnt, 1);
290 n->dead = 1;
291 out:
292 return n;
293
294 out_entries:
295 atomic_dec(&tbl->entries);
296 goto out;
297 }
298
299 static struct neighbour **neigh_hash_alloc(unsigned int entries)
300 {
301 unsigned long size = entries * sizeof(struct neighbour *);
302 struct neighbour **ret;
303
304 if (size <= PAGE_SIZE) {
305 ret = kzalloc(size, GFP_ATOMIC);
306 } else {
307 ret = (struct neighbour **)
308 __get_free_pages(GFP_ATOMIC|__GFP_ZERO, get_order(size));
309 }
310 return ret;
311 }
312
313 static void neigh_hash_free(struct neighbour **hash, unsigned int entries)
314 {
315 unsigned long size = entries * sizeof(struct neighbour *);
316
317 if (size <= PAGE_SIZE)
318 kfree(hash);
319 else
320 free_pages((unsigned long)hash, get_order(size));
321 }
322
323 static void neigh_hash_grow(struct neigh_table *tbl, unsigned long new_entries)
324 {
325 struct neighbour **new_hash, **old_hash;
326 unsigned int i, new_hash_mask, old_entries;
327
328 NEIGH_CACHE_STAT_INC(tbl, hash_grows);
329
330 BUG_ON(!is_power_of_2(new_entries));
331 new_hash = neigh_hash_alloc(new_entries);
332 if (!new_hash)
333 return;
334
335 old_entries = tbl->hash_mask + 1;
336 new_hash_mask = new_entries - 1;
337 old_hash = tbl->hash_buckets;
338
339 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
340 for (i = 0; i < old_entries; i++) {
341 struct neighbour *n, *next;
342
343 for (n = old_hash[i]; n; n = next) {
344 unsigned int hash_val = tbl->hash(n->primary_key, n->dev);
345
346 hash_val &= new_hash_mask;
347 next = n->next;
348
349 n->next = new_hash[hash_val];
350 new_hash[hash_val] = n;
351 }
352 }
353 tbl->hash_buckets = new_hash;
354 tbl->hash_mask = new_hash_mask;
355
356 neigh_hash_free(old_hash, old_entries);
357 }
358
359 struct neighbour *neigh_lookup(struct neigh_table *tbl, const void *pkey,
360 struct net_device *dev)
361 {
362 struct neighbour *n;
363 int key_len = tbl->key_len;
364 u32 hash_val;
365
366 NEIGH_CACHE_STAT_INC(tbl, lookups);
367
368 read_lock_bh(&tbl->lock);
369 hash_val = tbl->hash(pkey, dev);
370 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
371 if (dev == n->dev && !memcmp(n->primary_key, pkey, key_len)) {
372 neigh_hold(n);
373 NEIGH_CACHE_STAT_INC(tbl, hits);
374 break;
375 }
376 }
377 read_unlock_bh(&tbl->lock);
378 return n;
379 }
380 EXPORT_SYMBOL(neigh_lookup);
381
382 struct neighbour *neigh_lookup_nodev(struct neigh_table *tbl, struct net *net,
383 const void *pkey)
384 {
385 struct neighbour *n;
386 int key_len = tbl->key_len;
387 u32 hash_val;
388
389 NEIGH_CACHE_STAT_INC(tbl, lookups);
390
391 read_lock_bh(&tbl->lock);
392 hash_val = tbl->hash(pkey, NULL);
393 for (n = tbl->hash_buckets[hash_val & tbl->hash_mask]; n; n = n->next) {
394 if (!memcmp(n->primary_key, pkey, key_len) &&
395 net_eq(dev_net(n->dev), net)) {
396 neigh_hold(n);
397 NEIGH_CACHE_STAT_INC(tbl, hits);
398 break;
399 }
400 }
401 read_unlock_bh(&tbl->lock);
402 return n;
403 }
404 EXPORT_SYMBOL(neigh_lookup_nodev);
405
406 struct neighbour *neigh_create(struct neigh_table *tbl, const void *pkey,
407 struct net_device *dev)
408 {
409 u32 hash_val;
410 int key_len = tbl->key_len;
411 int error;
412 struct neighbour *n1, *rc, *n = neigh_alloc(tbl);
413
414 if (!n) {
415 rc = ERR_PTR(-ENOBUFS);
416 goto out;
417 }
418
419 memcpy(n->primary_key, pkey, key_len);
420 n->dev = dev;
421 dev_hold(dev);
422
423 /* Protocol specific setup. */
424 if (tbl->constructor && (error = tbl->constructor(n)) < 0) {
425 rc = ERR_PTR(error);
426 goto out_neigh_release;
427 }
428
429 /* Device specific setup. */
430 if (n->parms->neigh_setup &&
431 (error = n->parms->neigh_setup(n)) < 0) {
432 rc = ERR_PTR(error);
433 goto out_neigh_release;
434 }
435
436 n->confirmed = jiffies - (n->parms->base_reachable_time << 1);
437
438 write_lock_bh(&tbl->lock);
439
440 if (atomic_read(&tbl->entries) > (tbl->hash_mask + 1))
441 neigh_hash_grow(tbl, (tbl->hash_mask + 1) << 1);
442
443 hash_val = tbl->hash(pkey, dev) & tbl->hash_mask;
444
445 if (n->parms->dead) {
446 rc = ERR_PTR(-EINVAL);
447 goto out_tbl_unlock;
448 }
449
450 for (n1 = tbl->hash_buckets[hash_val]; n1; n1 = n1->next) {
451 if (dev == n1->dev && !memcmp(n1->primary_key, pkey, key_len)) {
452 neigh_hold(n1);
453 rc = n1;
454 goto out_tbl_unlock;
455 }
456 }
457
458 n->next = tbl->hash_buckets[hash_val];
459 tbl->hash_buckets[hash_val] = n;
460 n->dead = 0;
461 neigh_hold(n);
462 write_unlock_bh(&tbl->lock);
463 NEIGH_PRINTK2("neigh %p is created.\n", n);
464 rc = n;
465 out:
466 return rc;
467 out_tbl_unlock:
468 write_unlock_bh(&tbl->lock);
469 out_neigh_release:
470 neigh_release(n);
471 goto out;
472 }
473 EXPORT_SYMBOL(neigh_create);
474
475 static u32 pneigh_hash(const void *pkey, int key_len)
476 {
477 u32 hash_val = *(u32 *)(pkey + key_len - 4);
478 hash_val ^= (hash_val >> 16);
479 hash_val ^= hash_val >> 8;
480 hash_val ^= hash_val >> 4;
481 hash_val &= PNEIGH_HASHMASK;
482 return hash_val;
483 }
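/*
 * A minimal sketch of the fold above for an IPv4 proxy key (key_len == 4):
 * the last four bytes of the key are read as a u32 and folded down into
 * one of the PNEIGH_HASHMASK + 1 (i.e. 16) buckets.
 */
#if 0
static void example_pneigh_hash(void)
{
	__be32 key = htonl(0xc0a80001);	/* 192.168.0.1 */
	u32 h = pneigh_hash(&key, sizeof(key));

	BUG_ON(h > PNEIGH_HASHMASK);
}
#endif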
484
485 static struct pneigh_entry *__pneigh_lookup_1(struct pneigh_entry *n,
486 struct net *net,
487 const void *pkey,
488 int key_len,
489 struct net_device *dev)
490 {
491 while (n) {
492 if (!memcmp(n->key, pkey, key_len) &&
493 net_eq(pneigh_net(n), net) &&
494 (n->dev == dev || !n->dev))
495 return n;
496 n = n->next;
497 }
498 return NULL;
499 }
500
501 struct pneigh_entry *__pneigh_lookup(struct neigh_table *tbl,
502 struct net *net, const void *pkey, struct net_device *dev)
503 {
504 int key_len = tbl->key_len;
505 u32 hash_val = pneigh_hash(pkey, key_len);
506
507 return __pneigh_lookup_1(tbl->phash_buckets[hash_val],
508 net, pkey, key_len, dev);
509 }
510 EXPORT_SYMBOL_GPL(__pneigh_lookup);
511
512 struct pneigh_entry * pneigh_lookup(struct neigh_table *tbl,
513 struct net *net, const void *pkey,
514 struct net_device *dev, int creat)
515 {
516 struct pneigh_entry *n;
517 int key_len = tbl->key_len;
518 u32 hash_val = pneigh_hash(pkey, key_len);
519
520 read_lock_bh(&tbl->lock);
521 n = __pneigh_lookup_1(tbl->phash_buckets[hash_val],
522 net, pkey, key_len, dev);
523 read_unlock_bh(&tbl->lock);
524
525 if (n || !creat)
526 goto out;
527
528 ASSERT_RTNL();
529
530 n = kmalloc(sizeof(*n) + key_len, GFP_KERNEL);
531 if (!n)
532 goto out;
533
534 #ifdef CONFIG_NET_NS
535 n->net = hold_net(net);
536 #endif
537 memcpy(n->key, pkey, key_len);
538 n->dev = dev;
539 if (dev)
540 dev_hold(dev);
541
542 if (tbl->pconstructor && tbl->pconstructor(n)) {
543 if (dev)
544 dev_put(dev);
545 release_net(net);
546 kfree(n);
547 n = NULL;
548 goto out;
549 }
550
551 write_lock_bh(&tbl->lock);
552 n->next = tbl->phash_buckets[hash_val];
553 tbl->phash_buckets[hash_val] = n;
554 write_unlock_bh(&tbl->lock);
555 out:
556 return n;
557 }
558 EXPORT_SYMBOL(pneigh_lookup);
559
560
561 int pneigh_delete(struct neigh_table *tbl, struct net *net, const void *pkey,
562 struct net_device *dev)
563 {
564 struct pneigh_entry *n, **np;
565 int key_len = tbl->key_len;
566 u32 hash_val = pneigh_hash(pkey, key_len);
567
568 write_lock_bh(&tbl->lock);
569 for (np = &tbl->phash_buckets[hash_val]; (n = *np) != NULL;
570 np = &n->next) {
571 if (!memcmp(n->key, pkey, key_len) && n->dev == dev &&
572 net_eq(pneigh_net(n), net)) {
573 *np = n->next;
574 write_unlock_bh(&tbl->lock);
575 if (tbl->pdestructor)
576 tbl->pdestructor(n);
577 if (n->dev)
578 dev_put(n->dev);
579 release_net(pneigh_net(n));
580 kfree(n);
581 return 0;
582 }
583 }
584 write_unlock_bh(&tbl->lock);
585 return -ENOENT;
586 }
587
588 static int pneigh_ifdown(struct neigh_table *tbl, struct net_device *dev)
589 {
590 struct pneigh_entry *n, **np;
591 u32 h;
592
593 for (h = 0; h <= PNEIGH_HASHMASK; h++) {
594 np = &tbl->phash_buckets[h];
595 while ((n = *np) != NULL) {
596 if (!dev || n->dev == dev) {
597 *np = n->next;
598 if (tbl->pdestructor)
599 tbl->pdestructor(n);
600 if (n->dev)
601 dev_put(n->dev);
602 release_net(pneigh_net(n));
603 kfree(n);
604 continue;
605 }
606 np = &n->next;
607 }
608 }
609 return -ENOENT;
610 }
611
612 static void neigh_parms_destroy(struct neigh_parms *parms);
613
614 static inline void neigh_parms_put(struct neigh_parms *parms)
615 {
616 if (atomic_dec_and_test(&parms->refcnt))
617 neigh_parms_destroy(parms);
618 }
619
620 /*
621 * neighbour must already be out of the table;
622 *
623 */
624 void neigh_destroy(struct neighbour *neigh)
625 {
626 struct hh_cache *hh;
627
628 NEIGH_CACHE_STAT_INC(neigh->tbl, destroys);
629
630 if (!neigh->dead) {
631 printk(KERN_WARNING
632 "Destroying alive neighbour %p\n", neigh);
633 dump_stack();
634 return;
635 }
636
637 if (neigh_del_timer(neigh))
638 printk(KERN_WARNING "Impossible event.\n");
639
640 while ((hh = neigh->hh) != NULL) {
641 neigh->hh = hh->hh_next;
642 hh->hh_next = NULL;
643
644 write_seqlock_bh(&hh->hh_lock);
645 hh->hh_output = neigh_blackhole;
646 write_sequnlock_bh(&hh->hh_lock);
647 if (atomic_dec_and_test(&hh->hh_refcnt))
648 kfree(hh);
649 }
650
651 skb_queue_purge(&neigh->arp_queue);
652
653 dev_put(neigh->dev);
654 neigh_parms_put(neigh->parms);
655
656 NEIGH_PRINTK2("neigh %p is destroyed.\n", neigh);
657
658 atomic_dec(&neigh->tbl->entries);
659 kmem_cache_free(neigh->tbl->kmem_cachep, neigh);
660 }
661 EXPORT_SYMBOL(neigh_destroy);
662
663 /* Neighbour state is suspicious;
664 disable fast path.
665
666 Called with write_locked neigh.
667 */
668 static void neigh_suspect(struct neighbour *neigh)
669 {
670 struct hh_cache *hh;
671
672 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
673
674 neigh->output = neigh->ops->output;
675
676 for (hh = neigh->hh; hh; hh = hh->hh_next)
677 hh->hh_output = neigh->ops->output;
678 }
679
680 /* Neighbour state is OK;
681 enable fast path.
682
683 Called with write_locked neigh.
684 */
685 static void neigh_connect(struct neighbour *neigh)
686 {
687 struct hh_cache *hh;
688
689 NEIGH_PRINTK2("neigh %p is connected.\n", neigh);
690
691 neigh->output = neigh->ops->connected_output;
692
693 for (hh = neigh->hh; hh; hh = hh->hh_next)
694 hh->hh_output = neigh->ops->hh_output;
695 }
696
697 static void neigh_periodic_timer(unsigned long arg)
698 {
699 struct neigh_table *tbl = (struct neigh_table *)arg;
700 struct neighbour *n, **np;
701 unsigned long expire, now = jiffies;
702
703 NEIGH_CACHE_STAT_INC(tbl, periodic_gc_runs);
704
705 write_lock(&tbl->lock);
706
707 /*
708 * periodically recompute ReachableTime from random function
709 */
710
711 if (time_after(now, tbl->last_rand + 300 * HZ)) {
712 struct neigh_parms *p;
713 tbl->last_rand = now;
714 for (p = &tbl->parms; p; p = p->next)
715 p->reachable_time =
716 neigh_rand_reach_time(p->base_reachable_time);
717 }
718
719 np = &tbl->hash_buckets[tbl->hash_chain_gc];
720 tbl->hash_chain_gc = ((tbl->hash_chain_gc + 1) & tbl->hash_mask);
721
722 while ((n = *np) != NULL) {
723 unsigned int state;
724
725 write_lock(&n->lock);
726
727 state = n->nud_state;
728 if (state & (NUD_PERMANENT | NUD_IN_TIMER)) {
729 write_unlock(&n->lock);
730 goto next_elt;
731 }
732
733 if (time_before(n->used, n->confirmed))
734 n->used = n->confirmed;
735
736 if (atomic_read(&n->refcnt) == 1 &&
737 (state == NUD_FAILED ||
738 time_after(now, n->used + n->parms->gc_staletime))) {
739 *np = n->next;
740 n->dead = 1;
741 write_unlock(&n->lock);
742 neigh_cleanup_and_release(n);
743 continue;
744 }
745 write_unlock(&n->lock);
746
747 next_elt:
748 np = &n->next;
749 }
750
751 /* Cycle through all hash buckets every base_reachable_time/2 ticks.
752 * ARP entry timeouts range from 1/2 base_reachable_time to 3/2
753 * base_reachable_time.
754 */
755 expire = tbl->parms.base_reachable_time >> 1;
756 expire /= (tbl->hash_mask + 1);
757 if (!expire)
758 expire = 1;
759
760 if (expire > HZ)
761 mod_timer(&tbl->gc_timer, round_jiffies(now + expire));
762 else
763 mod_timer(&tbl->gc_timer, now + expire);
764
765 write_unlock(&tbl->lock);
766 }
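/*
 * A minimal worked example of the timer arithmetic above: with
 * base_reachable_time == 30 * HZ and 32 hash buckets (hash_mask == 31),
 * one bucket is scanned roughly every (30 * HZ / 2) / 32 jiffies, so a
 * full sweep of the table still takes about base_reachable_time / 2.
 */
#if 0
static unsigned long example_gc_expire(unsigned long base_reachable_time,
				       unsigned int hash_mask)
{
	unsigned long expire = base_reachable_time >> 1;

	expire /= (hash_mask + 1);
	return expire ? expire : 1;
}
#endif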
767
768 static __inline__ int neigh_max_probes(struct neighbour *n)
769 {
770 struct neigh_parms *p = n->parms;
771 return (n->nud_state & NUD_PROBE ?
772 p->ucast_probes :
773 p->ucast_probes + p->app_probes + p->mcast_probes);
774 }
775
776 /* Called when a timer expires for a neighbour entry. */
777
778 static void neigh_timer_handler(unsigned long arg)
779 {
780 unsigned long now, next;
781 struct neighbour *neigh = (struct neighbour *)arg;
782 unsigned state;
783 int notify = 0;
784
785 write_lock(&neigh->lock);
786
787 state = neigh->nud_state;
788 now = jiffies;
789 next = now + HZ;
790
791 if (!(state & NUD_IN_TIMER)) {
792 #ifndef CONFIG_SMP
793 printk(KERN_WARNING "neigh: timer & !nud_in_timer\n");
794 #endif
795 goto out;
796 }
797
798 if (state & NUD_REACHABLE) {
799 if (time_before_eq(now,
800 neigh->confirmed + neigh->parms->reachable_time)) {
801 NEIGH_PRINTK2("neigh %p is still alive.\n", neigh);
802 next = neigh->confirmed + neigh->parms->reachable_time;
803 } else if (time_before_eq(now,
804 neigh->used + neigh->parms->delay_probe_time)) {
805 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
806 neigh->nud_state = NUD_DELAY;
807 neigh->updated = jiffies;
808 neigh_suspect(neigh);
809 next = now + neigh->parms->delay_probe_time;
810 } else {
811 NEIGH_PRINTK2("neigh %p is suspected.\n", neigh);
812 neigh->nud_state = NUD_STALE;
813 neigh->updated = jiffies;
814 neigh_suspect(neigh);
815 notify = 1;
816 }
817 } else if (state & NUD_DELAY) {
818 if (time_before_eq(now,
819 neigh->confirmed + neigh->parms->delay_probe_time)) {
820 NEIGH_PRINTK2("neigh %p is now reachable.\n", neigh);
821 neigh->nud_state = NUD_REACHABLE;
822 neigh->updated = jiffies;
823 neigh_connect(neigh);
824 notify = 1;
825 next = neigh->confirmed + neigh->parms->reachable_time;
826 } else {
827 NEIGH_PRINTK2("neigh %p is probed.\n", neigh);
828 neigh->nud_state = NUD_PROBE;
829 neigh->updated = jiffies;
830 atomic_set(&neigh->probes, 0);
831 next = now + neigh->parms->retrans_time;
832 }
833 } else {
834 /* NUD_PROBE|NUD_INCOMPLETE */
835 next = now + neigh->parms->retrans_time;
836 }
837
838 if ((neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) &&
839 atomic_read(&neigh->probes) >= neigh_max_probes(neigh)) {
840 struct sk_buff *skb;
841
842 neigh->nud_state = NUD_FAILED;
843 neigh->updated = jiffies;
844 notify = 1;
845 NEIGH_CACHE_STAT_INC(neigh->tbl, res_failed);
846 NEIGH_PRINTK2("neigh %p is failed.\n", neigh);
847
848 /* This is a very delicate spot. report_unreachable is a very complicated
849 routine. In particular, it can hit the same neighbour entry!
850
851 So we try to be careful and avoid a dead loop. --ANK
852 */
853 while (neigh->nud_state == NUD_FAILED &&
854 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
855 write_unlock(&neigh->lock);
856 neigh->ops->error_report(neigh, skb);
857 write_lock(&neigh->lock);
858 }
859 skb_queue_purge(&neigh->arp_queue);
860 }
861
862 if (neigh->nud_state & NUD_IN_TIMER) {
863 if (time_before(next, jiffies + HZ/2))
864 next = jiffies + HZ/2;
865 if (!mod_timer(&neigh->timer, next))
866 neigh_hold(neigh);
867 }
868 if (neigh->nud_state & (NUD_INCOMPLETE | NUD_PROBE)) {
869 struct sk_buff *skb = skb_peek(&neigh->arp_queue);
870 /* keep skb alive even if arp_queue overflows */
871 if (skb)
872 skb = skb_copy(skb, GFP_ATOMIC);
873 write_unlock(&neigh->lock);
874 neigh->ops->solicit(neigh, skb);
875 atomic_inc(&neigh->probes);
876 if (skb)
877 kfree_skb(skb);
878 } else {
879 out:
880 write_unlock(&neigh->lock);
881 }
882
883 if (notify)
884 neigh_update_notify(neigh);
885
886 neigh_release(neigh);
887 }
888
889 int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb)
890 {
891 int rc;
892 unsigned long now;
893
894 write_lock_bh(&neigh->lock);
895
896 rc = 0;
897 if (neigh->nud_state & (NUD_CONNECTED | NUD_DELAY | NUD_PROBE))
898 goto out_unlock_bh;
899
900 now = jiffies;
901
902 if (!(neigh->nud_state & (NUD_STALE | NUD_INCOMPLETE))) {
903 if (neigh->parms->mcast_probes + neigh->parms->app_probes) {
904 atomic_set(&neigh->probes, neigh->parms->ucast_probes);
905 neigh->nud_state = NUD_INCOMPLETE;
906 neigh->updated = jiffies;
907 neigh_add_timer(neigh, now + 1);
908 } else {
909 neigh->nud_state = NUD_FAILED;
910 neigh->updated = jiffies;
911 write_unlock_bh(&neigh->lock);
912
913 if (skb)
914 kfree_skb(skb);
915 return 1;
916 }
917 } else if (neigh->nud_state & NUD_STALE) {
918 NEIGH_PRINTK2("neigh %p is delayed.\n", neigh);
919 neigh->nud_state = NUD_DELAY;
920 neigh->updated = jiffies;
921 neigh_add_timer(neigh,
922 jiffies + neigh->parms->delay_probe_time);
923 }
924
925 if (neigh->nud_state == NUD_INCOMPLETE) {
926 if (skb) {
927 if (skb_queue_len(&neigh->arp_queue) >=
928 neigh->parms->queue_len) {
929 struct sk_buff *buff;
930 buff = __skb_dequeue(&neigh->arp_queue);
931 kfree_skb(buff);
932 NEIGH_CACHE_STAT_INC(neigh->tbl, unres_discards);
933 }
934 __skb_queue_tail(&neigh->arp_queue, skb);
935 }
936 rc = 1;
937 }
938 out_unlock_bh:
939 write_unlock_bh(&neigh->lock);
940 return rc;
941 }
942 EXPORT_SYMBOL(__neigh_event_send);
943
944 static void neigh_update_hhs(struct neighbour *neigh)
945 {
946 struct hh_cache *hh;
947 void (*update)(struct hh_cache*, const struct net_device*, const unsigned char *)
948 = neigh->dev->header_ops->cache_update;
949
950 if (update) {
951 for (hh = neigh->hh; hh; hh = hh->hh_next) {
952 write_seqlock_bh(&hh->hh_lock);
953 update(hh, neigh->dev, neigh->ha);
954 write_sequnlock_bh(&hh->hh_lock);
955 }
956 }
957 }
958
959
960
961 /* Generic update routine.
962 -- lladdr is the new lladdr or NULL, if it is not supplied.
963 -- new is the new state.
964 -- flags
965 NEIGH_UPDATE_F_OVERRIDE allows overriding the existing lladdr,
966 if it is different.
967 NEIGH_UPDATE_F_WEAK_OVERRIDE will suspect an existing "connected"
968 lladdr instead of overriding it
969 if it is different.
970 It also allows retaining the current state
971 if the lladdr is unchanged.
972 NEIGH_UPDATE_F_ADMIN means that the change is administrative.
973
974 NEIGH_UPDATE_F_OVERRIDE_ISROUTER allows overriding an existing
975 NTF_ROUTER flag.
976 NEIGH_UPDATE_F_ISROUTER indicates whether the neighbour is known as
977 a router.
978
979 The caller MUST hold a reference count on the entry.
980 */
981
982 int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new,
983 u32 flags)
984 {
985 u8 old;
986 int err;
987 int notify = 0;
988 struct net_device *dev;
989 int update_isrouter = 0;
990
991 write_lock_bh(&neigh->lock);
992
993 dev = neigh->dev;
994 old = neigh->nud_state;
995 err = -EPERM;
996
997 if (!(flags & NEIGH_UPDATE_F_ADMIN) &&
998 (old & (NUD_NOARP | NUD_PERMANENT)))
999 goto out;
1000
1001 if (!(new & NUD_VALID)) {
1002 neigh_del_timer(neigh);
1003 if (old & NUD_CONNECTED)
1004 neigh_suspect(neigh);
1005 neigh->nud_state = new;
1006 err = 0;
1007 notify = old & NUD_VALID;
1008 goto out;
1009 }
1010
1011 /* Compare new lladdr with cached one */
1012 if (!dev->addr_len) {
1013 /* First case: device needs no address. */
1014 lladdr = neigh->ha;
1015 } else if (lladdr) {
1016 /* The second case: if something is already cached
1017 and a new address is proposed:
1018 - compare new & old
1019 - if they are different, check override flag
1020 */
1021 if ((old & NUD_VALID) &&
1022 !memcmp(lladdr, neigh->ha, dev->addr_len))
1023 lladdr = neigh->ha;
1024 } else {
1025 /* No address is supplied; if we know something,
1026 use it, otherwise discard the request.
1027 */
1028 err = -EINVAL;
1029 if (!(old & NUD_VALID))
1030 goto out;
1031 lladdr = neigh->ha;
1032 }
1033
1034 if (new & NUD_CONNECTED)
1035 neigh->confirmed = jiffies;
1036 neigh->updated = jiffies;
1037
1038 /* If the entry was valid and the address has not changed,
1039 do not change the entry state if the new one is STALE.
1040 */
1041 err = 0;
1042 update_isrouter = flags & NEIGH_UPDATE_F_OVERRIDE_ISROUTER;
1043 if (old & NUD_VALID) {
1044 if (lladdr != neigh->ha && !(flags & NEIGH_UPDATE_F_OVERRIDE)) {
1045 update_isrouter = 0;
1046 if ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) &&
1047 (old & NUD_CONNECTED)) {
1048 lladdr = neigh->ha;
1049 new = NUD_STALE;
1050 } else
1051 goto out;
1052 } else {
1053 if (lladdr == neigh->ha && new == NUD_STALE &&
1054 ((flags & NEIGH_UPDATE_F_WEAK_OVERRIDE) ||
1055 (old & NUD_CONNECTED))
1056 )
1057 new = old;
1058 }
1059 }
1060
1061 if (new != old) {
1062 neigh_del_timer(neigh);
1063 if (new & NUD_IN_TIMER)
1064 neigh_add_timer(neigh, (jiffies +
1065 ((new & NUD_REACHABLE) ?
1066 neigh->parms->reachable_time :
1067 0)));
1068 neigh->nud_state = new;
1069 }
1070
1071 if (lladdr != neigh->ha) {
1072 memcpy(&neigh->ha, lladdr, dev->addr_len);
1073 neigh_update_hhs(neigh);
1074 if (!(new & NUD_CONNECTED))
1075 neigh->confirmed = jiffies -
1076 (neigh->parms->base_reachable_time << 1);
1077 notify = 1;
1078 }
1079 if (new == old)
1080 goto out;
1081 if (new & NUD_CONNECTED)
1082 neigh_connect(neigh);
1083 else
1084 neigh_suspect(neigh);
1085 if (!(old & NUD_VALID)) {
1086 struct sk_buff *skb;
1087
1088 /* Again: avoid dead loop if something went wrong */
1089
1090 while (neigh->nud_state & NUD_VALID &&
1091 (skb = __skb_dequeue(&neigh->arp_queue)) != NULL) {
1092 struct neighbour *n1 = neigh;
1093 write_unlock_bh(&neigh->lock);
1094 /* On shaper/eql skb->dst->neighbour != neigh :( */
1095 if (skb->dst && skb->dst->neighbour)
1096 n1 = skb->dst->neighbour;
1097 n1->output(skb);
1098 write_lock_bh(&neigh->lock);
1099 }
1100 skb_queue_purge(&neigh->arp_queue);
1101 }
1102 out:
1103 if (update_isrouter) {
1104 neigh->flags = (flags & NEIGH_UPDATE_F_ISROUTER) ?
1105 (neigh->flags | NTF_ROUTER) :
1106 (neigh->flags & ~NTF_ROUTER);
1107 }
1108 write_unlock_bh(&neigh->lock);
1109
1110 if (notify)
1111 neigh_update_notify(neigh);
1112
1113 return err;
1114 }
1115 EXPORT_SYMBOL(neigh_update);
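/*
 * A minimal usage sketch for the flags documented above neigh_update():
 * a protocol that has just learnt a confirmed link-layer address could do
 * something like the hypothetical helper below.
 */
#if 0
static void example_confirm_lladdr(struct neigh_table *tbl, const void *pkey,
				   const u8 *lladdr, struct net_device *dev)
{
	struct neighbour *n = neigh_lookup(tbl, pkey, dev);

	if (!n)
		return;
	/* Override a differing cached lladdr and mark the entry reachable. */
	neigh_update(n, lladdr, NUD_REACHABLE, NEIGH_UPDATE_F_OVERRIDE);
	neigh_release(n);
}
#endif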
1116
1117 struct neighbour *neigh_event_ns(struct neigh_table *tbl,
1118 u8 *lladdr, void *saddr,
1119 struct net_device *dev)
1120 {
1121 struct neighbour *neigh = __neigh_lookup(tbl, saddr, dev,
1122 lladdr || !dev->addr_len);
1123 if (neigh)
1124 neigh_update(neigh, lladdr, NUD_STALE,
1125 NEIGH_UPDATE_F_OVERRIDE);
1126 return neigh;
1127 }
1128 EXPORT_SYMBOL(neigh_event_ns);
1129
1130 static void neigh_hh_init(struct neighbour *n, struct dst_entry *dst,
1131 __be16 protocol)
1132 {
1133 struct hh_cache *hh;
1134 struct net_device *dev = dst->dev;
1135
1136 for (hh = n->hh; hh; hh = hh->hh_next)
1137 if (hh->hh_type == protocol)
1138 break;
1139
1140 if (!hh && (hh = kzalloc(sizeof(*hh), GFP_ATOMIC)) != NULL) {
1141 seqlock_init(&hh->hh_lock);
1142 hh->hh_type = protocol;
1143 atomic_set(&hh->hh_refcnt, 0);
1144 hh->hh_next = NULL;
1145
1146 if (dev->header_ops->cache(n, hh)) {
1147 kfree(hh);
1148 hh = NULL;
1149 } else {
1150 atomic_inc(&hh->hh_refcnt);
1151 hh->hh_next = n->hh;
1152 n->hh = hh;
1153 if (n->nud_state & NUD_CONNECTED)
1154 hh->hh_output = n->ops->hh_output;
1155 else
1156 hh->hh_output = n->ops->output;
1157 }
1158 }
1159 if (hh) {
1160 atomic_inc(&hh->hh_refcnt);
1161 dst->hh = hh;
1162 }
1163 }
1164
1165 /* This function can be used in contexts where only the old dev_queue_xmit
1166 worked, e.g. if you want to override the normal output path (eql, shaper),
1167 but resolution has not been made yet.
1168 */
1169
1170 int neigh_compat_output(struct sk_buff *skb)
1171 {
1172 struct net_device *dev = skb->dev;
1173
1174 __skb_pull(skb, skb_network_offset(skb));
1175
1176 if (dev_hard_header(skb, dev, ntohs(skb->protocol), NULL, NULL,
1177 skb->len) < 0 &&
1178 dev->header_ops->rebuild(skb))
1179 return 0;
1180
1181 return dev_queue_xmit(skb);
1182 }
1183 EXPORT_SYMBOL(neigh_compat_output);
1184
1185 /* Slow and careful. */
1186
1187 int neigh_resolve_output(struct sk_buff *skb)
1188 {
1189 struct dst_entry *dst = skb->dst;
1190 struct neighbour *neigh;
1191 int rc = 0;
1192
1193 if (!dst || !(neigh = dst->neighbour))
1194 goto discard;
1195
1196 __skb_pull(skb, skb_network_offset(skb));
1197
1198 if (!neigh_event_send(neigh, skb)) {
1199 int err;
1200 struct net_device *dev = neigh->dev;
1201 if (dev->header_ops->cache && !dst->hh) {
1202 write_lock_bh(&neigh->lock);
1203 if (!dst->hh)
1204 neigh_hh_init(neigh, dst, dst->ops->protocol);
1205 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1206 neigh->ha, NULL, skb->len);
1207 write_unlock_bh(&neigh->lock);
1208 } else {
1209 read_lock_bh(&neigh->lock);
1210 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1211 neigh->ha, NULL, skb->len);
1212 read_unlock_bh(&neigh->lock);
1213 }
1214 if (err >= 0)
1215 rc = neigh->ops->queue_xmit(skb);
1216 else
1217 goto out_kfree_skb;
1218 }
1219 out:
1220 return rc;
1221 discard:
1222 NEIGH_PRINTK1("neigh_resolve_output: dst=%p neigh=%p\n",
1223 dst, dst ? dst->neighbour : NULL);
1224 out_kfree_skb:
1225 rc = -EINVAL;
1226 kfree_skb(skb);
1227 goto out;
1228 }
1229 EXPORT_SYMBOL(neigh_resolve_output);
1230
1231 /* As fast as possible without hh cache */
1232
1233 int neigh_connected_output(struct sk_buff *skb)
1234 {
1235 int err;
1236 struct dst_entry *dst = skb->dst;
1237 struct neighbour *neigh = dst->neighbour;
1238 struct net_device *dev = neigh->dev;
1239
1240 __skb_pull(skb, skb_network_offset(skb));
1241
1242 read_lock_bh(&neigh->lock);
1243 err = dev_hard_header(skb, dev, ntohs(skb->protocol),
1244 neigh->ha, NULL, skb->len);
1245 read_unlock_bh(&neigh->lock);
1246 if (err >= 0)
1247 err = neigh->ops->queue_xmit(skb);
1248 else {
1249 err = -EINVAL;
1250 kfree_skb(skb);
1251 }
1252 return err;
1253 }
1254 EXPORT_SYMBOL(neigh_connected_output);
1255
1256 static void neigh_proxy_process(unsigned long arg)
1257 {
1258 struct neigh_table *tbl = (struct neigh_table *)arg;
1259 long sched_next = 0;
1260 unsigned long now = jiffies;
1261 struct sk_buff *skb, *n;
1262
1263 spin_lock(&tbl->proxy_queue.lock);
1264
1265 skb_queue_walk_safe(&tbl->proxy_queue, skb, n) {
1266 long tdif = NEIGH_CB(skb)->sched_next - now;
1267
1268 if (tdif <= 0) {
1269 struct net_device *dev = skb->dev;
1270 __skb_unlink(skb, &tbl->proxy_queue);
1271 if (tbl->proxy_redo && netif_running(dev))
1272 tbl->proxy_redo(skb);
1273 else
1274 kfree_skb(skb);
1275
1276 dev_put(dev);
1277 } else if (!sched_next || tdif < sched_next)
1278 sched_next = tdif;
1279 }
1280 del_timer(&tbl->proxy_timer);
1281 if (sched_next)
1282 mod_timer(&tbl->proxy_timer, jiffies + sched_next);
1283 spin_unlock(&tbl->proxy_queue.lock);
1284 }
1285
1286 void pneigh_enqueue(struct neigh_table *tbl, struct neigh_parms *p,
1287 struct sk_buff *skb)
1288 {
1289 unsigned long now = jiffies;
1290 unsigned long sched_next = now + (net_random() % p->proxy_delay);
1291
1292 if (tbl->proxy_queue.qlen > p->proxy_qlen) {
1293 kfree_skb(skb);
1294 return;
1295 }
1296
1297 NEIGH_CB(skb)->sched_next = sched_next;
1298 NEIGH_CB(skb)->flags |= LOCALLY_ENQUEUED;
1299
1300 spin_lock(&tbl->proxy_queue.lock);
1301 if (del_timer(&tbl->proxy_timer)) {
1302 if (time_before(tbl->proxy_timer.expires, sched_next))
1303 sched_next = tbl->proxy_timer.expires;
1304 }
1305 dst_release(skb->dst);
1306 skb->dst = NULL;
1307 dev_hold(skb->dev);
1308 __skb_queue_tail(&tbl->proxy_queue, skb);
1309 mod_timer(&tbl->proxy_timer, sched_next);
1310 spin_unlock(&tbl->proxy_queue.lock);
1311 }
1312 EXPORT_SYMBOL(pneigh_enqueue);
1313
1314 static inline struct neigh_parms *lookup_neigh_params(struct neigh_table *tbl,
1315 struct net *net, int ifindex)
1316 {
1317 struct neigh_parms *p;
1318
1319 for (p = &tbl->parms; p; p = p->next) {
1320 if ((p->dev && p->dev->ifindex == ifindex && net_eq(neigh_parms_net(p), net)) ||
1321 (!p->dev && !ifindex))
1322 return p;
1323 }
1324
1325 return NULL;
1326 }
1327
1328 struct neigh_parms *neigh_parms_alloc(struct net_device *dev,
1329 struct neigh_table *tbl)
1330 {
1331 struct neigh_parms *p, *ref;
1332 struct net *net;
1333
1334 net = dev_net(dev);
1335 ref = lookup_neigh_params(tbl, net, 0);
1336 if (!ref)
1337 return NULL;
1338
1339 p = kmemdup(ref, sizeof(*p), GFP_KERNEL);
1340 if (p) {
1341 p->tbl = tbl;
1342 atomic_set(&p->refcnt, 1);
1343 p->reachable_time =
1344 neigh_rand_reach_time(p->base_reachable_time);
1345
1346 if (dev->neigh_setup && dev->neigh_setup(dev, p)) {
1347 kfree(p);
1348 return NULL;
1349 }
1350
1351 dev_hold(dev);
1352 p->dev = dev;
1353 #ifdef CONFIG_NET_NS
1354 p->net = hold_net(net);
1355 #endif
1356 p->sysctl_table = NULL;
1357 write_lock_bh(&tbl->lock);
1358 p->next = tbl->parms.next;
1359 tbl->parms.next = p;
1360 write_unlock_bh(&tbl->lock);
1361 }
1362 return p;
1363 }
1364 EXPORT_SYMBOL(neigh_parms_alloc);
1365
1366 static void neigh_rcu_free_parms(struct rcu_head *head)
1367 {
1368 struct neigh_parms *parms =
1369 container_of(head, struct neigh_parms, rcu_head);
1370
1371 neigh_parms_put(parms);
1372 }
1373
1374 void neigh_parms_release(struct neigh_table *tbl, struct neigh_parms *parms)
1375 {
1376 struct neigh_parms **p;
1377
1378 if (!parms || parms == &tbl->parms)
1379 return;
1380 write_lock_bh(&tbl->lock);
1381 for (p = &tbl->parms.next; *p; p = &(*p)->next) {
1382 if (*p == parms) {
1383 *p = parms->next;
1384 parms->dead = 1;
1385 write_unlock_bh(&tbl->lock);
1386 if (parms->dev)
1387 dev_put(parms->dev);
1388 call_rcu(&parms->rcu_head, neigh_rcu_free_parms);
1389 return;
1390 }
1391 }
1392 write_unlock_bh(&tbl->lock);
1393 NEIGH_PRINTK1("neigh_parms_release: not found\n");
1394 }
1395 EXPORT_SYMBOL(neigh_parms_release);
1396
1397 static void neigh_parms_destroy(struct neigh_parms *parms)
1398 {
1399 release_net(neigh_parms_net(parms));
1400 kfree(parms);
1401 }
1402
1403 static struct lock_class_key neigh_table_proxy_queue_class;
1404
1405 void neigh_table_init_no_netlink(struct neigh_table *tbl)
1406 {
1407 unsigned long now = jiffies;
1408 unsigned long phsize;
1409
1410 #ifdef CONFIG_NET_NS
1411 tbl->parms.net = &init_net;
1412 #endif
1413 atomic_set(&tbl->parms.refcnt, 1);
1414 tbl->parms.reachable_time =
1415 neigh_rand_reach_time(tbl->parms.base_reachable_time);
1416
1417 if (!tbl->kmem_cachep)
1418 tbl->kmem_cachep =
1419 kmem_cache_create(tbl->id, tbl->entry_size, 0,
1420 SLAB_HWCACHE_ALIGN|SLAB_PANIC,
1421 NULL);
1422 tbl->stats = alloc_percpu(struct neigh_statistics);
1423 if (!tbl->stats)
1424 panic("cannot create neighbour cache statistics");
1425
1426 #ifdef CONFIG_PROC_FS
1427 tbl->pde = proc_create_data(tbl->id, 0, init_net.proc_net_stat,
1428 &neigh_stat_seq_fops, tbl);
1429 if (!tbl->pde)
1430 panic("cannot create neighbour proc dir entry");
1431 #endif
1432
1433 tbl->hash_mask = 1;
1434 tbl->hash_buckets = neigh_hash_alloc(tbl->hash_mask + 1);
1435
1436 phsize = (PNEIGH_HASHMASK + 1) * sizeof(struct pneigh_entry *);
1437 tbl->phash_buckets = kzalloc(phsize, GFP_KERNEL);
1438
1439 if (!tbl->hash_buckets || !tbl->phash_buckets)
1440 panic("cannot allocate neighbour cache hashes");
1441
1442 get_random_bytes(&tbl->hash_rnd, sizeof(tbl->hash_rnd));
1443
1444 rwlock_init(&tbl->lock);
1445 setup_timer(&tbl->gc_timer, neigh_periodic_timer, (unsigned long)tbl);
1446 tbl->gc_timer.expires = now + 1;
1447 add_timer(&tbl->gc_timer);
1448
1449 setup_timer(&tbl->proxy_timer, neigh_proxy_process, (unsigned long)tbl);
1450 skb_queue_head_init_class(&tbl->proxy_queue,
1451 &neigh_table_proxy_queue_class);
1452
1453 tbl->last_flush = now;
1454 tbl->last_rand = now + tbl->parms.reachable_time * 20;
1455 }
1456 EXPORT_SYMBOL(neigh_table_init_no_netlink);
1457
1458 void neigh_table_init(struct neigh_table *tbl)
1459 {
1460 struct neigh_table *tmp;
1461
1462 neigh_table_init_no_netlink(tbl);
1463 write_lock(&neigh_tbl_lock);
1464 for (tmp = neigh_tables; tmp; tmp = tmp->next) {
1465 if (tmp->family == tbl->family)
1466 break;
1467 }
1468 tbl->next = neigh_tables;
1469 neigh_tables = tbl;
1470 write_unlock(&neigh_tbl_lock);
1471
1472 if (unlikely(tmp)) {
1473 printk(KERN_ERR "NEIGH: Registering multiple tables for "
1474 "family %d\n", tbl->family);
1475 dump_stack();
1476 }
1477 }
1478 EXPORT_SYMBOL(neigh_table_init);
1479
1480 int neigh_table_clear(struct neigh_table *tbl)
1481 {
1482 struct neigh_table **tp;
1483
1484 /* It is not clean... Fix it so that the IPv6 module can be unloaded safely */
1485 del_timer_sync(&tbl->gc_timer);
1486 del_timer_sync(&tbl->proxy_timer);
1487 pneigh_queue_purge(&tbl->proxy_queue);
1488 neigh_ifdown(tbl, NULL);
1489 if (atomic_read(&tbl->entries))
1490 printk(KERN_CRIT "neighbour leakage\n");
1491 write_lock(&neigh_tbl_lock);
1492 for (tp = &neigh_tables; *tp; tp = &(*tp)->next) {
1493 if (*tp == tbl) {
1494 *tp = tbl->next;
1495 break;
1496 }
1497 }
1498 write_unlock(&neigh_tbl_lock);
1499
1500 neigh_hash_free(tbl->hash_buckets, tbl->hash_mask + 1);
1501 tbl->hash_buckets = NULL;
1502
1503 kfree(tbl->phash_buckets);
1504 tbl->phash_buckets = NULL;
1505
1506 remove_proc_entry(tbl->id, init_net.proc_net_stat);
1507
1508 free_percpu(tbl->stats);
1509 tbl->stats = NULL;
1510
1511 kmem_cache_destroy(tbl->kmem_cachep);
1512 tbl->kmem_cachep = NULL;
1513
1514 return 0;
1515 }
1516 EXPORT_SYMBOL(neigh_table_clear);
1517
1518 static int neigh_delete(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1519 {
1520 struct net *net = sock_net(skb->sk);
1521 struct ndmsg *ndm;
1522 struct nlattr *dst_attr;
1523 struct neigh_table *tbl;
1524 struct net_device *dev = NULL;
1525 int err = -EINVAL;
1526
1527 if (nlmsg_len(nlh) < sizeof(*ndm))
1528 goto out;
1529
1530 dst_attr = nlmsg_find_attr(nlh, sizeof(*ndm), NDA_DST);
1531 if (dst_attr == NULL)
1532 goto out;
1533
1534 ndm = nlmsg_data(nlh);
1535 if (ndm->ndm_ifindex) {
1536 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1537 if (dev == NULL) {
1538 err = -ENODEV;
1539 goto out;
1540 }
1541 }
1542
1543 read_lock(&neigh_tbl_lock);
1544 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1545 struct neighbour *neigh;
1546
1547 if (tbl->family != ndm->ndm_family)
1548 continue;
1549 read_unlock(&neigh_tbl_lock);
1550
1551 if (nla_len(dst_attr) < tbl->key_len)
1552 goto out_dev_put;
1553
1554 if (ndm->ndm_flags & NTF_PROXY) {
1555 err = pneigh_delete(tbl, net, nla_data(dst_attr), dev);
1556 goto out_dev_put;
1557 }
1558
1559 if (dev == NULL)
1560 goto out_dev_put;
1561
1562 neigh = neigh_lookup(tbl, nla_data(dst_attr), dev);
1563 if (neigh == NULL) {
1564 err = -ENOENT;
1565 goto out_dev_put;
1566 }
1567
1568 err = neigh_update(neigh, NULL, NUD_FAILED,
1569 NEIGH_UPDATE_F_OVERRIDE |
1570 NEIGH_UPDATE_F_ADMIN);
1571 neigh_release(neigh);
1572 goto out_dev_put;
1573 }
1574 read_unlock(&neigh_tbl_lock);
1575 err = -EAFNOSUPPORT;
1576
1577 out_dev_put:
1578 if (dev)
1579 dev_put(dev);
1580 out:
1581 return err;
1582 }
1583
1584 static int neigh_add(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1585 {
1586 struct net *net = sock_net(skb->sk);
1587 struct ndmsg *ndm;
1588 struct nlattr *tb[NDA_MAX+1];
1589 struct neigh_table *tbl;
1590 struct net_device *dev = NULL;
1591 int err;
1592
1593 err = nlmsg_parse(nlh, sizeof(*ndm), tb, NDA_MAX, NULL);
1594 if (err < 0)
1595 goto out;
1596
1597 err = -EINVAL;
1598 if (tb[NDA_DST] == NULL)
1599 goto out;
1600
1601 ndm = nlmsg_data(nlh);
1602 if (ndm->ndm_ifindex) {
1603 dev = dev_get_by_index(net, ndm->ndm_ifindex);
1604 if (dev == NULL) {
1605 err = -ENODEV;
1606 goto out;
1607 }
1608
1609 if (tb[NDA_LLADDR] && nla_len(tb[NDA_LLADDR]) < dev->addr_len)
1610 goto out_dev_put;
1611 }
1612
1613 read_lock(&neigh_tbl_lock);
1614 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1615 int flags = NEIGH_UPDATE_F_ADMIN | NEIGH_UPDATE_F_OVERRIDE;
1616 struct neighbour *neigh;
1617 void *dst, *lladdr;
1618
1619 if (tbl->family != ndm->ndm_family)
1620 continue;
1621 read_unlock(&neigh_tbl_lock);
1622
1623 if (nla_len(tb[NDA_DST]) < tbl->key_len)
1624 goto out_dev_put;
1625 dst = nla_data(tb[NDA_DST]);
1626 lladdr = tb[NDA_LLADDR] ? nla_data(tb[NDA_LLADDR]) : NULL;
1627
1628 if (ndm->ndm_flags & NTF_PROXY) {
1629 struct pneigh_entry *pn;
1630
1631 err = -ENOBUFS;
1632 pn = pneigh_lookup(tbl, net, dst, dev, 1);
1633 if (pn) {
1634 pn->flags = ndm->ndm_flags;
1635 err = 0;
1636 }
1637 goto out_dev_put;
1638 }
1639
1640 if (dev == NULL)
1641 goto out_dev_put;
1642
1643 neigh = neigh_lookup(tbl, dst, dev);
1644 if (neigh == NULL) {
1645 if (!(nlh->nlmsg_flags & NLM_F_CREATE)) {
1646 err = -ENOENT;
1647 goto out_dev_put;
1648 }
1649
1650 neigh = __neigh_lookup_errno(tbl, dst, dev);
1651 if (IS_ERR(neigh)) {
1652 err = PTR_ERR(neigh);
1653 goto out_dev_put;
1654 }
1655 } else {
1656 if (nlh->nlmsg_flags & NLM_F_EXCL) {
1657 err = -EEXIST;
1658 neigh_release(neigh);
1659 goto out_dev_put;
1660 }
1661
1662 if (!(nlh->nlmsg_flags & NLM_F_REPLACE))
1663 flags &= ~NEIGH_UPDATE_F_OVERRIDE;
1664 }
1665
1666 err = neigh_update(neigh, lladdr, ndm->ndm_state, flags);
1667 neigh_release(neigh);
1668 goto out_dev_put;
1669 }
1670
1671 read_unlock(&neigh_tbl_lock);
1672 err = -EAFNOSUPPORT;
1673
1674 out_dev_put:
1675 if (dev)
1676 dev_put(dev);
1677 out:
1678 return err;
1679 }
1680
1681 static int neightbl_fill_parms(struct sk_buff *skb, struct neigh_parms *parms)
1682 {
1683 struct nlattr *nest;
1684
1685 nest = nla_nest_start(skb, NDTA_PARMS);
1686 if (nest == NULL)
1687 return -ENOBUFS;
1688
1689 if (parms->dev)
1690 NLA_PUT_U32(skb, NDTPA_IFINDEX, parms->dev->ifindex);
1691
1692 NLA_PUT_U32(skb, NDTPA_REFCNT, atomic_read(&parms->refcnt));
1693 NLA_PUT_U32(skb, NDTPA_QUEUE_LEN, parms->queue_len);
1694 NLA_PUT_U32(skb, NDTPA_PROXY_QLEN, parms->proxy_qlen);
1695 NLA_PUT_U32(skb, NDTPA_APP_PROBES, parms->app_probes);
1696 NLA_PUT_U32(skb, NDTPA_UCAST_PROBES, parms->ucast_probes);
1697 NLA_PUT_U32(skb, NDTPA_MCAST_PROBES, parms->mcast_probes);
1698 NLA_PUT_MSECS(skb, NDTPA_REACHABLE_TIME, parms->reachable_time);
1699 NLA_PUT_MSECS(skb, NDTPA_BASE_REACHABLE_TIME,
1700 parms->base_reachable_time);
1701 NLA_PUT_MSECS(skb, NDTPA_GC_STALETIME, parms->gc_staletime);
1702 NLA_PUT_MSECS(skb, NDTPA_DELAY_PROBE_TIME, parms->delay_probe_time);
1703 NLA_PUT_MSECS(skb, NDTPA_RETRANS_TIME, parms->retrans_time);
1704 NLA_PUT_MSECS(skb, NDTPA_ANYCAST_DELAY, parms->anycast_delay);
1705 NLA_PUT_MSECS(skb, NDTPA_PROXY_DELAY, parms->proxy_delay);
1706 NLA_PUT_MSECS(skb, NDTPA_LOCKTIME, parms->locktime);
1707
1708 return nla_nest_end(skb, nest);
1709
1710 nla_put_failure:
1711 nla_nest_cancel(skb, nest);
1712 return -EMSGSIZE;
1713 }
1714
1715 static int neightbl_fill_info(struct sk_buff *skb, struct neigh_table *tbl,
1716 u32 pid, u32 seq, int type, int flags)
1717 {
1718 struct nlmsghdr *nlh;
1719 struct ndtmsg *ndtmsg;
1720
1721 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1722 if (nlh == NULL)
1723 return -EMSGSIZE;
1724
1725 ndtmsg = nlmsg_data(nlh);
1726
1727 read_lock_bh(&tbl->lock);
1728 ndtmsg->ndtm_family = tbl->family;
1729 ndtmsg->ndtm_pad1 = 0;
1730 ndtmsg->ndtm_pad2 = 0;
1731
1732 NLA_PUT_STRING(skb, NDTA_NAME, tbl->id);
1733 NLA_PUT_MSECS(skb, NDTA_GC_INTERVAL, tbl->gc_interval);
1734 NLA_PUT_U32(skb, NDTA_THRESH1, tbl->gc_thresh1);
1735 NLA_PUT_U32(skb, NDTA_THRESH2, tbl->gc_thresh2);
1736 NLA_PUT_U32(skb, NDTA_THRESH3, tbl->gc_thresh3);
1737
1738 {
1739 unsigned long now = jiffies;
1740 unsigned int flush_delta = now - tbl->last_flush;
1741 unsigned int rand_delta = now - tbl->last_rand;
1742
1743 struct ndt_config ndc = {
1744 .ndtc_key_len = tbl->key_len,
1745 .ndtc_entry_size = tbl->entry_size,
1746 .ndtc_entries = atomic_read(&tbl->entries),
1747 .ndtc_last_flush = jiffies_to_msecs(flush_delta),
1748 .ndtc_last_rand = jiffies_to_msecs(rand_delta),
1749 .ndtc_hash_rnd = tbl->hash_rnd,
1750 .ndtc_hash_mask = tbl->hash_mask,
1751 .ndtc_hash_chain_gc = tbl->hash_chain_gc,
1752 .ndtc_proxy_qlen = tbl->proxy_queue.qlen,
1753 };
1754
1755 NLA_PUT(skb, NDTA_CONFIG, sizeof(ndc), &ndc);
1756 }
1757
1758 {
1759 int cpu;
1760 struct ndt_stats ndst;
1761
1762 memset(&ndst, 0, sizeof(ndst));
1763
1764 for_each_possible_cpu(cpu) {
1765 struct neigh_statistics *st;
1766
1767 st = per_cpu_ptr(tbl->stats, cpu);
1768 ndst.ndts_allocs += st->allocs;
1769 ndst.ndts_destroys += st->destroys;
1770 ndst.ndts_hash_grows += st->hash_grows;
1771 ndst.ndts_res_failed += st->res_failed;
1772 ndst.ndts_lookups += st->lookups;
1773 ndst.ndts_hits += st->hits;
1774 ndst.ndts_rcv_probes_mcast += st->rcv_probes_mcast;
1775 ndst.ndts_rcv_probes_ucast += st->rcv_probes_ucast;
1776 ndst.ndts_periodic_gc_runs += st->periodic_gc_runs;
1777 ndst.ndts_forced_gc_runs += st->forced_gc_runs;
1778 }
1779
1780 NLA_PUT(skb, NDTA_STATS, sizeof(ndst), &ndst);
1781 }
1782
1783 BUG_ON(tbl->parms.dev);
1784 if (neightbl_fill_parms(skb, &tbl->parms) < 0)
1785 goto nla_put_failure;
1786
1787 read_unlock_bh(&tbl->lock);
1788 return nlmsg_end(skb, nlh);
1789
1790 nla_put_failure:
1791 read_unlock_bh(&tbl->lock);
1792 nlmsg_cancel(skb, nlh);
1793 return -EMSGSIZE;
1794 }
1795
1796 static int neightbl_fill_param_info(struct sk_buff *skb,
1797 struct neigh_table *tbl,
1798 struct neigh_parms *parms,
1799 u32 pid, u32 seq, int type,
1800 unsigned int flags)
1801 {
1802 struct ndtmsg *ndtmsg;
1803 struct nlmsghdr *nlh;
1804
1805 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndtmsg), flags);
1806 if (nlh == NULL)
1807 return -EMSGSIZE;
1808
1809 ndtmsg = nlmsg_data(nlh);
1810
1811 read_lock_bh(&tbl->lock);
1812 ndtmsg->ndtm_family = tbl->family;
1813 ndtmsg->ndtm_pad1 = 0;
1814 ndtmsg->ndtm_pad2 = 0;
1815
1816 if (nla_put_string(skb, NDTA_NAME, tbl->id) < 0 ||
1817 neightbl_fill_parms(skb, parms) < 0)
1818 goto errout;
1819
1820 read_unlock_bh(&tbl->lock);
1821 return nlmsg_end(skb, nlh);
1822 errout:
1823 read_unlock_bh(&tbl->lock);
1824 nlmsg_cancel(skb, nlh);
1825 return -EMSGSIZE;
1826 }
1827
1828 static const struct nla_policy nl_neightbl_policy[NDTA_MAX+1] = {
1829 [NDTA_NAME] = { .type = NLA_STRING },
1830 [NDTA_THRESH1] = { .type = NLA_U32 },
1831 [NDTA_THRESH2] = { .type = NLA_U32 },
1832 [NDTA_THRESH3] = { .type = NLA_U32 },
1833 [NDTA_GC_INTERVAL] = { .type = NLA_U64 },
1834 [NDTA_PARMS] = { .type = NLA_NESTED },
1835 };
1836
1837 static const struct nla_policy nl_ntbl_parm_policy[NDTPA_MAX+1] = {
1838 [NDTPA_IFINDEX] = { .type = NLA_U32 },
1839 [NDTPA_QUEUE_LEN] = { .type = NLA_U32 },
1840 [NDTPA_PROXY_QLEN] = { .type = NLA_U32 },
1841 [NDTPA_APP_PROBES] = { .type = NLA_U32 },
1842 [NDTPA_UCAST_PROBES] = { .type = NLA_U32 },
1843 [NDTPA_MCAST_PROBES] = { .type = NLA_U32 },
1844 [NDTPA_BASE_REACHABLE_TIME] = { .type = NLA_U64 },
1845 [NDTPA_GC_STALETIME] = { .type = NLA_U64 },
1846 [NDTPA_DELAY_PROBE_TIME] = { .type = NLA_U64 },
1847 [NDTPA_RETRANS_TIME] = { .type = NLA_U64 },
1848 [NDTPA_ANYCAST_DELAY] = { .type = NLA_U64 },
1849 [NDTPA_PROXY_DELAY] = { .type = NLA_U64 },
1850 [NDTPA_LOCKTIME] = { .type = NLA_U64 },
1851 };
1852
1853 static int neightbl_set(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
1854 {
1855 struct net *net = sock_net(skb->sk);
1856 struct neigh_table *tbl;
1857 struct ndtmsg *ndtmsg;
1858 struct nlattr *tb[NDTA_MAX+1];
1859 int err;
1860
1861 err = nlmsg_parse(nlh, sizeof(*ndtmsg), tb, NDTA_MAX,
1862 nl_neightbl_policy);
1863 if (err < 0)
1864 goto errout;
1865
1866 if (tb[NDTA_NAME] == NULL) {
1867 err = -EINVAL;
1868 goto errout;
1869 }
1870
1871 ndtmsg = nlmsg_data(nlh);
1872 read_lock(&neigh_tbl_lock);
1873 for (tbl = neigh_tables; tbl; tbl = tbl->next) {
1874 if (ndtmsg->ndtm_family && tbl->family != ndtmsg->ndtm_family)
1875 continue;
1876
1877 if (nla_strcmp(tb[NDTA_NAME], tbl->id) == 0)
1878 break;
1879 }
1880
1881 if (tbl == NULL) {
1882 err = -ENOENT;
1883 goto errout_locked;
1884 }
1885
1886 /*
1887 * We acquire tbl->lock to be nice to the periodic timers and
1888 * make sure they always see a consistent set of values.
1889 */
1890 write_lock_bh(&tbl->lock);
1891
1892 if (tb[NDTA_PARMS]) {
1893 struct nlattr *tbp[NDTPA_MAX+1];
1894 struct neigh_parms *p;
1895 int i, ifindex = 0;
1896
1897 err = nla_parse_nested(tbp, NDTPA_MAX, tb[NDTA_PARMS],
1898 nl_ntbl_parm_policy);
1899 if (err < 0)
1900 goto errout_tbl_lock;
1901
1902 if (tbp[NDTPA_IFINDEX])
1903 ifindex = nla_get_u32(tbp[NDTPA_IFINDEX]);
1904
1905 p = lookup_neigh_params(tbl, net, ifindex);
1906 if (p == NULL) {
1907 err = -ENOENT;
1908 goto errout_tbl_lock;
1909 }
1910
1911 for (i = 1; i <= NDTPA_MAX; i++) {
1912 if (tbp[i] == NULL)
1913 continue;
1914
1915 switch (i) {
1916 case NDTPA_QUEUE_LEN:
1917 p->queue_len = nla_get_u32(tbp[i]);
1918 break;
1919 case NDTPA_PROXY_QLEN:
1920 p->proxy_qlen = nla_get_u32(tbp[i]);
1921 break;
1922 case NDTPA_APP_PROBES:
1923 p->app_probes = nla_get_u32(tbp[i]);
1924 break;
1925 case NDTPA_UCAST_PROBES:
1926 p->ucast_probes = nla_get_u32(tbp[i]);
1927 break;
1928 case NDTPA_MCAST_PROBES:
1929 p->mcast_probes = nla_get_u32(tbp[i]);
1930 break;
1931 case NDTPA_BASE_REACHABLE_TIME:
1932 p->base_reachable_time = nla_get_msecs(tbp[i]);
1933 break;
1934 case NDTPA_GC_STALETIME:
1935 p->gc_staletime = nla_get_msecs(tbp[i]);
1936 break;
1937 case NDTPA_DELAY_PROBE_TIME:
1938 p->delay_probe_time = nla_get_msecs(tbp[i]);
1939 break;
1940 case NDTPA_RETRANS_TIME:
1941 p->retrans_time = nla_get_msecs(tbp[i]);
1942 break;
1943 case NDTPA_ANYCAST_DELAY:
1944 p->anycast_delay = nla_get_msecs(tbp[i]);
1945 break;
1946 case NDTPA_PROXY_DELAY:
1947 p->proxy_delay = nla_get_msecs(tbp[i]);
1948 break;
1949 case NDTPA_LOCKTIME:
1950 p->locktime = nla_get_msecs(tbp[i]);
1951 break;
1952 }
1953 }
1954 }
1955
1956 if (tb[NDTA_THRESH1])
1957 tbl->gc_thresh1 = nla_get_u32(tb[NDTA_THRESH1]);
1958
1959 if (tb[NDTA_THRESH2])
1960 tbl->gc_thresh2 = nla_get_u32(tb[NDTA_THRESH2]);
1961
1962 if (tb[NDTA_THRESH3])
1963 tbl->gc_thresh3 = nla_get_u32(tb[NDTA_THRESH3]);
1964
1965 if (tb[NDTA_GC_INTERVAL])
1966 tbl->gc_interval = nla_get_msecs(tb[NDTA_GC_INTERVAL]);
1967
1968 err = 0;
1969
1970 errout_tbl_lock:
1971 write_unlock_bh(&tbl->lock);
1972 errout_locked:
1973 read_unlock(&neigh_tbl_lock);
1974 errout:
1975 return err;
1976 }
1977
1978 static int neightbl_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
1979 {
1980 struct net *net = sock_net(skb->sk);
1981 int family, tidx, nidx = 0;
1982 int tbl_skip = cb->args[0];
1983 int neigh_skip = cb->args[1];
1984 struct neigh_table *tbl;
1985
1986 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
1987
1988 read_lock(&neigh_tbl_lock);
1989 for (tbl = neigh_tables, tidx = 0; tbl; tbl = tbl->next, tidx++) {
1990 struct neigh_parms *p;
1991
1992 if (tidx < tbl_skip || (family && tbl->family != family))
1993 continue;
1994
1995 if (neightbl_fill_info(skb, tbl, NETLINK_CB(cb->skb).pid,
1996 cb->nlh->nlmsg_seq, RTM_NEWNEIGHTBL,
1997 NLM_F_MULTI) <= 0)
1998 break;
1999
2000 for (nidx = 0, p = tbl->parms.next; p; p = p->next) {
2001 if (!net_eq(neigh_parms_net(p), net))
2002 continue;
2003
2004 if (nidx++ < neigh_skip)
2005 continue;
2006
2007 if (neightbl_fill_param_info(skb, tbl, p,
2008 NETLINK_CB(cb->skb).pid,
2009 cb->nlh->nlmsg_seq,
2010 RTM_NEWNEIGHTBL,
2011 NLM_F_MULTI) <= 0)
2012 goto out;
2013 }
2014
2015 neigh_skip = 0;
2016 }
2017 out:
2018 read_unlock(&neigh_tbl_lock);
2019 cb->args[0] = tidx;
2020 cb->args[1] = nidx;
2021
2022 return skb->len;
2023 }
2024
2025 static int neigh_fill_info(struct sk_buff *skb, struct neighbour *neigh,
2026 u32 pid, u32 seq, int type, unsigned int flags)
2027 {
2028 unsigned long now = jiffies;
2029 struct nda_cacheinfo ci;
2030 struct nlmsghdr *nlh;
2031 struct ndmsg *ndm;
2032
2033 nlh = nlmsg_put(skb, pid, seq, type, sizeof(*ndm), flags);
2034 if (nlh == NULL)
2035 return -EMSGSIZE;
2036
2037 ndm = nlmsg_data(nlh);
2038 ndm->ndm_family = neigh->ops->family;
2039 ndm->ndm_pad1 = 0;
2040 ndm->ndm_pad2 = 0;
2041 ndm->ndm_flags = neigh->flags;
2042 ndm->ndm_type = neigh->type;
2043 ndm->ndm_ifindex = neigh->dev->ifindex;
2044
2045 NLA_PUT(skb, NDA_DST, neigh->tbl->key_len, neigh->primary_key);
2046
2047 read_lock_bh(&neigh->lock);
2048 ndm->ndm_state = neigh->nud_state;
2049 if ((neigh->nud_state & NUD_VALID) &&
2050 nla_put(skb, NDA_LLADDR, neigh->dev->addr_len, neigh->ha) < 0) {
2051 read_unlock_bh(&neigh->lock);
2052 goto nla_put_failure;
2053 }
2054
2055 ci.ndm_used = jiffies_to_clock_t(now - neigh->used);
2056 ci.ndm_confirmed = jiffies_to_clock_t(now - neigh->confirmed);
2057 ci.ndm_updated = jiffies_to_clock_t(now - neigh->updated);
2058 ci.ndm_refcnt = atomic_read(&neigh->refcnt) - 1;
2059 read_unlock_bh(&neigh->lock);
2060
2061 NLA_PUT_U32(skb, NDA_PROBES, atomic_read(&neigh->probes));
2062 NLA_PUT(skb, NDA_CACHEINFO, sizeof(ci), &ci);
2063
2064 return nlmsg_end(skb, nlh);
2065
2066 nla_put_failure:
2067 nlmsg_cancel(skb, nlh);
2068 return -EMSGSIZE;
2069 }
2070
2071 static void neigh_update_notify(struct neighbour *neigh)
2072 {
2073 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, neigh);
2074 __neigh_notify(neigh, RTM_NEWNEIGH, 0);
2075 }
2076
2077 static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2078 struct netlink_callback *cb)
2079 {
2080 struct net * net = sock_net(skb->sk);
2081 struct neighbour *n;
2082 int rc, h, s_h = cb->args[1];
2083 int idx, s_idx = idx = cb->args[2];
2084
2085 read_lock_bh(&tbl->lock);
2086 for (h = 0; h <= tbl->hash_mask; h++) {
2087 if (h < s_h)
2088 continue;
2089 if (h > s_h)
2090 s_idx = 0;
2091 for (n = tbl->hash_buckets[h], idx = 0; n; n = n->next) {
2092 int lidx;
2093 if (dev_net(n->dev) != net)
2094 continue;
2095 lidx = idx++;
2096 if (lidx < s_idx)
2097 continue;
2098 if (neigh_fill_info(skb, n, NETLINK_CB(cb->skb).pid,
2099 cb->nlh->nlmsg_seq,
2100 RTM_NEWNEIGH,
2101 NLM_F_MULTI) <= 0) {
2102 read_unlock_bh(&tbl->lock);
2103 rc = -1;
2104 goto out;
2105 }
2106 }
2107 }
2108 read_unlock_bh(&tbl->lock);
2109 rc = skb->len;
2110 out:
2111 cb->args[1] = h;
2112 cb->args[2] = idx;
2113 return rc;
2114 }
2115
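/* RTM_GETNEIGH dump handler: iterate all registered tables, optionally
 * filtered by address family, and dump each one via neigh_dump_table().
 */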
2116 static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2117 {
2118 struct neigh_table *tbl;
2119 int t, family, s_t;
2120
2121 read_lock(&neigh_tbl_lock);
2122 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
2123 s_t = cb->args[0];
2124
2125 for (tbl = neigh_tables, t = 0; tbl; tbl = tbl->next, t++) {
2126 if (t < s_t || (family && tbl->family != family))
2127 continue;
2128 if (t > s_t)
2129 memset(&cb->args[1], 0, sizeof(cb->args) -
2130 sizeof(cb->args[0]));
2131 if (neigh_dump_table(tbl, skb, cb) < 0)
2132 break;
2133 }
2134 read_unlock(&neigh_tbl_lock);
2135
2136 cb->args[0] = t;
2137 return skb->len;
2138 }
2139
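/* Call @cb for every neighbour in @tbl.  The table read lock is held with
 * BHs disabled, so the callback must not sleep or modify the table.
 */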
2140 void neigh_for_each(struct neigh_table *tbl, void (*cb)(struct neighbour *, void *), void *cookie)
2141 {
2142 int chain;
2143
2144 read_lock_bh(&tbl->lock);
2145 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2146 struct neighbour *n;
2147
2148 for (n = tbl->hash_buckets[chain]; n; n = n->next)
2149 cb(n, cookie);
2150 }
2151 read_unlock_bh(&tbl->lock);
2152 }
2153 EXPORT_SYMBOL(neigh_for_each);
2154
2155 /* The tbl->lock must be held as a writer and BH disabled. */
2156 void __neigh_for_each_release(struct neigh_table *tbl,
2157 int (*cb)(struct neighbour *))
2158 {
2159 int chain;
2160
2161 for (chain = 0; chain <= tbl->hash_mask; chain++) {
2162 struct neighbour *n, **np;
2163
2164 np = &tbl->hash_buckets[chain];
2165 while ((n = *np) != NULL) {
2166 int release;
2167
2168 write_lock(&n->lock);
2169 release = cb(n);
2170 if (release) {
2171 *np = n->next;
2172 n->dead = 1;
2173 } else
2174 np = &n->next;
2175 write_unlock(&n->lock);
2176 if (release)
2177 neigh_cleanup_and_release(n);
2178 }
2179 }
2180 }
2181 EXPORT_SYMBOL(__neigh_for_each_release);
2182
2183 #ifdef CONFIG_PROC_FS
2184
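/* seq_file helpers for /proc: find the first neighbour entry visible in
 * this namespace, honouring NEIGH_SEQ_SKIP_NOARP and any protocol-supplied
 * sub-iterator.
 */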
2185 static struct neighbour *neigh_get_first(struct seq_file *seq)
2186 {
2187 struct neigh_seq_state *state = seq->private;
2188 struct net *net = seq_file_net(seq);
2189 struct neigh_table *tbl = state->tbl;
2190 struct neighbour *n = NULL;
2191 int bucket = state->bucket;
2192
2193 state->flags &= ~NEIGH_SEQ_IS_PNEIGH;
2194 for (bucket = 0; bucket <= tbl->hash_mask; bucket++) {
2195 n = tbl->hash_buckets[bucket];
2196
2197 while (n) {
2198 if (!net_eq(dev_net(n->dev), net))
2199 goto next;
2200 if (state->neigh_sub_iter) {
2201 loff_t fakep = 0;
2202 void *v;
2203
2204 v = state->neigh_sub_iter(state, n, &fakep);
2205 if (!v)
2206 goto next;
2207 }
2208 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2209 break;
2210 if (n->nud_state & ~NUD_NOARP)
2211 break;
2212 next:
2213 n = n->next;
2214 }
2215
2216 if (n)
2217 break;
2218 }
2219 state->bucket = bucket;
2220
2221 return n;
2222 }
2223
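/* Advance to the next visible neighbour, moving on to later hash buckets
 * as the current one is exhausted.  Decrements *pos (when given) so the
 * caller can seek to an absolute position.
 */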
2224 static struct neighbour *neigh_get_next(struct seq_file *seq,
2225 struct neighbour *n,
2226 loff_t *pos)
2227 {
2228 struct neigh_seq_state *state = seq->private;
2229 struct net *net = seq_file_net(seq);
2230 struct neigh_table *tbl = state->tbl;
2231
2232 if (state->neigh_sub_iter) {
2233 void *v = state->neigh_sub_iter(state, n, pos);
2234 if (v)
2235 return n;
2236 }
2237 n = n->next;
2238
2239 while (1) {
2240 while (n) {
2241 if (!net_eq(dev_net(n->dev), net))
2242 goto next;
2243 if (state->neigh_sub_iter) {
2244 void *v = state->neigh_sub_iter(state, n, pos);
2245 if (v)
2246 return n;
2247 goto next;
2248 }
2249 if (!(state->flags & NEIGH_SEQ_SKIP_NOARP))
2250 break;
2251
2252 if (n->nud_state & ~NUD_NOARP)
2253 break;
2254 next:
2255 n = n->next;
2256 }
2257
2258 if (n)
2259 break;
2260
2261 if (++state->bucket > tbl->hash_mask)
2262 break;
2263
2264 n = tbl->hash_buckets[state->bucket];
2265 }
2266
2267 if (n && pos)
2268 --(*pos);
2269 return n;
2270 }
2271
2272 static struct neighbour *neigh_get_idx(struct seq_file *seq, loff_t *pos)
2273 {
2274 struct neighbour *n = neigh_get_first(seq);
2275
2276 if (n) {
2277 --(*pos);
2278 while (*pos) {
2279 n = neigh_get_next(seq, n, pos);
2280 if (!n)
2281 break;
2282 }
2283 }
2284 return *pos ? NULL : n;
2285 }
2286
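/* Proxy-neighbour (pneigh) counterparts of the iterators above: walk the
 * PNEIGH_HASHMASK-sized proxy hash, skipping entries from other namespaces.
 */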
2287 static struct pneigh_entry *pneigh_get_first(struct seq_file *seq)
2288 {
2289 struct neigh_seq_state *state = seq->private;
2290 struct net *net = seq_file_net(seq);
2291 struct neigh_table *tbl = state->tbl;
2292 struct pneigh_entry *pn = NULL;
2293 int bucket = state->bucket;
2294
2295 state->flags |= NEIGH_SEQ_IS_PNEIGH;
2296 for (bucket = 0; bucket <= PNEIGH_HASHMASK; bucket++) {
2297 pn = tbl->phash_buckets[bucket];
2298 while (pn && !net_eq(pneigh_net(pn), net))
2299 pn = pn->next;
2300 if (pn)
2301 break;
2302 }
2303 state->bucket = bucket;
2304
2305 return pn;
2306 }
2307
2308 static struct pneigh_entry *pneigh_get_next(struct seq_file *seq,
2309 struct pneigh_entry *pn,
2310 loff_t *pos)
2311 {
2312 struct neigh_seq_state *state = seq->private;
2313 struct net *net = seq_file_net(seq);
2314 struct neigh_table *tbl = state->tbl;
2315
2316 pn = pn->next;
2317 while (!pn) {
2318 if (++state->bucket > PNEIGH_HASHMASK)
2319 break;
2320 pn = tbl->phash_buckets[state->bucket];
2321 while (pn && !net_eq(pneigh_net(pn), net))
2322 pn = pn->next;
2323 if (pn)
2324 break;
2325 }
2326
2327 if (pn && pos)
2328 --(*pos);
2329
2330 return pn;
2331 }
2332
2333 static struct pneigh_entry *pneigh_get_idx(struct seq_file *seq, loff_t *pos)
2334 {
2335 struct pneigh_entry *pn = pneigh_get_first(seq);
2336
2337 if (pn) {
2338 --(*pos);
2339 while (*pos) {
2340 pn = pneigh_get_next(seq, pn, pos);
2341 if (!pn)
2342 break;
2343 }
2344 }
2345 return *pos ? NULL : pn;
2346 }
2347
2348 static void *neigh_get_idx_any(struct seq_file *seq, loff_t *pos)
2349 {
2350 struct neigh_seq_state *state = seq->private;
2351 void *rc;
2352 loff_t idxpos = *pos;
2353
2354 rc = neigh_get_idx(seq, &idxpos);
2355 if (!rc && !(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2356 rc = pneigh_get_idx(seq, &idxpos);
2357
2358 return rc;
2359 }
2360
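/* Generic ->start for the per-protocol /proc files (e.g. ARP, ndisc).
 * Takes tbl->lock for reading with BHs disabled; it stays held until
 * neigh_seq_stop() runs.
 */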
2361 void *neigh_seq_start(struct seq_file *seq, loff_t *pos, struct neigh_table *tbl, unsigned int neigh_seq_flags)
2362 __acquires(tbl->lock)
2363 {
2364 struct neigh_seq_state *state = seq->private;
2365
2366 state->tbl = tbl;
2367 state->bucket = 0;
2368 state->flags = (neigh_seq_flags & ~NEIGH_SEQ_IS_PNEIGH);
2369
2370 read_lock_bh(&tbl->lock);
2371
2372 return *pos ? neigh_get_idx_any(seq, pos) : SEQ_START_TOKEN;
2373 }
2374 EXPORT_SYMBOL(neigh_seq_start);
2375
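/* Generic ->next: step through the main hash first, then fall through to
 * the proxy entries unless the caller asked for NEIGH_SEQ_NEIGH_ONLY.
 */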
2376 void *neigh_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2377 {
2378 struct neigh_seq_state *state;
2379 void *rc;
2380
2381 if (v == SEQ_START_TOKEN) {
2382 rc = neigh_get_first(seq);
2383 goto out;
2384 }
2385
2386 state = seq->private;
2387 if (!(state->flags & NEIGH_SEQ_IS_PNEIGH)) {
2388 rc = neigh_get_next(seq, v, NULL);
2389 if (rc)
2390 goto out;
2391 if (!(state->flags & NEIGH_SEQ_NEIGH_ONLY))
2392 rc = pneigh_get_first(seq);
2393 } else {
2394 BUG_ON(state->flags & NEIGH_SEQ_NEIGH_ONLY);
2395 rc = pneigh_get_next(seq, v, NULL);
2396 }
2397 out:
2398 ++(*pos);
2399 return rc;
2400 }
2401 EXPORT_SYMBOL(neigh_seq_next);
2402
2403 void neigh_seq_stop(struct seq_file *seq, void *v)
2404 __releases(tbl->lock)
2405 {
2406 struct neigh_seq_state *state = seq->private;
2407 struct neigh_table *tbl = state->tbl;
2408
2409 read_unlock_bh(&tbl->lock);
2410 }
2411 EXPORT_SYMBOL(neigh_seq_stop);
2412
2413 /* statistics via seq_file */
2414
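/* The per-table statistics file walks the possible CPUs, encoding cpu+1 in
 * *pos; SEQ_START_TOKEN is used once to print the column header.
 */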
2415 static void *neigh_stat_seq_start(struct seq_file *seq, loff_t *pos)
2416 {
2417 struct proc_dir_entry *pde = seq->private;
2418 struct neigh_table *tbl = pde->data;
2419 int cpu;
2420
2421 if (*pos == 0)
2422 return SEQ_START_TOKEN;
2423
2424 for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
2425 if (!cpu_possible(cpu))
2426 continue;
2427 *pos = cpu+1;
2428 return per_cpu_ptr(tbl->stats, cpu);
2429 }
2430 return NULL;
2431 }
2432
2433 static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2434 {
2435 struct proc_dir_entry *pde = seq->private;
2436 struct neigh_table *tbl = pde->data;
2437 int cpu;
2438
2439 for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
2440 if (!cpu_possible(cpu))
2441 continue;
2442 *pos = cpu+1;
2443 return per_cpu_ptr(tbl->stats, cpu);
2444 }
2445 return NULL;
2446 }
2447
2448 static void neigh_stat_seq_stop(struct seq_file *seq, void *v)
2449 {
2450
2451 }
2452
2453 static int neigh_stat_seq_show(struct seq_file *seq, void *v)
2454 {
2455 struct proc_dir_entry *pde = seq->private;
2456 struct neigh_table *tbl = pde->data;
2457 struct neigh_statistics *st = v;
2458
2459 if (v == SEQ_START_TOKEN) {
2460 seq_printf(seq, "entries allocs destroys hash_grows lookups hits res_failed rcv_probes_mcast rcv_probes_ucast periodic_gc_runs forced_gc_runs unresolved_discards\n");
2461 return 0;
2462 }
2463
2464 seq_printf(seq, "%08x %08lx %08lx %08lx %08lx %08lx %08lx "
2465 "%08lx %08lx %08lx %08lx %08lx\n",
2466 atomic_read(&tbl->entries),
2467
2468 st->allocs,
2469 st->destroys,
2470 st->hash_grows,
2471
2472 st->lookups,
2473 st->hits,
2474
2475 st->res_failed,
2476
2477 st->rcv_probes_mcast,
2478 st->rcv_probes_ucast,
2479
2480 st->periodic_gc_runs,
2481 st->forced_gc_runs,
2482 st->unres_discards
2483 );
2484
2485 return 0;
2486 }
2487
2488 static const struct seq_operations neigh_stat_seq_ops = {
2489 .start = neigh_stat_seq_start,
2490 .next = neigh_stat_seq_next,
2491 .stop = neigh_stat_seq_stop,
2492 .show = neigh_stat_seq_show,
2493 };
2494
2495 static int neigh_stat_seq_open(struct inode *inode, struct file *file)
2496 {
2497 int ret = seq_open(file, &neigh_stat_seq_ops);
2498
2499 if (!ret) {
2500 struct seq_file *sf = file->private_data;
2501 sf->private = PDE(inode);
2502 }
2503 return ret;
2504 }
2505
2506 static const struct file_operations neigh_stat_seq_fops = {
2507 .owner = THIS_MODULE,
2508 .open = neigh_stat_seq_open,
2509 .read = seq_read,
2510 .llseek = seq_lseek,
2511 .release = seq_release,
2512 };
2513
2514 #endif /* CONFIG_PROC_FS */
2515
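/* Worst-case size of a neighbour netlink message, used to size the
 * notification skb allocated in __neigh_notify().
 */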
2516 static inline size_t neigh_nlmsg_size(void)
2517 {
2518 return NLMSG_ALIGN(sizeof(struct ndmsg))
2519 + nla_total_size(MAX_ADDR_LEN) /* NDA_DST */
2520 + nla_total_size(MAX_ADDR_LEN) /* NDA_LLADDR */
2521 + nla_total_size(sizeof(struct nda_cacheinfo))
2522 + nla_total_size(4); /* NDA_PROBES */
2523 }
2524
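/* Build a neighbour message and multicast it to RTNLGRP_NEIGH listeners,
 * recording the error against the group if allocation or the send fails.
 */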
2525 static void __neigh_notify(struct neighbour *n, int type, int flags)
2526 {
2527 struct net *net = dev_net(n->dev);
2528 struct sk_buff *skb;
2529 int err = -ENOBUFS;
2530
2531 skb = nlmsg_new(neigh_nlmsg_size(), GFP_ATOMIC);
2532 if (skb == NULL)
2533 goto errout;
2534
2535 err = neigh_fill_info(skb, n, 0, 0, type, flags);
2536 if (err < 0) {
2537 /* -EMSGSIZE implies BUG in neigh_nlmsg_size() */
2538 WARN_ON(err == -EMSGSIZE);
2539 kfree_skb(skb);
2540 goto errout;
2541 }
2542 err = rtnl_notify(skb, net, 0, RTNLGRP_NEIGH, NULL, GFP_ATOMIC);
2543 errout:
2544 if (err < 0)
2545 rtnl_set_sk_err(net, RTNLGRP_NEIGH, err);
2546 }
2547
2548 #ifdef CONFIG_ARPD
2549 void neigh_app_ns(struct neighbour *n)
2550 {
2551 __neigh_notify(n, RTM_GETNEIGH, NLM_F_REQUEST);
2552 }
2553 EXPORT_SYMBOL(neigh_app_ns);
2554 #endif /* CONFIG_ARPD */
2555
2556 #ifdef CONFIG_SYSCTL
2557
2558 static struct neigh_sysctl_table {
2559 struct ctl_table_header *sysctl_header;
2560 struct ctl_table neigh_vars[__NET_NEIGH_MAX];
2561 char *dev_name;
2562 } neigh_sysctl_template __read_mostly = {
2563 .neigh_vars = {
2564 {
2565 .ctl_name = NET_NEIGH_MCAST_SOLICIT,
2566 .procname = "mcast_solicit",
2567 .maxlen = sizeof(int),
2568 .mode = 0644,
2569 .proc_handler = &proc_dointvec,
2570 },
2571 {
2572 .ctl_name = NET_NEIGH_UCAST_SOLICIT,
2573 .procname = "ucast_solicit",
2574 .maxlen = sizeof(int),
2575 .mode = 0644,
2576 .proc_handler = &proc_dointvec,
2577 },
2578 {
2579 .ctl_name = NET_NEIGH_APP_SOLICIT,
2580 .procname = "app_solicit",
2581 .maxlen = sizeof(int),
2582 .mode = 0644,
2583 .proc_handler = &proc_dointvec,
2584 },
2585 {
2586 .procname = "retrans_time",
2587 .maxlen = sizeof(int),
2588 .mode = 0644,
2589 .proc_handler = &proc_dointvec_userhz_jiffies,
2590 },
2591 {
2592 .ctl_name = NET_NEIGH_REACHABLE_TIME,
2593 .procname = "base_reachable_time",
2594 .maxlen = sizeof(int),
2595 .mode = 0644,
2596 .proc_handler = &proc_dointvec_jiffies,
2597 .strategy = &sysctl_jiffies,
2598 },
2599 {
2600 .ctl_name = NET_NEIGH_DELAY_PROBE_TIME,
2601 .procname = "delay_first_probe_time",
2602 .maxlen = sizeof(int),
2603 .mode = 0644,
2604 .proc_handler = &proc_dointvec_jiffies,
2605 .strategy = &sysctl_jiffies,
2606 },
2607 {
2608 .ctl_name = NET_NEIGH_GC_STALE_TIME,
2609 .procname = "gc_stale_time",
2610 .maxlen = sizeof(int),
2611 .mode = 0644,
2612 .proc_handler = &proc_dointvec_jiffies,
2613 .strategy = &sysctl_jiffies,
2614 },
2615 {
2616 .ctl_name = NET_NEIGH_UNRES_QLEN,
2617 .procname = "unres_qlen",
2618 .maxlen = sizeof(int),
2619 .mode = 0644,
2620 .proc_handler = &proc_dointvec,
2621 },
2622 {
2623 .ctl_name = NET_NEIGH_PROXY_QLEN,
2624 .procname = "proxy_qlen",
2625 .maxlen = sizeof(int),
2626 .mode = 0644,
2627 .proc_handler = &proc_dointvec,
2628 },
2629 {
2630 .procname = "anycast_delay",
2631 .maxlen = sizeof(int),
2632 .mode = 0644,
2633 .proc_handler = &proc_dointvec_userhz_jiffies,
2634 },
2635 {
2636 .procname = "proxy_delay",
2637 .maxlen = sizeof(int),
2638 .mode = 0644,
2639 .proc_handler = &proc_dointvec_userhz_jiffies,
2640 },
2641 {
2642 .procname = "locktime",
2643 .maxlen = sizeof(int),
2644 .mode = 0644,
2645 .proc_handler = &proc_dointvec_userhz_jiffies,
2646 },
2647 {
2648 .ctl_name = NET_NEIGH_RETRANS_TIME_MS,
2649 .procname = "retrans_time_ms",
2650 .maxlen = sizeof(int),
2651 .mode = 0644,
2652 .proc_handler = &proc_dointvec_ms_jiffies,
2653 .strategy = &sysctl_ms_jiffies,
2654 },
2655 {
2656 .ctl_name = NET_NEIGH_REACHABLE_TIME_MS,
2657 .procname = "base_reachable_time_ms",
2658 .maxlen = sizeof(int),
2659 .mode = 0644,
2660 .proc_handler = &proc_dointvec_ms_jiffies,
2661 .strategy = &sysctl_ms_jiffies,
2662 },
2663 {
2664 .ctl_name = NET_NEIGH_GC_INTERVAL,
2665 .procname = "gc_interval",
2666 .maxlen = sizeof(int),
2667 .mode = 0644,
2668 .proc_handler = &proc_dointvec_jiffies,
2669 .strategy = &sysctl_jiffies,
2670 },
2671 {
2672 .ctl_name = NET_NEIGH_GC_THRESH1,
2673 .procname = "gc_thresh1",
2674 .maxlen = sizeof(int),
2675 .mode = 0644,
2676 .proc_handler = &proc_dointvec,
2677 },
2678 {
2679 .ctl_name = NET_NEIGH_GC_THRESH2,
2680 .procname = "gc_thresh2",
2681 .maxlen = sizeof(int),
2682 .mode = 0644,
2683 .proc_handler = &proc_dointvec,
2684 },
2685 {
2686 .ctl_name = NET_NEIGH_GC_THRESH3,
2687 .procname = "gc_thresh3",
2688 .maxlen = sizeof(int),
2689 .mode = 0644,
2690 .proc_handler = &proc_dointvec,
2691 },
2692 {},
2693 },
2694 };
2695
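/* Clone the sysctl template, point each entry at the matching field of @p
 * and register the table under net/<proto>/neigh/<device|default>/.
 * Protocols may override the retrans/reachable time handlers via
 * @handler and @strategy.
 */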
2696 int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
2697 int p_id, int pdev_id, char *p_name,
2698 proc_handler *handler, ctl_handler *strategy)
2699 {
2700 struct neigh_sysctl_table *t;
2701 const char *dev_name_source = NULL;
2702
2703 #define NEIGH_CTL_PATH_ROOT 0
2704 #define NEIGH_CTL_PATH_PROTO 1
2705 #define NEIGH_CTL_PATH_NEIGH 2
2706 #define NEIGH_CTL_PATH_DEV 3
2707
2708 struct ctl_path neigh_path[] = {
2709 { .procname = "net", .ctl_name = CTL_NET, },
2710 { .procname = "proto", .ctl_name = 0, },
2711 { .procname = "neigh", .ctl_name = 0, },
2712 { .procname = "default", .ctl_name = NET_PROTO_CONF_DEFAULT, },
2713 { },
2714 };
2715
2716 t = kmemdup(&neigh_sysctl_template, sizeof(*t), GFP_KERNEL);
2717 if (!t)
2718 goto err;
2719
2720 t->neigh_vars[0].data = &p->mcast_probes;
2721 t->neigh_vars[1].data = &p->ucast_probes;
2722 t->neigh_vars[2].data = &p->app_probes;
2723 t->neigh_vars[3].data = &p->retrans_time;
2724 t->neigh_vars[4].data = &p->base_reachable_time;
2725 t->neigh_vars[5].data = &p->delay_probe_time;
2726 t->neigh_vars[6].data = &p->gc_staletime;
2727 t->neigh_vars[7].data = &p->queue_len;
2728 t->neigh_vars[8].data = &p->proxy_qlen;
2729 t->neigh_vars[9].data = &p->anycast_delay;
2730 t->neigh_vars[10].data = &p->proxy_delay;
2731 t->neigh_vars[11].data = &p->locktime;
2732 t->neigh_vars[12].data = &p->retrans_time;
2733 t->neigh_vars[13].data = &p->base_reachable_time;
2734
2735 if (dev) {
2736 dev_name_source = dev->name;
2737 neigh_path[NEIGH_CTL_PATH_DEV].ctl_name = dev->ifindex;
2738 /* Terminate the table early */
2739 memset(&t->neigh_vars[14], 0, sizeof(t->neigh_vars[14]));
2740 } else {
2741 dev_name_source = neigh_path[NEIGH_CTL_PATH_DEV].procname;
2742 t->neigh_vars[14].data = (int *)(p + 1);
2743 t->neigh_vars[15].data = (int *)(p + 1) + 1;
2744 t->neigh_vars[16].data = (int *)(p + 1) + 2;
2745 t->neigh_vars[17].data = (int *)(p + 1) + 3;
2746 }
2747
2748
2749 if (handler || strategy) {
2750 /* RetransTime */
2751 t->neigh_vars[3].proc_handler = handler;
2752 t->neigh_vars[3].strategy = strategy;
2753 t->neigh_vars[3].extra1 = dev;
2754 if (!strategy)
2755 t->neigh_vars[3].ctl_name = CTL_UNNUMBERED;
2756 /* ReachableTime */
2757 t->neigh_vars[4].proc_handler = handler;
2758 t->neigh_vars[4].strategy = strategy;
2759 t->neigh_vars[4].extra1 = dev;
2760 if (!strategy)
2761 t->neigh_vars[4].ctl_name = CTL_UNNUMBERED;
2762 		/* RetransTime (in milliseconds) */
2763 t->neigh_vars[12].proc_handler = handler;
2764 t->neigh_vars[12].strategy = strategy;
2765 t->neigh_vars[12].extra1 = dev;
2766 if (!strategy)
2767 t->neigh_vars[12].ctl_name = CTL_UNNUMBERED;
2768 /* ReachableTime (in milliseconds) */
2769 t->neigh_vars[13].proc_handler = handler;
2770 t->neigh_vars[13].strategy = strategy;
2771 t->neigh_vars[13].extra1 = dev;
2772 if (!strategy)
2773 t->neigh_vars[13].ctl_name = CTL_UNNUMBERED;
2774 }
2775
2776 t->dev_name = kstrdup(dev_name_source, GFP_KERNEL);
2777 if (!t->dev_name)
2778 goto free;
2779
2780 neigh_path[NEIGH_CTL_PATH_DEV].procname = t->dev_name;
2781 neigh_path[NEIGH_CTL_PATH_NEIGH].ctl_name = pdev_id;
2782 neigh_path[NEIGH_CTL_PATH_PROTO].procname = p_name;
2783 neigh_path[NEIGH_CTL_PATH_PROTO].ctl_name = p_id;
2784
2785 t->sysctl_header =
2786 register_net_sysctl_table(neigh_parms_net(p), neigh_path, t->neigh_vars);
2787 if (!t->sysctl_header)
2788 goto free_procname;
2789
2790 p->sysctl_table = t;
2791 return 0;
2792
2793 free_procname:
2794 kfree(t->dev_name);
2795 free:
2796 kfree(t);
2797 err:
2798 return -ENOBUFS;
2799 }
2800 EXPORT_SYMBOL(neigh_sysctl_register);
2801
2802 void neigh_sysctl_unregister(struct neigh_parms *p)
2803 {
2804 if (p->sysctl_table) {
2805 struct neigh_sysctl_table *t = p->sysctl_table;
2806 p->sysctl_table = NULL;
2807 unregister_sysctl_table(t->sysctl_header);
2808 kfree(t->dev_name);
2809 kfree(t);
2810 }
2811 }
2812 EXPORT_SYMBOL(neigh_sysctl_unregister);
2813
2814 #endif /* CONFIG_SYSCTL */
2815
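/* Register the PF_UNSPEC rtnetlink handlers for neighbour entries and
 * neighbour tables; the address families register their tables separately.
 */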
2816 static int __init neigh_init(void)
2817 {
2818 rtnl_register(PF_UNSPEC, RTM_NEWNEIGH, neigh_add, NULL);
2819 rtnl_register(PF_UNSPEC, RTM_DELNEIGH, neigh_delete, NULL);
2820 rtnl_register(PF_UNSPEC, RTM_GETNEIGH, NULL, neigh_dump_info);
2821
2822 rtnl_register(PF_UNSPEC, RTM_GETNEIGHTBL, NULL, neightbl_dump_info);
2823 rtnl_register(PF_UNSPEC, RTM_SETNEIGHTBL, neightbl_set, NULL);
2824
2825 return 0;
2826 }
2827
2828 subsys_initcall(neigh_init);
2829