net/ipv6/ip6_fib.c
1 /*
2 * Linux INET6 implementation
3 * Forwarding Information Database
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 *
13 * Changes:
14 * Yuji SEKIYA @USAGI: Support default route on router node;
15 * remove ip6_null_entry from the top of
16 * routing table.
17 * Ville Nuorvala: Fixed routing subtrees.
18 */
19
20 #define pr_fmt(fmt) "IPv6: " fmt
21
22 #include <linux/errno.h>
23 #include <linux/types.h>
24 #include <linux/net.h>
25 #include <linux/route.h>
26 #include <linux/netdevice.h>
27 #include <linux/in6.h>
28 #include <linux/init.h>
29 #include <linux/list.h>
30 #include <linux/slab.h>
31
32 #include <net/ipv6.h>
33 #include <net/ndisc.h>
34 #include <net/addrconf.h>
35
36 #include <net/ip6_fib.h>
37 #include <net/ip6_route.h>
38
39 #define RT6_DEBUG 2
40
41 #if RT6_DEBUG >= 3
42 #define RT6_TRACE(x...) pr_debug(x)
43 #else
44 #define RT6_TRACE(x...) do { ; } while (0)
45 #endif
46
47 static struct kmem_cache *fib6_node_kmem __read_mostly;
48
49 struct fib6_cleaner {
50 struct fib6_walker w;
51 struct net *net;
52 int (*func)(struct rt6_info *, void *arg);
53 int sernum;
54 void *arg;
55 };
56
57 static DEFINE_RWLOCK(fib6_walker_lock);
58
59 #ifdef CONFIG_IPV6_SUBTREES
60 #define FWS_INIT FWS_S
61 #else
62 #define FWS_INIT FWS_L
63 #endif
64
65 static void fib6_prune_clones(struct net *net, struct fib6_node *fn);
66 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn);
67 static struct fib6_node *fib6_repair_tree(struct net *net, struct fib6_node *fn);
68 static int fib6_walk(struct fib6_walker *w);
69 static int fib6_walk_continue(struct fib6_walker *w);
70
71 /*
72 * A routing update causes an increase of the serial number on the
73 * affected subtree. This allows for cached routes to be asynchronously
74 * tested when modifications are made to the destination cache as a
75 * result of redirects, path MTU changes, etc.
76 */
77
78 static void fib6_gc_timer_cb(unsigned long arg);
79
80 static LIST_HEAD(fib6_walkers);
81 #define FOR_WALKERS(w) list_for_each_entry(w, &fib6_walkers, lh)
82
83 static void fib6_walker_link(struct fib6_walker *w)
84 {
85 write_lock_bh(&fib6_walker_lock);
86 list_add(&w->lh, &fib6_walkers);
87 write_unlock_bh(&fib6_walker_lock);
88 }
89
90 static void fib6_walker_unlink(struct fib6_walker *w)
91 {
92 write_lock_bh(&fib6_walker_lock);
93 list_del(&w->lh);
94 write_unlock_bh(&fib6_walker_lock);
95 }
96
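/*
 * A sketch of what follows: fib6_new_sernum() hands out a fresh tree
 * serial number for this netns.  It uses an atomic cmpxchg loop, so no
 * lock is needed; the counter wraps from INT_MAX back to 1 and never
 * returns 0, which is reserved below as FIB6_NO_SERNUM_CHANGE.
 */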
97 static int fib6_new_sernum(struct net *net)
98 {
99 int new, old;
100
101 do {
102 old = atomic_read(&net->ipv6.fib6_sernum);
103 new = old < INT_MAX ? old + 1 : 1;
104 } while (atomic_cmpxchg(&net->ipv6.fib6_sernum,
105 old, new) != old);
106 return new;
107 }
108
109 enum {
110 FIB6_NO_SERNUM_CHANGE = 0,
111 };
112
113 /*
114 * Auxiliary address test functions for the radix tree.
115 *
116  *	These assume a 32-bit processor (although they will work on
117  *	64-bit processors)
118 */
119
120 /*
121 * test bit
122 */
123 #if defined(__LITTLE_ENDIAN)
124 # define BITOP_BE32_SWIZZLE (0x1F & ~7)
125 #else
126 # define BITOP_BE32_SWIZZLE 0
127 #endif
128
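/*
 * Note on the bit numbering used below: fn_bit counts prefix bits from
 * the most significant bit of the address.  addr[fn_bit >> 5] picks the
 * 32-bit word and the swizzled shift builds the matching big-endian
 * mask, so e.g. fn_bit == 0 tests the top bit of the first byte of the
 * address regardless of host endianness.
 */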
129 static __be32 addr_bit_set(const void *token, int fn_bit)
130 {
131 const __be32 *addr = token;
132 /*
133 * Here,
134 * 1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)
135 	 * is an optimized version of
136 * htonl(1 << ((~fn_bit)&0x1F))
137 * See include/asm-generic/bitops/le.h.
138 */
139 return (__force __be32)(1 << ((~fn_bit ^ BITOP_BE32_SWIZZLE) & 0x1f)) &
140 addr[fn_bit >> 5];
141 }
142
143 static struct fib6_node *node_alloc(void)
144 {
145 struct fib6_node *fn;
146
147 fn = kmem_cache_zalloc(fib6_node_kmem, GFP_ATOMIC);
148
149 return fn;
150 }
151
152 static void node_free(struct fib6_node *fn)
153 {
154 kmem_cache_free(fib6_node_kmem, fn);
155 }
156
157 static void rt6_release(struct rt6_info *rt)
158 {
159 if (atomic_dec_and_test(&rt->rt6i_ref))
160 dst_free(&rt->dst);
161 }
162
163 static void fib6_link_table(struct net *net, struct fib6_table *tb)
164 {
165 unsigned int h;
166
167 /*
168 	 * Initialize the table lock at a single place to give lockdep a key;
169 	 * tables aren't visible prior to being linked to the list.
170 */
171 rwlock_init(&tb->tb6_lock);
172
173 h = tb->tb6_id & (FIB6_TABLE_HASHSZ - 1);
174
175 /*
176 	 * No protection necessary, this is the only list mutation
177 * operation, tables never disappear once they exist.
178 */
179 hlist_add_head_rcu(&tb->tb6_hlist, &net->ipv6.fib_table_hash[h]);
180 }
181
182 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
183
184 static struct fib6_table *fib6_alloc_table(struct net *net, u32 id)
185 {
186 struct fib6_table *table;
187
188 table = kzalloc(sizeof(*table), GFP_ATOMIC);
189 if (table) {
190 table->tb6_id = id;
191 table->tb6_root.leaf = net->ipv6.ip6_null_entry;
192 table->tb6_root.fn_flags = RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
193 inet_peer_base_init(&table->tb6_peers);
194 }
195
196 return table;
197 }
198
199 struct fib6_table *fib6_new_table(struct net *net, u32 id)
200 {
201 struct fib6_table *tb;
202
203 if (id == 0)
204 id = RT6_TABLE_MAIN;
205 tb = fib6_get_table(net, id);
206 if (tb)
207 return tb;
208
209 tb = fib6_alloc_table(net, id);
210 if (tb)
211 fib6_link_table(net, tb);
212
213 return tb;
214 }
215
216 struct fib6_table *fib6_get_table(struct net *net, u32 id)
217 {
218 struct fib6_table *tb;
219 struct hlist_head *head;
220 unsigned int h;
221
222 if (id == 0)
223 id = RT6_TABLE_MAIN;
224 h = id & (FIB6_TABLE_HASHSZ - 1);
225 rcu_read_lock();
226 head = &net->ipv6.fib_table_hash[h];
227 hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
228 if (tb->tb6_id == id) {
229 rcu_read_unlock();
230 return tb;
231 }
232 }
233 rcu_read_unlock();
234
235 return NULL;
236 }
237
238 static void __net_init fib6_tables_init(struct net *net)
239 {
240 fib6_link_table(net, net->ipv6.fib6_main_tbl);
241 fib6_link_table(net, net->ipv6.fib6_local_tbl);
242 }
243 #else
244
245 struct fib6_table *fib6_new_table(struct net *net, u32 id)
246 {
247 return fib6_get_table(net, id);
248 }
249
250 struct fib6_table *fib6_get_table(struct net *net, u32 id)
251 {
252 return net->ipv6.fib6_main_tbl;
253 }
254
255 struct dst_entry *fib6_rule_lookup(struct net *net, struct flowi6 *fl6,
256 int flags, pol_lookup_t lookup)
257 {
258 return (struct dst_entry *) lookup(net, net->ipv6.fib6_main_tbl, fl6, flags);
259 }
260
261 static void __net_init fib6_tables_init(struct net *net)
262 {
263 fib6_link_table(net, net->ipv6.fib6_main_tbl);
264 }
265
266 #endif
267
268 static int fib6_dump_node(struct fib6_walker *w)
269 {
270 int res;
271 struct rt6_info *rt;
272
273 for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
274 res = rt6_dump_route(rt, w->args);
275 if (res < 0) {
276 /* Frame is full, suspend walking */
277 w->leaf = rt;
278 return 1;
279 }
280 WARN_ON(res == 0);
281 }
282 w->leaf = NULL;
283 return 0;
284 }
285
286 static void fib6_dump_end(struct netlink_callback *cb)
287 {
288 struct fib6_walker *w = (void *)cb->args[2];
289
290 if (w) {
291 if (cb->args[4]) {
292 cb->args[4] = 0;
293 fib6_walker_unlink(w);
294 }
295 cb->args[2] = 0;
296 kfree(w);
297 }
298 cb->done = (void *)cb->args[3];
299 cb->args[1] = 3;
300 }
301
302 static int fib6_dump_done(struct netlink_callback *cb)
303 {
304 fib6_dump_end(cb);
305 return cb->done ? cb->done(cb) : 0;
306 }
307
308 static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
309 struct netlink_callback *cb)
310 {
311 struct fib6_walker *w;
312 int res;
313
314 w = (void *)cb->args[2];
315 w->root = &table->tb6_root;
316
317 if (cb->args[4] == 0) {
318 w->count = 0;
319 w->skip = 0;
320
321 read_lock_bh(&table->tb6_lock);
322 res = fib6_walk(w);
323 read_unlock_bh(&table->tb6_lock);
324 if (res > 0) {
325 cb->args[4] = 1;
326 cb->args[5] = w->root->fn_sernum;
327 }
328 } else {
329 if (cb->args[5] != w->root->fn_sernum) {
330 /* Begin at the root if the tree changed */
331 cb->args[5] = w->root->fn_sernum;
332 w->state = FWS_INIT;
333 w->node = w->root;
334 w->skip = w->count;
335 } else
336 w->skip = 0;
337
338 read_lock_bh(&table->tb6_lock);
339 res = fib6_walk_continue(w);
340 read_unlock_bh(&table->tb6_lock);
341 if (res <= 0) {
342 fib6_walker_unlink(w);
343 cb->args[4] = 0;
344 }
345 }
346
347 return res;
348 }
349
350 static int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
351 {
352 struct net *net = sock_net(skb->sk);
353 unsigned int h, s_h;
354 unsigned int e = 0, s_e;
355 struct rt6_rtnl_dump_arg arg;
356 struct fib6_walker *w;
357 struct fib6_table *tb;
358 struct hlist_head *head;
359 int res = 0;
360
361 s_h = cb->args[0];
362 s_e = cb->args[1];
363
364 w = (void *)cb->args[2];
365 if (!w) {
366 /* New dump:
367 *
368 * 1. hook callback destructor.
369 */
370 cb->args[3] = (long)cb->done;
371 cb->done = fib6_dump_done;
372
373 /*
374 * 2. allocate and initialize walker.
375 */
376 w = kzalloc(sizeof(*w), GFP_ATOMIC);
377 if (!w)
378 return -ENOMEM;
379 w->func = fib6_dump_node;
380 cb->args[2] = (long)w;
381 }
382
383 arg.skb = skb;
384 arg.cb = cb;
385 arg.net = net;
386 w->args = &arg;
387
388 rcu_read_lock();
389 for (h = s_h; h < FIB6_TABLE_HASHSZ; h++, s_e = 0) {
390 e = 0;
391 head = &net->ipv6.fib_table_hash[h];
392 hlist_for_each_entry_rcu(tb, head, tb6_hlist) {
393 if (e < s_e)
394 goto next;
395 res = fib6_dump_table(tb, skb, cb);
396 if (res != 0)
397 goto out;
398 next:
399 e++;
400 }
401 }
402 out:
403 rcu_read_unlock();
404 cb->args[1] = e;
405 cb->args[0] = h;
406
407 res = res < 0 ? res : skb->len;
408 if (res <= 0)
409 fib6_dump_end(cb);
410 return res;
411 }
412
413 /*
414 * Routing Table
415 *
416 * return the appropriate node for a routing tree "add" operation
417 * by either creating and inserting or by returning an existing
418 * node.
419 */
420
421 static struct fib6_node *fib6_add_1(struct fib6_node *root,
422 struct in6_addr *addr, int plen,
423 int offset, int allow_create,
424 int replace_required, int sernum)
425 {
426 struct fib6_node *fn, *in, *ln;
427 struct fib6_node *pn = NULL;
428 struct rt6key *key;
429 int bit;
430 __be32 dir = 0;
431
432 RT6_TRACE("fib6_add_1\n");
433
434 /* insert node in tree */
435
436 fn = root;
437
438 do {
439 key = (struct rt6key *)((u8 *)fn->leaf + offset);
440
441 /*
442 * Prefix match
443 */
444 if (plen < fn->fn_bit ||
445 !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit)) {
446 if (!allow_create) {
447 if (replace_required) {
448 pr_warn("Can't replace route, no match found\n");
449 return ERR_PTR(-ENOENT);
450 }
451 pr_warn("NLM_F_CREATE should be set when creating new route\n");
452 }
453 goto insert_above;
454 }
455
456 /*
457 * Exact match ?
458 */
459
460 if (plen == fn->fn_bit) {
461 /* clean up an intermediate node */
462 if (!(fn->fn_flags & RTN_RTINFO)) {
463 rt6_release(fn->leaf);
464 fn->leaf = NULL;
465 }
466
467 fn->fn_sernum = sernum;
468
469 return fn;
470 }
471
472 /*
473 * We have more bits to go
474 */
475
476 /* Try to walk down on tree. */
477 fn->fn_sernum = sernum;
478 dir = addr_bit_set(addr, fn->fn_bit);
479 pn = fn;
480 fn = dir ? fn->right : fn->left;
481 } while (fn);
482
483 if (!allow_create) {
484 		/* We should not create a new node because
485 		 * NLM_F_REPLACE was specified without NLM_F_CREATE.
486 		 * I assume it is safe to require NLM_F_CREATE when
487 		 * the REPLACE flag is used! Later we may want to remove the
488 		 * check for replace_required, because according
489 		 * to the netlink specification, NLM_F_CREATE
490 		 * MUST be specified if a new route is created.
491 		 * That would keep IPv6 consistent with IPv4
492 */
493 if (replace_required) {
494 pr_warn("Can't replace route, no match found\n");
495 return ERR_PTR(-ENOENT);
496 }
497 pr_warn("NLM_F_CREATE should be set when creating new route\n");
498 }
499 /*
500 	 *	We walked to the bottom of the tree.
501 	 *	Create a new leaf node without children.
502 */
503
504 ln = node_alloc();
505
506 if (!ln)
507 return ERR_PTR(-ENOMEM);
508 ln->fn_bit = plen;
509
510 ln->parent = pn;
511 ln->fn_sernum = sernum;
512
513 if (dir)
514 pn->right = ln;
515 else
516 pn->left = ln;
517
518 return ln;
519
520
521 insert_above:
522 /*
523 	 * Split, since we don't have a common prefix anymore or
524 	 * we have a less significant route.
525 	 * We have to insert an intermediate node in the tree;
526 	 * this new node will point to the one we need to create
527 	 * and to the current one.
528 */
529
530 pn = fn->parent;
531
532 	/* find the first differing bit between the two addresses.
533
534 See comment in __ipv6_addr_diff: bit may be an invalid value,
535 but if it is >= plen, the value is ignored in any case.
536 */
537
538 bit = __ipv6_addr_diff(addr, &key->addr, sizeof(*addr));
539
540 /*
541 * (intermediate)[in]
542 * / \
543 * (new leaf node)[ln] (old node)[fn]
544 */
545 if (plen > bit) {
546 in = node_alloc();
547 ln = node_alloc();
548
549 if (!in || !ln) {
550 if (in)
551 node_free(in);
552 if (ln)
553 node_free(ln);
554 return ERR_PTR(-ENOMEM);
555 }
556
557 /*
558 * new intermediate node.
559 * RTN_RTINFO will
560 		 * be off since an address that chooses one of
561 * the branches would not match less specific routes
562 * in the other branch
563 */
564
565 in->fn_bit = bit;
566
567 in->parent = pn;
568 in->leaf = fn->leaf;
569 atomic_inc(&in->leaf->rt6i_ref);
570
571 in->fn_sernum = sernum;
572
573 /* update parent pointer */
574 if (dir)
575 pn->right = in;
576 else
577 pn->left = in;
578
579 ln->fn_bit = plen;
580
581 ln->parent = in;
582 fn->parent = in;
583
584 ln->fn_sernum = sernum;
585
586 if (addr_bit_set(addr, bit)) {
587 in->right = ln;
588 in->left = fn;
589 } else {
590 in->left = ln;
591 in->right = fn;
592 }
593 } else { /* plen <= bit */
594
595 /*
596 * (new leaf node)[ln]
597 * / \
598 * (old node)[fn] NULL
599 */
600
601 ln = node_alloc();
602
603 if (!ln)
604 return ERR_PTR(-ENOMEM);
605
606 ln->fn_bit = plen;
607
608 ln->parent = pn;
609
610 ln->fn_sernum = sernum;
611
612 if (dir)
613 pn->right = ln;
614 else
615 pn->left = ln;
616
617 if (addr_bit_set(&key->addr, plen))
618 ln->right = fn;
619 else
620 ln->left = fn;
621
622 fn->parent = ln;
623 }
624 return ln;
625 }
626
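/*
 * A route may take part in ECMP only if it has a gateway and carries
 * neither RTF_ADDRCONF nor RTF_DYNAMIC, i.e. presumably only
 * statically configured gateway routes qualify.
 */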
627 static bool rt6_qualify_for_ecmp(struct rt6_info *rt)
628 {
629 return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
630 RTF_GATEWAY;
631 }
632
633 static int fib6_commit_metrics(struct dst_entry *dst,
634 struct nlattr *mx, int mx_len)
635 {
636 struct nlattr *nla;
637 int remaining;
638 u32 *mp;
639
640 if (dst->flags & DST_HOST) {
641 mp = dst_metrics_write_ptr(dst);
642 } else {
643 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);
644 if (!mp)
645 return -ENOMEM;
646 dst_init_metrics(dst, mp, 0);
647 }
648
649 nla_for_each_attr(nla, mx, mx_len, remaining) {
650 int type = nla_type(nla);
651
652 if (type) {
653 if (type > RTAX_MAX)
654 return -EINVAL;
655
656 mp[type - 1] = nla_get_u32(nla);
657 }
658 }
659 return 0;
660 }
661
662 /*
663 * Insert routing information in a node.
664 */
665
666 static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
667 struct nl_info *info, struct nlattr *mx, int mx_len)
668 {
669 struct rt6_info *iter = NULL;
670 struct rt6_info **ins;
671 int replace = (info->nlh &&
672 (info->nlh->nlmsg_flags & NLM_F_REPLACE));
673 int add = (!info->nlh ||
674 (info->nlh->nlmsg_flags & NLM_F_CREATE));
675 int found = 0;
676 bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
677 int err;
678
679 ins = &fn->leaf;
680
681 for (iter = fn->leaf; iter; iter = iter->dst.rt6_next) {
682 /*
683 * Search for duplicates
684 */
685
686 if (iter->rt6i_metric == rt->rt6i_metric) {
687 /*
688 * Same priority level
689 */
690 if (info->nlh &&
691 (info->nlh->nlmsg_flags & NLM_F_EXCL))
692 return -EEXIST;
693 if (replace) {
694 found++;
695 break;
696 }
697
698 if (iter->dst.dev == rt->dst.dev &&
699 iter->rt6i_idev == rt->rt6i_idev &&
700 ipv6_addr_equal(&iter->rt6i_gateway,
701 &rt->rt6i_gateway)) {
702 if (rt->rt6i_nsiblings)
703 rt->rt6i_nsiblings = 0;
704 if (!(iter->rt6i_flags & RTF_EXPIRES))
705 return -EEXIST;
706 if (!(rt->rt6i_flags & RTF_EXPIRES))
707 rt6_clean_expires(iter);
708 else
709 rt6_set_expires(iter, rt->dst.expires);
710 return -EEXIST;
711 }
712 		/* If we have the same destination and the same metric,
713 		 * but not the same gateway, then the route we try to
714 		 * add is a sibling of this route; increment our counter
715 		 * of siblings, and later we will add our route to the
716 		 * list.
717 		 * Only static routes (which don't have the flag
718 		 * RTF_EXPIRES) are used for ECMPv6.
719 		 *
720 		 * To avoid a long list, we only add siblings if the
721 		 * route has a gateway.
722 		 */
723 if (rt_can_ecmp &&
724 rt6_qualify_for_ecmp(iter))
725 rt->rt6i_nsiblings++;
726 }
727
728 if (iter->rt6i_metric > rt->rt6i_metric)
729 break;
730
731 ins = &iter->dst.rt6_next;
732 }
733
734 /* Reset round-robin state, if necessary */
735 if (ins == &fn->leaf)
736 fn->rr_ptr = NULL;
737
738 	/* Link this route to its ECMP siblings (same destination and metric). */
739 if (rt->rt6i_nsiblings) {
740 unsigned int rt6i_nsiblings;
741 struct rt6_info *sibling, *temp_sibling;
742
743 		/* Find the first route that has the same metric */
744 sibling = fn->leaf;
745 while (sibling) {
746 if (sibling->rt6i_metric == rt->rt6i_metric &&
747 rt6_qualify_for_ecmp(sibling)) {
748 list_add_tail(&rt->rt6i_siblings,
749 &sibling->rt6i_siblings);
750 break;
751 }
752 sibling = sibling->dst.rt6_next;
753 }
754 /* For each sibling in the list, increment the counter of
755 		 * siblings. BUG() if the counters do not match; the list of siblings
756 * is broken!
757 */
758 rt6i_nsiblings = 0;
759 list_for_each_entry_safe(sibling, temp_sibling,
760 &rt->rt6i_siblings, rt6i_siblings) {
761 sibling->rt6i_nsiblings++;
762 BUG_ON(sibling->rt6i_nsiblings != rt->rt6i_nsiblings);
763 rt6i_nsiblings++;
764 }
765 BUG_ON(rt6i_nsiblings != rt->rt6i_nsiblings);
766 }
767
768 /*
769 * insert node
770 */
771 if (!replace) {
772 if (!add)
773 pr_warn("NLM_F_CREATE should be set when creating new route\n");
774
775 add:
776 if (mx) {
777 err = fib6_commit_metrics(&rt->dst, mx, mx_len);
778 if (err)
779 return err;
780 }
781 rt->dst.rt6_next = iter;
782 *ins = rt;
783 rt->rt6i_node = fn;
784 atomic_inc(&rt->rt6i_ref);
785 inet6_rt_notify(RTM_NEWROUTE, rt, info);
786 info->nl_net->ipv6.rt6_stats->fib_rt_entries++;
787
788 if (!(fn->fn_flags & RTN_RTINFO)) {
789 info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
790 fn->fn_flags |= RTN_RTINFO;
791 }
792
793 } else {
794 if (!found) {
795 if (add)
796 goto add;
797 pr_warn("NLM_F_REPLACE set, but no existing node found!\n");
798 return -ENOENT;
799 }
800 if (mx) {
801 err = fib6_commit_metrics(&rt->dst, mx, mx_len);
802 if (err)
803 return err;
804 }
805 *ins = rt;
806 rt->rt6i_node = fn;
807 rt->dst.rt6_next = iter->dst.rt6_next;
808 atomic_inc(&rt->rt6i_ref);
809 inet6_rt_notify(RTM_NEWROUTE, rt, info);
810 rt6_release(iter);
811 if (!(fn->fn_flags & RTN_RTINFO)) {
812 info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
813 fn->fn_flags |= RTN_RTINFO;
814 }
815 }
816
817 return 0;
818 }
819
820 static void fib6_start_gc(struct net *net, struct rt6_info *rt)
821 {
822 if (!timer_pending(&net->ipv6.ip6_fib_timer) &&
823 (rt->rt6i_flags & (RTF_EXPIRES | RTF_CACHE)))
824 mod_timer(&net->ipv6.ip6_fib_timer,
825 jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
826 }
827
828 void fib6_force_start_gc(struct net *net)
829 {
830 if (!timer_pending(&net->ipv6.ip6_fib_timer))
831 mod_timer(&net->ipv6.ip6_fib_timer,
832 jiffies + net->ipv6.sysctl.ip6_rt_gc_interval);
833 }
834
835 /*
836 * Add routing information to the routing tree.
837 * <destination addr>/<source addr>
838 * with source addr info in sub-trees
839 */
840
841 int fib6_add(struct fib6_node *root, struct rt6_info *rt, struct nl_info *info,
842 struct nlattr *mx, int mx_len)
843 {
844 struct fib6_node *fn, *pn = NULL;
845 int err = -ENOMEM;
846 int allow_create = 1;
847 int replace_required = 0;
848 int sernum = fib6_new_sernum(info->nl_net);
849
850 if (info->nlh) {
851 if (!(info->nlh->nlmsg_flags & NLM_F_CREATE))
852 allow_create = 0;
853 if (info->nlh->nlmsg_flags & NLM_F_REPLACE)
854 replace_required = 1;
855 }
856 if (!allow_create && !replace_required)
857 pr_warn("RTM_NEWROUTE with no NLM_F_CREATE or NLM_F_REPLACE\n");
858
859 fn = fib6_add_1(root, &rt->rt6i_dst.addr, rt->rt6i_dst.plen,
860 offsetof(struct rt6_info, rt6i_dst), allow_create,
861 replace_required, sernum);
862 if (IS_ERR(fn)) {
863 err = PTR_ERR(fn);
864 fn = NULL;
865 goto out;
866 }
867
868 pn = fn;
869
870 #ifdef CONFIG_IPV6_SUBTREES
871 if (rt->rt6i_src.plen) {
872 struct fib6_node *sn;
873
874 if (!fn->subtree) {
875 struct fib6_node *sfn;
876
877 /*
878 * Create subtree.
879 *
880 * fn[main tree]
881 * |
882 * sfn[subtree root]
883 * \
884 * sn[new leaf node]
885 */
886
887 /* Create subtree root node */
888 sfn = node_alloc();
889 if (!sfn)
890 goto st_failure;
891
892 sfn->leaf = info->nl_net->ipv6.ip6_null_entry;
893 atomic_inc(&info->nl_net->ipv6.ip6_null_entry->rt6i_ref);
894 sfn->fn_flags = RTN_ROOT;
895 sfn->fn_sernum = sernum;
896
897 /* Now add the first leaf node to new subtree */
898
899 sn = fib6_add_1(sfn, &rt->rt6i_src.addr,
900 rt->rt6i_src.plen,
901 offsetof(struct rt6_info, rt6i_src),
902 allow_create, replace_required, sernum);
903
904 if (IS_ERR(sn)) {
905 				/* If it failed, discard the just-allocated
906 				   root, and then (in st_failure) the stale node
907 				   in the main tree.
908 */
909 node_free(sfn);
910 err = PTR_ERR(sn);
911 goto st_failure;
912 }
913
914 /* Now link new subtree to main tree */
915 sfn->parent = fn;
916 fn->subtree = sfn;
917 } else {
918 sn = fib6_add_1(fn->subtree, &rt->rt6i_src.addr,
919 rt->rt6i_src.plen,
920 offsetof(struct rt6_info, rt6i_src),
921 allow_create, replace_required, sernum);
922
923 if (IS_ERR(sn)) {
924 err = PTR_ERR(sn);
925 goto st_failure;
926 }
927 }
928
929 if (!fn->leaf) {
930 fn->leaf = rt;
931 atomic_inc(&rt->rt6i_ref);
932 }
933 fn = sn;
934 }
935 #endif
936
937 err = fib6_add_rt2node(fn, rt, info, mx, mx_len);
938 if (!err) {
939 fib6_start_gc(info->nl_net, rt);
940 if (!(rt->rt6i_flags & RTF_CACHE))
941 fib6_prune_clones(info->nl_net, pn);
942 }
943
944 out:
945 if (err) {
946 #ifdef CONFIG_IPV6_SUBTREES
947 /*
948 * If fib6_add_1 has cleared the old leaf pointer in the
949 * super-tree leaf node we have to find a new one for it.
950 */
951 if (pn != fn && pn->leaf == rt) {
952 pn->leaf = NULL;
953 atomic_dec(&rt->rt6i_ref);
954 }
955 if (pn != fn && !pn->leaf && !(pn->fn_flags & RTN_RTINFO)) {
956 pn->leaf = fib6_find_prefix(info->nl_net, pn);
957 #if RT6_DEBUG >= 2
958 if (!pn->leaf) {
959 WARN_ON(pn->leaf == NULL);
960 pn->leaf = info->nl_net->ipv6.ip6_null_entry;
961 }
962 #endif
963 atomic_inc(&pn->leaf->rt6i_ref);
964 }
965 #endif
966 dst_free(&rt->dst);
967 }
968 return err;
969
970 #ifdef CONFIG_IPV6_SUBTREES
971 	/* Subtree creation failed, probably the main tree node
972 	   is an orphan. If it is, shoot it.
973 */
974 st_failure:
975 if (fn && !(fn->fn_flags & (RTN_RTINFO|RTN_ROOT)))
976 fib6_repair_tree(info->nl_net, fn);
977 dst_free(&rt->dst);
978 return err;
979 #endif
980 }
981
982 /*
983 * Routing tree lookup
984 *
985 */
986
987 struct lookup_args {
988 int offset; /* key offset on rt6_info */
989 const struct in6_addr *addr; /* search key */
990 };
991
992 static struct fib6_node *fib6_lookup_1(struct fib6_node *root,
993 struct lookup_args *args)
994 {
995 struct fib6_node *fn;
996 __be32 dir;
997
998 if (unlikely(args->offset == 0))
999 return NULL;
1000
1001 /*
1002 * Descend on a tree
1003 */
1004
1005 fn = root;
1006
1007 for (;;) {
1008 struct fib6_node *next;
1009
1010 dir = addr_bit_set(args->addr, fn->fn_bit);
1011
1012 next = dir ? fn->right : fn->left;
1013
1014 if (next) {
1015 fn = next;
1016 continue;
1017 }
1018 break;
1019 }
1020
1021 while (fn) {
1022 if (FIB6_SUBTREE(fn) || fn->fn_flags & RTN_RTINFO) {
1023 struct rt6key *key;
1024
1025 key = (struct rt6key *) ((u8 *) fn->leaf +
1026 args->offset);
1027
1028 if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) {
1029 #ifdef CONFIG_IPV6_SUBTREES
1030 if (fn->subtree) {
1031 struct fib6_node *sfn;
1032 sfn = fib6_lookup_1(fn->subtree,
1033 args + 1);
1034 if (!sfn)
1035 goto backtrack;
1036 fn = sfn;
1037 }
1038 #endif
1039 if (fn->fn_flags & RTN_RTINFO)
1040 return fn;
1041 }
1042 }
1043 #ifdef CONFIG_IPV6_SUBTREES
1044 backtrack:
1045 #endif
1046 if (fn->fn_flags & RTN_ROOT)
1047 break;
1048
1049 fn = fn->parent;
1050 }
1051
1052 return NULL;
1053 }
1054
1055 struct fib6_node *fib6_lookup(struct fib6_node *root, const struct in6_addr *daddr,
1056 const struct in6_addr *saddr)
1057 {
1058 struct fib6_node *fn;
1059 struct lookup_args args[] = {
1060 {
1061 .offset = offsetof(struct rt6_info, rt6i_dst),
1062 .addr = daddr,
1063 },
1064 #ifdef CONFIG_IPV6_SUBTREES
1065 {
1066 .offset = offsetof(struct rt6_info, rt6i_src),
1067 .addr = saddr,
1068 },
1069 #endif
1070 {
1071 .offset = 0, /* sentinel */
1072 }
1073 };
1074
1075 fn = fib6_lookup_1(root, daddr ? args : args + 1);
1076 if (!fn || fn->fn_flags & RTN_TL_ROOT)
1077 fn = root;
1078
1079 return fn;
1080 }
1081
1082 /*
1083 * Get node with specified destination prefix (and source prefix,
1084 * if subtrees are used)
1085 */
1086
1087
1088 static struct fib6_node *fib6_locate_1(struct fib6_node *root,
1089 const struct in6_addr *addr,
1090 int plen, int offset)
1091 {
1092 struct fib6_node *fn;
1093
1094 for (fn = root; fn ; ) {
1095 struct rt6key *key = (struct rt6key *)((u8 *)fn->leaf + offset);
1096
1097 /*
1098 * Prefix match
1099 */
1100 if (plen < fn->fn_bit ||
1101 !ipv6_prefix_equal(&key->addr, addr, fn->fn_bit))
1102 return NULL;
1103
1104 if (plen == fn->fn_bit)
1105 return fn;
1106
1107 /*
1108 * We have more bits to go
1109 */
1110 if (addr_bit_set(addr, fn->fn_bit))
1111 fn = fn->right;
1112 else
1113 fn = fn->left;
1114 }
1115 return NULL;
1116 }
1117
1118 struct fib6_node *fib6_locate(struct fib6_node *root,
1119 const struct in6_addr *daddr, int dst_len,
1120 const struct in6_addr *saddr, int src_len)
1121 {
1122 struct fib6_node *fn;
1123
1124 fn = fib6_locate_1(root, daddr, dst_len,
1125 offsetof(struct rt6_info, rt6i_dst));
1126
1127 #ifdef CONFIG_IPV6_SUBTREES
1128 if (src_len) {
1129 WARN_ON(saddr == NULL);
1130 if (fn && fn->subtree)
1131 fn = fib6_locate_1(fn->subtree, saddr, src_len,
1132 offsetof(struct rt6_info, rt6i_src));
1133 }
1134 #endif
1135
1136 if (fn && fn->fn_flags & RTN_RTINFO)
1137 return fn;
1138
1139 return NULL;
1140 }
1141
1142
1143 /*
1144 * Deletion
1145 *
1146 */
1147
1148 static struct rt6_info *fib6_find_prefix(struct net *net, struct fib6_node *fn)
1149 {
1150 if (fn->fn_flags & RTN_ROOT)
1151 return net->ipv6.ip6_null_entry;
1152
1153 while (fn) {
1154 if (fn->left)
1155 return fn->left->leaf;
1156 if (fn->right)
1157 return fn->right->leaf;
1158
1159 fn = FIB6_SUBTREE(fn);
1160 }
1161 return NULL;
1162 }
1163
1164 /*
1165 * Called to trim the tree of intermediate nodes when possible. "fn"
1166 * is the node we want to try and remove.
1167 */
1168
1169 static struct fib6_node *fib6_repair_tree(struct net *net,
1170 struct fib6_node *fn)
1171 {
1172 int children;
1173 int nstate;
1174 struct fib6_node *child, *pn;
1175 struct fib6_walker *w;
1176 int iter = 0;
1177
1178 for (;;) {
1179 RT6_TRACE("fixing tree: plen=%d iter=%d\n", fn->fn_bit, iter);
1180 iter++;
1181
1182 WARN_ON(fn->fn_flags & RTN_RTINFO);
1183 WARN_ON(fn->fn_flags & RTN_TL_ROOT);
1184 WARN_ON(fn->leaf != NULL);
1185
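/*
 * children is a small bitmask: bit 0 means a right child exists,
 * bit 1 means a left child exists, so 3 means both are present.
 */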
1186 children = 0;
1187 child = NULL;
1188 if (fn->right)
1189 child = fn->right, children |= 1;
1190 if (fn->left)
1191 child = fn->left, children |= 2;
1192
1193 if (children == 3 || FIB6_SUBTREE(fn)
1194 #ifdef CONFIG_IPV6_SUBTREES
1195 /* Subtree root (i.e. fn) may have one child */
1196 || (children && fn->fn_flags & RTN_ROOT)
1197 #endif
1198 ) {
1199 fn->leaf = fib6_find_prefix(net, fn);
1200 #if RT6_DEBUG >= 2
1201 if (!fn->leaf) {
1202 WARN_ON(!fn->leaf);
1203 fn->leaf = net->ipv6.ip6_null_entry;
1204 }
1205 #endif
1206 atomic_inc(&fn->leaf->rt6i_ref);
1207 return fn->parent;
1208 }
1209
1210 pn = fn->parent;
1211 #ifdef CONFIG_IPV6_SUBTREES
1212 if (FIB6_SUBTREE(pn) == fn) {
1213 WARN_ON(!(fn->fn_flags & RTN_ROOT));
1214 FIB6_SUBTREE(pn) = NULL;
1215 nstate = FWS_L;
1216 } else {
1217 WARN_ON(fn->fn_flags & RTN_ROOT);
1218 #endif
1219 if (pn->right == fn)
1220 pn->right = child;
1221 else if (pn->left == fn)
1222 pn->left = child;
1223 #if RT6_DEBUG >= 2
1224 else
1225 WARN_ON(1);
1226 #endif
1227 if (child)
1228 child->parent = pn;
1229 nstate = FWS_R;
1230 #ifdef CONFIG_IPV6_SUBTREES
1231 }
1232 #endif
1233
1234 read_lock(&fib6_walker_lock);
1235 FOR_WALKERS(w) {
1236 if (!child) {
1237 if (w->root == fn) {
1238 w->root = w->node = NULL;
1239 RT6_TRACE("W %p adjusted by delroot 1\n", w);
1240 } else if (w->node == fn) {
1241 RT6_TRACE("W %p adjusted by delnode 1, s=%d/%d\n", w, w->state, nstate);
1242 w->node = pn;
1243 w->state = nstate;
1244 }
1245 } else {
1246 if (w->root == fn) {
1247 w->root = child;
1248 RT6_TRACE("W %p adjusted by delroot 2\n", w);
1249 }
1250 if (w->node == fn) {
1251 w->node = child;
1252 if (children&2) {
1253 RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
1254 w->state = w->state >= FWS_R ? FWS_U : FWS_INIT;
1255 } else {
1256 RT6_TRACE("W %p adjusted by delnode 2, s=%d\n", w, w->state);
1257 w->state = w->state >= FWS_C ? FWS_U : FWS_INIT;
1258 }
1259 }
1260 }
1261 }
1262 read_unlock(&fib6_walker_lock);
1263
1264 node_free(fn);
1265 if (pn->fn_flags & RTN_RTINFO || FIB6_SUBTREE(pn))
1266 return pn;
1267
1268 rt6_release(pn->leaf);
1269 pn->leaf = NULL;
1270 fn = pn;
1271 }
1272 }
1273
1274 static void fib6_del_route(struct fib6_node *fn, struct rt6_info **rtp,
1275 struct nl_info *info)
1276 {
1277 struct fib6_walker *w;
1278 struct rt6_info *rt = *rtp;
1279 struct net *net = info->nl_net;
1280
1281 RT6_TRACE("fib6_del_route\n");
1282
1283 /* Unlink it */
1284 *rtp = rt->dst.rt6_next;
1285 rt->rt6i_node = NULL;
1286 net->ipv6.rt6_stats->fib_rt_entries--;
1287 net->ipv6.rt6_stats->fib_discarded_routes++;
1288
1289 /* Reset round-robin state, if necessary */
1290 if (fn->rr_ptr == rt)
1291 fn->rr_ptr = NULL;
1292
1293 /* Remove this entry from other siblings */
1294 if (rt->rt6i_nsiblings) {
1295 struct rt6_info *sibling, *next_sibling;
1296
1297 list_for_each_entry_safe(sibling, next_sibling,
1298 &rt->rt6i_siblings, rt6i_siblings)
1299 sibling->rt6i_nsiblings--;
1300 rt->rt6i_nsiblings = 0;
1301 list_del_init(&rt->rt6i_siblings);
1302 }
1303
1304 /* Adjust walkers */
1305 read_lock(&fib6_walker_lock);
1306 FOR_WALKERS(w) {
1307 if (w->state == FWS_C && w->leaf == rt) {
1308 RT6_TRACE("walker %p adjusted by delroute\n", w);
1309 w->leaf = rt->dst.rt6_next;
1310 if (!w->leaf)
1311 w->state = FWS_U;
1312 }
1313 }
1314 read_unlock(&fib6_walker_lock);
1315
1316 rt->dst.rt6_next = NULL;
1317
1318 /* If it was last route, expunge its radix tree node */
1319 if (!fn->leaf) {
1320 fn->fn_flags &= ~RTN_RTINFO;
1321 net->ipv6.rt6_stats->fib_route_nodes--;
1322 fn = fib6_repair_tree(net, fn);
1323 }
1324
1325 if (atomic_read(&rt->rt6i_ref) != 1) {
1326 		/* This route is used as a dummy address holder in some split
1327 		 * nodes. It is not leaked, but it still holds other resources,
1328 		 * which must be released in time. So, scan ancestor nodes
1329 		 * and replace dummy references to this route with references
1330 		 * to still-alive ones.
1331 */
1332 while (fn) {
1333 if (!(fn->fn_flags & RTN_RTINFO) && fn->leaf == rt) {
1334 fn->leaf = fib6_find_prefix(net, fn);
1335 atomic_inc(&fn->leaf->rt6i_ref);
1336 rt6_release(rt);
1337 }
1338 fn = fn->parent;
1339 }
1340 /* No more references are possible at this point. */
1341 BUG_ON(atomic_read(&rt->rt6i_ref) != 1);
1342 }
1343
1344 inet6_rt_notify(RTM_DELROUTE, rt, info);
1345 rt6_release(rt);
1346 }
1347
1348 int fib6_del(struct rt6_info *rt, struct nl_info *info)
1349 {
1350 struct net *net = info->nl_net;
1351 struct fib6_node *fn = rt->rt6i_node;
1352 struct rt6_info **rtp;
1353
1354 #if RT6_DEBUG >= 2
1355 if (rt->dst.obsolete > 0) {
1356 WARN_ON(fn != NULL);
1357 return -ENOENT;
1358 }
1359 #endif
1360 if (!fn || rt == net->ipv6.ip6_null_entry)
1361 return -ENOENT;
1362
1363 WARN_ON(!(fn->fn_flags & RTN_RTINFO));
1364
1365 if (!(rt->rt6i_flags & RTF_CACHE)) {
1366 struct fib6_node *pn = fn;
1367 #ifdef CONFIG_IPV6_SUBTREES
1368 /* clones of this route might be in another subtree */
1369 if (rt->rt6i_src.plen) {
1370 while (!(pn->fn_flags & RTN_ROOT))
1371 pn = pn->parent;
1372 pn = pn->parent;
1373 }
1374 #endif
1375 fib6_prune_clones(info->nl_net, pn);
1376 }
1377
1378 /*
1379 * Walk the leaf entries looking for ourself
1380 */
1381
1382 for (rtp = &fn->leaf; *rtp; rtp = &(*rtp)->dst.rt6_next) {
1383 if (*rtp == rt) {
1384 fib6_del_route(fn, rtp, info);
1385 return 0;
1386 }
1387 }
1388 return -ENOENT;
1389 }
1390
1391 /*
1392 * Tree traversal function.
1393 *
1394 * Certainly, it is not interrupt safe.
1395  *	However, it is internally reentrant wrt itself and fib6_add/fib6_del.
1396  *	It means that we can modify the tree during walking
1397  *	and use this function for garbage collection, clone pruning,
1398  *	cleaning the tree when a device goes down, etc.
1399 *
1400 * It guarantees that every node will be traversed,
1401 * and that it will be traversed only once.
1402 *
1403 * Callback function w->func may return:
1404 * 0 -> continue walking.
1405 * positive value -> walking is suspended (used by tree dumps,
1406 * and probably by gc, if it will be split to several slices)
1407 * negative value -> terminate walking.
1408 *
1409 * The function itself returns:
1410 * 0 -> walk is complete.
1411 * >0 -> walk is incomplete (i.e. suspended)
1412 * <0 -> walk is terminated by an error.
1413 */
1414
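/*
 * Walker states:
 *   FWS_S - descend into the node's subtree (CONFIG_IPV6_SUBTREES only)
 *   FWS_L - descend into the left child
 *   FWS_R - descend into the right child
 *   FWS_C - visit the routes hanging off this node (call w->func)
 *   FWS_U - go back up to the parent
 */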
1415 static int fib6_walk_continue(struct fib6_walker *w)
1416 {
1417 struct fib6_node *fn, *pn;
1418
1419 for (;;) {
1420 fn = w->node;
1421 if (!fn)
1422 return 0;
1423
1424 if (w->prune && fn != w->root &&
1425 fn->fn_flags & RTN_RTINFO && w->state < FWS_C) {
1426 w->state = FWS_C;
1427 w->leaf = fn->leaf;
1428 }
1429 switch (w->state) {
1430 #ifdef CONFIG_IPV6_SUBTREES
1431 case FWS_S:
1432 if (FIB6_SUBTREE(fn)) {
1433 w->node = FIB6_SUBTREE(fn);
1434 continue;
1435 }
1436 w->state = FWS_L;
1437 #endif
1438 case FWS_L:
1439 if (fn->left) {
1440 w->node = fn->left;
1441 w->state = FWS_INIT;
1442 continue;
1443 }
1444 w->state = FWS_R;
1445 case FWS_R:
1446 if (fn->right) {
1447 w->node = fn->right;
1448 w->state = FWS_INIT;
1449 continue;
1450 }
1451 w->state = FWS_C;
1452 w->leaf = fn->leaf;
1453 case FWS_C:
1454 if (w->leaf && fn->fn_flags & RTN_RTINFO) {
1455 int err;
1456
1457 if (w->skip) {
1458 w->skip--;
1459 goto skip;
1460 }
1461
1462 err = w->func(w);
1463 if (err)
1464 return err;
1465
1466 w->count++;
1467 continue;
1468 }
1469 skip:
1470 w->state = FWS_U;
1471 case FWS_U:
1472 if (fn == w->root)
1473 return 0;
1474 pn = fn->parent;
1475 w->node = pn;
1476 #ifdef CONFIG_IPV6_SUBTREES
1477 if (FIB6_SUBTREE(pn) == fn) {
1478 WARN_ON(!(fn->fn_flags & RTN_ROOT));
1479 w->state = FWS_L;
1480 continue;
1481 }
1482 #endif
1483 if (pn->left == fn) {
1484 w->state = FWS_R;
1485 continue;
1486 }
1487 if (pn->right == fn) {
1488 w->state = FWS_C;
1489 w->leaf = w->node->leaf;
1490 continue;
1491 }
1492 #if RT6_DEBUG >= 2
1493 WARN_ON(1);
1494 #endif
1495 }
1496 }
1497 }
1498
1499 static int fib6_walk(struct fib6_walker *w)
1500 {
1501 int res;
1502
1503 w->state = FWS_INIT;
1504 w->node = w->root;
1505
1506 fib6_walker_link(w);
1507 res = fib6_walk_continue(w);
1508 if (res <= 0)
1509 fib6_walker_unlink(w);
1510 return res;
1511 }
1512
1513 static int fib6_clean_node(struct fib6_walker *w)
1514 {
1515 int res;
1516 struct rt6_info *rt;
1517 struct fib6_cleaner *c = container_of(w, struct fib6_cleaner, w);
1518 struct nl_info info = {
1519 .nl_net = c->net,
1520 };
1521
1522 if (c->sernum != FIB6_NO_SERNUM_CHANGE &&
1523 w->node->fn_sernum != c->sernum)
1524 w->node->fn_sernum = c->sernum;
1525
1526 if (!c->func) {
1527 WARN_ON_ONCE(c->sernum == FIB6_NO_SERNUM_CHANGE);
1528 w->leaf = NULL;
1529 return 0;
1530 }
1531
1532 for (rt = w->leaf; rt; rt = rt->dst.rt6_next) {
1533 res = c->func(rt, c->arg);
1534 if (res < 0) {
1535 w->leaf = rt;
1536 res = fib6_del(rt, &info);
1537 if (res) {
1538 #if RT6_DEBUG >= 2
1539 pr_debug("%s: del failed: rt=%p@%p err=%d\n",
1540 __func__, rt, rt->rt6i_node, res);
1541 #endif
1542 continue;
1543 }
1544 return 0;
1545 }
1546 WARN_ON(res != 0);
1547 }
1548 w->leaf = rt;
1549 return 0;
1550 }
1551
1552 /*
1553 * Convenient frontend to tree walker.
1554 *
1555 * func is called on each route.
1556 * It may return -1 -> delete this route.
1557 * 0 -> continue walking
1558 *
1559 * prune==1 -> only immediate children of node (certainly,
1560 * ignoring pure split nodes) will be scanned.
1561 */
1562
1563 static void fib6_clean_tree(struct net *net, struct fib6_node *root,
1564 int (*func)(struct rt6_info *, void *arg),
1565 bool prune, int sernum, void *arg)
1566 {
1567 struct fib6_cleaner c;
1568
1569 c.w.root = root;
1570 c.w.func = fib6_clean_node;
1571 c.w.prune = prune;
1572 c.w.count = 0;
1573 c.w.skip = 0;
1574 c.func = func;
1575 c.sernum = sernum;
1576 c.arg = arg;
1577 c.net = net;
1578
1579 fib6_walk(&c.w);
1580 }
1581
1582 static void __fib6_clean_all(struct net *net,
1583 int (*func)(struct rt6_info *, void *),
1584 int sernum, void *arg)
1585 {
1586 struct fib6_table *table;
1587 struct hlist_head *head;
1588 unsigned int h;
1589
1590 rcu_read_lock();
1591 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
1592 head = &net->ipv6.fib_table_hash[h];
1593 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
1594 write_lock_bh(&table->tb6_lock);
1595 fib6_clean_tree(net, &table->tb6_root,
1596 func, false, sernum, arg);
1597 write_unlock_bh(&table->tb6_lock);
1598 }
1599 }
1600 rcu_read_unlock();
1601 }
1602
1603 void fib6_clean_all(struct net *net, int (*func)(struct rt6_info *, void *),
1604 void *arg)
1605 {
1606 __fib6_clean_all(net, func, FIB6_NO_SERNUM_CHANGE, arg);
1607 }
1608
1609 static int fib6_prune_clone(struct rt6_info *rt, void *arg)
1610 {
1611 if (rt->rt6i_flags & RTF_CACHE) {
1612 RT6_TRACE("pruning clone %p\n", rt);
1613 return -1;
1614 }
1615
1616 return 0;
1617 }
1618
1619 static void fib6_prune_clones(struct net *net, struct fib6_node *fn)
1620 {
1621 fib6_clean_tree(net, fn, fib6_prune_clone, true,
1622 FIB6_NO_SERNUM_CHANGE, NULL);
1623 }
1624
1625 static void fib6_flush_trees(struct net *net)
1626 {
1627 int new_sernum = fib6_new_sernum(net);
1628
1629 __fib6_clean_all(net, NULL, new_sernum, NULL);
1630 }
1631
1632 /*
1633 * Garbage collection
1634 */
1635
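/* State for the current GC pass; only touched under fib6_gc_lock
 * (see fib6_run_gc()).
 */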
1636 static struct fib6_gc_args
1637 {
1638 int timeout;
1639 int more;
1640 } gc_args;
1641
1642 static int fib6_age(struct rt6_info *rt, void *arg)
1643 {
1644 unsigned long now = jiffies;
1645
1646 /*
1647 * check addrconf expiration here.
1648 * Routes are expired even if they are in use.
1649 *
1650 	 *	Also age clones. Note that clones are aged out
1651 * only if they are not in use now.
1652 */
1653
1654 if (rt->rt6i_flags & RTF_EXPIRES && rt->dst.expires) {
1655 if (time_after(now, rt->dst.expires)) {
1656 RT6_TRACE("expiring %p\n", rt);
1657 return -1;
1658 }
1659 gc_args.more++;
1660 } else if (rt->rt6i_flags & RTF_CACHE) {
1661 if (atomic_read(&rt->dst.__refcnt) == 0 &&
1662 time_after_eq(now, rt->dst.lastuse + gc_args.timeout)) {
1663 RT6_TRACE("aging clone %p\n", rt);
1664 return -1;
1665 } else if (rt->rt6i_flags & RTF_GATEWAY) {
1666 struct neighbour *neigh;
1667 __u8 neigh_flags = 0;
1668
1669 neigh = dst_neigh_lookup(&rt->dst, &rt->rt6i_gateway);
1670 if (neigh) {
1671 neigh_flags = neigh->flags;
1672 neigh_release(neigh);
1673 }
1674 if (!(neigh_flags & NTF_ROUTER)) {
1675 RT6_TRACE("purging route %p via non-router but gateway\n",
1676 rt);
1677 return -1;
1678 }
1679 }
1680 gc_args.more++;
1681 }
1682
1683 return 0;
1684 }
1685
1686 static DEFINE_SPINLOCK(fib6_gc_lock);
1687
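/*
 * Run the routing table garbage collector.  If another run is already
 * in progress and force is false, just re-arm the timer and return;
 * with force set we wait for the lock instead.
 */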
1688 void fib6_run_gc(unsigned long expires, struct net *net, bool force)
1689 {
1690 unsigned long now;
1691
1692 if (force) {
1693 spin_lock_bh(&fib6_gc_lock);
1694 } else if (!spin_trylock_bh(&fib6_gc_lock)) {
1695 mod_timer(&net->ipv6.ip6_fib_timer, jiffies + HZ);
1696 return;
1697 }
1698 gc_args.timeout = expires ? (int)expires :
1699 net->ipv6.sysctl.ip6_rt_gc_interval;
1700
1701 gc_args.more = icmp6_dst_gc();
1702
1703 fib6_clean_all(net, fib6_age, NULL);
1704 now = jiffies;
1705 net->ipv6.ip6_rt_last_gc = now;
1706
1707 if (gc_args.more)
1708 mod_timer(&net->ipv6.ip6_fib_timer,
1709 round_jiffies(now
1710 + net->ipv6.sysctl.ip6_rt_gc_interval));
1711 else
1712 del_timer(&net->ipv6.ip6_fib_timer);
1713 spin_unlock_bh(&fib6_gc_lock);
1714 }
1715
1716 static void fib6_gc_timer_cb(unsigned long arg)
1717 {
1718 fib6_run_gc(0, (struct net *)arg, true);
1719 }
1720
1721 static int __net_init fib6_net_init(struct net *net)
1722 {
1723 size_t size = sizeof(struct hlist_head) * FIB6_TABLE_HASHSZ;
1724
1725 setup_timer(&net->ipv6.ip6_fib_timer, fib6_gc_timer_cb, (unsigned long)net);
1726
1727 net->ipv6.rt6_stats = kzalloc(sizeof(*net->ipv6.rt6_stats), GFP_KERNEL);
1728 if (!net->ipv6.rt6_stats)
1729 goto out_timer;
1730
1731 	/* Avoid false sharing: use at least a full cache line */
1732 size = max_t(size_t, size, L1_CACHE_BYTES);
1733
1734 net->ipv6.fib_table_hash = kzalloc(size, GFP_KERNEL);
1735 if (!net->ipv6.fib_table_hash)
1736 goto out_rt6_stats;
1737
1738 net->ipv6.fib6_main_tbl = kzalloc(sizeof(*net->ipv6.fib6_main_tbl),
1739 GFP_KERNEL);
1740 if (!net->ipv6.fib6_main_tbl)
1741 goto out_fib_table_hash;
1742
1743 net->ipv6.fib6_main_tbl->tb6_id = RT6_TABLE_MAIN;
1744 net->ipv6.fib6_main_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1745 net->ipv6.fib6_main_tbl->tb6_root.fn_flags =
1746 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1747 inet_peer_base_init(&net->ipv6.fib6_main_tbl->tb6_peers);
1748
1749 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1750 net->ipv6.fib6_local_tbl = kzalloc(sizeof(*net->ipv6.fib6_local_tbl),
1751 GFP_KERNEL);
1752 if (!net->ipv6.fib6_local_tbl)
1753 goto out_fib6_main_tbl;
1754 net->ipv6.fib6_local_tbl->tb6_id = RT6_TABLE_LOCAL;
1755 net->ipv6.fib6_local_tbl->tb6_root.leaf = net->ipv6.ip6_null_entry;
1756 net->ipv6.fib6_local_tbl->tb6_root.fn_flags =
1757 RTN_ROOT | RTN_TL_ROOT | RTN_RTINFO;
1758 inet_peer_base_init(&net->ipv6.fib6_local_tbl->tb6_peers);
1759 #endif
1760 fib6_tables_init(net);
1761
1762 return 0;
1763
1764 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1765 out_fib6_main_tbl:
1766 kfree(net->ipv6.fib6_main_tbl);
1767 #endif
1768 out_fib_table_hash:
1769 kfree(net->ipv6.fib_table_hash);
1770 out_rt6_stats:
1771 kfree(net->ipv6.rt6_stats);
1772 out_timer:
1773 return -ENOMEM;
1774 }
1775
1776 static void fib6_net_exit(struct net *net)
1777 {
1778 rt6_ifdown(net, NULL);
1779 del_timer_sync(&net->ipv6.ip6_fib_timer);
1780
1781 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
1782 inetpeer_invalidate_tree(&net->ipv6.fib6_local_tbl->tb6_peers);
1783 kfree(net->ipv6.fib6_local_tbl);
1784 #endif
1785 inetpeer_invalidate_tree(&net->ipv6.fib6_main_tbl->tb6_peers);
1786 kfree(net->ipv6.fib6_main_tbl);
1787 kfree(net->ipv6.fib_table_hash);
1788 kfree(net->ipv6.rt6_stats);
1789 }
1790
1791 static struct pernet_operations fib6_net_ops = {
1792 .init = fib6_net_init,
1793 .exit = fib6_net_exit,
1794 };
1795
1796 int __init fib6_init(void)
1797 {
1798 int ret = -ENOMEM;
1799
1800 fib6_node_kmem = kmem_cache_create("fib6_nodes",
1801 sizeof(struct fib6_node),
1802 0, SLAB_HWCACHE_ALIGN,
1803 NULL);
1804 if (!fib6_node_kmem)
1805 goto out;
1806
1807 ret = register_pernet_subsys(&fib6_net_ops);
1808 if (ret)
1809 goto out_kmem_cache_create;
1810
1811 ret = __rtnl_register(PF_INET6, RTM_GETROUTE, NULL, inet6_dump_fib,
1812 NULL);
1813 if (ret)
1814 goto out_unregister_subsys;
1815
1816 __fib6_flush_trees = fib6_flush_trees;
1817 out:
1818 return ret;
1819
1820 out_unregister_subsys:
1821 unregister_pernet_subsys(&fib6_net_ops);
1822 out_kmem_cache_create:
1823 kmem_cache_destroy(fib6_node_kmem);
1824 goto out;
1825 }
1826
1827 void fib6_gc_cleanup(void)
1828 {
1829 unregister_pernet_subsys(&fib6_net_ops);
1830 kmem_cache_destroy(fib6_node_kmem);
1831 }
1832
1833 #ifdef CONFIG_PROC_FS
1834
1835 struct ipv6_route_iter {
1836 struct seq_net_private p;
1837 struct fib6_walker w;
1838 loff_t skip;
1839 struct fib6_table *tbl;
1840 int sernum;
1841 };
1842
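/*
 * One line of /proc/net/ipv6_route: destination prefix and length,
 * source prefix and length, gateway, metric, reference count, use
 * count, flags and device name, all in hex except the device name.
 */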
1843 static int ipv6_route_seq_show(struct seq_file *seq, void *v)
1844 {
1845 struct rt6_info *rt = v;
1846 struct ipv6_route_iter *iter = seq->private;
1847
1848 seq_printf(seq, "%pi6 %02x ", &rt->rt6i_dst.addr, rt->rt6i_dst.plen);
1849
1850 #ifdef CONFIG_IPV6_SUBTREES
1851 seq_printf(seq, "%pi6 %02x ", &rt->rt6i_src.addr, rt->rt6i_src.plen);
1852 #else
1853 seq_puts(seq, "00000000000000000000000000000000 00 ");
1854 #endif
1855 if (rt->rt6i_flags & RTF_GATEWAY)
1856 seq_printf(seq, "%pi6", &rt->rt6i_gateway);
1857 else
1858 seq_puts(seq, "00000000000000000000000000000000");
1859
1860 seq_printf(seq, " %08x %08x %08x %08x %8s\n",
1861 rt->rt6i_metric, atomic_read(&rt->dst.__refcnt),
1862 rt->dst.__use, rt->rt6i_flags,
1863 rt->dst.dev ? rt->dst.dev->name : "");
1864 iter->w.leaf = NULL;
1865 return 0;
1866 }
1867
1868 static int ipv6_route_yield(struct fib6_walker *w)
1869 {
1870 struct ipv6_route_iter *iter = w->args;
1871
1872 if (!iter->skip)
1873 return 1;
1874
1875 do {
1876 iter->w.leaf = iter->w.leaf->dst.rt6_next;
1877 iter->skip--;
1878 if (!iter->skip && iter->w.leaf)
1879 return 1;
1880 } while (iter->w.leaf);
1881
1882 return 0;
1883 }
1884
1885 static void ipv6_route_seq_setup_walk(struct ipv6_route_iter *iter)
1886 {
1887 memset(&iter->w, 0, sizeof(iter->w));
1888 iter->w.func = ipv6_route_yield;
1889 iter->w.root = &iter->tbl->tb6_root;
1890 iter->w.state = FWS_INIT;
1891 iter->w.node = iter->w.root;
1892 iter->w.args = iter;
1893 iter->sernum = iter->w.root->fn_sernum;
1894 INIT_LIST_HEAD(&iter->w.lh);
1895 fib6_walker_link(&iter->w);
1896 }
1897
1898 static struct fib6_table *ipv6_route_seq_next_table(struct fib6_table *tbl,
1899 struct net *net)
1900 {
1901 unsigned int h;
1902 struct hlist_node *node;
1903
1904 if (tbl) {
1905 h = (tbl->tb6_id & (FIB6_TABLE_HASHSZ - 1)) + 1;
1906 node = rcu_dereference_bh(hlist_next_rcu(&tbl->tb6_hlist));
1907 } else {
1908 h = 0;
1909 node = NULL;
1910 }
1911
1912 while (!node && h < FIB6_TABLE_HASHSZ) {
1913 node = rcu_dereference_bh(
1914 hlist_first_rcu(&net->ipv6.fib_table_hash[h++]));
1915 }
1916 return hlist_entry_safe(node, struct fib6_table, tb6_hlist);
1917 }
1918
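/*
 * If the tree was modified while the walk was suspended (the root
 * sernum changed), restart from the root and skip the entries that
 * were already emitted, mirroring what fib6_dump_table() does.
 */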
1919 static void ipv6_route_check_sernum(struct ipv6_route_iter *iter)
1920 {
1921 if (iter->sernum != iter->w.root->fn_sernum) {
1922 iter->sernum = iter->w.root->fn_sernum;
1923 iter->w.state = FWS_INIT;
1924 iter->w.node = iter->w.root;
1925 WARN_ON(iter->w.skip);
1926 iter->w.skip = iter->w.count;
1927 }
1928 }
1929
1930 static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1931 {
1932 int r;
1933 struct rt6_info *n;
1934 struct net *net = seq_file_net(seq);
1935 struct ipv6_route_iter *iter = seq->private;
1936
1937 if (!v)
1938 goto iter_table;
1939
1940 n = ((struct rt6_info *)v)->dst.rt6_next;
1941 if (n) {
1942 ++*pos;
1943 return n;
1944 }
1945
1946 iter_table:
1947 ipv6_route_check_sernum(iter);
1948 read_lock(&iter->tbl->tb6_lock);
1949 r = fib6_walk_continue(&iter->w);
1950 read_unlock(&iter->tbl->tb6_lock);
1951 if (r > 0) {
1952 if (v)
1953 ++*pos;
1954 return iter->w.leaf;
1955 } else if (r < 0) {
1956 fib6_walker_unlink(&iter->w);
1957 return NULL;
1958 }
1959 fib6_walker_unlink(&iter->w);
1960
1961 iter->tbl = ipv6_route_seq_next_table(iter->tbl, net);
1962 if (!iter->tbl)
1963 return NULL;
1964
1965 ipv6_route_seq_setup_walk(iter);
1966 goto iter_table;
1967 }
1968
1969 static void *ipv6_route_seq_start(struct seq_file *seq, loff_t *pos)
1970 __acquires(RCU_BH)
1971 {
1972 struct net *net = seq_file_net(seq);
1973 struct ipv6_route_iter *iter = seq->private;
1974
1975 rcu_read_lock_bh();
1976 iter->tbl = ipv6_route_seq_next_table(NULL, net);
1977 iter->skip = *pos;
1978
1979 if (iter->tbl) {
1980 ipv6_route_seq_setup_walk(iter);
1981 return ipv6_route_seq_next(seq, NULL, pos);
1982 } else {
1983 return NULL;
1984 }
1985 }
1986
1987 static bool ipv6_route_iter_active(struct ipv6_route_iter *iter)
1988 {
1989 struct fib6_walker *w = &iter->w;
1990 return w->node && !(w->state == FWS_U && w->node == w->root);
1991 }
1992
1993 static void ipv6_route_seq_stop(struct seq_file *seq, void *v)
1994 __releases(RCU_BH)
1995 {
1996 struct ipv6_route_iter *iter = seq->private;
1997
1998 if (ipv6_route_iter_active(iter))
1999 fib6_walker_unlink(&iter->w);
2000
2001 rcu_read_unlock_bh();
2002 }
2003
2004 static const struct seq_operations ipv6_route_seq_ops = {
2005 .start = ipv6_route_seq_start,
2006 .next = ipv6_route_seq_next,
2007 .stop = ipv6_route_seq_stop,
2008 .show = ipv6_route_seq_show
2009 };
2010
2011 int ipv6_route_open(struct inode *inode, struct file *file)
2012 {
2013 return seq_open_net(inode, file, &ipv6_route_seq_ops,
2014 sizeof(struct ipv6_route_iter));
2015 }
2016
2017 #endif /* CONFIG_PROC_FS */