net/ipv4/fib_semantics.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * IPv4 Forwarding Information Base: semantics.
7 *
8 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16 #include <asm/uaccess.h>
17 #include <linux/bitops.h>
18 #include <linux/types.h>
19 #include <linux/kernel.h>
20 #include <linux/jiffies.h>
21 #include <linux/mm.h>
22 #include <linux/string.h>
23 #include <linux/socket.h>
24 #include <linux/sockios.h>
25 #include <linux/errno.h>
26 #include <linux/in.h>
27 #include <linux/inet.h>
28 #include <linux/inetdevice.h>
29 #include <linux/netdevice.h>
30 #include <linux/if_arp.h>
31 #include <linux/proc_fs.h>
32 #include <linux/skbuff.h>
33 #include <linux/init.h>
34 #include <linux/slab.h>
35
36 #include <net/arp.h>
37 #include <net/ip.h>
38 #include <net/protocol.h>
39 #include <net/route.h>
40 #include <net/tcp.h>
41 #include <net/sock.h>
42 #include <net/ip_fib.h>
43 #include <net/netlink.h>
44 #include <net/nexthop.h>
45
46 #include "fib_lookup.h"
47
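/* fib_info objects are shared between routes with identical semantics.
 * fib_info_hash indexes them by contents, fib_info_laddrhash by
 * preferred source address and fib_info_devhash by nexthop device;
 * insertions and removals are serialized by fib_info_lock.
 */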
48 static DEFINE_SPINLOCK(fib_info_lock);
49 static struct hlist_head *fib_info_hash;
50 static struct hlist_head *fib_info_laddrhash;
51 static unsigned int fib_info_hash_size;
52 static unsigned int fib_info_cnt;
53
54 #define DEVINDEX_HASHBITS 8
55 #define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
56 static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];
57
58 #ifdef CONFIG_IP_ROUTE_MULTIPATH
59
60 static DEFINE_SPINLOCK(fib_multipath_lock);
61
62 #define for_nexthops(fi) { \
63 int nhsel; const struct fib_nh *nh; \
64 for (nhsel = 0, nh = (fi)->fib_nh; \
65 nhsel < (fi)->fib_nhs; \
66 nh++, nhsel++)
67
68 #define change_nexthops(fi) { \
69 int nhsel; struct fib_nh *nexthop_nh; \
70 for (nhsel = 0, nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
71 nhsel < (fi)->fib_nhs; \
72 nexthop_nh++, nhsel++)
73
74 #else /* CONFIG_IP_ROUTE_MULTIPATH */
75
76 /* Hope that gcc will optimize this to get rid of the dummy loop */
77
78 #define for_nexthops(fi) { \
79 int nhsel; const struct fib_nh *nh = (fi)->fib_nh; \
80 for (nhsel = 0; nhsel < 1; nhsel++)
81
82 #define change_nexthops(fi) { \
83 int nhsel; \
84 struct fib_nh *nexthop_nh = (struct fib_nh *)((fi)->fib_nh); \
85 for (nhsel = 0; nhsel < 1; nhsel++)
86
87 #endif /* CONFIG_IP_ROUTE_MULTIPATH */
88
89 #define endfor_nexthops(fi) }
90
91
92 const struct fib_prop fib_props[RTN_MAX + 1] = {
93 [RTN_UNSPEC] = {
94 .error = 0,
95 .scope = RT_SCOPE_NOWHERE,
96 },
97 [RTN_UNICAST] = {
98 .error = 0,
99 .scope = RT_SCOPE_UNIVERSE,
100 },
101 [RTN_LOCAL] = {
102 .error = 0,
103 .scope = RT_SCOPE_HOST,
104 },
105 [RTN_BROADCAST] = {
106 .error = 0,
107 .scope = RT_SCOPE_LINK,
108 },
109 [RTN_ANYCAST] = {
110 .error = 0,
111 .scope = RT_SCOPE_LINK,
112 },
113 [RTN_MULTICAST] = {
114 .error = 0,
115 .scope = RT_SCOPE_UNIVERSE,
116 },
117 [RTN_BLACKHOLE] = {
118 .error = -EINVAL,
119 .scope = RT_SCOPE_UNIVERSE,
120 },
121 [RTN_UNREACHABLE] = {
122 .error = -EHOSTUNREACH,
123 .scope = RT_SCOPE_UNIVERSE,
124 },
125 [RTN_PROHIBIT] = {
126 .error = -EACCES,
127 .scope = RT_SCOPE_UNIVERSE,
128 },
129 [RTN_THROW] = {
130 .error = -EAGAIN,
131 .scope = RT_SCOPE_UNIVERSE,
132 },
133 [RTN_NAT] = {
134 .error = -EINVAL,
135 .scope = RT_SCOPE_NOWHERE,
136 },
137 [RTN_XRESOLVE] = {
138 .error = -EINVAL,
139 .scope = RT_SCOPE_NOWHERE,
140 },
141 };
142
143 static void rt_fibinfo_free(struct rtable __rcu **rtp)
144 {
145 struct rtable *rt = rcu_dereference_protected(*rtp, 1);
146
147 if (!rt)
148 return;
149
150 /* Not even needed : RCU_INIT_POINTER(*rtp, NULL);
151 * because we waited an RCU grace period before calling
152 * free_fib_info_rcu()
153 */
154
155 dst_free(&rt->dst);
156 }
157
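/* Free the nexthop exception hash of a fib_nh, including any cached
 * input/output routes attached to each exception entry.
 */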
158 static void free_nh_exceptions(struct fib_nh *nh)
159 {
160 struct fnhe_hash_bucket *hash;
161 int i;
162
163 hash = rcu_dereference_protected(nh->nh_exceptions, 1);
164 if (!hash)
165 return;
166 for (i = 0; i < FNHE_HASH_SIZE; i++) {
167 struct fib_nh_exception *fnhe;
168
169 fnhe = rcu_dereference_protected(hash[i].chain, 1);
170 while (fnhe) {
171 struct fib_nh_exception *next;
172
173 next = rcu_dereference_protected(fnhe->fnhe_next, 1);
174
175 rt_fibinfo_free(&fnhe->fnhe_rth_input);
176 rt_fibinfo_free(&fnhe->fnhe_rth_output);
177
178 kfree(fnhe);
179
180 fnhe = next;
181 }
182 }
183 kfree(hash);
184 }
185
186 static void rt_fibinfo_free_cpus(struct rtable __rcu * __percpu *rtp)
187 {
188 int cpu;
189
190 if (!rtp)
191 return;
192
193 for_each_possible_cpu(cpu) {
194 struct rtable *rt;
195
196 rt = rcu_dereference_protected(*per_cpu_ptr(rtp, cpu), 1);
197 if (rt)
198 dst_free(&rt->dst);
199 }
200 free_percpu(rtp);
201 }
202
203 /* Release a nexthop info record */
204 static void free_fib_info_rcu(struct rcu_head *head)
205 {
206 struct fib_info *fi = container_of(head, struct fib_info, rcu);
207
208 change_nexthops(fi) {
209 if (nexthop_nh->nh_dev)
210 dev_put(nexthop_nh->nh_dev);
211 free_nh_exceptions(nexthop_nh);
212 rt_fibinfo_free_cpus(nexthop_nh->nh_pcpu_rth_output);
213 rt_fibinfo_free(&nexthop_nh->nh_rth_input);
214 } endfor_nexthops(fi);
215
216 release_net(fi->fib_net);
217 if (fi->fib_metrics != (u32 *) dst_default_metrics)
218 kfree(fi->fib_metrics);
219 kfree(fi);
220 }
221
222 void free_fib_info(struct fib_info *fi)
223 {
224 if (fi->fib_dead == 0) {
225 pr_warn("Freeing alive fib_info %p\n", fi);
226 return;
227 }
228 fib_info_cnt--;
229 #ifdef CONFIG_IP_ROUTE_CLASSID
230 change_nexthops(fi) {
231 if (nexthop_nh->nh_tclassid)
232 fi->fib_net->ipv4.fib_num_tclassid_users--;
233 } endfor_nexthops(fi);
234 #endif
235 call_rcu(&fi->rcu, free_fib_info_rcu);
236 }
237
238 void fib_release_info(struct fib_info *fi)
239 {
240 spin_lock_bh(&fib_info_lock);
241 if (fi && --fi->fib_treeref == 0) {
242 hlist_del(&fi->fib_hash);
243 if (fi->fib_prefsrc)
244 hlist_del(&fi->fib_lhash);
245 change_nexthops(fi) {
246 if (!nexthop_nh->nh_dev)
247 continue;
248 hlist_del(&nexthop_nh->nh_hash);
249 } endfor_nexthops(fi)
250 fi->fib_dead = 1;
251 fib_info_put(fi);
252 }
253 spin_unlock_bh(&fib_info_lock);
254 }
255
256 static inline int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
257 {
258 const struct fib_nh *onh = ofi->fib_nh;
259
260 for_nexthops(fi) {
261 if (nh->nh_oif != onh->nh_oif ||
262 nh->nh_gw != onh->nh_gw ||
263 nh->nh_scope != onh->nh_scope ||
264 #ifdef CONFIG_IP_ROUTE_MULTIPATH
265 nh->nh_weight != onh->nh_weight ||
266 #endif
267 #ifdef CONFIG_IP_ROUTE_CLASSID
268 nh->nh_tclassid != onh->nh_tclassid ||
269 #endif
270 ((nh->nh_flags ^ onh->nh_flags) & ~RTNH_F_DEAD))
271 return -1;
272 onh++;
273 } endfor_nexthops(fi);
274 return 0;
275 }
276
277 static inline unsigned int fib_devindex_hashfn(unsigned int val)
278 {
279 unsigned int mask = DEVINDEX_HASHSIZE - 1;
280
281 return (val ^
282 (val >> DEVINDEX_HASHBITS) ^
283 (val >> (DEVINDEX_HASHBITS * 2))) & mask;
284 }
285
286 static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
287 {
288 unsigned int mask = (fib_info_hash_size - 1);
289 unsigned int val = fi->fib_nhs;
290
291 val ^= (fi->fib_protocol << 8) | fi->fib_scope;
292 val ^= (__force u32)fi->fib_prefsrc;
293 val ^= fi->fib_priority;
294 for_nexthops(fi) {
295 val ^= fib_devindex_hashfn(nh->nh_oif);
296 } endfor_nexthops(fi)
297
298 return (val ^ (val >> 7) ^ (val >> 12)) & mask;
299 }
300
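/* Look for an existing fib_info identical to nfi so that it can be
 * shared instead of allocating a duplicate. Returns NULL if none matches.
 */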
301 static struct fib_info *fib_find_info(const struct fib_info *nfi)
302 {
303 struct hlist_head *head;
304 struct fib_info *fi;
305 unsigned int hash;
306
307 hash = fib_info_hashfn(nfi);
308 head = &fib_info_hash[hash];
309
310 hlist_for_each_entry(fi, head, fib_hash) {
311 if (!net_eq(fi->fib_net, nfi->fib_net))
312 continue;
313 if (fi->fib_nhs != nfi->fib_nhs)
314 continue;
315 if (nfi->fib_protocol == fi->fib_protocol &&
316 nfi->fib_scope == fi->fib_scope &&
317 nfi->fib_prefsrc == fi->fib_prefsrc &&
318 nfi->fib_priority == fi->fib_priority &&
319 nfi->fib_type == fi->fib_type &&
320 memcmp(nfi->fib_metrics, fi->fib_metrics,
321 sizeof(u32) * RTAX_MAX) == 0 &&
322 ((nfi->fib_flags ^ fi->fib_flags) & ~RTNH_F_DEAD) == 0 &&
323 (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
324 return fi;
325 }
326
327 return NULL;
328 }
329
330 /* Check that the gateway is already configured.
331 * Used only by the redirect accept routine.
332 */
333 int ip_fib_check_default(__be32 gw, struct net_device *dev)
334 {
335 struct hlist_head *head;
336 struct fib_nh *nh;
337 unsigned int hash;
338
339 spin_lock(&fib_info_lock);
340
341 hash = fib_devindex_hashfn(dev->ifindex);
342 head = &fib_info_devhash[hash];
343 hlist_for_each_entry(nh, head, nh_hash) {
344 if (nh->nh_dev == dev &&
345 nh->nh_gw == gw &&
346 !(nh->nh_flags & RTNH_F_DEAD)) {
347 spin_unlock(&fib_info_lock);
348 return 0;
349 }
350 }
351
352 spin_unlock(&fib_info_lock);
353
354 return -1;
355 }
356
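/* Worst-case estimate of the netlink message size needed by
 * fib_dump_info(), used to size the skb allocated in rtmsg_fib().
 */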
357 static inline size_t fib_nlmsg_size(struct fib_info *fi)
358 {
359 size_t payload = NLMSG_ALIGN(sizeof(struct rtmsg))
360 + nla_total_size(4) /* RTA_TABLE */
361 + nla_total_size(4) /* RTA_DST */
362 + nla_total_size(4) /* RTA_PRIORITY */
363 + nla_total_size(4); /* RTA_PREFSRC */
364
365 /* space for nested metrics */
366 payload += nla_total_size((RTAX_MAX * nla_total_size(4)));
367
368 if (fi->fib_nhs) {
369 /* Also handles the special case fib_nhs == 1 */
370
371 /* each nexthop is packed in an attribute */
372 size_t nhsize = nla_total_size(sizeof(struct rtnexthop));
373
374 /* may contain flow and gateway attribute */
375 nhsize += 2 * nla_total_size(4);
376
377 /* all nexthops are packed in a nested attribute */
378 payload += nla_total_size(fi->fib_nhs * nhsize);
379 }
380
381 return payload;
382 }
383
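/* Notify RTNLGRP_IPV4_ROUTE listeners about an added or deleted route. */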
384 void rtmsg_fib(int event, __be32 key, struct fib_alias *fa,
385 int dst_len, u32 tb_id, const struct nl_info *info,
386 unsigned int nlm_flags)
387 {
388 struct sk_buff *skb;
389 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
390 int err = -ENOBUFS;
391
392 skb = nlmsg_new(fib_nlmsg_size(fa->fa_info), GFP_KERNEL);
393 if (skb == NULL)
394 goto errout;
395
396 err = fib_dump_info(skb, info->portid, seq, event, tb_id,
397 fa->fa_type, key, dst_len,
398 fa->fa_tos, fa->fa_info, nlm_flags);
399 if (err < 0) {
400 /* -EMSGSIZE implies BUG in fib_nlmsg_size() */
401 WARN_ON(err == -EMSGSIZE);
402 kfree_skb(skb);
403 goto errout;
404 }
405 rtnl_notify(skb, info->nl_net, info->portid, RTNLGRP_IPV4_ROUTE,
406 info->nlh, GFP_KERNEL);
407 return;
408 errout:
409 if (err < 0)
410 rtnl_set_sk_err(info->nl_net, RTNLGRP_IPV4_ROUTE, err);
411 }
412
413 /* Return the first fib alias matching TOS with
414 * priority less than or equal to PRIO.
415 */
416 struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
417 {
418 if (fah) {
419 struct fib_alias *fa;
420 list_for_each_entry(fa, fah, fa_list) {
421 if (fa->fa_tos > tos)
422 continue;
423 if (fa->fa_info->fib_priority >= prio ||
424 fa->fa_tos < tos)
425 return fa;
426 }
427 }
428 return NULL;
429 }
430
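/* Use the neighbour (ARP) state of the first nexthop's gateway to judge
 * whether a default route candidate looks dead. Returns 0 if it looks
 * alive, 1 otherwise; *last_resort and *last_idx remember the best
 * fallback seen so far.
 */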
431 static int fib_detect_death(struct fib_info *fi, int order,
432 struct fib_info **last_resort, int *last_idx,
433 int dflt)
434 {
435 struct neighbour *n;
436 int state = NUD_NONE;
437
438 n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
439 if (n) {
440 state = n->nud_state;
441 neigh_release(n);
442 }
443 if (state == NUD_REACHABLE)
444 return 0;
445 if ((state & NUD_VALID) && order != dflt)
446 return 0;
447 if ((state & NUD_VALID) ||
448 (*last_idx < 0 && order > dflt)) {
449 *last_resort = fi;
450 *last_idx = order;
451 }
452 return 1;
453 }
454
455 #ifdef CONFIG_IP_ROUTE_MULTIPATH
456
457 static int fib_count_nexthops(struct rtnexthop *rtnh, int remaining)
458 {
459 int nhs = 0;
460
461 while (rtnh_ok(rtnh, remaining)) {
462 nhs++;
463 rtnh = rtnh_next(rtnh, &remaining);
464 }
465
466 /* leftover implies invalid nexthop configuration, discard it */
467 return remaining > 0 ? 0 : nhs;
468 }
469
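/* Fill the nexthop array of fi from the rtnexthop list carried in the
 * RTA_MULTIPATH attribute of the netlink request.
 */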
470 static int fib_get_nhs(struct fib_info *fi, struct rtnexthop *rtnh,
471 int remaining, struct fib_config *cfg)
472 {
473 change_nexthops(fi) {
474 int attrlen;
475
476 if (!rtnh_ok(rtnh, remaining))
477 return -EINVAL;
478
479 nexthop_nh->nh_flags =
480 (cfg->fc_flags & ~0xFF) | rtnh->rtnh_flags;
481 nexthop_nh->nh_oif = rtnh->rtnh_ifindex;
482 nexthop_nh->nh_weight = rtnh->rtnh_hops + 1;
483
484 attrlen = rtnh_attrlen(rtnh);
485 if (attrlen > 0) {
486 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
487
488 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
489 nexthop_nh->nh_gw = nla ? nla_get_be32(nla) : 0;
490 #ifdef CONFIG_IP_ROUTE_CLASSID
491 nla = nla_find(attrs, attrlen, RTA_FLOW);
492 nexthop_nh->nh_tclassid = nla ? nla_get_u32(nla) : 0;
493 if (nexthop_nh->nh_tclassid)
494 fi->fib_net->ipv4.fib_num_tclassid_users++;
495 #endif
496 }
497
498 rtnh = rtnh_next(rtnh, &remaining);
499 } endfor_nexthops(fi);
500
501 return 0;
502 }
503
504 #endif
505
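/* Check whether the nexthop description in cfg matches the nexthops of
 * fi. Returns 0 on match, 1 on mismatch, -EINVAL on a malformed
 * multipath specification.
 */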
506 int fib_nh_match(struct fib_config *cfg, struct fib_info *fi)
507 {
508 #ifdef CONFIG_IP_ROUTE_MULTIPATH
509 struct rtnexthop *rtnh;
510 int remaining;
511 #endif
512
513 if (cfg->fc_priority && cfg->fc_priority != fi->fib_priority)
514 return 1;
515
516 if (cfg->fc_oif || cfg->fc_gw) {
517 if ((!cfg->fc_oif || cfg->fc_oif == fi->fib_nh->nh_oif) &&
518 (!cfg->fc_gw || cfg->fc_gw == fi->fib_nh->nh_gw))
519 return 0;
520 return 1;
521 }
522
523 #ifdef CONFIG_IP_ROUTE_MULTIPATH
524 if (cfg->fc_mp == NULL)
525 return 0;
526
527 rtnh = cfg->fc_mp;
528 remaining = cfg->fc_mp_len;
529
530 for_nexthops(fi) {
531 int attrlen;
532
533 if (!rtnh_ok(rtnh, remaining))
534 return -EINVAL;
535
536 if (rtnh->rtnh_ifindex && rtnh->rtnh_ifindex != nh->nh_oif)
537 return 1;
538
539 attrlen = rtnh_attrlen(rtnh);
540 if (attrlen > 0) {
541 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
542
543 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
544 if (nla && nla_get_be32(nla) != nh->nh_gw)
545 return 1;
546 #ifdef CONFIG_IP_ROUTE_CLASSID
547 nla = nla_find(attrs, attrlen, RTA_FLOW);
548 if (nla && nla_get_u32(nla) != nh->nh_tclassid)
549 return 1;
550 #endif
551 }
552
553 rtnh = rtnh_next(rtnh, &remaining);
554 } endfor_nexthops(fi);
555 #endif
556 return 0;
557 }
558
559
560 /*
561 * Picture
562 * -------
563 *
564 * The semantics of nexthops are messy for historical reasons.
565 * We have to take into account that:
566 * a) the gateway can actually be a local interface address,
567 * so that a gatewayed route is direct.
568 * b) the gateway must be an on-link address, possibly
569 * described not by an ifaddr, but also by a direct route.
570 * c) if both gateway and interface are specified, they should not
571 * contradict each other.
572 * d) if we use tunnel routes, the gateway may not be on-link.
573 *
574 * Attempting to reconcile all of these (alas, self-contradictory) conditions
575 * results in pretty ugly and hairy code with obscure logic.
576 *
577 * I chose to generalize it instead, so that the size of the code
578 * does not increase practically, but it becomes
579 * much more general.
580 * Every prefix is assigned a "scope" value: "host" is local address,
581 * "link" is direct route,
582 * [ ... "site" ... "interior" ... ]
583 * and "universe" is true gateway route with global meaning.
584 *
585 * Every prefix refers to a set of "nexthop"s (gw, oif),
586 * where gw must have narrower scope. This recursion stops
587 * when gw has LOCAL scope or if "nexthop" is declared ONLINK,
588 * which means that gw is forced to be on link.
589 *
590 * The code is still hairy, but now it is apparently logically
591 * consistent and very flexible. E.g., as a by-product it allows
592 * independent exterior and interior routing processes to
593 * coexist in peace.
594 *
595 * Normally it looks as follows.
596 *
597 * {universe prefix} -> (gw, oif) [scope link]
598 * |
599 * |-> {link prefix} -> (gw, oif) [scope local]
600 * |
601 * |-> {local prefix} (terminal node)
602 */
603 static int fib_check_nh(struct fib_config *cfg, struct fib_info *fi,
604 struct fib_nh *nh)
605 {
606 int err;
607 struct net *net;
608 struct net_device *dev;
609
610 net = cfg->fc_nlinfo.nl_net;
611 if (nh->nh_gw) {
612 struct fib_result res;
613
614 if (nh->nh_flags & RTNH_F_ONLINK) {
615
616 if (cfg->fc_scope >= RT_SCOPE_LINK)
617 return -EINVAL;
618 if (inet_addr_type(net, nh->nh_gw) != RTN_UNICAST)
619 return -EINVAL;
620 dev = __dev_get_by_index(net, nh->nh_oif);
621 if (!dev)
622 return -ENODEV;
623 if (!(dev->flags & IFF_UP))
624 return -ENETDOWN;
625 nh->nh_dev = dev;
626 dev_hold(dev);
627 nh->nh_scope = RT_SCOPE_LINK;
628 return 0;
629 }
630 rcu_read_lock();
631 {
632 struct flowi4 fl4 = {
633 .daddr = nh->nh_gw,
634 .flowi4_scope = cfg->fc_scope + 1,
635 .flowi4_oif = nh->nh_oif,
636 .flowi4_iif = LOOPBACK_IFINDEX,
637 };
638
639 /* It is not necessary, but requires a bit of thinking */
640 if (fl4.flowi4_scope < RT_SCOPE_LINK)
641 fl4.flowi4_scope = RT_SCOPE_LINK;
642 err = fib_lookup(net, &fl4, &res);
643 if (err) {
644 rcu_read_unlock();
645 return err;
646 }
647 }
648 err = -EINVAL;
649 if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
650 goto out;
651 nh->nh_scope = res.scope;
652 nh->nh_oif = FIB_RES_OIF(res);
653 nh->nh_dev = dev = FIB_RES_DEV(res);
654 if (!dev)
655 goto out;
656 dev_hold(dev);
657 err = (dev->flags & IFF_UP) ? 0 : -ENETDOWN;
658 } else {
659 struct in_device *in_dev;
660
661 if (nh->nh_flags & (RTNH_F_PERVASIVE | RTNH_F_ONLINK))
662 return -EINVAL;
663
664 rcu_read_lock();
665 err = -ENODEV;
666 in_dev = inetdev_by_index(net, nh->nh_oif);
667 if (in_dev == NULL)
668 goto out;
669 err = -ENETDOWN;
670 if (!(in_dev->dev->flags & IFF_UP))
671 goto out;
672 nh->nh_dev = in_dev->dev;
673 dev_hold(nh->nh_dev);
674 nh->nh_scope = RT_SCOPE_HOST;
675 err = 0;
676 }
677 out:
678 rcu_read_unlock();
679 return err;
680 }
681
682 static inline unsigned int fib_laddr_hashfn(__be32 val)
683 {
684 unsigned int mask = (fib_info_hash_size - 1);
685
686 return ((__force u32)val ^
687 ((__force u32)val >> 7) ^
688 ((__force u32)val >> 14)) & mask;
689 }
690
691 static struct hlist_head *fib_info_hash_alloc(int bytes)
692 {
693 if (bytes <= PAGE_SIZE)
694 return kzalloc(bytes, GFP_KERNEL);
695 else
696 return (struct hlist_head *)
697 __get_free_pages(GFP_KERNEL | __GFP_ZERO,
698 get_order(bytes));
699 }
700
701 static void fib_info_hash_free(struct hlist_head *hash, int bytes)
702 {
703 if (!hash)
704 return;
705
706 if (bytes <= PAGE_SIZE)
707 kfree(hash);
708 else
709 free_pages((unsigned long) hash, get_order(bytes));
710 }
711
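/* Rehash every fib_info into the freshly allocated tables and free the
 * old ones. Called when the hash tables are grown.
 */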
712 static void fib_info_hash_move(struct hlist_head *new_info_hash,
713 struct hlist_head *new_laddrhash,
714 unsigned int new_size)
715 {
716 struct hlist_head *old_info_hash, *old_laddrhash;
717 unsigned int old_size = fib_info_hash_size;
718 unsigned int i, bytes;
719
720 spin_lock_bh(&fib_info_lock);
721 old_info_hash = fib_info_hash;
722 old_laddrhash = fib_info_laddrhash;
723 fib_info_hash_size = new_size;
724
725 for (i = 0; i < old_size; i++) {
726 struct hlist_head *head = &fib_info_hash[i];
727 struct hlist_node *n;
728 struct fib_info *fi;
729
730 hlist_for_each_entry_safe(fi, n, head, fib_hash) {
731 struct hlist_head *dest;
732 unsigned int new_hash;
733
734 hlist_del(&fi->fib_hash);
735
736 new_hash = fib_info_hashfn(fi);
737 dest = &new_info_hash[new_hash];
738 hlist_add_head(&fi->fib_hash, dest);
739 }
740 }
741 fib_info_hash = new_info_hash;
742
743 for (i = 0; i < old_size; i++) {
744 struct hlist_head *lhead = &fib_info_laddrhash[i];
745 struct hlist_node *n;
746 struct fib_info *fi;
747
748 hlist_for_each_entry_safe(fi, n, lhead, fib_lhash) {
749 struct hlist_head *ldest;
750 unsigned int new_hash;
751
752 hlist_del(&fi->fib_lhash);
753
754 new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
755 ldest = &new_laddrhash[new_hash];
756 hlist_add_head(&fi->fib_lhash, ldest);
757 }
758 }
759 fib_info_laddrhash = new_laddrhash;
760
761 spin_unlock_bh(&fib_info_lock);
762
763 bytes = old_size * sizeof(struct hlist_head *);
764 fib_info_hash_free(old_info_hash, bytes);
765 fib_info_hash_free(old_laddrhash, bytes);
766 }
767
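/* Refresh the cached preferred source address of a nexthop and record
 * the address generation id so the cached value can be revalidated later.
 */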
768 __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
769 {
770 nh->nh_saddr = inet_select_addr(nh->nh_dev,
771 nh->nh_gw,
772 nh->nh_parent->fib_scope);
773 nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
774
775 return nh->nh_saddr;
776 }
777
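/* Build a fib_info from a netlink route configuration, validate its
 * nexthop(s), and either link the new fib_info into the hash tables or
 * return an existing identical one with its reference count bumped.
 */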
778 struct fib_info *fib_create_info(struct fib_config *cfg)
779 {
780 int err;
781 struct fib_info *fi = NULL;
782 struct fib_info *ofi;
783 int nhs = 1;
784 struct net *net = cfg->fc_nlinfo.nl_net;
785
786 if (cfg->fc_type > RTN_MAX)
787 goto err_inval;
788
789 /* Fast check to catch the weirdest cases */
790 if (fib_props[cfg->fc_type].scope > cfg->fc_scope)
791 goto err_inval;
792
793 #ifdef CONFIG_IP_ROUTE_MULTIPATH
794 if (cfg->fc_mp) {
795 nhs = fib_count_nexthops(cfg->fc_mp, cfg->fc_mp_len);
796 if (nhs == 0)
797 goto err_inval;
798 }
799 #endif
800
801 err = -ENOBUFS;
802 if (fib_info_cnt >= fib_info_hash_size) {
803 unsigned int new_size = fib_info_hash_size << 1;
804 struct hlist_head *new_info_hash;
805 struct hlist_head *new_laddrhash;
806 unsigned int bytes;
807
808 if (!new_size)
809 new_size = 16;
810 bytes = new_size * sizeof(struct hlist_head *);
811 new_info_hash = fib_info_hash_alloc(bytes);
812 new_laddrhash = fib_info_hash_alloc(bytes);
813 if (!new_info_hash || !new_laddrhash) {
814 fib_info_hash_free(new_info_hash, bytes);
815 fib_info_hash_free(new_laddrhash, bytes);
816 } else
817 fib_info_hash_move(new_info_hash, new_laddrhash, new_size);
818
819 if (!fib_info_hash_size)
820 goto failure;
821 }
822
823 fi = kzalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
824 if (fi == NULL)
825 goto failure;
826 fib_info_cnt++;
827 if (cfg->fc_mx) {
828 fi->fib_metrics = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
829 if (!fi->fib_metrics)
830 goto failure;
831 } else
832 fi->fib_metrics = (u32 *) dst_default_metrics;
833
834 fi->fib_net = hold_net(net);
835 fi->fib_protocol = cfg->fc_protocol;
836 fi->fib_scope = cfg->fc_scope;
837 fi->fib_flags = cfg->fc_flags;
838 fi->fib_priority = cfg->fc_priority;
839 fi->fib_prefsrc = cfg->fc_prefsrc;
840 fi->fib_type = cfg->fc_type;
841
842 fi->fib_nhs = nhs;
843 change_nexthops(fi) {
844 nexthop_nh->nh_parent = fi;
845 nexthop_nh->nh_pcpu_rth_output = alloc_percpu(struct rtable __rcu *);
846 if (!nexthop_nh->nh_pcpu_rth_output)
847 goto failure;
848 } endfor_nexthops(fi)
849
850 if (cfg->fc_mx) {
851 struct nlattr *nla;
852 int remaining;
853
854 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
855 int type = nla_type(nla);
856
857 if (type) {
858 u32 val;
859
860 if (type > RTAX_MAX)
861 goto err_inval;
862 val = nla_get_u32(nla);
863 if (type == RTAX_ADVMSS && val > 65535 - 40)
864 val = 65535 - 40;
865 if (type == RTAX_MTU && val > 65535 - 15)
866 val = 65535 - 15;
867 fi->fib_metrics[type - 1] = val;
868 }
869 }
870 }
871
872 if (cfg->fc_mp) {
873 #ifdef CONFIG_IP_ROUTE_MULTIPATH
874 err = fib_get_nhs(fi, cfg->fc_mp, cfg->fc_mp_len, cfg);
875 if (err != 0)
876 goto failure;
877 if (cfg->fc_oif && fi->fib_nh->nh_oif != cfg->fc_oif)
878 goto err_inval;
879 if (cfg->fc_gw && fi->fib_nh->nh_gw != cfg->fc_gw)
880 goto err_inval;
881 #ifdef CONFIG_IP_ROUTE_CLASSID
882 if (cfg->fc_flow && fi->fib_nh->nh_tclassid != cfg->fc_flow)
883 goto err_inval;
884 #endif
885 #else
886 goto err_inval;
887 #endif
888 } else {
889 struct fib_nh *nh = fi->fib_nh;
890
891 nh->nh_oif = cfg->fc_oif;
892 nh->nh_gw = cfg->fc_gw;
893 nh->nh_flags = cfg->fc_flags;
894 #ifdef CONFIG_IP_ROUTE_CLASSID
895 nh->nh_tclassid = cfg->fc_flow;
896 if (nh->nh_tclassid)
897 fi->fib_net->ipv4.fib_num_tclassid_users++;
898 #endif
899 #ifdef CONFIG_IP_ROUTE_MULTIPATH
900 nh->nh_weight = 1;
901 #endif
902 }
903
904 if (fib_props[cfg->fc_type].error) {
905 if (cfg->fc_gw || cfg->fc_oif || cfg->fc_mp)
906 goto err_inval;
907 goto link_it;
908 } else {
909 switch (cfg->fc_type) {
910 case RTN_UNICAST:
911 case RTN_LOCAL:
912 case RTN_BROADCAST:
913 case RTN_ANYCAST:
914 case RTN_MULTICAST:
915 break;
916 default:
917 goto err_inval;
918 }
919 }
920
921 if (cfg->fc_scope > RT_SCOPE_HOST)
922 goto err_inval;
923
924 if (cfg->fc_scope == RT_SCOPE_HOST) {
925 struct fib_nh *nh = fi->fib_nh;
926
927 /* Local address is added. */
928 if (nhs != 1 || nh->nh_gw)
929 goto err_inval;
930 nh->nh_scope = RT_SCOPE_NOWHERE;
931 nh->nh_dev = dev_get_by_index(net, fi->fib_nh->nh_oif);
932 err = -ENODEV;
933 if (nh->nh_dev == NULL)
934 goto failure;
935 } else {
936 change_nexthops(fi) {
937 err = fib_check_nh(cfg, fi, nexthop_nh);
938 if (err != 0)
939 goto failure;
940 } endfor_nexthops(fi)
941 }
942
943 if (fi->fib_prefsrc) {
944 if (cfg->fc_type != RTN_LOCAL || !cfg->fc_dst ||
945 fi->fib_prefsrc != cfg->fc_dst)
946 if (inet_addr_type(net, fi->fib_prefsrc) != RTN_LOCAL)
947 goto err_inval;
948 }
949
950 change_nexthops(fi) {
951 fib_info_update_nh_saddr(net, nexthop_nh);
952 } endfor_nexthops(fi)
953
954 link_it:
955 ofi = fib_find_info(fi);
956 if (ofi) {
957 fi->fib_dead = 1;
958 free_fib_info(fi);
959 ofi->fib_treeref++;
960 return ofi;
961 }
962
963 fi->fib_treeref++;
964 atomic_inc(&fi->fib_clntref);
965 spin_lock_bh(&fib_info_lock);
966 hlist_add_head(&fi->fib_hash,
967 &fib_info_hash[fib_info_hashfn(fi)]);
968 if (fi->fib_prefsrc) {
969 struct hlist_head *head;
970
971 head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
972 hlist_add_head(&fi->fib_lhash, head);
973 }
974 change_nexthops(fi) {
975 struct hlist_head *head;
976 unsigned int hash;
977
978 if (!nexthop_nh->nh_dev)
979 continue;
980 hash = fib_devindex_hashfn(nexthop_nh->nh_dev->ifindex);
981 head = &fib_info_devhash[hash];
982 hlist_add_head(&nexthop_nh->nh_hash, head);
983 } endfor_nexthops(fi)
984 spin_unlock_bh(&fib_info_lock);
985 return fi;
986
987 err_inval:
988 err = -EINVAL;
989
990 failure:
991 if (fi) {
992 fi->fib_dead = 1;
993 free_fib_info(fi);
994 }
995
996 return ERR_PTR(err);
997 }
998
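/* Fill a netlink route message (rtmsg plus attributes, including a nested
 * RTA_MULTIPATH block for multipath routes) describing fi.
 */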
999 int fib_dump_info(struct sk_buff *skb, u32 portid, u32 seq, int event,
1000 u32 tb_id, u8 type, __be32 dst, int dst_len, u8 tos,
1001 struct fib_info *fi, unsigned int flags)
1002 {
1003 struct nlmsghdr *nlh;
1004 struct rtmsg *rtm;
1005
1006 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*rtm), flags);
1007 if (nlh == NULL)
1008 return -EMSGSIZE;
1009
1010 rtm = nlmsg_data(nlh);
1011 rtm->rtm_family = AF_INET;
1012 rtm->rtm_dst_len = dst_len;
1013 rtm->rtm_src_len = 0;
1014 rtm->rtm_tos = tos;
1015 if (tb_id < 256)
1016 rtm->rtm_table = tb_id;
1017 else
1018 rtm->rtm_table = RT_TABLE_COMPAT;
1019 if (nla_put_u32(skb, RTA_TABLE, tb_id))
1020 goto nla_put_failure;
1021 rtm->rtm_type = type;
1022 rtm->rtm_flags = fi->fib_flags;
1023 rtm->rtm_scope = fi->fib_scope;
1024 rtm->rtm_protocol = fi->fib_protocol;
1025
1026 if (rtm->rtm_dst_len &&
1027 nla_put_be32(skb, RTA_DST, dst))
1028 goto nla_put_failure;
1029 if (fi->fib_priority &&
1030 nla_put_u32(skb, RTA_PRIORITY, fi->fib_priority))
1031 goto nla_put_failure;
1032 if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
1033 goto nla_put_failure;
1034
1035 if (fi->fib_prefsrc &&
1036 nla_put_be32(skb, RTA_PREFSRC, fi->fib_prefsrc))
1037 goto nla_put_failure;
1038 if (fi->fib_nhs == 1) {
1039 if (fi->fib_nh->nh_gw &&
1040 nla_put_be32(skb, RTA_GATEWAY, fi->fib_nh->nh_gw))
1041 goto nla_put_failure;
1042 if (fi->fib_nh->nh_oif &&
1043 nla_put_u32(skb, RTA_OIF, fi->fib_nh->nh_oif))
1044 goto nla_put_failure;
1045 #ifdef CONFIG_IP_ROUTE_CLASSID
1046 if (fi->fib_nh[0].nh_tclassid &&
1047 nla_put_u32(skb, RTA_FLOW, fi->fib_nh[0].nh_tclassid))
1048 goto nla_put_failure;
1049 #endif
1050 }
1051 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1052 if (fi->fib_nhs > 1) {
1053 struct rtnexthop *rtnh;
1054 struct nlattr *mp;
1055
1056 mp = nla_nest_start(skb, RTA_MULTIPATH);
1057 if (mp == NULL)
1058 goto nla_put_failure;
1059
1060 for_nexthops(fi) {
1061 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
1062 if (rtnh == NULL)
1063 goto nla_put_failure;
1064
1065 rtnh->rtnh_flags = nh->nh_flags & 0xFF;
1066 rtnh->rtnh_hops = nh->nh_weight - 1;
1067 rtnh->rtnh_ifindex = nh->nh_oif;
1068
1069 if (nh->nh_gw &&
1070 nla_put_be32(skb, RTA_GATEWAY, nh->nh_gw))
1071 goto nla_put_failure;
1072 #ifdef CONFIG_IP_ROUTE_CLASSID
1073 if (nh->nh_tclassid &&
1074 nla_put_u32(skb, RTA_FLOW, nh->nh_tclassid))
1075 goto nla_put_failure;
1076 #endif
1077 /* length of rtnetlink header + attributes */
1078 rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *) rtnh;
1079 } endfor_nexthops(fi);
1080
1081 nla_nest_end(skb, mp);
1082 }
1083 #endif
1084 return nlmsg_end(skb, nlh);
1085
1086 nla_put_failure:
1087 nlmsg_cancel(skb, nlh);
1088 return -EMSGSIZE;
1089 }
1090
1091 /*
1092 * Update FIB if:
1093 * - a local address disappeared -> we must delete all the entries
1094 * referring to it.
1095 * - a device went down -> we must shut down all nexthops going via it.
1096 */
1097 int fib_sync_down_addr(struct net *net, __be32 local)
1098 {
1099 int ret = 0;
1100 unsigned int hash = fib_laddr_hashfn(local);
1101 struct hlist_head *head = &fib_info_laddrhash[hash];
1102 struct fib_info *fi;
1103
1104 if (fib_info_laddrhash == NULL || local == 0)
1105 return 0;
1106
1107 hlist_for_each_entry(fi, head, fib_lhash) {
1108 if (!net_eq(fi->fib_net, net))
1109 continue;
1110 if (fi->fib_prefsrc == local) {
1111 fi->fib_flags |= RTNH_F_DEAD;
1112 ret++;
1113 }
1114 }
1115 return ret;
1116 }
1117
1118 int fib_sync_down_dev(struct net_device *dev, int force)
1119 {
1120 int ret = 0;
1121 int scope = RT_SCOPE_NOWHERE;
1122 struct fib_info *prev_fi = NULL;
1123 unsigned int hash = fib_devindex_hashfn(dev->ifindex);
1124 struct hlist_head *head = &fib_info_devhash[hash];
1125 struct fib_nh *nh;
1126
1127 if (force)
1128 scope = -1;
1129
1130 hlist_for_each_entry(nh, head, nh_hash) {
1131 struct fib_info *fi = nh->nh_parent;
1132 int dead;
1133
1134 BUG_ON(!fi->fib_nhs);
1135 if (nh->nh_dev != dev || fi == prev_fi)
1136 continue;
1137 prev_fi = fi;
1138 dead = 0;
1139 change_nexthops(fi) {
1140 if (nexthop_nh->nh_flags & RTNH_F_DEAD)
1141 dead++;
1142 else if (nexthop_nh->nh_dev == dev &&
1143 nexthop_nh->nh_scope != scope) {
1144 nexthop_nh->nh_flags |= RTNH_F_DEAD;
1145 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1146 spin_lock_bh(&fib_multipath_lock);
1147 fi->fib_power -= nexthop_nh->nh_power;
1148 nexthop_nh->nh_power = 0;
1149 spin_unlock_bh(&fib_multipath_lock);
1150 #endif
1151 dead++;
1152 }
1153 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1154 if (force > 1 && nexthop_nh->nh_dev == dev) {
1155 dead = fi->fib_nhs;
1156 break;
1157 }
1158 #endif
1159 } endfor_nexthops(fi)
1160 if (dead == fi->fib_nhs) {
1161 fi->fib_flags |= RTNH_F_DEAD;
1162 ret++;
1163 }
1164 }
1165
1166 return ret;
1167 }
1168
1169 /* Must be invoked inside an RCU protected region. */
1170 void fib_select_default(struct fib_result *res)
1171 {
1172 struct fib_info *fi = NULL, *last_resort = NULL;
1173 struct list_head *fa_head = res->fa_head;
1174 struct fib_table *tb = res->table;
1175 int order = -1, last_idx = -1;
1176 struct fib_alias *fa;
1177
1178 list_for_each_entry_rcu(fa, fa_head, fa_list) {
1179 struct fib_info *next_fi = fa->fa_info;
1180
1181 if (next_fi->fib_scope != res->scope ||
1182 fa->fa_type != RTN_UNICAST)
1183 continue;
1184
1185 if (next_fi->fib_priority > res->fi->fib_priority)
1186 break;
1187 if (!next_fi->fib_nh[0].nh_gw ||
1188 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1189 continue;
1190
1191 fib_alias_accessed(fa);
1192
1193 if (fi == NULL) {
1194 if (next_fi != res->fi)
1195 break;
1196 } else if (!fib_detect_death(fi, order, &last_resort,
1197 &last_idx, tb->tb_default)) {
1198 fib_result_assign(res, fi);
1199 tb->tb_default = order;
1200 goto out;
1201 }
1202 fi = next_fi;
1203 order++;
1204 }
1205
1206 if (order <= 0 || fi == NULL) {
1207 tb->tb_default = -1;
1208 goto out;
1209 }
1210
1211 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1212 tb->tb_default)) {
1213 fib_result_assign(res, fi);
1214 tb->tb_default = order;
1215 goto out;
1216 }
1217
1218 if (last_idx >= 0)
1219 fib_result_assign(res, last_resort);
1220 tb->tb_default = last_idx;
1221 out:
1222 return;
1223 }
1224
1225 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1226
1227 /*
1228 * A dead device goes up. We wake up its dead nexthops.
1229 * This makes sense only for multipath routes.
1230 */
1231 int fib_sync_up(struct net_device *dev)
1232 {
1233 struct fib_info *prev_fi;
1234 unsigned int hash;
1235 struct hlist_head *head;
1236 struct fib_nh *nh;
1237 int ret;
1238
1239 if (!(dev->flags & IFF_UP))
1240 return 0;
1241
1242 prev_fi = NULL;
1243 hash = fib_devindex_hashfn(dev->ifindex);
1244 head = &fib_info_devhash[hash];
1245 ret = 0;
1246
1247 hlist_for_each_entry(nh, head, nh_hash) {
1248 struct fib_info *fi = nh->nh_parent;
1249 int alive;
1250
1251 BUG_ON(!fi->fib_nhs);
1252 if (nh->nh_dev != dev || fi == prev_fi)
1253 continue;
1254
1255 prev_fi = fi;
1256 alive = 0;
1257 change_nexthops(fi) {
1258 if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
1259 alive++;
1260 continue;
1261 }
1262 if (nexthop_nh->nh_dev == NULL ||
1263 !(nexthop_nh->nh_dev->flags & IFF_UP))
1264 continue;
1265 if (nexthop_nh->nh_dev != dev ||
1266 !__in_dev_get_rtnl(dev))
1267 continue;
1268 alive++;
1269 spin_lock_bh(&fib_multipath_lock);
1270 nexthop_nh->nh_power = 0;
1271 nexthop_nh->nh_flags &= ~RTNH_F_DEAD;
1272 spin_unlock_bh(&fib_multipath_lock);
1273 } endfor_nexthops(fi)
1274
1275 if (alive > 0) {
1276 fi->fib_flags &= ~RTNH_F_DEAD;
1277 ret++;
1278 }
1279 }
1280
1281 return ret;
1282 }
1283
1284 /*
1285 * The algorithm is suboptimal, but it provides a genuinely
1286 * fair weighted route distribution.
1287 */
1288 void fib_select_multipath(struct fib_result *res)
1289 {
1290 struct fib_info *fi = res->fi;
1291 int w;
1292
1293 spin_lock_bh(&fib_multipath_lock);
1294 if (fi->fib_power <= 0) {
1295 int power = 0;
1296 change_nexthops(fi) {
1297 if (!(nexthop_nh->nh_flags & RTNH_F_DEAD)) {
1298 power += nexthop_nh->nh_weight;
1299 nexthop_nh->nh_power = nexthop_nh->nh_weight;
1300 }
1301 } endfor_nexthops(fi);
1302 fi->fib_power = power;
1303 if (power <= 0) {
1304 spin_unlock_bh(&fib_multipath_lock);
1305 /* Race condition: route has just become dead. */
1306 res->nh_sel = 0;
1307 return;
1308 }
1309 }
1310
1311
1312 /* w should be a random number in [0..fi->fib_power-1];
1313 * this is a pretty crude approximation.
1314 */
1315
1316 w = jiffies % fi->fib_power;
1317
1318 change_nexthops(fi) {
1319 if (!(nexthop_nh->nh_flags & RTNH_F_DEAD) &&
1320 nexthop_nh->nh_power) {
1321 w -= nexthop_nh->nh_power;
1322 if (w <= 0) {
1323 nexthop_nh->nh_power--;
1324 fi->fib_power--;
1325 res->nh_sel = nhsel;
1326 spin_unlock_bh(&fib_multipath_lock);
1327 return;
1328 }
1329 }
1330 } endfor_nexthops(fi);
1331
1332 /* Race condition: route has just become dead. */
1333 res->nh_sel = 0;
1334 spin_unlock_bh(&fib_multipath_lock);
1335 }
1336 #endif