ipv4: percpu nh_rth_output cache
net/ipv4/route.c
1/*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
5 *
6 * ROUTE - implementation of the IP router.
7 *
02c30a84 8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Alan Cox, <gw4pts@gw4pts.ampr.org>
11 * Linus Torvalds, <Linus.Torvalds@helsinki.fi>
12 * Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
13 *
14 * Fixes:
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
18 * (rco@di.uminho.pt) Routing table insertion and update
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
e905a9ed 21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
24 * clamper.
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
e905a9ed 39 *
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
bb1d23b0 55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
58 *
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
63 */
64
65#define pr_fmt(fmt) "IPv4: " fmt
66
67#include <linux/module.h>
68#include <asm/uaccess.h>
69#include <linux/bitops.h>
70#include <linux/types.h>
71#include <linux/kernel.h>
1da177e4 72#include <linux/mm.h>
424c4b70 73#include <linux/bootmem.h>
74#include <linux/string.h>
75#include <linux/socket.h>
76#include <linux/sockios.h>
77#include <linux/errno.h>
78#include <linux/in.h>
79#include <linux/inet.h>
80#include <linux/netdevice.h>
81#include <linux/proc_fs.h>
82#include <linux/init.h>
39c90ece 83#include <linux/workqueue.h>
1da177e4 84#include <linux/skbuff.h>
85#include <linux/inetdevice.h>
86#include <linux/igmp.h>
87#include <linux/pkt_sched.h>
88#include <linux/mroute.h>
89#include <linux/netfilter_ipv4.h>
90#include <linux/random.h>
91#include <linux/jhash.h>
92#include <linux/rcupdate.h>
93#include <linux/times.h>
5a0e3ad6 94#include <linux/slab.h>
b9eda06f 95#include <linux/prefetch.h>
352e512c 96#include <net/dst.h>
457c4cbc 97#include <net/net_namespace.h>
98#include <net/protocol.h>
99#include <net/ip.h>
100#include <net/route.h>
101#include <net/inetpeer.h>
102#include <net/sock.h>
103#include <net/ip_fib.h>
104#include <net/arp.h>
105#include <net/tcp.h>
106#include <net/icmp.h>
107#include <net/xfrm.h>
8d71740c 108#include <net/netevent.h>
63f3444f 109#include <net/rtnetlink.h>
110#ifdef CONFIG_SYSCTL
111#include <linux/sysctl.h>
7426a564 112#include <linux/kmemleak.h>
1da177e4 113#endif
6e5714ea 114#include <net/secure_seq.h>
1da177e4 115
68a5e3dd 116#define RT_FL_TOS(oldflp4) \
f61759e6 117 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
118
119#define IP_MAX_MTU 0xFFF0
120
121#define RT_GC_TIMEOUT (300*HZ)
122
1da177e4 123static int ip_rt_max_size;
817bc4db 124static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
9f28a2fc 125static int ip_rt_gc_interval __read_mostly = 60 * HZ;
126static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
127static int ip_rt_redirect_number __read_mostly = 9;
128static int ip_rt_redirect_load __read_mostly = HZ / 50;
129static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
130static int ip_rt_error_cost __read_mostly = HZ;
131static int ip_rt_error_burst __read_mostly = 5 * HZ;
132static int ip_rt_gc_elasticity __read_mostly = 8;
133static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
134static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
135static int ip_rt_min_advmss __read_mostly = 256;
9f28a2fc 136
137/*
138 * Interface to generic destination cache.
139 */
140
141static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
0dbaee3b 142static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
ebb762f2 143static unsigned int ipv4_mtu(const struct dst_entry *dst);
144static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
145static void ipv4_link_failure(struct sk_buff *skb);
146static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
147 struct sk_buff *skb, u32 mtu);
148static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
149 struct sk_buff *skb);
1da177e4 150
151static void ipv4_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
152 int how)
153{
154}
1da177e4 155
156static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
157{
158 WARN_ON(1);
159 return NULL;
160}
161
162static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
163 struct sk_buff *skb,
164 const void *daddr);
d3aaeb38 165
166static struct dst_ops ipv4_dst_ops = {
167 .family = AF_INET,
09640e63 168 .protocol = cpu_to_be16(ETH_P_IP),
1da177e4 169 .check = ipv4_dst_check,
0dbaee3b 170 .default_advmss = ipv4_default_advmss,
ebb762f2 171 .mtu = ipv4_mtu,
62fa8a84 172 .cow_metrics = ipv4_cow_metrics,
173 .ifdown = ipv4_dst_ifdown,
174 .negative_advice = ipv4_negative_advice,
175 .link_failure = ipv4_link_failure,
176 .update_pmtu = ip_rt_update_pmtu,
e47a185b 177 .redirect = ip_do_redirect,
1ac06e03 178 .local_out = __ip_local_out,
d3aaeb38 179 .neigh_lookup = ipv4_neigh_lookup,
180};
181
182#define ECN_OR_COST(class) TC_PRIO_##class
183
4839c52b 184const __u8 ip_tos2prio[16] = {
1da177e4 185 TC_PRIO_BESTEFFORT,
4a2b9c37 186 ECN_OR_COST(BESTEFFORT),
187 TC_PRIO_BESTEFFORT,
188 ECN_OR_COST(BESTEFFORT),
189 TC_PRIO_BULK,
190 ECN_OR_COST(BULK),
191 TC_PRIO_BULK,
192 ECN_OR_COST(BULK),
193 TC_PRIO_INTERACTIVE,
194 ECN_OR_COST(INTERACTIVE),
195 TC_PRIO_INTERACTIVE,
196 ECN_OR_COST(INTERACTIVE),
197 TC_PRIO_INTERACTIVE_BULK,
198 ECN_OR_COST(INTERACTIVE_BULK),
199 TC_PRIO_INTERACTIVE_BULK,
200 ECN_OR_COST(INTERACTIVE_BULK)
201};
d4a96865 202EXPORT_SYMBOL(ip_tos2prio);
1da177e4 203
2f970d83 204static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
27f39c73 205#define RT_CACHE_STAT_INC(field) __this_cpu_inc(rt_cache_stat.field)
1da177e4 206
207static inline int rt_genid(struct net *net)
208{
209 return atomic_read(&net->ipv4.rt_genid);
210}
211
1da177e4 212#ifdef CONFIG_PROC_FS
213static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
214{
29e75252 215 if (*pos)
89aef892 216 return NULL;
29e75252 217 return SEQ_START_TOKEN;
218}
219
220static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
221{
1da177e4 222 ++*pos;
89aef892 223 return NULL;
224}
225
226static void rt_cache_seq_stop(struct seq_file *seq, void *v)
227{
228}
229
230static int rt_cache_seq_show(struct seq_file *seq, void *v)
231{
232 if (v == SEQ_START_TOKEN)
233 seq_printf(seq, "%-127s\n",
234 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
235 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
236 "HHUptod\tSpecDst");
e905a9ed 237 return 0;
238}
239
f690808e 240static const struct seq_operations rt_cache_seq_ops = {
241 .start = rt_cache_seq_start,
242 .next = rt_cache_seq_next,
243 .stop = rt_cache_seq_stop,
244 .show = rt_cache_seq_show,
245};
246
247static int rt_cache_seq_open(struct inode *inode, struct file *file)
248{
89aef892 249 return seq_open(file, &rt_cache_seq_ops);
250}
251
9a32144e 252static const struct file_operations rt_cache_seq_fops = {
253 .owner = THIS_MODULE,
254 .open = rt_cache_seq_open,
255 .read = seq_read,
256 .llseek = seq_lseek,
89aef892 257 .release = seq_release,
258};
259
260
261static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
262{
263 int cpu;
264
265 if (*pos == 0)
266 return SEQ_START_TOKEN;
267
0f23174a 268 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
269 if (!cpu_possible(cpu))
270 continue;
271 *pos = cpu+1;
2f970d83 272 return &per_cpu(rt_cache_stat, cpu);
273 }
274 return NULL;
275}
276
277static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
278{
279 int cpu;
280
0f23174a 281 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
282 if (!cpu_possible(cpu))
283 continue;
284 *pos = cpu+1;
2f970d83 285 return &per_cpu(rt_cache_stat, cpu);
286 }
287 return NULL;
e905a9ed 288
289}
290
291static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
292{
293
294}
295
296static int rt_cpu_seq_show(struct seq_file *seq, void *v)
297{
298 struct rt_cache_stat *st = v;
299
300 if (v == SEQ_START_TOKEN) {
5bec0039 301 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
302 return 0;
303 }
e905a9ed 304
305 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
306 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
fc66f95c 307 dst_entries_get_slow(&ipv4_dst_ops),
308 st->in_hit,
309 st->in_slow_tot,
310 st->in_slow_mc,
311 st->in_no_route,
312 st->in_brd,
313 st->in_martian_dst,
314 st->in_martian_src,
315
316 st->out_hit,
317 st->out_slow_tot,
e905a9ed 318 st->out_slow_mc,
319
320 st->gc_total,
321 st->gc_ignored,
322 st->gc_goal_miss,
323 st->gc_dst_overflow,
324 st->in_hlist_search,
325 st->out_hlist_search
326 );
327 return 0;
328}
329
f690808e 330static const struct seq_operations rt_cpu_seq_ops = {
331 .start = rt_cpu_seq_start,
332 .next = rt_cpu_seq_next,
333 .stop = rt_cpu_seq_stop,
334 .show = rt_cpu_seq_show,
335};
336
337
338static int rt_cpu_seq_open(struct inode *inode, struct file *file)
339{
340 return seq_open(file, &rt_cpu_seq_ops);
341}
342
9a32144e 343static const struct file_operations rt_cpu_seq_fops = {
344 .owner = THIS_MODULE,
345 .open = rt_cpu_seq_open,
346 .read = seq_read,
347 .llseek = seq_lseek,
348 .release = seq_release,
349};
350
c7066f70 351#ifdef CONFIG_IP_ROUTE_CLASSID
a661c419 352static int rt_acct_proc_show(struct seq_file *m, void *v)
78c686e9 353{
354 struct ip_rt_acct *dst, *src;
355 unsigned int i, j;
356
357 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
358 if (!dst)
359 return -ENOMEM;
360
361 for_each_possible_cpu(i) {
362 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
363 for (j = 0; j < 256; j++) {
364 dst[j].o_bytes += src[j].o_bytes;
365 dst[j].o_packets += src[j].o_packets;
366 dst[j].i_bytes += src[j].i_bytes;
367 dst[j].i_packets += src[j].i_packets;
368 }
369 }
370
371 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
372 kfree(dst);
373 return 0;
374}
78c686e9 375
376static int rt_acct_proc_open(struct inode *inode, struct file *file)
377{
378 return single_open(file, rt_acct_proc_show, NULL);
78c686e9 379}
380
381static const struct file_operations rt_acct_proc_fops = {
382 .owner = THIS_MODULE,
383 .open = rt_acct_proc_open,
384 .read = seq_read,
385 .llseek = seq_lseek,
386 .release = single_release,
387};
78c686e9 388#endif
107f1634 389
73b38711 390static int __net_init ip_rt_do_proc_init(struct net *net)
391{
392 struct proc_dir_entry *pde;
393
394 pde = proc_net_fops_create(net, "rt_cache", S_IRUGO,
395 &rt_cache_seq_fops);
396 if (!pde)
397 goto err1;
398
399 pde = proc_create("rt_cache", S_IRUGO,
400 net->proc_net_stat, &rt_cpu_seq_fops);
401 if (!pde)
402 goto err2;
403
c7066f70 404#ifdef CONFIG_IP_ROUTE_CLASSID
a661c419 405 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
406 if (!pde)
407 goto err3;
408#endif
409 return 0;
410
c7066f70 411#ifdef CONFIG_IP_ROUTE_CLASSID
412err3:
413 remove_proc_entry("rt_cache", net->proc_net_stat);
414#endif
415err2:
416 remove_proc_entry("rt_cache", net->proc_net);
417err1:
418 return -ENOMEM;
419}
420
421static void __net_exit ip_rt_do_proc_exit(struct net *net)
422{
423 remove_proc_entry("rt_cache", net->proc_net_stat);
424 remove_proc_entry("rt_cache", net->proc_net);
c7066f70 425#ifdef CONFIG_IP_ROUTE_CLASSID
73b38711 426 remove_proc_entry("rt_acct", net->proc_net);
0a931acf 427#endif
428}
429
430static struct pernet_operations ip_rt_proc_ops __net_initdata = {
431 .init = ip_rt_do_proc_init,
432 .exit = ip_rt_do_proc_exit,
433};
434
435static int __init ip_rt_proc_init(void)
436{
437 return register_pernet_subsys(&ip_rt_proc_ops);
438}
439
107f1634 440#else
73b38711 441static inline int ip_rt_proc_init(void)
442{
443 return 0;
444}
1da177e4 445#endif /* CONFIG_PROC_FS */
e905a9ed 446
4331debc 447static inline bool rt_is_expired(const struct rtable *rth)
e84f84f2 448{
d8d1f30b 449 return rth->rt_genid != rt_genid(dev_net(rth->dst.dev));
450}
451
29e75252 452/*
25985edc 453 * Perturbation of rt_genid by a small quantity [1..256]
 454 * Using 8 bits of shuffling ensures we can call rt_cache_invalidate()
 455 * many times (2^24) without handing out a recently used rt_genid.
 456 * The Jenkins hash is strong enough that little changes of rt_genid are OK.
1da177e4 457 */
86c657f6 458static void rt_cache_invalidate(struct net *net)
1da177e4 459{
29e75252 460 unsigned char shuffle;
1da177e4 461
29e75252 462 get_random_bytes(&shuffle, sizeof(shuffle));
e84f84f2 463 atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
464}
465
466/*
467 * delay < 0 : invalidate cache (fast : entries will be deleted later)
468 * delay >= 0 : invalidate & flush cache (can be long)
469 */
76e6ebfb 470void rt_cache_flush(struct net *net, int delay)
1da177e4 471{
86c657f6 472 rt_cache_invalidate(net);
473}
474
475static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
476 struct sk_buff *skb,
477 const void *daddr)
3769cffb 478{
479 struct net_device *dev = dst->dev;
480 const __be32 *pkey = daddr;
39232973 481 const struct rtable *rt;
482 struct neighbour *n;
483
39232973 484 rt = (const struct rtable *) dst;
a263b309 485 if (rt->rt_gateway)
39232973 486 pkey = (const __be32 *) &rt->rt_gateway;
487 else if (skb)
488 pkey = &ip_hdr(skb)->daddr;
d3aaeb38 489
80703d26 490 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
491 if (n)
492 return n;
32092ecf 493 return neigh_create(&arp_tbl, pkey, dev);
494}
495
496/*
497 * Peer allocation may fail only in serious out-of-memory conditions. However
 498 * we can still generate some output.
 499 * Random ID selection looks a bit dangerous because we have no chance of
 500 * selecting an ID that stays unique for a reasonable period of time.
 501 * But a broken packet identifier may be better than no packet at all.
502 */
503static void ip_select_fb_ident(struct iphdr *iph)
504{
505 static DEFINE_SPINLOCK(ip_fb_id_lock);
506 static u32 ip_fallback_id;
507 u32 salt;
508
509 spin_lock_bh(&ip_fb_id_lock);
e448515c 510 salt = secure_ip_id((__force __be32)ip_fallback_id ^ iph->daddr);
511 iph->id = htons(salt & 0xFFFF);
512 ip_fallback_id = salt;
513 spin_unlock_bh(&ip_fb_id_lock);
514}
515
516void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more)
517{
518 struct net *net = dev_net(dst->dev);
519 struct inet_peer *peer;
1da177e4 520
521 peer = inet_getpeer_v4(net->ipv4.peers, iph->daddr, 1);
522 if (peer) {
523 iph->id = htons(inet_getid(peer, more));
524 inet_putpeer(peer);
525 return;
526 }
527
528 ip_select_fb_ident(iph);
529}
4bc2f18b 530EXPORT_SYMBOL(__ip_select_ident);
1da177e4 531
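/*
 * Build an output flow key from a packet's IPv4 header. When a socket is
 * supplied, its bound device, mark, connection TOS and protocol override
 * the values taken from the packet, so the lookup matches what the socket
 * itself would use on transmit.
 */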
5abf7f7e 532static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
533 const struct iphdr *iph,
534 int oif, u8 tos,
535 u8 prot, u32 mark, int flow_flags)
536{
537 if (sk) {
538 const struct inet_sock *inet = inet_sk(sk);
539
540 oif = sk->sk_bound_dev_if;
541 mark = sk->sk_mark;
542 tos = RT_CONN_FLAGS(sk);
543 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
544 }
545 flowi4_init_output(fl4, oif, mark, tos,
546 RT_SCOPE_UNIVERSE, prot,
547 flow_flags,
548 iph->daddr, iph->saddr, 0, 0);
549}
550
551static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
552 const struct sock *sk)
553{
554 const struct iphdr *iph = ip_hdr(skb);
555 int oif = skb->dev->ifindex;
556 u8 tos = RT_TOS(iph->tos);
557 u8 prot = iph->protocol;
558 u32 mark = skb->mark;
559
560 __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
561}
562
5abf7f7e 563static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
564{
565 const struct inet_sock *inet = inet_sk(sk);
5abf7f7e 566 const struct ip_options_rcu *inet_opt;
567 __be32 daddr = inet->inet_daddr;
568
569 rcu_read_lock();
570 inet_opt = rcu_dereference(inet->inet_opt);
571 if (inet_opt && inet_opt->opt.srr)
572 daddr = inet_opt->opt.faddr;
573 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
574 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
575 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
576 inet_sk_flowi_flags(sk),
577 daddr, inet->inet_saddr, 0, 0);
578 rcu_read_unlock();
579}
580
581static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
582 const struct sk_buff *skb)
583{
584 if (skb)
585 build_skb_flow_key(fl4, skb, sk);
586 else
587 build_sk_flow_key(fl4, sk);
588}
589
aee06da6 590static DEFINE_SEQLOCK(fnhe_seqlock);
4895c771 591
aee06da6 592static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
593{
594 struct fib_nh_exception *fnhe, *oldest;
595
596 oldest = rcu_dereference(hash->chain);
597 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
598 fnhe = rcu_dereference(fnhe->fnhe_next)) {
599 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
600 oldest = fnhe;
601 }
602 return oldest;
603}
604
605static inline u32 fnhe_hashfun(__be32 daddr)
606{
607 u32 hval;
608
609 hval = (__force u32) daddr;
610 hval ^= (hval >> 11) ^ (hval >> 22);
611
612 return hval & (FNHE_HASH_SIZE - 1);
613}
614
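/*
 * Record or refresh a next-hop exception learned from ICMP (a redirect
 * gateway and/or a path MTU with its expiry), keyed by destination.
 * The per-nexthop hash is allocated on first use, writers serialize on
 * fnhe_seqlock, and once a chain grows past FNHE_RECLAIM_DEPTH the
 * stalest entry is recycled instead of allocating a new one.
 */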
615static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
616 u32 pmtu, unsigned long expires)
4895c771 617{
aee06da6 618 struct fnhe_hash_bucket *hash;
619 struct fib_nh_exception *fnhe;
620 int depth;
621 u32 hval = fnhe_hashfun(daddr);
622
623 write_seqlock_bh(&fnhe_seqlock);
4895c771 624
aee06da6 625 hash = nh->nh_exceptions;
4895c771 626 if (!hash) {
aee06da6 627 hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
4895c771 628 if (!hash)
629 goto out_unlock;
630 nh->nh_exceptions = hash;
631 }
632
633 hash += hval;
634
635 depth = 0;
636 for (fnhe = rcu_dereference(hash->chain); fnhe;
637 fnhe = rcu_dereference(fnhe->fnhe_next)) {
638 if (fnhe->fnhe_daddr == daddr)
aee06da6 639 break;
640 depth++;
641 }
642
643 if (fnhe) {
644 if (gw)
645 fnhe->fnhe_gw = gw;
646 if (pmtu) {
647 fnhe->fnhe_pmtu = pmtu;
648 fnhe->fnhe_expires = expires;
649 }
650 } else {
651 if (depth > FNHE_RECLAIM_DEPTH)
652 fnhe = fnhe_oldest(hash);
653 else {
654 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
655 if (!fnhe)
656 goto out_unlock;
657
658 fnhe->fnhe_next = hash->chain;
659 rcu_assign_pointer(hash->chain, fnhe);
660 }
661 fnhe->fnhe_daddr = daddr;
662 fnhe->fnhe_gw = gw;
663 fnhe->fnhe_pmtu = pmtu;
664 fnhe->fnhe_expires = expires;
4895c771 665 }
4895c771 666
4895c771 667 fnhe->fnhe_stamp = jiffies;
668
669out_unlock:
670 write_sequnlock_bh(&fnhe_seqlock);
671 return;
672}
673
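/*
 * Handle an ICMP redirect: sanity-check the advertised gateway against the
 * receiving device's configuration, store it as a next-hop exception via
 * update_or_create_fnhe(), and optionally mark the current dst as
 * DST_OBSOLETE_KILL so the next lookup re-resolves the route.
 */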
674static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
675 bool kill_route)
1da177e4 676{
e47a185b 677 __be32 new_gw = icmp_hdr(skb)->un.gateway;
94206125 678 __be32 old_gw = ip_hdr(skb)->saddr;
e47a185b 679 struct net_device *dev = skb->dev;
e47a185b 680 struct in_device *in_dev;
4895c771 681 struct fib_result res;
e47a185b 682 struct neighbour *n;
317805b8 683 struct net *net;
1da177e4 684
685 switch (icmp_hdr(skb)->code & 7) {
686 case ICMP_REDIR_NET:
687 case ICMP_REDIR_NETTOS:
688 case ICMP_REDIR_HOST:
689 case ICMP_REDIR_HOSTTOS:
690 break;
691
692 default:
693 return;
694 }
695
696 if (rt->rt_gateway != old_gw)
697 return;
698
699 in_dev = __in_dev_get_rcu(dev);
700 if (!in_dev)
701 return;
702
c346dca1 703 net = dev_net(dev);
704 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
705 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
706 ipv4_is_zeronet(new_gw))
707 goto reject_redirect;
708
709 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
710 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
711 goto reject_redirect;
712 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
713 goto reject_redirect;
714 } else {
317805b8 715 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
716 goto reject_redirect;
717 }
718
4895c771 719 n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
720 if (n) {
721 if (!(n->nud_state & NUD_VALID)) {
722 neigh_event_send(n, NULL);
723 } else {
724 if (fib_lookup(net, fl4, &res) == 0) {
725 struct fib_nh *nh = &FIB_RES_NH(res);
4895c771 726
727 update_or_create_fnhe(nh, fl4->daddr, new_gw,
728 0, 0);
4895c771 729 }
730 if (kill_route)
731 rt->dst.obsolete = DST_OBSOLETE_KILL;
732 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
733 }
734 neigh_release(n);
735 }
736 return;
737
738reject_redirect:
739#ifdef CONFIG_IP_ROUTE_VERBOSE
740 if (IN_DEV_LOG_MARTIANS(in_dev)) {
741 const struct iphdr *iph = (const struct iphdr *) skb->data;
742 __be32 daddr = iph->daddr;
743 __be32 saddr = iph->saddr;
744
745 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
746 " Advised path = %pI4 -> %pI4\n",
747 &old_gw, dev->name, &new_gw,
748 &saddr, &daddr);
99ee038d 749 }
750#endif
751 ;
752}
753
754static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
755{
756 struct rtable *rt;
757 struct flowi4 fl4;
758
759 rt = (struct rtable *) dst;
760
761 ip_rt_build_flow_key(&fl4, sk, skb);
ceb33206 762 __ip_do_redirect(rt, skb, &fl4, true);
763}
764
765static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
766{
ee6b9673 767 struct rtable *rt = (struct rtable *)dst;
768 struct dst_entry *ret = dst;
769
770 if (rt) {
d11a4dc1 771 if (dst->obsolete > 0) {
1da177e4
LT
772 ip_rt_put(rt);
773 ret = NULL;
774 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
775 rt->dst.expires) {
89aef892 776 ip_rt_put(rt);
777 ret = NULL;
778 }
779 }
780 return ret;
781}
782
783/*
784 * Algorithm:
785 * 1. The first ip_rt_redirect_number redirects are sent
786 * with exponential backoff, then we stop sending them at all,
787 * assuming that the host ignores our redirects.
788 * 2. If we did not see packets requiring redirects
789 * during ip_rt_redirect_silence, we assume that the host
790 * forgot redirected route and start to send redirects again.
791 *
792 * This algorithm is much cheaper and more intelligent than dumb load limiting
793 * in icmp.c.
794 *
795 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
796 * and "frag. need" (breaks PMTU discovery) in icmp.c.
797 */
798
799void ip_rt_send_redirect(struct sk_buff *skb)
800{
511c3f92 801 struct rtable *rt = skb_rtable(skb);
30038fc6 802 struct in_device *in_dev;
92d86829 803 struct inet_peer *peer;
1d861aa4 804 struct net *net;
30038fc6 805 int log_martians;
1da177e4 806
30038fc6 807 rcu_read_lock();
d8d1f30b 808 in_dev = __in_dev_get_rcu(rt->dst.dev);
809 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
810 rcu_read_unlock();
1da177e4 811 return;
812 }
813 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
814 rcu_read_unlock();
1da177e4 815
816 net = dev_net(rt->dst.dev);
817 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
818 if (!peer) {
819 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
820 return;
821 }
822
823 /* No redirected packets during ip_rt_redirect_silence;
824 * reset the algorithm.
825 */
826 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
827 peer->rate_tokens = 0;
828
829 /* Too many ignored redirects; do not send anything
d8d1f30b 830 * set dst.rate_last to the last seen redirected packet.
1da177e4 831 */
832 if (peer->rate_tokens >= ip_rt_redirect_number) {
833 peer->rate_last = jiffies;
1d861aa4 834 goto out_put_peer;
835 }
836
837 /* Check for load limit; set rate_last to the latest sent
838 * redirect.
839 */
92d86829 840 if (peer->rate_tokens == 0 ||
14fb8a76 841 time_after(jiffies,
842 (peer->rate_last +
843 (ip_rt_redirect_load << peer->rate_tokens)))) {
1da177e4 844 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, rt->rt_gateway);
845 peer->rate_last = jiffies;
846 ++peer->rate_tokens;
1da177e4 847#ifdef CONFIG_IP_ROUTE_VERBOSE
30038fc6 848 if (log_martians &&
849 peer->rate_tokens == ip_rt_redirect_number)
850 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
92101b3b 851 &ip_hdr(skb)->saddr, inet_iif(skb),
f1ce3062 852 &ip_hdr(skb)->daddr, &rt->rt_gateway);
853#endif
854 }
855out_put_peer:
856 inet_putpeer(peer);
857}
858
859static int ip_error(struct sk_buff *skb)
860{
251da413 861 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
511c3f92 862 struct rtable *rt = skb_rtable(skb);
92d86829 863 struct inet_peer *peer;
1da177e4 864 unsigned long now;
251da413 865 struct net *net;
92d86829 866 bool send;
867 int code;
868
869 net = dev_net(rt->dst.dev);
870 if (!IN_DEV_FORWARD(in_dev)) {
871 switch (rt->dst.error) {
872 case EHOSTUNREACH:
873 IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
874 break;
875
876 case ENETUNREACH:
877 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
878 break;
879 }
880 goto out;
881 }
882
d8d1f30b 883 switch (rt->dst.error) {
884 case EINVAL:
885 default:
886 goto out;
887 case EHOSTUNREACH:
888 code = ICMP_HOST_UNREACH;
889 break;
890 case ENETUNREACH:
891 code = ICMP_NET_UNREACH;
251da413 892 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
893 break;
894 case EACCES:
895 code = ICMP_PKT_FILTERED;
896 break;
897 }
898
1d861aa4 899 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
900
901 send = true;
902 if (peer) {
903 now = jiffies;
904 peer->rate_tokens += now - peer->rate_last;
905 if (peer->rate_tokens > ip_rt_error_burst)
906 peer->rate_tokens = ip_rt_error_burst;
907 peer->rate_last = now;
908 if (peer->rate_tokens >= ip_rt_error_cost)
909 peer->rate_tokens -= ip_rt_error_cost;
910 else
911 send = false;
1d861aa4 912 inet_putpeer(peer);
1da177e4 913 }
914 if (send)
915 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
916
917out: kfree_skb(skb);
918 return 0;
e905a9ed 919}
1da177e4 920
ceb33206 921static u32 __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
1da177e4 922{
4895c771 923 struct fib_result res;
2c8cec5c 924
925 if (mtu < ip_rt_min_pmtu)
926 mtu = ip_rt_min_pmtu;
2c8cec5c 927
928 if (fib_lookup(dev_net(rt->dst.dev), fl4, &res) == 0) {
929 struct fib_nh *nh = &FIB_RES_NH(res);
4895c771 930
931 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
932 jiffies + ip_rt_mtu_expires);
4895c771 933 }
ceb33206 934 return mtu;
935}
936
937static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
938 struct sk_buff *skb, u32 mtu)
939{
940 struct rtable *rt = (struct rtable *) dst;
941 struct flowi4 fl4;
942
943 ip_rt_build_flow_key(&fl4, sk, skb);
944 mtu = __ip_rt_update_pmtu(rt, &fl4, mtu);
945
946 if (!rt->rt_pmtu) {
947 dst->obsolete = DST_OBSOLETE_KILL;
948 } else {
949 rt->rt_pmtu = mtu;
950 dst_set_expires(&rt->dst, ip_rt_mtu_expires);
951 }
952}
953
954void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
955 int oif, u32 mark, u8 protocol, int flow_flags)
956{
4895c771 957 const struct iphdr *iph = (const struct iphdr *) skb->data;
958 struct flowi4 fl4;
959 struct rtable *rt;
960
961 __build_flow_key(&fl4, NULL, iph, oif,
962 RT_TOS(iph->tos), protocol, mark, flow_flags);
963 rt = __ip_route_output_key(net, &fl4);
964 if (!IS_ERR(rt)) {
4895c771 965 __ip_rt_update_pmtu(rt, &fl4, mtu);
966 ip_rt_put(rt);
967 }
968}
969EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
970
971void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
972{
973 const struct iphdr *iph = (const struct iphdr *) skb->data;
974 struct flowi4 fl4;
975 struct rtable *rt;
36393395 976
977 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
978 rt = __ip_route_output_key(sock_net(sk), &fl4);
979 if (!IS_ERR(rt)) {
980 __ip_rt_update_pmtu(rt, &fl4, mtu);
981 ip_rt_put(rt);
982 }
983}
984EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
f39925db 985
986void ipv4_redirect(struct sk_buff *skb, struct net *net,
987 int oif, u32 mark, u8 protocol, int flow_flags)
988{
4895c771 989 const struct iphdr *iph = (const struct iphdr *) skb->data;
990 struct flowi4 fl4;
991 struct rtable *rt;
992
993 __build_flow_key(&fl4, NULL, iph, oif,
994 RT_TOS(iph->tos), protocol, mark, flow_flags);
995 rt = __ip_route_output_key(net, &fl4);
996 if (!IS_ERR(rt)) {
ceb33206 997 __ip_do_redirect(rt, skb, &fl4, false);
998 ip_rt_put(rt);
999 }
1000}
1001EXPORT_SYMBOL_GPL(ipv4_redirect);
1002
1003void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1004{
1005 const struct iphdr *iph = (const struct iphdr *) skb->data;
1006 struct flowi4 fl4;
1007 struct rtable *rt;
b42597e2 1008
1009 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1010 rt = __ip_route_output_key(sock_net(sk), &fl4);
1011 if (!IS_ERR(rt)) {
ceb33206 1012 __ip_do_redirect(rt, skb, &fl4, false);
1013 ip_rt_put(rt);
1014 }
1015}
1016EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1017
1018static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1019{
1020 struct rtable *rt = (struct rtable *) dst;
1021
1022 /* All IPV4 dsts are created with ->obsolete set to the value
1023 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1024 * into this function always.
1025 *
1026 * When a PMTU/redirect information update invalidates a
1027 * route, this is indicated by setting obsolete to
1028 * DST_OBSOLETE_KILL.
1029 */
1030 if (dst->obsolete == DST_OBSOLETE_KILL || rt_is_expired(rt))
efbc368d 1031 return NULL;
d11a4dc1 1032 return dst;
1033}
1034
1035static void ipv4_link_failure(struct sk_buff *skb)
1036{
1037 struct rtable *rt;
1038
1039 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1040
511c3f92 1041 rt = skb_rtable(skb);
1042 if (rt)
1043 dst_set_expires(&rt->dst, 0);
1044}
1045
1046static int ip_rt_bug(struct sk_buff *skb)
1047{
1048 pr_debug("%s: %pI4 -> %pI4, %s\n",
1049 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1050 skb->dev ? skb->dev->name : "?");
1da177e4 1051 kfree_skb(skb);
c378a9c0 1052 WARN_ON(1);
1053 return 0;
1054}
1055
1056/*
1057 We do not cache source address of outgoing interface,
1058 because it is used only by IP RR, TS and SRR options,
1059 so that it out of fast path.
1060
1061 BTW remember: "addr" is allowed to be not aligned
1062 in IP options!
1063 */
1064
8e36360a 1065void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1da177e4 1066{
a61ced5d 1067 __be32 src;
1da177e4 1068
c7537967 1069 if (rt_is_output_route(rt))
c5be24ff 1070 src = ip_hdr(skb)->saddr;
ebc0ffae 1071 else {
1072 struct fib_result res;
1073 struct flowi4 fl4;
1074 struct iphdr *iph;
1075
1076 iph = ip_hdr(skb);
1077
1078 memset(&fl4, 0, sizeof(fl4));
1079 fl4.daddr = iph->daddr;
1080 fl4.saddr = iph->saddr;
b0fe4a31 1081 fl4.flowi4_tos = RT_TOS(iph->tos);
1082 fl4.flowi4_oif = rt->dst.dev->ifindex;
1083 fl4.flowi4_iif = skb->dev->ifindex;
1084 fl4.flowi4_mark = skb->mark;
5e2b61f7 1085
ebc0ffae 1086 rcu_read_lock();
68a5e3dd 1087 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res) == 0)
436c3b66 1088 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
ebc0ffae 1089 else
1090 src = inet_select_addr(rt->dst.dev,
1091 rt_nexthop(rt, iph->daddr),
1092 RT_SCOPE_UNIVERSE);
1093 rcu_read_unlock();
1094 }
1095 memcpy(addr, &src, 4);
1096}
1097
c7066f70 1098#ifdef CONFIG_IP_ROUTE_CLASSID
1099static void set_class_tag(struct rtable *rt, u32 tag)
1100{
1101 if (!(rt->dst.tclassid & 0xFFFF))
1102 rt->dst.tclassid |= tag & 0xFFFF;
1103 if (!(rt->dst.tclassid & 0xFFFF0000))
1104 rt->dst.tclassid |= tag & 0xFFFF0000;
1105}
1106#endif
1107
1108static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1109{
1110 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1111
1112 if (advmss == 0) {
1113 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1114 ip_rt_min_advmss);
1115 if (advmss > 65535 - 40)
1116 advmss = 65535 - 40;
1117 }
1118 return advmss;
1119}
1120
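/*
 * Report the route MTU: prefer a still-valid learned PMTU, then the
 * RTAX_MTU metric, and otherwise fall back to the device MTU, clamping
 * locked-MTU gatewayed routes to 576 and everything to IP_MAX_MTU.
 */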
ebb762f2 1121static unsigned int ipv4_mtu(const struct dst_entry *dst)
d33e4553 1122{
261663b0 1123 const struct rtable *rt = (const struct rtable *) dst;
1124 unsigned int mtu = rt->rt_pmtu;
1125
1126 if (mtu && time_after_eq(jiffies, rt->dst.expires))
1127 mtu = 0;
1128
1129 if (!mtu)
1130 mtu = dst_metric_raw(dst, RTAX_MTU);
618f9bc7 1131
261663b0 1132 if (mtu && rt_is_output_route(rt))
1133 return mtu;
1134
1135 mtu = dst->dev->mtu;
1136
1137 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
f8126f1d 1138 if (rt->rt_gateway && mtu > 576)
d33e4553
DM
1139 mtu = 576;
1140 }
1141
1142 if (mtu > IP_MAX_MTU)
1143 mtu = IP_MAX_MTU;
1144
1145 return mtu;
1146}
1147
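/* Look up the fib_nh_exception for a destination in the nexthop's hash, if any. */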
f2bb4bed 1148static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1149{
1150 struct fnhe_hash_bucket *hash = nh->nh_exceptions;
1151 struct fib_nh_exception *fnhe;
1152 u32 hval;
1153
1154 if (!hash)
1155 return NULL;
1156
d3a25c98 1157 hval = fnhe_hashfun(daddr);
1158
1159 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1160 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1161 if (fnhe->fnhe_daddr == daddr)
1162 return fnhe;
1163 }
1164 return NULL;
1165}
aee06da6 1166
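/*
 * Copy a matching exception into a freshly created route: a still-valid
 * PMTU sets rt_pmtu and an expiry on the dst, and a learned gateway
 * overrides rt_gateway and marks the route RTCF_REDIRECTED. The fields
 * are read under fnhe_seqlock so the snapshot is consistent.
 */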
1167static void rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1168 __be32 daddr)
1169{
1170 __be32 fnhe_daddr, gw;
1171 unsigned long expires;
1172 unsigned int seq;
1173 u32 pmtu;
1174
1175restart:
1176 seq = read_seqbegin(&fnhe_seqlock);
1177 fnhe_daddr = fnhe->fnhe_daddr;
1178 gw = fnhe->fnhe_gw;
1179 pmtu = fnhe->fnhe_pmtu;
1180 expires = fnhe->fnhe_expires;
1181 if (read_seqretry(&fnhe_seqlock, seq))
1182 goto restart;
1183
1184 if (daddr != fnhe_daddr)
1185 return;
1186
1187 if (pmtu) {
1188 unsigned long diff = expires - jiffies;
1189
1190 if (time_before(jiffies, expires)) {
1191 rt->rt_pmtu = pmtu;
1192 dst_set_expires(&rt->dst, diff);
ceb33206 1193 }
1194 }
1195 if (gw) {
1196 rt->rt_flags |= RTCF_REDIRECTED;
1197 rt->rt_gateway = gw;
1198 }
1199 fnhe->fnhe_stamp = jiffies;
1200}
1201
1202static inline void rt_free(struct rtable *rt)
1203{
1204 call_rcu(&rt->dst.rcu_head, dst_rcu_free);
1205}
1206
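/*
 * Cache a route in the FIB nexthop itself: input routes go into the single
 * nh_rth_input slot, output routes into this CPU's nh_pcpu_rth_output slot
 * (the per-cpu cache this change introduces). The swap is done with
 * cmpxchg(); if another CPU won the race, or there is no per-cpu storage,
 * the route is flagged DST_NOCACHE and freed by normal refcounting instead.
 */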
1207static void rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1208{
d26b3a7c 1209 struct rtable *orig, *prev, **p;
f2bb4bed 1210
d26b3a7c 1211 if (rt_is_input_route(rt)) {
54764bb6 1212 p = (struct rtable **)&nh->nh_rth_input;
1213 } else {
1214 if (!nh->nh_pcpu_rth_output)
1215 goto nocache;
1216 p = (struct rtable **)__this_cpu_ptr(nh->nh_pcpu_rth_output);
1217 }
1218 orig = *p;
1219
1220 prev = cmpxchg(p, orig, rt);
1221 if (prev == orig) {
f2bb4bed 1222 if (orig)
54764bb6 1223 rt_free(orig);
c6cffba4 1224 } else {
1225 /* Routes we intend to cache in the FIB nexthop have
1226 * the DST_NOCACHE bit clear. However, if we are
1227 * unsuccessful at storing this route into the cache
1228 * we really need to set it.
1229 */
d26b3a7c 1230nocache:
54764bb6 1231 rt->dst.flags |= DST_NOCACHE;
1232 }
1233}
1234
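/*
 * A cached route may be reused only while it is still marked
 * DST_OBSOLETE_FORCE_CHK (i.e. not killed by a PMTU/redirect update) and
 * its generation id matches the namespace's current rt_genid.
 */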
4331debc 1235static bool rt_cache_valid(const struct rtable *rt)
d2d68ba9 1236{
1237 return rt &&
1238 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1239 !rt_is_expired(rt);
1240}
1241
f2bb4bed 1242static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
5e2b61f7 1243 const struct fib_result *res,
f2bb4bed 1244 struct fib_nh_exception *fnhe,
982721f3 1245 struct fib_info *fi, u16 type, u32 itag)
1da177e4 1246{
1da177e4 1247 if (fi) {
1248 struct fib_nh *nh = &FIB_RES_NH(*res);
1249
1250 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK)
1251 rt->rt_gateway = nh->nh_gw;
1252 if (unlikely(fnhe))
1253 rt_bind_exception(rt, fnhe, daddr);
2860583f 1254 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
c7066f70 1255#ifdef CONFIG_IP_ROUTE_CLASSID
f2bb4bed 1256 rt->dst.tclassid = nh->nh_tclassid;
1da177e4 1257#endif
c6cffba4 1258 if (!(rt->dst.flags & DST_NOCACHE))
f2bb4bed 1259 rt_cache_route(nh, rt);
d33e4553 1260 }
defb3519 1261
c7066f70 1262#ifdef CONFIG_IP_ROUTE_CLASSID
1da177e4 1263#ifdef CONFIG_IP_MULTIPLE_TABLES
85b91b03 1264 set_class_tag(rt, res->tclassid);
1265#endif
1266 set_class_tag(rt, itag);
1267#endif
1268}
1269
5c1e6aa3 1270static struct rtable *rt_dst_alloc(struct net_device *dev,
f2bb4bed 1271 bool nopolicy, bool noxfrm, bool will_cache)
0c4dcd58 1272{
f5b0a874 1273 return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
c6cffba4 1274 (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
1275 (nopolicy ? DST_NOPOLICY : 0) |
1276 (noxfrm ? DST_NOXFRM : 0));
1277}
1278
96d36220 1279/* called in rcu_read_lock() section */
9e12bb22 1280static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1281 u8 tos, struct net_device *dev, int our)
1282{
1da177e4 1283 struct rtable *rth;
96d36220 1284 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 1285 u32 itag = 0;
b5f7e755 1286 int err;
1287
1288 /* Primary sanity checks. */
1289
1290 if (in_dev == NULL)
1291 return -EINVAL;
1292
1e637c74 1293 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
d0daebc3 1294 skb->protocol != htons(ETH_P_IP))
1295 goto e_inval;
1296
1297 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1298 if (ipv4_is_loopback(saddr))
1299 goto e_inval;
1300
1301 if (ipv4_is_zeronet(saddr)) {
1302 if (!ipv4_is_local_multicast(daddr))
1da177e4 1303 goto e_inval;
b5f7e755 1304 } else {
1305 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1306 in_dev, &itag);
1307 if (err < 0)
1308 goto e_err;
1309 }
4e7b2f14 1310 rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
f2bb4bed 1311 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1312 if (!rth)
1313 goto e_nobufs;
1314
1315#ifdef CONFIG_IP_ROUTE_CLASSID
1316 rth->dst.tclassid = itag;
1317#endif
d8d1f30b 1318 rth->dst.output = ip_rt_bug;
1da177e4 1319
1320 rth->rt_genid = rt_genid(dev_net(dev));
1321 rth->rt_flags = RTCF_MULTICAST;
1322 rth->rt_type = RTN_MULTICAST;
9917e1e8 1323 rth->rt_is_input= 1;
13378cad 1324 rth->rt_iif = 0;
5943634f 1325 rth->rt_pmtu = 0;
f8126f1d 1326 rth->rt_gateway = 0;
1da177e4 1327 if (our) {
d8d1f30b 1328 rth->dst.input= ip_local_deliver;
1329 rth->rt_flags |= RTCF_LOCAL;
1330 }
1331
1332#ifdef CONFIG_IP_MROUTE
f97c1e0c 1333 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
d8d1f30b 1334 rth->dst.input = ip_mr_input;
1335#endif
1336 RT_CACHE_STAT_INC(in_slow_mc);
1337
1338 skb_dst_set(skb, &rth->dst);
1339 return 0;
1340
1341e_nobufs:
1da177e4 1342 return -ENOBUFS;
1da177e4 1343e_inval:
96d36220 1344 return -EINVAL;
b5f7e755 1345e_err:
b5f7e755 1346 return err;
1347}
1348
1349
1350static void ip_handle_martian_source(struct net_device *dev,
1351 struct in_device *in_dev,
1352 struct sk_buff *skb,
1353 __be32 daddr,
1354 __be32 saddr)
1355{
1356 RT_CACHE_STAT_INC(in_martian_src);
1357#ifdef CONFIG_IP_ROUTE_VERBOSE
1358 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1359 /*
1360 * RFC1812 recommendation, if source is martian,
1361 * the only hint is MAC header.
1362 */
058bd4d2 1363 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
673d57e7 1364 &daddr, &saddr, dev->name);
98e399f8 1365 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1366 print_hex_dump(KERN_WARNING, "ll header: ",
1367 DUMP_PREFIX_OFFSET, 16, 1,
1368 skb_mac_header(skb),
1369 dev->hard_header_len, true);
1370 }
1371 }
1372#endif
1373}
1374
47360228 1375/* called in rcu_read_lock() section */
5969f71d 1376static int __mkroute_input(struct sk_buff *skb,
982721f3 1377 const struct fib_result *res,
5969f71d 1378 struct in_device *in_dev,
c6cffba4 1379 __be32 daddr, __be32 saddr, u32 tos)
1da177e4 1380{
1381 struct rtable *rth;
1382 int err;
1383 struct in_device *out_dev;
47360228 1384 unsigned int flags = 0;
d2d68ba9 1385 bool do_cache;
d9c9df8c 1386 u32 itag;
1387
1388 /* get a working reference to the output device */
47360228 1389 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1da177e4 1390 if (out_dev == NULL) {
e87cc472 1391 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1392 return -EINVAL;
1393 }
1394
1395
5c04c819 1396 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
9e56e380 1397 in_dev->dev, in_dev, &itag);
1da177e4 1398 if (err < 0) {
e905a9ed 1399 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1da177e4 1400 saddr);
e905a9ed 1401
1da177e4
LT
1402 goto cleanup;
1403 }
1404
51b77cae 1405 if (out_dev == in_dev && err &&
1da177e4
LT
1406 (IN_DEV_SHARED_MEDIA(out_dev) ||
1407 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1408 flags |= RTCF_DOREDIRECT;
1409
1410 if (skb->protocol != htons(ETH_P_IP)) {
1411 /* Not IP (i.e. ARP). Do not create route, if it is
1412 * invalid for proxy arp. DNAT routes are always valid.
1413 *
 1414 * The proxy arp feature has been extended to allow ARP
 1415 * replies back on the same interface, to support
 1416 * Private VLAN switch technologies. See arp.c.
1da177e4 1417 */
1418 if (out_dev == in_dev &&
1419 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1420 err = -EINVAL;
1421 goto cleanup;
1422 }
1423 }
1424
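	/* If the route has FIB info and source validation produced no itag,
	 * try to reuse the input route already cached on the nexthop;
	 * otherwise build a fresh one below and cache it when possible.
	 */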
1425 do_cache = false;
1426 if (res->fi) {
fe3edf45 1427 if (!itag) {
54764bb6 1428 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
d2d68ba9 1429 if (rt_cache_valid(rth)) {
c6cffba4 1430 skb_dst_set_noref(skb, &rth->dst);
1431 goto out;
1432 }
1433 do_cache = true;
1434 }
1435 }
f2bb4bed 1436
1437 rth = rt_dst_alloc(out_dev->dev,
1438 IN_DEV_CONF_GET(in_dev, NOPOLICY),
d2d68ba9 1439 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1440 if (!rth) {
1441 err = -ENOBUFS;
1442 goto cleanup;
1443 }
1444
1445 rth->rt_genid = rt_genid(dev_net(rth->dst.dev));
1446 rth->rt_flags = flags;
1447 rth->rt_type = res->type;
9917e1e8 1448 rth->rt_is_input = 1;
13378cad 1449 rth->rt_iif = 0;
5943634f 1450 rth->rt_pmtu = 0;
f8126f1d 1451 rth->rt_gateway = 0;
1da177e4 1452
1453 rth->dst.input = ip_forward;
1454 rth->dst.output = ip_output;
1da177e4 1455
d2d68ba9 1456 rt_set_nexthop(rth, daddr, res, NULL, res->fi, res->type, itag);
c6cffba4 1457 skb_dst_set(skb, &rth->dst);
d2d68ba9 1458out:
1459 err = 0;
1460 cleanup:
1da177e4 1461 return err;
e905a9ed 1462}
1da177e4 1463
1464static int ip_mkroute_input(struct sk_buff *skb,
1465 struct fib_result *res,
68a5e3dd 1466 const struct flowi4 *fl4,
1467 struct in_device *in_dev,
1468 __be32 daddr, __be32 saddr, u32 tos)
1da177e4 1469{
1da177e4 1470#ifdef CONFIG_IP_ROUTE_MULTIPATH
ff3fccb3 1471 if (res->fi && res->fi->fib_nhs > 1)
1b7fe593 1472 fib_select_multipath(res);
1473#endif
1474
1475 /* create a routing cache entry */
c6cffba4 1476 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1477}
1478
1479/*
 1480 * NOTE. We drop all packets that have a local source
 1481 * address, because every properly looped back packet
 1482 * must have the correct destination already attached by the output routine.
 1483 *
 1484 * Such an approach solves two big problems:
 1485 * 1. Non-simplex devices are handled properly.
 1486 * 2. IP spoofing attempts are filtered with a 100% guarantee.
ebc0ffae 1487 * called with rcu_read_lock()
1488 */
1489
9e12bb22 1490static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
c10237e0 1491 u8 tos, struct net_device *dev)
1492{
1493 struct fib_result res;
96d36220 1494 struct in_device *in_dev = __in_dev_get_rcu(dev);
68a5e3dd 1495 struct flowi4 fl4;
95c96174 1496 unsigned int flags = 0;
1da177e4 1497 u32 itag = 0;
95c96174 1498 struct rtable *rth;
1da177e4 1499 int err = -EINVAL;
5e73ea1a 1500 struct net *net = dev_net(dev);
d2d68ba9 1501 bool do_cache;
1502
1503 /* IP on this device is disabled. */
1504
1505 if (!in_dev)
1506 goto out;
1507
1508 /* Check for the most weird martians, which can be not detected
1509 by fib_lookup.
1510 */
1511
d0daebc3 1512 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1513 goto martian_source;
1514
d2d68ba9 1515 res.fi = NULL;
27a954bd 1516 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1517 goto brd_input;
1518
1519 /* Accept zero addresses only to limited broadcast;
1520 * I even do not know to fix it or not. Waiting for complains :-)
1521 */
f97c1e0c 1522 if (ipv4_is_zeronet(saddr))
1523 goto martian_source;
1524
d0daebc3 1525 if (ipv4_is_zeronet(daddr))
1526 goto martian_destination;
1527
1528 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev))) {
1529 if (ipv4_is_loopback(daddr))
1530 goto martian_destination;
1531
1532 if (ipv4_is_loopback(saddr))
1533 goto martian_source;
1534 }
1535
1536 /*
 1537 * Now we are ready to route the packet.
1538 */
1539 fl4.flowi4_oif = 0;
1540 fl4.flowi4_iif = dev->ifindex;
1541 fl4.flowi4_mark = skb->mark;
1542 fl4.flowi4_tos = tos;
1543 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1544 fl4.daddr = daddr;
1545 fl4.saddr = saddr;
1546 err = fib_lookup(net, &fl4, &res);
251da413 1547 if (err != 0)
1da177e4 1548 goto no_route;
1549
1550 RT_CACHE_STAT_INC(in_slow_tot);
1551
1552 if (res.type == RTN_BROADCAST)
1553 goto brd_input;
1554
1555 if (res.type == RTN_LOCAL) {
5c04c819 1556 err = fib_validate_source(skb, saddr, daddr, tos,
ebc0ffae 1557 net->loopback_dev->ifindex,
9e56e380 1558 dev, in_dev, &itag);
1559 if (err < 0)
1560 goto martian_source_keep_err;
1561 goto local_input;
1562 }
1563
1564 if (!IN_DEV_FORWARD(in_dev))
251da413 1565 goto no_route;
1566 if (res.type != RTN_UNICAST)
1567 goto martian_destination;
1568
68a5e3dd 1569 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1570out: return err;
1571
1572brd_input:
1573 if (skb->protocol != htons(ETH_P_IP))
1574 goto e_inval;
1575
41347dcd 1576 if (!ipv4_is_zeronet(saddr)) {
1577 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1578 in_dev, &itag);
1da177e4 1579 if (err < 0)
b5f7e755 1580 goto martian_source_keep_err;
1581 }
1582 flags |= RTCF_BROADCAST;
1583 res.type = RTN_BROADCAST;
1584 RT_CACHE_STAT_INC(in_brd);
1585
1586local_input:
1587 do_cache = false;
1588 if (res.fi) {
fe3edf45 1589 if (!itag) {
54764bb6 1590 rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
d2d68ba9 1591 if (rt_cache_valid(rth)) {
1592 skb_dst_set_noref(skb, &rth->dst);
1593 err = 0;
1594 goto out;
1595 }
1596 do_cache = true;
1597 }
1598 }
1599
5c1e6aa3 1600 rth = rt_dst_alloc(net->loopback_dev,
d2d68ba9 1601 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
1602 if (!rth)
1603 goto e_nobufs;
1604
cf911662 1605 rth->dst.input= ip_local_deliver;
d8d1f30b 1606 rth->dst.output= ip_rt_bug;
1607#ifdef CONFIG_IP_ROUTE_CLASSID
1608 rth->dst.tclassid = itag;
1609#endif
1da177e4 1610
1611 rth->rt_genid = rt_genid(net);
1612 rth->rt_flags = flags|RTCF_LOCAL;
1613 rth->rt_type = res.type;
9917e1e8 1614 rth->rt_is_input = 1;
13378cad 1615 rth->rt_iif = 0;
5943634f 1616 rth->rt_pmtu = 0;
f8126f1d 1617 rth->rt_gateway = 0;
1da177e4 1618 if (res.type == RTN_UNREACHABLE) {
1619 rth->dst.input= ip_error;
1620 rth->dst.error= -err;
1621 rth->rt_flags &= ~RTCF_LOCAL;
1622 }
1623 if (do_cache)
1624 rt_cache_route(&FIB_RES_NH(res), rth);
89aef892 1625 skb_dst_set(skb, &rth->dst);
b23dd4fe 1626 err = 0;
ebc0ffae 1627 goto out;
1628
1629no_route:
1630 RT_CACHE_STAT_INC(in_no_route);
1da177e4 1631 res.type = RTN_UNREACHABLE;
1632 if (err == -ESRCH)
1633 err = -ENETUNREACH;
1634 goto local_input;
1635
1636 /*
1637 * Do not cache martian addresses: they should be logged (RFC1812)
1638 */
1639martian_destination:
1640 RT_CACHE_STAT_INC(in_martian_dst);
1641#ifdef CONFIG_IP_ROUTE_VERBOSE
1642 if (IN_DEV_LOG_MARTIANS(in_dev))
1643 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
1644 &daddr, &saddr, dev->name);
1da177e4 1645#endif
2c2910a4 1646
1647e_inval:
1648 err = -EINVAL;
ebc0ffae 1649 goto out;
1650
1651e_nobufs:
1652 err = -ENOBUFS;
ebc0ffae 1653 goto out;
1654
1655martian_source:
1656 err = -EINVAL;
1657martian_source_keep_err:
1da177e4 1658 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
ebc0ffae 1659 goto out;
1660}
1661
1662int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1663 u8 tos, struct net_device *dev)
1da177e4 1664{
96d36220 1665 int res;
1da177e4 1666
1667 rcu_read_lock();
1668
1669 /* Multicast recognition logic is moved from route cache to here.
1670 The problem was that too many Ethernet cards have broken/missing
 1671 hardware multicast filters :-( As a result, a host on a multicast
 1672 network acquires a lot of useless route cache entries, sort of
 1673 SDR messages from all over the world. Now we try to get rid of them.
 1674 Really, provided the software IP multicast filter is organized
 1675 reasonably (at least, hashed), it does not result in a slowdown
 1676 compared with route cache reject entries.
1677 Note, that multicast routers are not affected, because
1678 route cache entry is created eventually.
1679 */
f97c1e0c 1680 if (ipv4_is_multicast(daddr)) {
96d36220 1681 struct in_device *in_dev = __in_dev_get_rcu(dev);
1da177e4 1682
96d36220 1683 if (in_dev) {
1684 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
1685 ip_hdr(skb)->protocol);
1686 if (our
1687#ifdef CONFIG_IP_MROUTE
1688 ||
1689 (!ipv4_is_local_multicast(daddr) &&
1690 IN_DEV_MFORWARD(in_dev))
1da177e4 1691#endif
9d4fb27d 1692 ) {
1693 int res = ip_route_input_mc(skb, daddr, saddr,
1694 tos, dev, our);
1da177e4 1695 rcu_read_unlock();
96d36220 1696 return res;
1697 }
1698 }
1699 rcu_read_unlock();
1700 return -EINVAL;
1701 }
c10237e0 1702 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
1703 rcu_read_unlock();
1704 return res;
1da177e4 1705}
c6cffba4 1706EXPORT_SYMBOL(ip_route_input_noref);
1da177e4 1707
ebc0ffae 1708/* called with rcu_read_lock() */
982721f3 1709static struct rtable *__mkroute_output(const struct fib_result *res,
1a00fee4 1710 const struct flowi4 *fl4, int orig_oif,
f61759e6 1711 struct net_device *dev_out,
5ada5527 1712 unsigned int flags)
1da177e4 1713{
982721f3 1714 struct fib_info *fi = res->fi;
f2bb4bed 1715 struct fib_nh_exception *fnhe;
5ada5527 1716 struct in_device *in_dev;
982721f3 1717 u16 type = res->type;
5ada5527 1718 struct rtable *rth;
1da177e4 1719
1720 in_dev = __in_dev_get_rcu(dev_out);
1721 if (!in_dev)
5ada5527 1722 return ERR_PTR(-EINVAL);
1da177e4 1723
1724 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1725 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
1726 return ERR_PTR(-EINVAL);
1727
68a5e3dd 1728 if (ipv4_is_lbcast(fl4->daddr))
982721f3 1729 type = RTN_BROADCAST;
68a5e3dd 1730 else if (ipv4_is_multicast(fl4->daddr))
982721f3 1731 type = RTN_MULTICAST;
68a5e3dd 1732 else if (ipv4_is_zeronet(fl4->daddr))
5ada5527 1733 return ERR_PTR(-EINVAL);
1734
1735 if (dev_out->flags & IFF_LOOPBACK)
1736 flags |= RTCF_LOCAL;
1737
982721f3 1738 if (type == RTN_BROADCAST) {
1da177e4 1739 flags |= RTCF_BROADCAST | RTCF_LOCAL;
1740 fi = NULL;
1741 } else if (type == RTN_MULTICAST) {
dd28d1a0 1742 flags |= RTCF_MULTICAST | RTCF_LOCAL;
1743 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
1744 fl4->flowi4_proto))
1745 flags &= ~RTCF_LOCAL;
1746 /* If multicast route do not exist use
1747 * default one, but do not gateway in this case.
1748 * Yes, it is hack.
1da177e4 1749 */
1750 if (fi && res->prefixlen < 4)
1751 fi = NULL;
1752 }
1753
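	/* Prefer a destination-specific exception (learned PMTU/redirect);
	 * failing that, try to reuse this CPU's cached output route for the
	 * nexthop before allocating a new rtable.
	 */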
1754 fnhe = NULL;
1755 if (fi) {
1756 fnhe = find_exception(&FIB_RES_NH(*res), fl4->daddr);
1757 if (!fnhe && FIB_RES_NH(*res).nh_pcpu_rth_output) {
1758 struct rtable __rcu **prth;
1759
1760 prth = __this_cpu_ptr(FIB_RES_NH(*res).nh_pcpu_rth_output);
1761 rth = rcu_dereference(*prth);
d2d68ba9 1762 if (rt_cache_valid(rth)) {
93ac5341 1763 dst_hold(&rth->dst);
1764 return rth;
1765 }
1766 }
1767 }
1768 rth = rt_dst_alloc(dev_out,
1769 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1770 IN_DEV_CONF_GET(in_dev, NOXFRM),
1771 fi && !fnhe);
8391d07b 1772 if (!rth)
5ada5527 1773 return ERR_PTR(-ENOBUFS);
8391d07b 1774
1775 rth->dst.output = ip_output;
1776
1777 rth->rt_genid = rt_genid(dev_net(dev_out));
1778 rth->rt_flags = flags;
1779 rth->rt_type = type;
9917e1e8 1780 rth->rt_is_input = 0;
13378cad 1781 rth->rt_iif = orig_oif ? : 0;
5943634f 1782 rth->rt_pmtu = 0;
f8126f1d 1783 rth->rt_gateway = 0;
1784
1785 RT_CACHE_STAT_INC(out_slow_tot);
1786
41347dcd 1787 if (flags & RTCF_LOCAL)
d8d1f30b 1788 rth->dst.input = ip_local_deliver;
1da177e4 1789 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
e905a9ed 1790 if (flags & RTCF_LOCAL &&
1da177e4 1791 !(dev_out->flags & IFF_LOOPBACK)) {
d8d1f30b 1792 rth->dst.output = ip_mc_output;
1da177e4
LT
1793 RT_CACHE_STAT_INC(out_slow_mc);
1794 }
1795#ifdef CONFIG_IP_MROUTE
982721f3 1796 if (type == RTN_MULTICAST) {
1da177e4 1797 if (IN_DEV_MFORWARD(in_dev) &&
813b3b5d 1798 !ipv4_is_local_multicast(fl4->daddr)) {
d8d1f30b
CG
1799 rth->dst.input = ip_mr_input;
1800 rth->dst.output = ip_mc_output;
1da177e4
LT
1801 }
1802 }
1803#endif
1804 }
1805
f2bb4bed 1806 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
1da177e4 1807
5ada5527 1808 return rth;
1da177e4
LT
1809}

/*
 * Major route resolver routine.
 */

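/*
 * Resolve an output route for the flow described by @fl4: validate any
 * caller-supplied source address, pick an output device (from flowi4_oif,
 * from the source address, or from the FIB lookup), fill in missing
 * saddr/daddr and scope, and hand the result to __mkroute_output().
 */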
struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
{
	struct net_device *dev_out = NULL;
	__u8 tos = RT_FL_TOS(fl4);
	unsigned int flags = 0;
	struct fib_result res;
	struct rtable *rth;
	int orig_oif;

	res.tclassid = 0;
	res.fi = NULL;
	res.table = NULL;

	orig_oif = fl4->flowi4_oif;

	fl4->flowi4_iif = net->loopback_dev->ifindex;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			     RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	if (fl4->saddr) {
		rth = ERR_PTR(-EINVAL);
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr))
			goto out;

		/* I removed the check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return the wrong iface
		      if saddr is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with the
		      saddr of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (dev_out == NULL)
				goto out;

			/* Special hack: the user can direct multicasts
			   and limited broadcasts via the necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic, vat and friends to work.
			   They bind a socket to loopback, set the ttl to zero
			   and expect that it will work.
			   From the viewpoint of the routing cache they are
			   broken, because we are not allowed to build a
			   multicast path with a loopback source addr (the
			   routing cache cannot know that the ttl is zero, so
			   the packet will never leave this host and the route
			   stays valid). Luckily, this hack is a good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}

	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (dev_out == NULL)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr)) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = net->loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	if (fib_lookup(net, fl4, &res)) {
		res.fi = NULL;
		res.table = NULL;
		if (fl4->flowi4_oif) {
			/* Apparently, the routing tables are wrong. Assume
			   that the destination is on-link.

			   WHY? DW.
			   Because we are allowed to send to an iface even if
			   it has NO routes and NO assigned addresses. When
			   oif is specified, the routing tables are looked up
			   with only one purpose: to catch whether the
			   destination is gatewayed rather than direct.
			   Moreover, if MSG_DONTROUTE is set, we send the
			   packet, ignoring both the routing tables and the
			   ifaddr state. --ANK

			   We could do this even if oif is unknown, as IPv6
			   likely does, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res.type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(-ENETUNREACH);
		goto out;
	}

	if (res.type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res.fi->fib_prefsrc)
				fl4->saddr = res.fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = dev_out->ifindex;
		res.fi = NULL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
		fib_select_multipath(&res);
	else
#endif
	if (!res.prefixlen &&
	    res.table->tb_num_default > 1 &&
	    res.type == RTN_UNICAST && !fl4->flowi4_oif)
		fib_select_default(&res);

	if (!fl4->saddr)
		fl4->saddr = FIB_RES_PREFSRC(net, res);

	dev_out = FIB_RES_DEV(res);
	fl4->flowi4_oif = dev_out->ifindex;

make_route:
	rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);

out:
	rcu_read_unlock();
	return rth;
}
EXPORT_SYMBOL_GPL(__ip_route_output_key);

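/*
 * Blackhole dst_ops, used by ipv4_blackhole_route() below: the check,
 * update_pmtu, redirect and cow_metrics callbacks are all no-ops, so a
 * blackhole route never changes state; its input/output handlers simply
 * discard packets.
 */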
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.protocol		= cpu_to_be16(ETH_P_IP),
	.check			= ipv4_blackhole_dst_check,
	.mtu			= ipv4_blackhole_mtu,
	.default_advmss		= ipv4_default_advmss,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.redirect		= ipv4_rt_blackhole_redirect,
	.cow_metrics		= ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		= ipv4_neigh_lookup,
};

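/*
 * Clone an existing output route into a blackhole entry: the copy keeps
 * the original's device, flags and gateway but uses dst_discard for both
 * input and output. Typically this is handed back by the xfrm code when
 * it cannot yet provide a usable route and traffic must be silently
 * dropped instead.
 */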
struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard;

		new->dev = ort->dst.dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;

		rt->rt_genid = rt_genid(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_gateway = ort->rt_gateway;

		dst_free(new);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}

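/*
 * Resolve an output route and, when a transport protocol is given in the
 * flow, pass the result through xfrm_lookup() so policy transformations
 * apply. A minimal caller sketch (hypothetical local variables, error
 * handling elided):
 *
 *	struct flowi4 fl4 = { .daddr = dst_ip, .flowi4_oif = oif };
 *	struct rtable *rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *	...
 *	ip_rt_put(rt);
 */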
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *) xfrm_lookup(net, &rt->dst,
						   flowi4_to_flowi(flp4),
						   sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);

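/*
 * Fill a netlink RTM_NEWROUTE message describing @rt for an RTM_GETROUTE
 * reply: route type, addresses, output/input interface, metrics (with
 * rt_pmtu overriding RTAX_MTU), firewall mark and cache info.
 */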
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct flowi4 *fl4, struct sk_buff *skb, u32 pid,
			u32 seq, int event, int nowait, unsigned int flags)
{
	struct rtable *rt = skb_rtable(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (nlh == NULL)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_src_len = 0;
	r->rtm_tos = fl4->flowi4_tos;
	r->rtm_table = RT_TABLE_MAIN;
	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
		goto nla_put_failure;
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	if (nla_put_be32(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_be32(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (!rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_be32(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_gateway &&
	    nla_put_be32(skb, RTA_GATEWAY, rt->rt_gateway))
		goto nla_put_failure;

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4->flowi4_mark &&
	    nla_put_be32(skb, RTA_MARK, fl4->flowi4_mark))
		goto nla_put_failure;

	error = rt->dst.error;
	expires = rt->dst.expires;
	if (expires) {
		if (time_before(jiffies, expires))
			expires -= jiffies;
		else
			expires = 0;
	}

	if (rt_is_input_route(rt)) {
		if (nla_put_u32(skb, RTA_IIF, rt->rt_iif))
			goto nla_put_failure;
	}

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	return nlmsg_end(skb, nlh);

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

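/*
 * RTM_GETROUTE handler (what "ip route get" exercises from userspace):
 * parse the request attributes, resolve the route either via
 * ip_route_input() (when an input interface is given) or
 * ip_route_output_key(), and answer the requester with rt_fill_info().
 */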
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	struct rtable *rt = NULL;
	struct flowi4 fl4;
	__be32 dst = 0;
	__be32 src = 0;
	u32 iif;
	int err;
	int mark;
	struct sk_buff *skb;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
	if (err < 0)
		goto errout;

	rtm = nlmsg_data(nlh);

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (skb == NULL) {
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers; this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	/* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
	ip_hdr(skb)->protocol = IPPROTO_ICMP;
	skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));

	src = tb[RTA_SRC] ? nla_get_be32(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_be32(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;

	if (iif) {
		struct net_device *dev;

		dev = __dev_get_by_index(net, iif);
		if (dev == NULL) {
			err = -ENODEV;
			goto errout_free;
		}

		skb->protocol = htons(ETH_P_IP);
		skb->dev = dev;
		skb->mark = mark;
		local_bh_disable();
		err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
		local_bh_enable();

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		rt = ip_route_output_key(net, &fl4);

		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
	}

	if (err)
		goto errout_free;

	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = rt_fill_info(net, dst, src, &fl4, skb,
			   NETLINK_CB(in_skb).pid, nlh->nlmsg_seq,
			   RTM_NEWROUTE, 0, 0);
	if (err <= 0)
		goto errout_free;

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).pid);
errout:
	return err;

errout_free:
	kfree_skb(skb);
	goto errout;
}

int ip_rt_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	return skb->len;
}

void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev), 0);
}

#ifdef CONFIG_SYSCTL
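/*
 * Handler for /proc/sys/net/ipv4/route/flush: any write triggers a flush
 * of the routing cache for the sysctl's network namespace, using the
 * written value as the flush delay.
 */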
static int ipv4_sysctl_rtcache_flush(ctl_table *__ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	if (write) {
		int flush_delay;
		ctl_table ctl;
		struct net *net;

		memcpy(&ctl, __ctl, sizeof(ctl));
		ctl.data = &flush_delay;
		proc_dointvec(&ctl, write, buffer, lenp, ppos);

		net = (struct net *)__ctl->extra1;
		rt_cache_flush(net, flush_delay);
		return 0;
	}

	return -EINVAL;
}

static ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= "flush",
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};

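/*
 * Per-namespace sysctl setup: every network namespace except init_net
 * gets its own copy of the flush table, with extra1 pointing back at the
 * namespace so the flush handler knows which cache to invalidate.
 */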
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (tbl == NULL)
			goto err_dup;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (net->ipv4.route_hdr == NULL)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif

static __net_init int rt_genid_init(struct net *net)
{
	get_random_bytes(&net->ipv4.rt_genid,
			 sizeof(net->ipv4.rt_genid));
	get_random_bytes(&net->ipv4.dev_addr_genid,
			 sizeof(net->ipv4.dev_addr_genid));
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init	= ipv4_inetpeer_init,
	.exit	= ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

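/*
 * Boot-time initialisation for the IPv4 routing layer: allocate the dst
 * slab and per-cpu accounting, initialise devinet and the FIB, register
 * /proc entries, xfrm hooks, the RTM_GETROUTE handler and the per-netns
 * operations defined above.
 */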
int __init ip_rt_init(void)
{
	int rc = 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init(ip_rt_max_size);
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return rc;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif