raid5: allow arbitrary max_hw_sectors
[deliverable/linux.git] / net / ipv6 / addrconf.c
1 /*
2 * IPv6 Address [auto]configuration
3 * Linux INET6 implementation
4 *
5 * Authors:
6 * Pedro Roque <roque@di.fc.ul.pt>
7 * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15 /*
16 * Changes:
17 *
18 * Janos Farkas : delete timer on ifdown
19 * <chexum@bankinf.banki.hu>
20 * Andi Kleen : kill double kfree on module
21 * unload.
22 * Maciej W. Rozycki : FDDI support
23 * sekiya@USAGI : Don't send too many RS
24 * packets.
25 * yoshfuji@USAGI : Fixed interval between DAD
26 * packets.
27 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
28 * address validation timer.
29 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
30 * support.
31 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
32 * address on a same interface.
33 * YOSHIFUJI Hideaki @USAGI : ARCnet support
34 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
35 * seq_file.
36 * YOSHIFUJI Hideaki @USAGI : improved source address
37 * selection; consider scope,
38 * status etc.
39 */
40
41 #define pr_fmt(fmt) "IPv6: " fmt
42
43 #include <linux/errno.h>
44 #include <linux/types.h>
45 #include <linux/kernel.h>
46 #include <linux/socket.h>
47 #include <linux/sockios.h>
48 #include <linux/net.h>
49 #include <linux/inet.h>
50 #include <linux/in6.h>
51 #include <linux/netdevice.h>
52 #include <linux/if_addr.h>
53 #include <linux/if_arp.h>
54 #include <linux/if_arcnet.h>
55 #include <linux/if_infiniband.h>
56 #include <linux/route.h>
57 #include <linux/inetdevice.h>
58 #include <linux/init.h>
59 #include <linux/slab.h>
60 #ifdef CONFIG_SYSCTL
61 #include <linux/sysctl.h>
62 #endif
63 #include <linux/capability.h>
64 #include <linux/delay.h>
65 #include <linux/notifier.h>
66 #include <linux/string.h>
67 #include <linux/hash.h>
68
69 #include <net/net_namespace.h>
70 #include <net/sock.h>
71 #include <net/snmp.h>
72
73 #include <net/6lowpan.h>
74 #include <net/firewire.h>
75 #include <net/ipv6.h>
76 #include <net/protocol.h>
77 #include <net/ndisc.h>
78 #include <net/ip6_route.h>
79 #include <net/addrconf.h>
80 #include <net/tcp.h>
81 #include <net/ip.h>
82 #include <net/netlink.h>
83 #include <net/pkt_sched.h>
84 #include <net/l3mdev.h>
85 #include <linux/if_tunnel.h>
86 #include <linux/rtnetlink.h>
87 #include <linux/netconf.h>
88 #include <linux/random.h>
89 #include <linux/uaccess.h>
90 #include <asm/unaligned.h>
91
92 #include <linux/proc_fs.h>
93 #include <linux/seq_file.h>
94 #include <linux/export.h>
95
96 /* Set to 3 to get tracing... */
97 #define ACONF_DEBUG 2
98
99 #if ACONF_DEBUG >= 3
100 #define ADBG(fmt, ...) printk(fmt, ##__VA_ARGS__)
101 #else
102 #define ADBG(fmt, ...) do { if (0) printk(fmt, ##__VA_ARGS__); } while (0)
103 #endif
104
105 #define INFINITY_LIFE_TIME 0xFFFFFFFF
106
107 #define IPV6_MAX_STRLEN \
108 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
109
110 static inline u32 cstamp_delta(unsigned long cstamp)
111 {
112 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
113 }
114
115 #ifdef CONFIG_SYSCTL
116 static int addrconf_sysctl_register(struct inet6_dev *idev);
117 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
118 #else
119 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
120 {
121 return 0;
122 }
123
124 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
125 {
126 }
127 #endif
128
129 static void __ipv6_regen_rndid(struct inet6_dev *idev);
130 static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr);
131 static void ipv6_regen_rndid(unsigned long data);
132
133 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
134 static int ipv6_count_addresses(struct inet6_dev *idev);
135 static int ipv6_generate_stable_address(struct in6_addr *addr,
136 u8 dad_count,
137 const struct inet6_dev *idev);
138
139 /*
140 * Configured unicast address hash table
141 */
142 static struct hlist_head inet6_addr_lst[IN6_ADDR_HSIZE];
143 static DEFINE_SPINLOCK(addrconf_hash_lock);
144
145 static void addrconf_verify(void);
146 static void addrconf_verify_rtnl(void);
147 static void addrconf_verify_work(struct work_struct *);
148
149 static struct workqueue_struct *addrconf_wq;
150 static DECLARE_DELAYED_WORK(addr_chk_work, addrconf_verify_work);
151
152 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
153 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
154
155 static void addrconf_type_change(struct net_device *dev,
156 unsigned long event);
157 static int addrconf_ifdown(struct net_device *dev, int how);
158
159 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
160 int plen,
161 const struct net_device *dev,
162 u32 flags, u32 noflags);
163
164 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
165 static void addrconf_dad_work(struct work_struct *w);
166 static void addrconf_dad_completed(struct inet6_ifaddr *ifp);
167 static void addrconf_dad_run(struct inet6_dev *idev);
168 static void addrconf_rs_timer(unsigned long data);
169 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
170 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
171
172 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
173 struct prefix_info *pinfo);
174 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
175 struct net_device *dev);
176
177 static struct ipv6_devconf ipv6_devconf __read_mostly = {
178 .forwarding = 0,
179 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
180 .mtu6 = IPV6_MIN_MTU,
181 .accept_ra = 1,
182 .accept_redirects = 1,
183 .autoconf = 1,
184 .force_mld_version = 0,
185 .mldv1_unsolicited_report_interval = 10 * HZ,
186 .mldv2_unsolicited_report_interval = HZ,
187 .dad_transmits = 1,
188 .rtr_solicits = MAX_RTR_SOLICITATIONS,
189 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
190 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
191 .use_tempaddr = 0,
192 .temp_valid_lft = TEMP_VALID_LIFETIME,
193 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
194 .regen_max_retry = REGEN_MAX_RETRY,
195 .max_desync_factor = MAX_DESYNC_FACTOR,
196 .max_addresses = IPV6_MAX_ADDRESSES,
197 .accept_ra_defrtr = 1,
198 .accept_ra_from_local = 0,
199 .accept_ra_min_hop_limit= 1,
200 .accept_ra_pinfo = 1,
201 #ifdef CONFIG_IPV6_ROUTER_PREF
202 .accept_ra_rtr_pref = 1,
203 .rtr_probe_interval = 60 * HZ,
204 #ifdef CONFIG_IPV6_ROUTE_INFO
205 .accept_ra_rt_info_max_plen = 0,
206 #endif
207 #endif
208 .proxy_ndp = 0,
209 .accept_source_route = 0, /* we do not accept RH0 by default. */
210 .disable_ipv6 = 0,
211 .accept_dad = 1,
212 .suppress_frag_ndisc = 1,
213 .accept_ra_mtu = 1,
214 .stable_secret = {
215 .initialized = false,
216 },
217 .use_oif_addrs_only = 0,
218 .ignore_routes_with_linkdown = 0,
219 .keep_addr_on_down = 0,
220 };
221
222 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
223 .forwarding = 0,
224 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
225 .mtu6 = IPV6_MIN_MTU,
226 .accept_ra = 1,
227 .accept_redirects = 1,
228 .autoconf = 1,
229 .force_mld_version = 0,
230 .mldv1_unsolicited_report_interval = 10 * HZ,
231 .mldv2_unsolicited_report_interval = HZ,
232 .dad_transmits = 1,
233 .rtr_solicits = MAX_RTR_SOLICITATIONS,
234 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
235 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
236 .use_tempaddr = 0,
237 .temp_valid_lft = TEMP_VALID_LIFETIME,
238 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
239 .regen_max_retry = REGEN_MAX_RETRY,
240 .max_desync_factor = MAX_DESYNC_FACTOR,
241 .max_addresses = IPV6_MAX_ADDRESSES,
242 .accept_ra_defrtr = 1,
243 .accept_ra_from_local = 0,
244 .accept_ra_min_hop_limit= 1,
245 .accept_ra_pinfo = 1,
246 #ifdef CONFIG_IPV6_ROUTER_PREF
247 .accept_ra_rtr_pref = 1,
248 .rtr_probe_interval = 60 * HZ,
249 #ifdef CONFIG_IPV6_ROUTE_INFO
250 .accept_ra_rt_info_max_plen = 0,
251 #endif
252 #endif
253 .proxy_ndp = 0,
254 .accept_source_route = 0, /* we do not accept RH0 by default. */
255 .disable_ipv6 = 0,
256 .accept_dad = 1,
257 .suppress_frag_ndisc = 1,
258 .accept_ra_mtu = 1,
259 .stable_secret = {
260 .initialized = false,
261 },
262 .use_oif_addrs_only = 0,
263 .ignore_routes_with_linkdown = 0,
264 .keep_addr_on_down = 0,
265 };
266
267 /* Check if a valid qdisc is available */
268 static inline bool addrconf_qdisc_ok(const struct net_device *dev)
269 {
270 return !qdisc_tx_is_noop(dev);
271 }
272
273 static void addrconf_del_rs_timer(struct inet6_dev *idev)
274 {
275 if (del_timer(&idev->rs_timer))
276 __in6_dev_put(idev);
277 }
278
279 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
280 {
281 if (cancel_delayed_work(&ifp->dad_work))
282 __in6_ifa_put(ifp);
283 }
284
285 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
286 unsigned long when)
287 {
288 if (!timer_pending(&idev->rs_timer))
289 in6_dev_hold(idev);
290 mod_timer(&idev->rs_timer, jiffies + when);
291 }
292
293 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
294 unsigned long delay)
295 {
296 if (!delayed_work_pending(&ifp->dad_work))
297 in6_ifa_hold(ifp);
298 mod_delayed_work(addrconf_wq, &ifp->dad_work, delay);
299 }
300
301 static int snmp6_alloc_dev(struct inet6_dev *idev)
302 {
303 int i;
304
305 idev->stats.ipv6 = alloc_percpu(struct ipstats_mib);
306 if (!idev->stats.ipv6)
307 goto err_ip;
308
309 for_each_possible_cpu(i) {
310 struct ipstats_mib *addrconf_stats;
311 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
312 u64_stats_init(&addrconf_stats->syncp);
313 }
314
315
316 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
317 GFP_KERNEL);
318 if (!idev->stats.icmpv6dev)
319 goto err_icmp;
320 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
321 GFP_KERNEL);
322 if (!idev->stats.icmpv6msgdev)
323 goto err_icmpmsg;
324
325 return 0;
326
327 err_icmpmsg:
328 kfree(idev->stats.icmpv6dev);
329 err_icmp:
330 free_percpu(idev->stats.ipv6);
331 err_ip:
332 return -ENOMEM;
333 }
334
335 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
336 {
337 struct inet6_dev *ndev;
338 int err = -ENOMEM;
339
340 ASSERT_RTNL();
341
342 if (dev->mtu < IPV6_MIN_MTU)
343 return ERR_PTR(-EINVAL);
344
345 ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
346 if (!ndev)
347 return ERR_PTR(err);
348
349 rwlock_init(&ndev->lock);
350 ndev->dev = dev;
351 INIT_LIST_HEAD(&ndev->addr_list);
352 setup_timer(&ndev->rs_timer, addrconf_rs_timer,
353 (unsigned long)ndev);
354 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
355
356 if (ndev->cnf.stable_secret.initialized)
357 ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
358 else
359 ndev->addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64;
360
361 ndev->cnf.mtu6 = dev->mtu;
362 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
363 if (!ndev->nd_parms) {
364 kfree(ndev);
365 return ERR_PTR(err);
366 }
367 if (ndev->cnf.forwarding)
368 dev_disable_lro(dev);
369 /* We refer to the device */
370 dev_hold(dev);
371
372 if (snmp6_alloc_dev(ndev) < 0) {
373 ADBG(KERN_WARNING
374 "%s: cannot allocate memory for statistics; dev=%s.\n",
375 __func__, dev->name);
376 neigh_parms_release(&nd_tbl, ndev->nd_parms);
377 dev_put(dev);
378 kfree(ndev);
379 return ERR_PTR(err);
380 }
381
382 if (snmp6_register_dev(ndev) < 0) {
383 ADBG(KERN_WARNING
384 "%s: cannot create /proc/net/dev_snmp6/%s\n",
385 __func__, dev->name);
386 goto err_release;
387 }
388
389 /* One reference from device. We must do this before
390 * we invoke __ipv6_regen_rndid().
391 */
392 in6_dev_hold(ndev);
393
394 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
395 ndev->cnf.accept_dad = -1;
396
397 #if IS_ENABLED(CONFIG_IPV6_SIT)
398 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
399 pr_info("%s: Disabled Multicast RS\n", dev->name);
400 ndev->cnf.rtr_solicits = 0;
401 }
402 #endif
403
404 INIT_LIST_HEAD(&ndev->tempaddr_list);
405 setup_timer(&ndev->regen_timer, ipv6_regen_rndid, (unsigned long)ndev);
406 if ((dev->flags&IFF_LOOPBACK) ||
407 dev->type == ARPHRD_TUNNEL ||
408 dev->type == ARPHRD_TUNNEL6 ||
409 dev->type == ARPHRD_SIT ||
410 dev->type == ARPHRD_NONE) {
411 ndev->cnf.use_tempaddr = -1;
412 } else {
413 in6_dev_hold(ndev);
414 ipv6_regen_rndid((unsigned long) ndev);
415 }
416
417 ndev->token = in6addr_any;
418
419 if (netif_running(dev) && addrconf_qdisc_ok(dev))
420 ndev->if_flags |= IF_READY;
421
422 ipv6_mc_init_dev(ndev);
423 ndev->tstamp = jiffies;
424 err = addrconf_sysctl_register(ndev);
425 if (err) {
426 ipv6_mc_destroy_dev(ndev);
427 del_timer(&ndev->regen_timer);
428 snmp6_unregister_dev(ndev);
429 goto err_release;
430 }
431 /* protected by rtnl_lock */
432 rcu_assign_pointer(dev->ip6_ptr, ndev);
433
434 /* Join interface-local all-node multicast group */
435 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
436
437 /* Join all-node multicast group */
438 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
439
440 /* Join all-router multicast group if forwarding is set */
441 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
442 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
443
444 return ndev;
445
446 err_release:
447 neigh_parms_release(&nd_tbl, ndev->nd_parms);
448 ndev->dead = 1;
449 in6_dev_finish_destroy(ndev);
450 return ERR_PTR(err);
451 }
452
453 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
454 {
455 struct inet6_dev *idev;
456
457 ASSERT_RTNL();
458
459 idev = __in6_dev_get(dev);
460 if (!idev) {
461 idev = ipv6_add_dev(dev);
462 if (IS_ERR(idev))
463 return NULL;
464 }
465
466 if (dev->flags&IFF_UP)
467 ipv6_mc_up(idev);
468 return idev;
469 }
470
471 static int inet6_netconf_msgsize_devconf(int type)
472 {
473 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
474 + nla_total_size(4); /* NETCONFA_IFINDEX */
475 bool all = false;
476
477 if (type == NETCONFA_ALL)
478 all = true;
479
480 if (all || type == NETCONFA_FORWARDING)
481 size += nla_total_size(4);
482 #ifdef CONFIG_IPV6_MROUTE
483 if (all || type == NETCONFA_MC_FORWARDING)
484 size += nla_total_size(4);
485 #endif
486 if (all || type == NETCONFA_PROXY_NEIGH)
487 size += nla_total_size(4);
488
489 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
490 size += nla_total_size(4);
491
492 return size;
493 }
494
495 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
496 struct ipv6_devconf *devconf, u32 portid,
497 u32 seq, int event, unsigned int flags,
498 int type)
499 {
500 struct nlmsghdr *nlh;
501 struct netconfmsg *ncm;
502 bool all = false;
503
504 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
505 flags);
506 if (!nlh)
507 return -EMSGSIZE;
508
509 if (type == NETCONFA_ALL)
510 all = true;
511
512 ncm = nlmsg_data(nlh);
513 ncm->ncm_family = AF_INET6;
514
515 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
516 goto nla_put_failure;
517
518 if ((all || type == NETCONFA_FORWARDING) &&
519 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
520 goto nla_put_failure;
521 #ifdef CONFIG_IPV6_MROUTE
522 if ((all || type == NETCONFA_MC_FORWARDING) &&
523 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
524 devconf->mc_forwarding) < 0)
525 goto nla_put_failure;
526 #endif
527 if ((all || type == NETCONFA_PROXY_NEIGH) &&
528 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
529 goto nla_put_failure;
530
531 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
532 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
533 devconf->ignore_routes_with_linkdown) < 0)
534 goto nla_put_failure;
535
536 nlmsg_end(skb, nlh);
537 return 0;
538
539 nla_put_failure:
540 nlmsg_cancel(skb, nlh);
541 return -EMSGSIZE;
542 }
543
544 void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
545 struct ipv6_devconf *devconf)
546 {
547 struct sk_buff *skb;
548 int err = -ENOBUFS;
549
550 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
551 if (!skb)
552 goto errout;
553
554 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
555 RTM_NEWNETCONF, 0, type);
556 if (err < 0) {
557 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
558 WARN_ON(err == -EMSGSIZE);
559 kfree_skb(skb);
560 goto errout;
561 }
562 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
563 return;
564 errout:
565 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
566 }
567
568 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
569 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
570 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
571 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
572 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
573 };
574
575 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
576 struct nlmsghdr *nlh)
577 {
578 struct net *net = sock_net(in_skb->sk);
579 struct nlattr *tb[NETCONFA_MAX+1];
580 struct netconfmsg *ncm;
581 struct sk_buff *skb;
582 struct ipv6_devconf *devconf;
583 struct inet6_dev *in6_dev;
584 struct net_device *dev;
585 int ifindex;
586 int err;
587
588 err = nlmsg_parse(nlh, sizeof(*ncm), tb, NETCONFA_MAX,
589 devconf_ipv6_policy);
590 if (err < 0)
591 goto errout;
592
593 err = -EINVAL;
594 if (!tb[NETCONFA_IFINDEX])
595 goto errout;
596
597 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
598 switch (ifindex) {
599 case NETCONFA_IFINDEX_ALL:
600 devconf = net->ipv6.devconf_all;
601 break;
602 case NETCONFA_IFINDEX_DEFAULT:
603 devconf = net->ipv6.devconf_dflt;
604 break;
605 default:
606 dev = __dev_get_by_index(net, ifindex);
607 if (!dev)
608 goto errout;
609 in6_dev = __in6_dev_get(dev);
610 if (!in6_dev)
611 goto errout;
612 devconf = &in6_dev->cnf;
613 break;
614 }
615
616 err = -ENOBUFS;
617 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_ATOMIC);
618 if (!skb)
619 goto errout;
620
621 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
622 NETLINK_CB(in_skb).portid,
623 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
624 NETCONFA_ALL);
625 if (err < 0) {
626 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
627 WARN_ON(err == -EMSGSIZE);
628 kfree_skb(skb);
629 goto errout;
630 }
631 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
632 errout:
633 return err;
634 }
635
636 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
637 struct netlink_callback *cb)
638 {
639 struct net *net = sock_net(skb->sk);
640 int h, s_h;
641 int idx, s_idx;
642 struct net_device *dev;
643 struct inet6_dev *idev;
644 struct hlist_head *head;
645
646 s_h = cb->args[0];
647 s_idx = idx = cb->args[1];
648
649 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
650 idx = 0;
651 head = &net->dev_index_head[h];
652 rcu_read_lock();
653 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
654 net->dev_base_seq;
655 hlist_for_each_entry_rcu(dev, head, index_hlist) {
656 if (idx < s_idx)
657 goto cont;
658 idev = __in6_dev_get(dev);
659 if (!idev)
660 goto cont;
661
662 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
663 &idev->cnf,
664 NETLINK_CB(cb->skb).portid,
665 cb->nlh->nlmsg_seq,
666 RTM_NEWNETCONF,
667 NLM_F_MULTI,
668 NETCONFA_ALL) < 0) {
669 rcu_read_unlock();
670 goto done;
671 }
672 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
673 cont:
674 idx++;
675 }
676 rcu_read_unlock();
677 }
678 if (h == NETDEV_HASHENTRIES) {
679 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
680 net->ipv6.devconf_all,
681 NETLINK_CB(cb->skb).portid,
682 cb->nlh->nlmsg_seq,
683 RTM_NEWNETCONF, NLM_F_MULTI,
684 NETCONFA_ALL) < 0)
685 goto done;
686 else
687 h++;
688 }
689 if (h == NETDEV_HASHENTRIES + 1) {
690 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
691 net->ipv6.devconf_dflt,
692 NETLINK_CB(cb->skb).portid,
693 cb->nlh->nlmsg_seq,
694 RTM_NEWNETCONF, NLM_F_MULTI,
695 NETCONFA_ALL) < 0)
696 goto done;
697 else
698 h++;
699 }
700 done:
701 cb->args[0] = h;
702 cb->args[1] = idx;
703
704 return skb->len;
705 }
706
707 #ifdef CONFIG_SYSCTL
708 static void dev_forward_change(struct inet6_dev *idev)
709 {
710 struct net_device *dev;
711 struct inet6_ifaddr *ifa;
712
713 if (!idev)
714 return;
715 dev = idev->dev;
716 if (idev->cnf.forwarding)
717 dev_disable_lro(dev);
718 if (dev->flags & IFF_MULTICAST) {
719 if (idev->cnf.forwarding) {
720 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
721 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
722 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
723 } else {
724 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
725 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
726 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
727 }
728 }
729
730 list_for_each_entry(ifa, &idev->addr_list, if_list) {
731 if (ifa->flags&IFA_F_TENTATIVE)
732 continue;
733 if (idev->cnf.forwarding)
734 addrconf_join_anycast(ifa);
735 else
736 addrconf_leave_anycast(ifa);
737 }
738 inet6_netconf_notify_devconf(dev_net(dev), NETCONFA_FORWARDING,
739 dev->ifindex, &idev->cnf);
740 }
741
742
743 static void addrconf_forward_change(struct net *net, __s32 newf)
744 {
745 struct net_device *dev;
746 struct inet6_dev *idev;
747
748 for_each_netdev(net, dev) {
749 idev = __in6_dev_get(dev);
750 if (idev) {
751 int changed = (!idev->cnf.forwarding) ^ (!newf);
752 idev->cnf.forwarding = newf;
753 if (changed)
754 dev_forward_change(idev);
755 }
756 }
757 }
758
759 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
760 {
761 struct net *net;
762 int old;
763
764 if (!rtnl_trylock())
765 return restart_syscall();
766
767 net = (struct net *)table->extra2;
768 old = *p;
769 *p = newf;
770
771 if (p == &net->ipv6.devconf_dflt->forwarding) {
772 if ((!newf) ^ (!old))
773 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
774 NETCONFA_IFINDEX_DEFAULT,
775 net->ipv6.devconf_dflt);
776 rtnl_unlock();
777 return 0;
778 }
779
780 if (p == &net->ipv6.devconf_all->forwarding) {
781 net->ipv6.devconf_dflt->forwarding = newf;
782 addrconf_forward_change(net, newf);
783 if ((!newf) ^ (!old))
784 inet6_netconf_notify_devconf(net, NETCONFA_FORWARDING,
785 NETCONFA_IFINDEX_ALL,
786 net->ipv6.devconf_all);
787 } else if ((!newf) ^ (!old))
788 dev_forward_change((struct inet6_dev *)table->extra1);
789 rtnl_unlock();
790
791 if (newf)
792 rt6_purge_dflt_routers(net);
793 return 1;
794 }
795
796 static void addrconf_linkdown_change(struct net *net, __s32 newf)
797 {
798 struct net_device *dev;
799 struct inet6_dev *idev;
800
801 for_each_netdev(net, dev) {
802 idev = __in6_dev_get(dev);
803 if (idev) {
804 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
805
806 idev->cnf.ignore_routes_with_linkdown = newf;
807 if (changed)
808 inet6_netconf_notify_devconf(dev_net(dev),
809 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
810 dev->ifindex,
811 &idev->cnf);
812 }
813 }
814 }
815
816 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
817 {
818 struct net *net;
819 int old;
820
821 if (!rtnl_trylock())
822 return restart_syscall();
823
824 net = (struct net *)table->extra2;
825 old = *p;
826 *p = newf;
827
828 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
829 if ((!newf) ^ (!old))
830 inet6_netconf_notify_devconf(net,
831 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
832 NETCONFA_IFINDEX_DEFAULT,
833 net->ipv6.devconf_dflt);
834 rtnl_unlock();
835 return 0;
836 }
837
838 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
839 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
840 addrconf_linkdown_change(net, newf);
841 if ((!newf) ^ (!old))
842 inet6_netconf_notify_devconf(net,
843 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
844 NETCONFA_IFINDEX_ALL,
845 net->ipv6.devconf_all);
846 }
847 rtnl_unlock();
848
849 return 1;
850 }
851
852 #endif
853
854 /* Nobody refers to this ifaddr, destroy it */
855 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
856 {
857 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
858
859 #ifdef NET_REFCNT_DEBUG
860 pr_debug("%s\n", __func__);
861 #endif
862
863 in6_dev_put(ifp->idev);
864
865 if (cancel_delayed_work(&ifp->dad_work))
866 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
867 ifp);
868
869 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
870 pr_warn("Freeing alive inet6 address %p\n", ifp);
871 return;
872 }
873 ip6_rt_put(ifp->rt);
874
875 kfree_rcu(ifp, rcu);
876 }
877
878 static void
879 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
880 {
881 struct list_head *p;
882 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
883
884 /*
885 * Each device address list is sorted in order of scope -
886 * global before linklocal.
887 */
888 list_for_each(p, &idev->addr_list) {
889 struct inet6_ifaddr *ifa
890 = list_entry(p, struct inet6_ifaddr, if_list);
891 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
892 break;
893 }
894
895 list_add_tail(&ifp->if_list, p);
896 }
897
898 static u32 inet6_addr_hash(const struct in6_addr *addr)
899 {
900 return hash_32(ipv6_addr_hash(addr), IN6_ADDR_HSIZE_SHIFT);
901 }
902
903 /* On success it returns ifp with increased reference count */
904
905 static struct inet6_ifaddr *
906 ipv6_add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
907 const struct in6_addr *peer_addr, int pfxlen,
908 int scope, u32 flags, u32 valid_lft, u32 prefered_lft)
909 {
910 struct inet6_ifaddr *ifa = NULL;
911 struct rt6_info *rt;
912 unsigned int hash;
913 int err = 0;
914 int addr_type = ipv6_addr_type(addr);
915
916 if (addr_type == IPV6_ADDR_ANY ||
917 addr_type & IPV6_ADDR_MULTICAST ||
918 (!(idev->dev->flags & IFF_LOOPBACK) &&
919 addr_type & IPV6_ADDR_LOOPBACK))
920 return ERR_PTR(-EADDRNOTAVAIL);
921
922 rcu_read_lock_bh();
923 if (idev->dead) {
924 err = -ENODEV; /*XXX*/
925 goto out2;
926 }
927
928 if (idev->cnf.disable_ipv6) {
929 err = -EACCES;
930 goto out2;
931 }
932
933 spin_lock(&addrconf_hash_lock);
934
935 /* Ignore adding duplicate addresses on an interface */
936 if (ipv6_chk_same_addr(dev_net(idev->dev), addr, idev->dev)) {
937 ADBG("ipv6_add_addr: already assigned\n");
938 err = -EEXIST;
939 goto out;
940 }
941
942 ifa = kzalloc(sizeof(struct inet6_ifaddr), GFP_ATOMIC);
943
944 if (!ifa) {
945 ADBG("ipv6_add_addr: malloc failed\n");
946 err = -ENOBUFS;
947 goto out;
948 }
949
950 rt = addrconf_dst_alloc(idev, addr, false);
951 if (IS_ERR(rt)) {
952 err = PTR_ERR(rt);
953 goto out;
954 }
955
956 neigh_parms_data_state_setall(idev->nd_parms);
957
958 ifa->addr = *addr;
959 if (peer_addr)
960 ifa->peer_addr = *peer_addr;
961
962 spin_lock_init(&ifa->lock);
963 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
964 INIT_HLIST_NODE(&ifa->addr_lst);
965 ifa->scope = scope;
966 ifa->prefix_len = pfxlen;
967 ifa->flags = flags | IFA_F_TENTATIVE;
968 ifa->valid_lft = valid_lft;
969 ifa->prefered_lft = prefered_lft;
970 ifa->cstamp = ifa->tstamp = jiffies;
971 ifa->tokenized = false;
972
973 ifa->rt = rt;
974
975 ifa->idev = idev;
976 in6_dev_hold(idev);
977 /* For caller */
978 in6_ifa_hold(ifa);
979
980 /* Add to big hash table */
981 hash = inet6_addr_hash(addr);
982
983 hlist_add_head_rcu(&ifa->addr_lst, &inet6_addr_lst[hash]);
984 spin_unlock(&addrconf_hash_lock);
985
986 write_lock(&idev->lock);
987 /* Add to inet6_dev unicast addr list. */
988 ipv6_link_dev_addr(idev, ifa);
989
990 if (ifa->flags&IFA_F_TEMPORARY) {
991 list_add(&ifa->tmp_list, &idev->tempaddr_list);
992 in6_ifa_hold(ifa);
993 }
994
995 in6_ifa_hold(ifa);
996 write_unlock(&idev->lock);
997 out2:
998 rcu_read_unlock_bh();
999
1000 if (likely(err == 0))
1001 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1002 else {
1003 kfree(ifa);
1004 ifa = ERR_PTR(err);
1005 }
1006
1007 return ifa;
1008 out:
1009 spin_unlock(&addrconf_hash_lock);
1010 goto out2;
1011 }
1012
1013 enum cleanup_prefix_rt_t {
1014 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1015 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1016 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1017 };
1018
1019 /*
1020 * Check, whether the prefix for ifp would still need a prefix route
1021 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1022 * constants.
1023 *
1024 * 1) we don't purge prefix if address was not permanent.
1025 * prefix is managed by its own lifetime.
1026 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1027 * 3) if there are no addresses, delete prefix.
1028 * 4) if there are still other permanent address(es),
1029 * corresponding prefix is still permanent.
1030 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1031 * don't purge the prefix, assume user space is managing it.
1032 * 6) otherwise, update prefix lifetime to the
1033 * longest valid lifetime among the corresponding
1034 * addresses on the device.
1035 * Note: subsequent RA will update lifetime.
1036 **/
1037 static enum cleanup_prefix_rt_t
1038 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1039 {
1040 struct inet6_ifaddr *ifa;
1041 struct inet6_dev *idev = ifp->idev;
1042 unsigned long lifetime;
1043 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1044
1045 *expires = jiffies;
1046
1047 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1048 if (ifa == ifp)
1049 continue;
1050 if (!ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1051 ifp->prefix_len))
1052 continue;
1053 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1054 return CLEANUP_PREFIX_RT_NOP;
1055
1056 action = CLEANUP_PREFIX_RT_EXPIRE;
1057
1058 spin_lock(&ifa->lock);
1059
1060 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1061 /*
1062 * Note: Because this address is
1063 * not permanent, lifetime <
1064 * LONG_MAX / HZ here.
1065 */
1066 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1067 *expires = ifa->tstamp + lifetime * HZ;
1068 spin_unlock(&ifa->lock);
1069 }
1070
1071 return action;
1072 }
1073
1074 static void
1075 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires, bool del_rt)
1076 {
1077 struct rt6_info *rt;
1078
1079 rt = addrconf_get_prefix_route(&ifp->addr,
1080 ifp->prefix_len,
1081 ifp->idev->dev,
1082 0, RTF_GATEWAY | RTF_DEFAULT);
1083 if (rt) {
1084 if (del_rt)
1085 ip6_del_rt(rt);
1086 else {
1087 if (!(rt->rt6i_flags & RTF_EXPIRES))
1088 rt6_set_expires(rt, expires);
1089 ip6_rt_put(rt);
1090 }
1091 }
1092 }
1093
1094
1095 /* This function wants to get referenced ifp and releases it before return */
1096
1097 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1098 {
1099 int state;
1100 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1101 unsigned long expires;
1102
1103 ASSERT_RTNL();
1104
1105 spin_lock_bh(&ifp->lock);
1106 state = ifp->state;
1107 ifp->state = INET6_IFADDR_STATE_DEAD;
1108 spin_unlock_bh(&ifp->lock);
1109
1110 if (state == INET6_IFADDR_STATE_DEAD)
1111 goto out;
1112
1113 spin_lock_bh(&addrconf_hash_lock);
1114 hlist_del_init_rcu(&ifp->addr_lst);
1115 spin_unlock_bh(&addrconf_hash_lock);
1116
1117 write_lock_bh(&ifp->idev->lock);
1118
1119 if (ifp->flags&IFA_F_TEMPORARY) {
1120 list_del(&ifp->tmp_list);
1121 if (ifp->ifpub) {
1122 in6_ifa_put(ifp->ifpub);
1123 ifp->ifpub = NULL;
1124 }
1125 __in6_ifa_put(ifp);
1126 }
1127
1128 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1129 action = check_cleanup_prefix_route(ifp, &expires);
1130
1131 list_del_init(&ifp->if_list);
1132 __in6_ifa_put(ifp);
1133
1134 write_unlock_bh(&ifp->idev->lock);
1135
1136 addrconf_del_dad_work(ifp);
1137
1138 ipv6_ifa_notify(RTM_DELADDR, ifp);
1139
1140 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1141
1142 if (action != CLEANUP_PREFIX_RT_NOP) {
1143 cleanup_prefix_route(ifp, expires,
1144 action == CLEANUP_PREFIX_RT_DEL);
1145 }
1146
1147 /* clean up prefsrc entries */
1148 rt6_remove_prefsrc(ifp);
1149 out:
1150 in6_ifa_put(ifp);
1151 }
1152
1153 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, struct inet6_ifaddr *ift)
1154 {
1155 struct inet6_dev *idev = ifp->idev;
1156 struct in6_addr addr, *tmpaddr;
1157 unsigned long tmp_prefered_lft, tmp_valid_lft, tmp_tstamp, age;
1158 unsigned long regen_advance;
1159 int tmp_plen;
1160 int ret = 0;
1161 u32 addr_flags;
1162 unsigned long now = jiffies;
1163
1164 write_lock_bh(&idev->lock);
1165 if (ift) {
1166 spin_lock_bh(&ift->lock);
1167 memcpy(&addr.s6_addr[8], &ift->addr.s6_addr[8], 8);
1168 spin_unlock_bh(&ift->lock);
1169 tmpaddr = &addr;
1170 } else {
1171 tmpaddr = NULL;
1172 }
1173 retry:
1174 in6_dev_hold(idev);
1175 if (idev->cnf.use_tempaddr <= 0) {
1176 write_unlock_bh(&idev->lock);
1177 pr_info("%s: use_tempaddr is disabled\n", __func__);
1178 in6_dev_put(idev);
1179 ret = -1;
1180 goto out;
1181 }
1182 spin_lock_bh(&ifp->lock);
1183 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1184 idev->cnf.use_tempaddr = -1; /*XXX*/
1185 spin_unlock_bh(&ifp->lock);
1186 write_unlock_bh(&idev->lock);
1187 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1188 __func__);
1189 in6_dev_put(idev);
1190 ret = -1;
1191 goto out;
1192 }
1193 in6_ifa_hold(ifp);
1194 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1195 __ipv6_try_regen_rndid(idev, tmpaddr);
1196 memcpy(&addr.s6_addr[8], idev->rndid, 8);
1197 age = (now - ifp->tstamp) / HZ;
1198 tmp_valid_lft = min_t(__u32,
1199 ifp->valid_lft,
1200 idev->cnf.temp_valid_lft + age);
1201 tmp_prefered_lft = min_t(__u32,
1202 ifp->prefered_lft,
1203 idev->cnf.temp_prefered_lft + age -
1204 idev->cnf.max_desync_factor);
1205 tmp_plen = ifp->prefix_len;
1206 tmp_tstamp = ifp->tstamp;
1207 spin_unlock_bh(&ifp->lock);
1208
1209 regen_advance = idev->cnf.regen_max_retry *
1210 idev->cnf.dad_transmits *
1211 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) / HZ;
1212 write_unlock_bh(&idev->lock);
1213
1214 /* A temporary address is created only if this calculated Preferred
1215 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1216 * an implementation must not create a temporary address with a zero
1217 * Preferred Lifetime.
1218 * Use age calculation as in addrconf_verify to avoid unnecessary
1219 * temporary addresses being generated.
1220 */
1221 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1222 if (tmp_prefered_lft <= regen_advance + age) {
1223 in6_ifa_put(ifp);
1224 in6_dev_put(idev);
1225 ret = -1;
1226 goto out;
1227 }
1228
1229 addr_flags = IFA_F_TEMPORARY;
1230 /* set in addrconf_prefix_rcv() */
1231 if (ifp->flags & IFA_F_OPTIMISTIC)
1232 addr_flags |= IFA_F_OPTIMISTIC;
1233
1234 ift = ipv6_add_addr(idev, &addr, NULL, tmp_plen,
1235 ipv6_addr_scope(&addr), addr_flags,
1236 tmp_valid_lft, tmp_prefered_lft);
1237 if (IS_ERR(ift)) {
1238 in6_ifa_put(ifp);
1239 in6_dev_put(idev);
1240 pr_info("%s: retry temporary address regeneration\n", __func__);
1241 tmpaddr = &addr;
1242 write_lock_bh(&idev->lock);
1243 goto retry;
1244 }
1245
1246 spin_lock_bh(&ift->lock);
1247 ift->ifpub = ifp;
1248 ift->cstamp = now;
1249 ift->tstamp = tmp_tstamp;
1250 spin_unlock_bh(&ift->lock);
1251
1252 addrconf_dad_start(ift);
1253 in6_ifa_put(ift);
1254 in6_dev_put(idev);
1255 out:
1256 return ret;
1257 }
1258
1259 /*
1260 * Choose an appropriate source address (RFC3484)
1261 */
1262 enum {
1263 IPV6_SADDR_RULE_INIT = 0,
1264 IPV6_SADDR_RULE_LOCAL,
1265 IPV6_SADDR_RULE_SCOPE,
1266 IPV6_SADDR_RULE_PREFERRED,
1267 #ifdef CONFIG_IPV6_MIP6
1268 IPV6_SADDR_RULE_HOA,
1269 #endif
1270 IPV6_SADDR_RULE_OIF,
1271 IPV6_SADDR_RULE_LABEL,
1272 IPV6_SADDR_RULE_PRIVACY,
1273 IPV6_SADDR_RULE_ORCHID,
1274 IPV6_SADDR_RULE_PREFIX,
1275 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1276 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1277 #endif
1278 IPV6_SADDR_RULE_MAX
1279 };
1280
1281 struct ipv6_saddr_score {
1282 int rule;
1283 int addr_type;
1284 struct inet6_ifaddr *ifa;
1285 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1286 int scopedist;
1287 int matchlen;
1288 };
1289
1290 struct ipv6_saddr_dst {
1291 const struct in6_addr *addr;
1292 int ifindex;
1293 int scope;
1294 int label;
1295 unsigned int prefs;
1296 };
1297
1298 static inline int ipv6_saddr_preferred(int type)
1299 {
1300 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1301 return 1;
1302 return 0;
1303 }
1304
1305 static inline bool ipv6_use_optimistic_addr(struct inet6_dev *idev)
1306 {
1307 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1308 return idev && idev->cnf.optimistic_dad && idev->cnf.use_optimistic;
1309 #else
1310 return false;
1311 #endif
1312 }
1313
1314 static int ipv6_get_saddr_eval(struct net *net,
1315 struct ipv6_saddr_score *score,
1316 struct ipv6_saddr_dst *dst,
1317 int i)
1318 {
1319 int ret;
1320
1321 if (i <= score->rule) {
1322 switch (i) {
1323 case IPV6_SADDR_RULE_SCOPE:
1324 ret = score->scopedist;
1325 break;
1326 case IPV6_SADDR_RULE_PREFIX:
1327 ret = score->matchlen;
1328 break;
1329 default:
1330 ret = !!test_bit(i, score->scorebits);
1331 }
1332 goto out;
1333 }
1334
1335 switch (i) {
1336 case IPV6_SADDR_RULE_INIT:
1337 /* Rule 0: remember if hiscore is not ready yet */
1338 ret = !!score->ifa;
1339 break;
1340 case IPV6_SADDR_RULE_LOCAL:
1341 /* Rule 1: Prefer same address */
1342 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1343 break;
1344 case IPV6_SADDR_RULE_SCOPE:
1345 /* Rule 2: Prefer appropriate scope
1346 *
1347 * ret
1348 * ^
1349 * -1 | d 15
1350 * ---+--+-+---> scope
1351 * |
1352 * | d is scope of the destination.
1353 * B-d | \
1354 * | \ <- smaller scope is better if
1355 * B-15 | \ if scope is enough for destination.
1356 * | ret = B - scope (-1 <= scope >= d <= 15).
1357 * d-C-1 | /
1358 * |/ <- greater is better
1359 * -C / if scope is not enough for destination.
1360 * /| ret = scope - C (-1 <= d < scope <= 15).
1361 *
1362 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1363 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1364 * Assume B = 0 and we get C > 29.
1365 */
1366 ret = __ipv6_addr_src_scope(score->addr_type);
1367 if (ret >= dst->scope)
1368 ret = -ret;
1369 else
1370 ret -= 128; /* 30 is enough */
1371 score->scopedist = ret;
1372 break;
1373 case IPV6_SADDR_RULE_PREFERRED:
1374 {
1375 /* Rule 3: Avoid deprecated and optimistic addresses */
1376 u8 avoid = IFA_F_DEPRECATED;
1377
1378 if (!ipv6_use_optimistic_addr(score->ifa->idev))
1379 avoid |= IFA_F_OPTIMISTIC;
1380 ret = ipv6_saddr_preferred(score->addr_type) ||
1381 !(score->ifa->flags & avoid);
1382 break;
1383 }
1384 #ifdef CONFIG_IPV6_MIP6
1385 case IPV6_SADDR_RULE_HOA:
1386 {
1387 /* Rule 4: Prefer home address */
1388 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1389 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1390 break;
1391 }
1392 #endif
1393 case IPV6_SADDR_RULE_OIF:
1394 /* Rule 5: Prefer outgoing interface */
1395 ret = (!dst->ifindex ||
1396 dst->ifindex == score->ifa->idev->dev->ifindex);
1397 break;
1398 case IPV6_SADDR_RULE_LABEL:
1399 /* Rule 6: Prefer matching label */
1400 ret = ipv6_addr_label(net,
1401 &score->ifa->addr, score->addr_type,
1402 score->ifa->idev->dev->ifindex) == dst->label;
1403 break;
1404 case IPV6_SADDR_RULE_PRIVACY:
1405 {
1406 /* Rule 7: Prefer public address
1407 * Note: prefer temporary address if use_tempaddr >= 2
1408 */
1409 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1410 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1411 score->ifa->idev->cnf.use_tempaddr >= 2;
1412 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1413 break;
1414 }
1415 case IPV6_SADDR_RULE_ORCHID:
1416 /* Rule 8-: Prefer ORCHID vs ORCHID or
1417 * non-ORCHID vs non-ORCHID
1418 */
1419 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1420 ipv6_addr_orchid(dst->addr));
1421 break;
1422 case IPV6_SADDR_RULE_PREFIX:
1423 /* Rule 8: Use longest matching prefix */
1424 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1425 if (ret > score->ifa->prefix_len)
1426 ret = score->ifa->prefix_len;
1427 score->matchlen = ret;
1428 break;
1429 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1430 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1431 /* Optimistic addresses still have lower precedence than other
1432 * preferred addresses.
1433 */
1434 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1435 break;
1436 #endif
1437 default:
1438 ret = 0;
1439 }
1440
1441 if (ret)
1442 __set_bit(i, score->scorebits);
1443 score->rule = i;
1444 out:
1445 return ret;
1446 }
1447
1448 static int __ipv6_dev_get_saddr(struct net *net,
1449 struct ipv6_saddr_dst *dst,
1450 struct inet6_dev *idev,
1451 struct ipv6_saddr_score *scores,
1452 int hiscore_idx)
1453 {
1454 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1455
1456 read_lock_bh(&idev->lock);
1457 list_for_each_entry(score->ifa, &idev->addr_list, if_list) {
1458 int i;
1459
1460 /*
1461 * - Tentative Address (RFC2462 section 5.4)
1462 * - A tentative address is not considered
1463 * "assigned to an interface" in the traditional
1464 * sense, unless it is also flagged as optimistic.
1465 * - Candidate Source Address (section 4)
1466 * - In any case, anycast addresses, multicast
1467 * addresses, and the unspecified address MUST
1468 * NOT be included in a candidate set.
1469 */
1470 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1471 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1472 continue;
1473
1474 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1475
1476 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1477 score->addr_type & IPV6_ADDR_MULTICAST)) {
1478 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1479 idev->dev->name);
1480 continue;
1481 }
1482
1483 score->rule = -1;
1484 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1485
1486 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1487 int minihiscore, miniscore;
1488
1489 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1490 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1491
1492 if (minihiscore > miniscore) {
1493 if (i == IPV6_SADDR_RULE_SCOPE &&
1494 score->scopedist > 0) {
1495 /*
1496 * special case:
1497 * each remaining entry
1498 * has too small (not enough)
1499 * scope, because ifa entries
1500 * are sorted by their scope
1501 * values.
1502 */
1503 goto out;
1504 }
1505 break;
1506 } else if (minihiscore < miniscore) {
1507 if (hiscore->ifa)
1508 in6_ifa_put(hiscore->ifa);
1509
1510 in6_ifa_hold(score->ifa);
1511
1512 swap(hiscore, score);
1513 hiscore_idx = 1 - hiscore_idx;
1514
1515 /* restore our iterator */
1516 score->ifa = hiscore->ifa;
1517
1518 break;
1519 }
1520 }
1521 }
1522 out:
1523 read_unlock_bh(&idev->lock);
1524 return hiscore_idx;
1525 }
1526
1527 static int ipv6_get_saddr_master(struct net *net,
1528 const struct net_device *dst_dev,
1529 const struct net_device *master,
1530 struct ipv6_saddr_dst *dst,
1531 struct ipv6_saddr_score *scores,
1532 int hiscore_idx)
1533 {
1534 struct inet6_dev *idev;
1535
1536 idev = __in6_dev_get(dst_dev);
1537 if (idev)
1538 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1539 scores, hiscore_idx);
1540
1541 idev = __in6_dev_get(master);
1542 if (idev)
1543 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1544 scores, hiscore_idx);
1545
1546 return hiscore_idx;
1547 }
1548
1549 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1550 const struct in6_addr *daddr, unsigned int prefs,
1551 struct in6_addr *saddr)
1552 {
1553 struct ipv6_saddr_score scores[2], *hiscore;
1554 struct ipv6_saddr_dst dst;
1555 struct inet6_dev *idev;
1556 struct net_device *dev;
1557 int dst_type;
1558 bool use_oif_addr = false;
1559 int hiscore_idx = 0;
1560
1561 dst_type = __ipv6_addr_type(daddr);
1562 dst.addr = daddr;
1563 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1564 dst.scope = __ipv6_addr_src_scope(dst_type);
1565 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1566 dst.prefs = prefs;
1567
1568 scores[hiscore_idx].rule = -1;
1569 scores[hiscore_idx].ifa = NULL;
1570
1571 rcu_read_lock();
1572
1573 /* Candidate Source Address (section 4)
1574 * - multicast and link-local destination address,
1575 * the set of candidate source address MUST only
1576 * include addresses assigned to interfaces
1577 * belonging to the same link as the outgoing
1578 * interface.
1579 * (- For site-local destination addresses, the
1580 * set of candidate source addresses MUST only
1581 * include addresses assigned to interfaces
1582 * belonging to the same site as the outgoing
1583 * interface.)
1584 * - "It is RECOMMENDED that the candidate source addresses
1585 * be the set of unicast addresses assigned to the
1586 * interface that will be used to send to the destination
1587 * (the 'outgoing' interface)." (RFC 6724)
1588 */
1589 if (dst_dev) {
1590 idev = __in6_dev_get(dst_dev);
1591 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1592 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1593 (idev && idev->cnf.use_oif_addrs_only)) {
1594 use_oif_addr = true;
1595 }
1596 }
1597
1598 if (use_oif_addr) {
1599 if (idev)
1600 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1601 } else {
1602 const struct net_device *master;
1603 int master_idx = 0;
1604
1605 /* if dst_dev exists and is enslaved to an L3 device, then
1606 * prefer addresses from dst_dev and then the master over
1607 * any other enslaved devices in the L3 domain.
1608 */
1609 master = l3mdev_master_dev_rcu(dst_dev);
1610 if (master) {
1611 master_idx = master->ifindex;
1612
1613 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1614 master, &dst,
1615 scores, hiscore_idx);
1616
1617 if (scores[hiscore_idx].ifa)
1618 goto out;
1619 }
1620
1621 for_each_netdev_rcu(net, dev) {
1622 /* only consider addresses on devices in the
1623 * same L3 domain
1624 */
1625 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1626 continue;
1627 idev = __in6_dev_get(dev);
1628 if (!idev)
1629 continue;
1630 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1631 }
1632 }
1633
1634 out:
1635 rcu_read_unlock();
1636
1637 hiscore = &scores[hiscore_idx];
1638 if (!hiscore->ifa)
1639 return -EADDRNOTAVAIL;
1640
1641 *saddr = hiscore->ifa->addr;
1642 in6_ifa_put(hiscore->ifa);
1643 return 0;
1644 }
1645 EXPORT_SYMBOL(ipv6_dev_get_saddr);
1646
1647 int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1648 u32 banned_flags)
1649 {
1650 struct inet6_ifaddr *ifp;
1651 int err = -EADDRNOTAVAIL;
1652
1653 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1654 if (ifp->scope > IFA_LINK)
1655 break;
1656 if (ifp->scope == IFA_LINK &&
1657 !(ifp->flags & banned_flags)) {
1658 *addr = ifp->addr;
1659 err = 0;
1660 break;
1661 }
1662 }
1663 return err;
1664 }
1665
1666 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1667 u32 banned_flags)
1668 {
1669 struct inet6_dev *idev;
1670 int err = -EADDRNOTAVAIL;
1671
1672 rcu_read_lock();
1673 idev = __in6_dev_get(dev);
1674 if (idev) {
1675 read_lock_bh(&idev->lock);
1676 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1677 read_unlock_bh(&idev->lock);
1678 }
1679 rcu_read_unlock();
1680 return err;
1681 }
1682
1683 static int ipv6_count_addresses(struct inet6_dev *idev)
1684 {
1685 int cnt = 0;
1686 struct inet6_ifaddr *ifp;
1687
1688 read_lock_bh(&idev->lock);
1689 list_for_each_entry(ifp, &idev->addr_list, if_list)
1690 cnt++;
1691 read_unlock_bh(&idev->lock);
1692 return cnt;
1693 }
1694
1695 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1696 const struct net_device *dev, int strict)
1697 {
1698 return ipv6_chk_addr_and_flags(net, addr, dev, strict, IFA_F_TENTATIVE);
1699 }
1700 EXPORT_SYMBOL(ipv6_chk_addr);
1701
1702 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1703 const struct net_device *dev, int strict,
1704 u32 banned_flags)
1705 {
1706 struct inet6_ifaddr *ifp;
1707 unsigned int hash = inet6_addr_hash(addr);
1708 u32 ifp_flags;
1709
1710 rcu_read_lock_bh();
1711 hlist_for_each_entry_rcu(ifp, &inet6_addr_lst[hash], addr_lst) {
1712 if (!net_eq(dev_net(ifp->idev->dev), net))
1713 continue;
1714 /* Decouple optimistic from tentative for evaluation here.
1715 * Ban optimistic addresses explicitly, when required.
1716 */
1717 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1718 ? (ifp->flags&~IFA_F_TENTATIVE)
1719 : ifp->flags;
1720 if (ipv6_addr_equal(&ifp->addr, addr) &&
1721 !(ifp_flags&banned_flags) &&
1722 (!dev || ifp->idev->dev == dev ||
1723 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1724 rcu_read_unlock_bh();
1725 return 1;
1726 }
1727 }
1728
1729 rcu_read_unlock_bh();
1730 return 0;
1731 }
1732 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1733
1734 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1735 struct net_device *dev)
1736 {
1737 unsigned int hash = inet6_addr_hash(addr);
1738 struct inet6_ifaddr *ifp;
1739
1740 hlist_for_each_entry(ifp, &inet6_addr_lst[hash], addr_lst) {
1741 if (!net_eq(dev_net(ifp->idev->dev), net))
1742 continue;
1743 if (ipv6_addr_equal(&ifp->addr, addr)) {
1744 if (!dev || ifp->idev->dev == dev)
1745 return true;
1746 }
1747 }
1748 return false;
1749 }
1750
1751 /* Compares an address/prefix_len with addresses on device @dev.
1752 * If one is found it returns true.
1753 */
1754 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1755 const unsigned int prefix_len, struct net_device *dev)
1756 {
1757 struct inet6_dev *idev;
1758 struct inet6_ifaddr *ifa;
1759 bool ret = false;
1760
1761 rcu_read_lock();
1762 idev = __in6_dev_get(dev);
1763 if (idev) {
1764 read_lock_bh(&idev->lock);
1765 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1766 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1767 if (ret)
1768 break;
1769 }
1770 read_unlock_bh(&idev->lock);
1771 }
1772 rcu_read_unlock();
1773
1774 return ret;
1775 }
1776 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1777
1778 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1779 {
1780 struct inet6_dev *idev;
1781 struct inet6_ifaddr *ifa;
1782 int onlink;
1783
1784 onlink = 0;
1785 rcu_read_lock();
1786 idev = __in6_dev_get(dev);
1787 if (idev) {
1788 read_lock_bh(&idev->lock);
1789 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1790 onlink = ipv6_prefix_equal(addr, &ifa->addr,
1791 ifa->prefix_len);
1792 if (onlink)
1793 break;
1794 }
1795 read_unlock_bh(&idev->lock);
1796 }
1797 rcu_read_unlock();
1798 return onlink;
1799 }
1800 EXPORT_SYMBOL(ipv6_chk_prefix);
1801
1802 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
1803 struct net_device *dev, int strict)
1804 {
1805 struct inet6_ifaddr *ifp, *result = NULL;
1806 unsigned int hash = inet6_addr_hash(addr);
1807
1808 rcu_read_lock_bh();
1809 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
1810 if (!net_eq(dev_net(ifp->idev->dev), net))
1811 continue;
1812 if (ipv6_addr_equal(&ifp->addr, addr)) {
1813 if (!dev || ifp->idev->dev == dev ||
1814 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
1815 result = ifp;
1816 in6_ifa_hold(ifp);
1817 break;
1818 }
1819 }
1820 }
1821 rcu_read_unlock_bh();
1822
1823 return result;
1824 }
1825
1826 /* Gets referenced address, destroys ifaddr */
1827
1828 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
1829 {
1830 if (dad_failed)
1831 ifp->flags |= IFA_F_DADFAILED;
1832
1833 if (ifp->flags&IFA_F_PERMANENT) {
1834 spin_lock_bh(&ifp->lock);
1835 addrconf_del_dad_work(ifp);
1836 ifp->flags |= IFA_F_TENTATIVE;
1837 spin_unlock_bh(&ifp->lock);
1838 if (dad_failed)
1839 ipv6_ifa_notify(0, ifp);
1840 in6_ifa_put(ifp);
1841 } else if (ifp->flags&IFA_F_TEMPORARY) {
1842 struct inet6_ifaddr *ifpub;
1843 spin_lock_bh(&ifp->lock);
1844 ifpub = ifp->ifpub;
1845 if (ifpub) {
1846 in6_ifa_hold(ifpub);
1847 spin_unlock_bh(&ifp->lock);
1848 ipv6_create_tempaddr(ifpub, ifp);
1849 in6_ifa_put(ifpub);
1850 } else {
1851 spin_unlock_bh(&ifp->lock);
1852 }
1853 ipv6_del_addr(ifp);
1854 } else {
1855 ipv6_del_addr(ifp);
1856 }
1857 }
1858
1859 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
1860 {
1861 int err = -ENOENT;
1862
1863 spin_lock_bh(&ifp->lock);
1864 if (ifp->state == INET6_IFADDR_STATE_DAD) {
1865 ifp->state = INET6_IFADDR_STATE_POSTDAD;
1866 err = 0;
1867 }
1868 spin_unlock_bh(&ifp->lock);
1869
1870 return err;
1871 }
1872
1873 void addrconf_dad_failure(struct inet6_ifaddr *ifp)
1874 {
1875 struct inet6_dev *idev = ifp->idev;
1876 struct net *net = dev_net(ifp->idev->dev);
1877
1878 if (addrconf_dad_end(ifp)) {
1879 in6_ifa_put(ifp);
1880 return;
1881 }
1882
1883 net_info_ratelimited("%s: IPv6 duplicate address %pI6c detected!\n",
1884 ifp->idev->dev->name, &ifp->addr);
1885
1886 spin_lock_bh(&ifp->lock);
1887
1888 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
1889 int scope = ifp->scope;
1890 u32 flags = ifp->flags;
1891 struct in6_addr new_addr;
1892 struct inet6_ifaddr *ifp2;
1893 u32 valid_lft, preferred_lft;
1894 int pfxlen = ifp->prefix_len;
1895 int retries = ifp->stable_privacy_retry + 1;
1896
1897 if (retries > net->ipv6.sysctl.idgen_retries) {
1898 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
1899 ifp->idev->dev->name);
1900 goto errdad;
1901 }
1902
1903 new_addr = ifp->addr;
1904 if (ipv6_generate_stable_address(&new_addr, retries,
1905 idev))
1906 goto errdad;
1907
1908 valid_lft = ifp->valid_lft;
1909 preferred_lft = ifp->prefered_lft;
1910
1911 spin_unlock_bh(&ifp->lock);
1912
1913 if (idev->cnf.max_addresses &&
1914 ipv6_count_addresses(idev) >=
1915 idev->cnf.max_addresses)
1916 goto lock_errdad;
1917
1918 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
1919 ifp->idev->dev->name);
1920
1921 ifp2 = ipv6_add_addr(idev, &new_addr, NULL, pfxlen,
1922 scope, flags, valid_lft,
1923 preferred_lft);
1924 if (IS_ERR(ifp2))
1925 goto lock_errdad;
1926
1927 spin_lock_bh(&ifp2->lock);
1928 ifp2->stable_privacy_retry = retries;
1929 ifp2->state = INET6_IFADDR_STATE_PREDAD;
1930 spin_unlock_bh(&ifp2->lock);
1931
1932 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
1933 in6_ifa_put(ifp2);
1934 lock_errdad:
1935 spin_lock_bh(&ifp->lock);
1936 }
1937
1938 errdad:
1939 /* transition from _POSTDAD to _ERRDAD */
1940 ifp->state = INET6_IFADDR_STATE_ERRDAD;
1941 spin_unlock_bh(&ifp->lock);
1942
1943 addrconf_mod_dad_work(ifp, 0);
1944 }
1945
1946 /* Join to solicited addr multicast group.
1947 * caller must hold RTNL */
1948 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
1949 {
1950 struct in6_addr maddr;
1951
1952 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
1953 return;
1954
1955 addrconf_addr_solict_mult(addr, &maddr);
1956 ipv6_dev_mc_inc(dev, &maddr);
1957 }
1958
1959 /* caller must hold RTNL */
1960 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
1961 {
1962 struct in6_addr maddr;
1963
1964 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
1965 return;
1966
1967 addrconf_addr_solict_mult(addr, &maddr);
1968 __ipv6_dev_mc_dec(idev, &maddr);
1969 }
1970
1971 /* caller must hold RTNL */
1972 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
1973 {
1974 struct in6_addr addr;
1975
1976 if (ifp->prefix_len >= 127) /* RFC 6164 */
1977 return;
1978 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
1979 if (ipv6_addr_any(&addr))
1980 return;
1981 __ipv6_dev_ac_inc(ifp->idev, &addr);
1982 }
1983
1984 /* caller must hold RTNL */
1985 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
1986 {
1987 struct in6_addr addr;
1988
1989 if (ifp->prefix_len >= 127) /* RFC 6164 */
1990 return;
1991 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
1992 if (ipv6_addr_any(&addr))
1993 return;
1994 __ipv6_dev_ac_dec(ifp->idev, &addr);
1995 }
1996
1997 static int addrconf_ifid_eui64(u8 *eui, struct net_device *dev)
1998 {
1999 if (dev->addr_len != EUI64_ADDR_LEN)
2000 return -1;
2001 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2002 eui[0] ^= 2;
2003 return 0;
2004 }
2005
2006 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2007 {
2008 union fwnet_hwaddr *ha;
2009
2010 if (dev->addr_len != FWNET_ALEN)
2011 return -1;
2012
2013 ha = (union fwnet_hwaddr *)dev->dev_addr;
2014
2015 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2016 eui[0] ^= 2;
2017 return 0;
2018 }
2019
2020 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2021 {
2022 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2023 if (dev->addr_len != ARCNET_ALEN)
2024 return -1;
2025 memset(eui, 0, 7);
2026 eui[7] = *(u8 *)dev->dev_addr;
2027 return 0;
2028 }
2029
2030 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2031 {
2032 if (dev->addr_len != INFINIBAND_ALEN)
2033 return -1;
2034 memcpy(eui, dev->dev_addr + 12, 8);
2035 eui[0] |= 2;
2036 return 0;
2037 }
2038
2039 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2040 {
2041 if (addr == 0)
2042 return -1;
2043 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2044 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2045 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2046 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2047 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2048 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2049 eui[1] = 0;
2050 eui[2] = 0x5E;
2051 eui[3] = 0xFE;
2052 memcpy(eui + 4, &addr, 4);
2053 return 0;
2054 }
2055
2056 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2057 {
2058 if (dev->priv_flags & IFF_ISATAP)
2059 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2060 return -1;
2061 }
2062
2063 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2064 {
2065 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2066 }
2067
2068 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2069 {
2070 memcpy(eui, dev->perm_addr, 3);
2071 memcpy(eui + 5, dev->perm_addr + 3, 3);
2072 eui[3] = 0xFF;
2073 eui[4] = 0xFE;
2074 eui[0] ^= 2;
2075 return 0;
2076 }
2077
2078 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2079 {
2080 switch (dev->type) {
2081 case ARPHRD_ETHER:
2082 case ARPHRD_FDDI:
2083 return addrconf_ifid_eui48(eui, dev);
2084 case ARPHRD_ARCNET:
2085 return addrconf_ifid_arcnet(eui, dev);
2086 case ARPHRD_INFINIBAND:
2087 return addrconf_ifid_infiniband(eui, dev);
2088 case ARPHRD_SIT:
2089 return addrconf_ifid_sit(eui, dev);
2090 case ARPHRD_IPGRE:
2091 return addrconf_ifid_gre(eui, dev);
2092 case ARPHRD_6LOWPAN:
2093 return addrconf_ifid_eui64(eui, dev);
2094 case ARPHRD_IEEE1394:
2095 return addrconf_ifid_ieee1394(eui, dev);
2096 case ARPHRD_TUNNEL6:
2097 return addrconf_ifid_ip6tnl(eui, dev);
2098 }
2099 return -1;
2100 }
2101
2102 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2103 {
2104 int err = -1;
2105 struct inet6_ifaddr *ifp;
2106
2107 read_lock_bh(&idev->lock);
2108 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2109 if (ifp->scope > IFA_LINK)
2110 break;
2111 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2112 memcpy(eui, ifp->addr.s6_addr+8, 8);
2113 err = 0;
2114 break;
2115 }
2116 }
2117 read_unlock_bh(&idev->lock);
2118 return err;
2119 }
2120
2121 /* (re)generation of randomized interface identifier (RFC 3041 3.2, 3.5) */
2122 static void __ipv6_regen_rndid(struct inet6_dev *idev)
2123 {
2124 regen:
2125 get_random_bytes(idev->rndid, sizeof(idev->rndid));
2126 idev->rndid[0] &= ~0x02;
2127
2128 /*
2129 * <draft-ietf-ipngwg-temp-addresses-v2-00.txt>:
2130 * check if generated address is not inappropriate
2131 *
2132 * - Reserved subnet anycast (RFC 2526)
2133 * 11111101 11....11 1xxxxxxx
2134 * - ISATAP (RFC4214) 6.1
2135 * 00-00-5E-FE-xx-xx-xx-xx
2136 * - value 0
2137 * - XXX: already assigned to an address on the device
2138 */
2139 if (idev->rndid[0] == 0xfd &&
2140 (idev->rndid[1]&idev->rndid[2]&idev->rndid[3]&idev->rndid[4]&idev->rndid[5]&idev->rndid[6]) == 0xff &&
2141 (idev->rndid[7]&0x80))
2142 goto regen;
2143 if ((idev->rndid[0]|idev->rndid[1]) == 0) {
2144 if (idev->rndid[2] == 0x5e && idev->rndid[3] == 0xfe)
2145 goto regen;
2146 if ((idev->rndid[2]|idev->rndid[3]|idev->rndid[4]|idev->rndid[5]|idev->rndid[6]|idev->rndid[7]) == 0x00)
2147 goto regen;
2148 }
2149 }
2150
2151 static void ipv6_regen_rndid(unsigned long data)
2152 {
2153 struct inet6_dev *idev = (struct inet6_dev *) data;
2154 unsigned long expires;
2155
2156 rcu_read_lock_bh();
2157 write_lock_bh(&idev->lock);
2158
2159 if (idev->dead)
2160 goto out;
2161
2162 __ipv6_regen_rndid(idev);
2163
2164 expires = jiffies +
2165 idev->cnf.temp_prefered_lft * HZ -
2166 idev->cnf.regen_max_retry * idev->cnf.dad_transmits *
2167 NEIGH_VAR(idev->nd_parms, RETRANS_TIME) -
2168 idev->cnf.max_desync_factor * HZ;
2169 if (time_before(expires, jiffies)) {
2170 pr_warn("%s: too short regeneration interval; timer disabled for %s\n",
2171 __func__, idev->dev->name);
2172 goto out;
2173 }
2174
2175 if (!mod_timer(&idev->regen_timer, expires))
2176 in6_dev_hold(idev);
2177
2178 out:
2179 write_unlock_bh(&idev->lock);
2180 rcu_read_unlock_bh();
2181 in6_dev_put(idev);
2182 }
2183
2184 static void __ipv6_try_regen_rndid(struct inet6_dev *idev, struct in6_addr *tmpaddr)
2185 {
2186 if (tmpaddr && memcmp(idev->rndid, &tmpaddr->s6_addr[8], 8) == 0)
2187 __ipv6_regen_rndid(idev);
2188 }
2189
2190 /*
2191 * Add prefix route.
2192 */
2193
2194 static void
2195 addrconf_prefix_route(struct in6_addr *pfx, int plen, struct net_device *dev,
2196 unsigned long expires, u32 flags)
2197 {
2198 struct fib6_config cfg = {
2199 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2200 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2201 .fc_ifindex = dev->ifindex,
2202 .fc_expires = expires,
2203 .fc_dst_len = plen,
2204 .fc_flags = RTF_UP | flags,
2205 .fc_nlinfo.nl_net = dev_net(dev),
2206 .fc_protocol = RTPROT_KERNEL,
2207 };
2208
2209 cfg.fc_dst = *pfx;
2210
2211 /* Prevent useless cloning on PtP SIT.
2212 This thing is done here expecting that the whole
2213 class of non-broadcast devices need not cloning.
2214 */
2215 #if IS_ENABLED(CONFIG_IPV6_SIT)
2216 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2217 cfg.fc_flags |= RTF_NONEXTHOP;
2218 #endif
2219
2220 ip6_route_add(&cfg);
2221 }
2222
2223
2224 static struct rt6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2225 int plen,
2226 const struct net_device *dev,
2227 u32 flags, u32 noflags)
2228 {
2229 struct fib6_node *fn;
2230 struct rt6_info *rt = NULL;
2231 struct fib6_table *table;
2232 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2233
2234 table = fib6_get_table(dev_net(dev), tb_id);
2235 if (!table)
2236 return NULL;
2237
2238 read_lock_bh(&table->tb6_lock);
2239 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0);
2240 if (!fn)
2241 goto out;
2242
2243 noflags |= RTF_CACHE;
2244 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2245 if (rt->dst.dev->ifindex != dev->ifindex)
2246 continue;
2247 if ((rt->rt6i_flags & flags) != flags)
2248 continue;
2249 if ((rt->rt6i_flags & noflags) != 0)
2250 continue;
2251 dst_hold(&rt->dst);
2252 break;
2253 }
2254 out:
2255 read_unlock_bh(&table->tb6_lock);
2256 return rt;
2257 }
2258
2259
2260 /* Create "default" multicast route to the interface */
2261
2262 static void addrconf_add_mroute(struct net_device *dev)
2263 {
2264 struct fib6_config cfg = {
2265 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2266 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2267 .fc_ifindex = dev->ifindex,
2268 .fc_dst_len = 8,
2269 .fc_flags = RTF_UP,
2270 .fc_nlinfo.nl_net = dev_net(dev),
2271 };
2272
2273 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2274
2275 ip6_route_add(&cfg);
2276 }
2277
2278 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2279 {
2280 struct inet6_dev *idev;
2281
2282 ASSERT_RTNL();
2283
2284 idev = ipv6_find_idev(dev);
2285 if (!idev)
2286 return ERR_PTR(-ENOBUFS);
2287
2288 if (idev->cnf.disable_ipv6)
2289 return ERR_PTR(-EACCES);
2290
2291 /* Add default multicast route */
2292 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2293 addrconf_add_mroute(dev);
2294
2295 return idev;
2296 }
2297
2298 static void manage_tempaddrs(struct inet6_dev *idev,
2299 struct inet6_ifaddr *ifp,
2300 __u32 valid_lft, __u32 prefered_lft,
2301 bool create, unsigned long now)
2302 {
2303 u32 flags;
2304 struct inet6_ifaddr *ift;
2305
2306 read_lock_bh(&idev->lock);
2307 /* update all temporary addresses in the list */
2308 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2309 int age, max_valid, max_prefered;
2310
2311 if (ifp != ift->ifpub)
2312 continue;
2313
2314 /* RFC 4941 section 3.3:
2315 * If a received option will extend the lifetime of a public
2316 * address, the lifetimes of temporary addresses should
2317 * be extended, subject to the overall constraint that no
2318 * temporary addresses should ever remain "valid" or "preferred"
2319 * for a time longer than (TEMP_VALID_LIFETIME) or
2320 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2321 */
2322 age = (now - ift->cstamp) / HZ;
2323 max_valid = idev->cnf.temp_valid_lft - age;
2324 if (max_valid < 0)
2325 max_valid = 0;
2326
2327 max_prefered = idev->cnf.temp_prefered_lft -
2328 idev->cnf.max_desync_factor - age;
2329 if (max_prefered < 0)
2330 max_prefered = 0;
2331
2332 if (valid_lft > max_valid)
2333 valid_lft = max_valid;
2334
2335 if (prefered_lft > max_prefered)
2336 prefered_lft = max_prefered;
2337
2338 spin_lock(&ift->lock);
2339 flags = ift->flags;
2340 ift->valid_lft = valid_lft;
2341 ift->prefered_lft = prefered_lft;
2342 ift->tstamp = now;
2343 if (prefered_lft > 0)
2344 ift->flags &= ~IFA_F_DEPRECATED;
2345
2346 spin_unlock(&ift->lock);
2347 if (!(flags&IFA_F_TENTATIVE))
2348 ipv6_ifa_notify(0, ift);
2349 }
2350
2351 if ((create || list_empty(&idev->tempaddr_list)) &&
2352 idev->cnf.use_tempaddr > 0) {
2353 /* When a new public address is created as described
2354 * in [ADDRCONF], also create a new temporary address.
2355 * Also create a temporary address if it's enabled but
2356 * no temporary address currently exists.
2357 */
2358 read_unlock_bh(&idev->lock);
2359 ipv6_create_tempaddr(ifp, NULL);
2360 } else {
2361 read_unlock_bh(&idev->lock);
2362 }
2363 }
2364
2365 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2366 {
2367 return idev->addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2368 idev->addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2369 }
2370
2371 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2372 const struct prefix_info *pinfo,
2373 struct inet6_dev *in6_dev,
2374 const struct in6_addr *addr, int addr_type,
2375 u32 addr_flags, bool sllao, bool tokenized,
2376 __u32 valid_lft, u32 prefered_lft)
2377 {
2378 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2379 int create = 0, update_lft = 0;
2380
2381 if (!ifp && valid_lft) {
2382 int max_addresses = in6_dev->cnf.max_addresses;
2383
2384 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2385 if (in6_dev->cnf.optimistic_dad &&
2386 !net->ipv6.devconf_all->forwarding && sllao)
2387 addr_flags |= IFA_F_OPTIMISTIC;
2388 #endif
2389
2390 /* Do not allow to create too much of autoconfigured
2391 * addresses; this would be too easy way to crash kernel.
2392 */
2393 if (!max_addresses ||
2394 ipv6_count_addresses(in6_dev) < max_addresses)
2395 ifp = ipv6_add_addr(in6_dev, addr, NULL,
2396 pinfo->prefix_len,
2397 addr_type&IPV6_ADDR_SCOPE_MASK,
2398 addr_flags, valid_lft,
2399 prefered_lft);
2400
2401 if (IS_ERR_OR_NULL(ifp))
2402 return -1;
2403
2404 update_lft = 0;
2405 create = 1;
2406 spin_lock_bh(&ifp->lock);
2407 ifp->flags |= IFA_F_MANAGETEMPADDR;
2408 ifp->cstamp = jiffies;
2409 ifp->tokenized = tokenized;
2410 spin_unlock_bh(&ifp->lock);
2411 addrconf_dad_start(ifp);
2412 }
2413
2414 if (ifp) {
2415 u32 flags;
2416 unsigned long now;
2417 u32 stored_lft;
2418
2419 /* update lifetime (RFC2462 5.5.3 e) */
2420 spin_lock_bh(&ifp->lock);
2421 now = jiffies;
2422 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2423 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2424 else
2425 stored_lft = 0;
2426 if (!update_lft && !create && stored_lft) {
2427 const u32 minimum_lft = min_t(u32,
2428 stored_lft, MIN_VALID_LIFETIME);
2429 valid_lft = max(valid_lft, minimum_lft);
2430
2431 /* RFC4862 Section 5.5.3e:
2432 * "Note that the preferred lifetime of the
2433 * corresponding address is always reset to
2434 * the Preferred Lifetime in the received
2435 * Prefix Information option, regardless of
2436 * whether the valid lifetime is also reset or
2437 * ignored."
2438 *
2439 * So we should always update prefered_lft here.
2440 */
2441 update_lft = 1;
2442 }
2443
2444 if (update_lft) {
2445 ifp->valid_lft = valid_lft;
2446 ifp->prefered_lft = prefered_lft;
2447 ifp->tstamp = now;
2448 flags = ifp->flags;
2449 ifp->flags &= ~IFA_F_DEPRECATED;
2450 spin_unlock_bh(&ifp->lock);
2451
2452 if (!(flags&IFA_F_TENTATIVE))
2453 ipv6_ifa_notify(0, ifp);
2454 } else
2455 spin_unlock_bh(&ifp->lock);
2456
2457 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2458 create, now);
2459
2460 in6_ifa_put(ifp);
2461 addrconf_verify();
2462 }
2463
2464 return 0;
2465 }
2466 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2467
2468 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2469 {
2470 struct prefix_info *pinfo;
2471 __u32 valid_lft;
2472 __u32 prefered_lft;
2473 int addr_type, err;
2474 u32 addr_flags = 0;
2475 struct inet6_dev *in6_dev;
2476 struct net *net = dev_net(dev);
2477
2478 pinfo = (struct prefix_info *) opt;
2479
2480 if (len < sizeof(struct prefix_info)) {
2481 ADBG("addrconf: prefix option too short\n");
2482 return;
2483 }
2484
2485 /*
2486 * Validation checks ([ADDRCONF], page 19)
2487 */
2488
2489 addr_type = ipv6_addr_type(&pinfo->prefix);
2490
2491 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2492 return;
2493
2494 valid_lft = ntohl(pinfo->valid);
2495 prefered_lft = ntohl(pinfo->prefered);
2496
2497 if (prefered_lft > valid_lft) {
2498 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2499 return;
2500 }
2501
2502 in6_dev = in6_dev_get(dev);
2503
2504 if (!in6_dev) {
2505 net_dbg_ratelimited("addrconf: device %s not configured\n",
2506 dev->name);
2507 return;
2508 }
2509
2510 /*
2511 * Two things going on here:
2512 * 1) Add routes for on-link prefixes
2513 * 2) Configure prefixes with the auto flag set
2514 */
2515
2516 if (pinfo->onlink) {
2517 struct rt6_info *rt;
2518 unsigned long rt_expires;
2519
2520 /* Avoid arithmetic overflow. Really, we could
2521 * save rt_expires in seconds, likely valid_lft,
2522 * but it would require division in fib gc, that it
2523 * not good.
2524 */
2525 if (HZ > USER_HZ)
2526 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2527 else
2528 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2529
2530 if (addrconf_finite_timeout(rt_expires))
2531 rt_expires *= HZ;
2532
2533 rt = addrconf_get_prefix_route(&pinfo->prefix,
2534 pinfo->prefix_len,
2535 dev,
2536 RTF_ADDRCONF | RTF_PREFIX_RT,
2537 RTF_GATEWAY | RTF_DEFAULT);
2538
2539 if (rt) {
2540 /* Autoconf prefix route */
2541 if (valid_lft == 0) {
2542 ip6_del_rt(rt);
2543 rt = NULL;
2544 } else if (addrconf_finite_timeout(rt_expires)) {
2545 /* not infinity */
2546 rt6_set_expires(rt, jiffies + rt_expires);
2547 } else {
2548 rt6_clean_expires(rt);
2549 }
2550 } else if (valid_lft) {
2551 clock_t expires = 0;
2552 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2553 if (addrconf_finite_timeout(rt_expires)) {
2554 /* not infinity */
2555 flags |= RTF_EXPIRES;
2556 expires = jiffies_to_clock_t(rt_expires);
2557 }
2558 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2559 dev, expires, flags);
2560 }
2561 ip6_rt_put(rt);
2562 }
2563
2564 /* Try to figure out our local address for this prefix */
2565
2566 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2567 struct in6_addr addr;
2568 bool tokenized = false, dev_addr_generated = false;
2569
2570 if (pinfo->prefix_len == 64) {
2571 memcpy(&addr, &pinfo->prefix, 8);
2572
2573 if (!ipv6_addr_any(&in6_dev->token)) {
2574 read_lock_bh(&in6_dev->lock);
2575 memcpy(addr.s6_addr + 8,
2576 in6_dev->token.s6_addr + 8, 8);
2577 read_unlock_bh(&in6_dev->lock);
2578 tokenized = true;
2579 } else if (is_addr_mode_generate_stable(in6_dev) &&
2580 !ipv6_generate_stable_address(&addr, 0,
2581 in6_dev)) {
2582 addr_flags |= IFA_F_STABLE_PRIVACY;
2583 goto ok;
2584 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2585 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2586 goto put;
2587 } else {
2588 dev_addr_generated = true;
2589 }
2590 goto ok;
2591 }
2592 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2593 pinfo->prefix_len);
2594 goto put;
2595
2596 ok:
2597 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2598 &addr, addr_type,
2599 addr_flags, sllao,
2600 tokenized, valid_lft,
2601 prefered_lft);
2602 if (err)
2603 goto put;
2604
2605 /* Ignore error case here because previous prefix add addr was
2606 * successful which will be notified.
2607 */
2608 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2609 addr_type, addr_flags, sllao,
2610 tokenized, valid_lft,
2611 prefered_lft,
2612 dev_addr_generated);
2613 }
2614 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2615 put:
2616 in6_dev_put(in6_dev);
2617 }
2618
2619 /*
2620 * Set destination address.
2621 * Special case for SIT interfaces where we create a new "virtual"
2622 * device.
2623 */
2624 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2625 {
2626 struct in6_ifreq ireq;
2627 struct net_device *dev;
2628 int err = -EINVAL;
2629
2630 rtnl_lock();
2631
2632 err = -EFAULT;
2633 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2634 goto err_exit;
2635
2636 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2637
2638 err = -ENODEV;
2639 if (!dev)
2640 goto err_exit;
2641
2642 #if IS_ENABLED(CONFIG_IPV6_SIT)
2643 if (dev->type == ARPHRD_SIT) {
2644 const struct net_device_ops *ops = dev->netdev_ops;
2645 struct ifreq ifr;
2646 struct ip_tunnel_parm p;
2647
2648 err = -EADDRNOTAVAIL;
2649 if (!(ipv6_addr_type(&ireq.ifr6_addr) & IPV6_ADDR_COMPATv4))
2650 goto err_exit;
2651
2652 memset(&p, 0, sizeof(p));
2653 p.iph.daddr = ireq.ifr6_addr.s6_addr32[3];
2654 p.iph.saddr = 0;
2655 p.iph.version = 4;
2656 p.iph.ihl = 5;
2657 p.iph.protocol = IPPROTO_IPV6;
2658 p.iph.ttl = 64;
2659 ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
2660
2661 if (ops->ndo_do_ioctl) {
2662 mm_segment_t oldfs = get_fs();
2663
2664 set_fs(KERNEL_DS);
2665 err = ops->ndo_do_ioctl(dev, &ifr, SIOCADDTUNNEL);
2666 set_fs(oldfs);
2667 } else
2668 err = -EOPNOTSUPP;
2669
2670 if (err == 0) {
2671 err = -ENOBUFS;
2672 dev = __dev_get_by_name(net, p.name);
2673 if (!dev)
2674 goto err_exit;
2675 err = dev_open(dev);
2676 }
2677 }
2678 #endif
2679
2680 err_exit:
2681 rtnl_unlock();
2682 return err;
2683 }
2684
2685 static int ipv6_mc_config(struct sock *sk, bool join,
2686 const struct in6_addr *addr, int ifindex)
2687 {
2688 int ret;
2689
2690 ASSERT_RTNL();
2691
2692 lock_sock(sk);
2693 if (join)
2694 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2695 else
2696 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2697 release_sock(sk);
2698
2699 return ret;
2700 }
2701
2702 /*
2703 * Manual configuration of address on an interface
2704 */
2705 static int inet6_addr_add(struct net *net, int ifindex,
2706 const struct in6_addr *pfx,
2707 const struct in6_addr *peer_pfx,
2708 unsigned int plen, __u32 ifa_flags,
2709 __u32 prefered_lft, __u32 valid_lft)
2710 {
2711 struct inet6_ifaddr *ifp;
2712 struct inet6_dev *idev;
2713 struct net_device *dev;
2714 unsigned long timeout;
2715 clock_t expires;
2716 int scope;
2717 u32 flags;
2718
2719 ASSERT_RTNL();
2720
2721 if (plen > 128)
2722 return -EINVAL;
2723
2724 /* check the lifetime */
2725 if (!valid_lft || prefered_lft > valid_lft)
2726 return -EINVAL;
2727
2728 if (ifa_flags & IFA_F_MANAGETEMPADDR && plen != 64)
2729 return -EINVAL;
2730
2731 dev = __dev_get_by_index(net, ifindex);
2732 if (!dev)
2733 return -ENODEV;
2734
2735 idev = addrconf_add_dev(dev);
2736 if (IS_ERR(idev))
2737 return PTR_ERR(idev);
2738
2739 if (ifa_flags & IFA_F_MCAUTOJOIN) {
2740 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2741 true, pfx, ifindex);
2742
2743 if (ret < 0)
2744 return ret;
2745 }
2746
2747 scope = ipv6_addr_scope(pfx);
2748
2749 timeout = addrconf_timeout_fixup(valid_lft, HZ);
2750 if (addrconf_finite_timeout(timeout)) {
2751 expires = jiffies_to_clock_t(timeout * HZ);
2752 valid_lft = timeout;
2753 flags = RTF_EXPIRES;
2754 } else {
2755 expires = 0;
2756 flags = 0;
2757 ifa_flags |= IFA_F_PERMANENT;
2758 }
2759
2760 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
2761 if (addrconf_finite_timeout(timeout)) {
2762 if (timeout == 0)
2763 ifa_flags |= IFA_F_DEPRECATED;
2764 prefered_lft = timeout;
2765 }
2766
2767 ifp = ipv6_add_addr(idev, pfx, peer_pfx, plen, scope, ifa_flags,
2768 valid_lft, prefered_lft);
2769
2770 if (!IS_ERR(ifp)) {
2771 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
2772 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, dev,
2773 expires, flags);
2774 }
2775
2776 /*
2777 * Note that section 3.1 of RFC 4429 indicates
2778 * that the Optimistic flag should not be set for
2779 * manually configured addresses
2780 */
2781 addrconf_dad_start(ifp);
2782 if (ifa_flags & IFA_F_MANAGETEMPADDR)
2783 manage_tempaddrs(idev, ifp, valid_lft, prefered_lft,
2784 true, jiffies);
2785 in6_ifa_put(ifp);
2786 addrconf_verify_rtnl();
2787 return 0;
2788 } else if (ifa_flags & IFA_F_MCAUTOJOIN) {
2789 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2790 false, pfx, ifindex);
2791 }
2792
2793 return PTR_ERR(ifp);
2794 }
2795
2796 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
2797 const struct in6_addr *pfx, unsigned int plen)
2798 {
2799 struct inet6_ifaddr *ifp;
2800 struct inet6_dev *idev;
2801 struct net_device *dev;
2802
2803 if (plen > 128)
2804 return -EINVAL;
2805
2806 dev = __dev_get_by_index(net, ifindex);
2807 if (!dev)
2808 return -ENODEV;
2809
2810 idev = __in6_dev_get(dev);
2811 if (!idev)
2812 return -ENXIO;
2813
2814 read_lock_bh(&idev->lock);
2815 list_for_each_entry(ifp, &idev->addr_list, if_list) {
2816 if (ifp->prefix_len == plen &&
2817 ipv6_addr_equal(pfx, &ifp->addr)) {
2818 in6_ifa_hold(ifp);
2819 read_unlock_bh(&idev->lock);
2820
2821 if (!(ifp->flags & IFA_F_TEMPORARY) &&
2822 (ifa_flags & IFA_F_MANAGETEMPADDR))
2823 manage_tempaddrs(idev, ifp, 0, 0, false,
2824 jiffies);
2825 ipv6_del_addr(ifp);
2826 addrconf_verify_rtnl();
2827 if (ipv6_addr_is_multicast(pfx)) {
2828 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2829 false, pfx, dev->ifindex);
2830 }
2831 return 0;
2832 }
2833 }
2834 read_unlock_bh(&idev->lock);
2835 return -EADDRNOTAVAIL;
2836 }
2837
2838
2839 int addrconf_add_ifaddr(struct net *net, void __user *arg)
2840 {
2841 struct in6_ifreq ireq;
2842 int err;
2843
2844 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2845 return -EPERM;
2846
2847 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2848 return -EFAULT;
2849
2850 rtnl_lock();
2851 err = inet6_addr_add(net, ireq.ifr6_ifindex, &ireq.ifr6_addr, NULL,
2852 ireq.ifr6_prefixlen, IFA_F_PERMANENT,
2853 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2854 rtnl_unlock();
2855 return err;
2856 }
2857
2858 int addrconf_del_ifaddr(struct net *net, void __user *arg)
2859 {
2860 struct in6_ifreq ireq;
2861 int err;
2862
2863 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
2864 return -EPERM;
2865
2866 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2867 return -EFAULT;
2868
2869 rtnl_lock();
2870 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
2871 ireq.ifr6_prefixlen);
2872 rtnl_unlock();
2873 return err;
2874 }
2875
2876 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
2877 int plen, int scope)
2878 {
2879 struct inet6_ifaddr *ifp;
2880
2881 ifp = ipv6_add_addr(idev, addr, NULL, plen,
2882 scope, IFA_F_PERMANENT,
2883 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
2884 if (!IS_ERR(ifp)) {
2885 spin_lock_bh(&ifp->lock);
2886 ifp->flags &= ~IFA_F_TENTATIVE;
2887 spin_unlock_bh(&ifp->lock);
2888 ipv6_ifa_notify(RTM_NEWADDR, ifp);
2889 in6_ifa_put(ifp);
2890 }
2891 }
2892
2893 #if IS_ENABLED(CONFIG_IPV6_SIT)
2894 static void sit_add_v4_addrs(struct inet6_dev *idev)
2895 {
2896 struct in6_addr addr;
2897 struct net_device *dev;
2898 struct net *net = dev_net(idev->dev);
2899 int scope, plen;
2900 u32 pflags = 0;
2901
2902 ASSERT_RTNL();
2903
2904 memset(&addr, 0, sizeof(struct in6_addr));
2905 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr, 4);
2906
2907 if (idev->dev->flags&IFF_POINTOPOINT) {
2908 addr.s6_addr32[0] = htonl(0xfe800000);
2909 scope = IFA_LINK;
2910 plen = 64;
2911 } else {
2912 scope = IPV6_ADDR_COMPATv4;
2913 plen = 96;
2914 pflags |= RTF_NONEXTHOP;
2915 }
2916
2917 if (addr.s6_addr32[3]) {
2918 add_addr(idev, &addr, plen, scope);
2919 addrconf_prefix_route(&addr, plen, idev->dev, 0, pflags);
2920 return;
2921 }
2922
2923 for_each_netdev(net, dev) {
2924 struct in_device *in_dev = __in_dev_get_rtnl(dev);
2925 if (in_dev && (dev->flags & IFF_UP)) {
2926 struct in_ifaddr *ifa;
2927
2928 int flag = scope;
2929
2930 for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next) {
2931
2932 addr.s6_addr32[3] = ifa->ifa_local;
2933
2934 if (ifa->ifa_scope == RT_SCOPE_LINK)
2935 continue;
2936 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
2937 if (idev->dev->flags&IFF_POINTOPOINT)
2938 continue;
2939 flag |= IFA_HOST;
2940 }
2941
2942 add_addr(idev, &addr, plen, flag);
2943 addrconf_prefix_route(&addr, plen, idev->dev, 0,
2944 pflags);
2945 }
2946 }
2947 }
2948 }
2949 #endif
2950
2951 static void init_loopback(struct net_device *dev)
2952 {
2953 struct inet6_dev *idev;
2954 struct net_device *sp_dev;
2955 struct inet6_ifaddr *sp_ifa;
2956 struct rt6_info *sp_rt;
2957
2958 /* ::1 */
2959
2960 ASSERT_RTNL();
2961
2962 idev = ipv6_find_idev(dev);
2963 if (!idev) {
2964 pr_debug("%s: add_dev failed\n", __func__);
2965 return;
2966 }
2967
2968 add_addr(idev, &in6addr_loopback, 128, IFA_HOST);
2969
2970 /* Add routes to other interface's IPv6 addresses */
2971 for_each_netdev(dev_net(dev), sp_dev) {
2972 if (!strcmp(sp_dev->name, dev->name))
2973 continue;
2974
2975 idev = __in6_dev_get(sp_dev);
2976 if (!idev)
2977 continue;
2978
2979 read_lock_bh(&idev->lock);
2980 list_for_each_entry(sp_ifa, &idev->addr_list, if_list) {
2981
2982 if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE))
2983 continue;
2984
2985 if (sp_ifa->rt) {
2986 /* This dst has been added to garbage list when
2987 * lo device down, release this obsolete dst and
2988 * reallocate a new router for ifa.
2989 */
2990 if (sp_ifa->rt->dst.obsolete > 0) {
2991 ip6_rt_put(sp_ifa->rt);
2992 sp_ifa->rt = NULL;
2993 } else {
2994 continue;
2995 }
2996 }
2997
2998 sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, false);
2999
3000 /* Failure cases are ignored */
3001 if (!IS_ERR(sp_rt)) {
3002 sp_ifa->rt = sp_rt;
3003 ip6_ins_rt(sp_rt);
3004 }
3005 }
3006 read_unlock_bh(&idev->lock);
3007 }
3008 }
3009
3010 void addrconf_add_linklocal(struct inet6_dev *idev,
3011 const struct in6_addr *addr, u32 flags)
3012 {
3013 struct inet6_ifaddr *ifp;
3014 u32 addr_flags = flags | IFA_F_PERMANENT;
3015
3016 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3017 if (idev->cnf.optimistic_dad &&
3018 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3019 addr_flags |= IFA_F_OPTIMISTIC;
3020 #endif
3021
3022 ifp = ipv6_add_addr(idev, addr, NULL, 64, IFA_LINK, addr_flags,
3023 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME);
3024 if (!IS_ERR(ifp)) {
3025 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, idev->dev, 0, 0);
3026 addrconf_dad_start(ifp);
3027 in6_ifa_put(ifp);
3028 }
3029 }
3030 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3031
3032 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3033 {
3034 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3035 return true;
3036
3037 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3038 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3039 return true;
3040
3041 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3042 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3043 return true;
3044
3045 return false;
3046 }
3047
3048 static int ipv6_generate_stable_address(struct in6_addr *address,
3049 u8 dad_count,
3050 const struct inet6_dev *idev)
3051 {
3052 static DEFINE_SPINLOCK(lock);
3053 static __u32 digest[SHA_DIGEST_WORDS];
3054 static __u32 workspace[SHA_WORKSPACE_WORDS];
3055
3056 static union {
3057 char __data[SHA_MESSAGE_BYTES];
3058 struct {
3059 struct in6_addr secret;
3060 __be32 prefix[2];
3061 unsigned char hwaddr[MAX_ADDR_LEN];
3062 u8 dad_count;
3063 } __packed;
3064 } data;
3065
3066 struct in6_addr secret;
3067 struct in6_addr temp;
3068 struct net *net = dev_net(idev->dev);
3069
3070 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
3071
3072 if (idev->cnf.stable_secret.initialized)
3073 secret = idev->cnf.stable_secret.secret;
3074 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3075 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3076 else
3077 return -1;
3078
3079 retry:
3080 spin_lock_bh(&lock);
3081
3082 sha_init(digest);
3083 memset(&data, 0, sizeof(data));
3084 memset(workspace, 0, sizeof(workspace));
3085 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3086 data.prefix[0] = address->s6_addr32[0];
3087 data.prefix[1] = address->s6_addr32[1];
3088 data.secret = secret;
3089 data.dad_count = dad_count;
3090
3091 sha_transform(digest, data.__data, workspace);
3092
3093 temp = *address;
3094 temp.s6_addr32[2] = (__force __be32)digest[0];
3095 temp.s6_addr32[3] = (__force __be32)digest[1];
3096
3097 spin_unlock_bh(&lock);
3098
3099 if (ipv6_reserved_interfaceid(temp)) {
3100 dad_count++;
3101 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3102 return -1;
3103 goto retry;
3104 }
3105
3106 *address = temp;
3107 return 0;
3108 }
3109
3110 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3111 {
3112 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3113
3114 if (s->initialized)
3115 return;
3116 s = &idev->cnf.stable_secret;
3117 get_random_bytes(&s->secret, sizeof(s->secret));
3118 s->initialized = true;
3119 }
3120
3121 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3122 {
3123 struct in6_addr addr;
3124
3125 /* no link local addresses on L3 master devices */
3126 if (netif_is_l3_master(idev->dev))
3127 return;
3128
3129 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3130
3131 switch (idev->addr_gen_mode) {
3132 case IN6_ADDR_GEN_MODE_RANDOM:
3133 ipv6_gen_mode_random_init(idev);
3134 /* fallthrough */
3135 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3136 if (!ipv6_generate_stable_address(&addr, 0, idev))
3137 addrconf_add_linklocal(idev, &addr,
3138 IFA_F_STABLE_PRIVACY);
3139 else if (prefix_route)
3140 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3141 break;
3142 case IN6_ADDR_GEN_MODE_EUI64:
3143 /* addrconf_add_linklocal also adds a prefix_route and we
3144 * only need to care about prefix routes if ipv6_generate_eui64
3145 * couldn't generate one.
3146 */
3147 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3148 addrconf_add_linklocal(idev, &addr, 0);
3149 else if (prefix_route)
3150 addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
3151 break;
3152 case IN6_ADDR_GEN_MODE_NONE:
3153 default:
3154 /* will not add any link local address */
3155 break;
3156 }
3157 }
3158
3159 static void addrconf_dev_config(struct net_device *dev)
3160 {
3161 struct inet6_dev *idev;
3162
3163 ASSERT_RTNL();
3164
3165 if ((dev->type != ARPHRD_ETHER) &&
3166 (dev->type != ARPHRD_FDDI) &&
3167 (dev->type != ARPHRD_ARCNET) &&
3168 (dev->type != ARPHRD_INFINIBAND) &&
3169 (dev->type != ARPHRD_IEEE1394) &&
3170 (dev->type != ARPHRD_TUNNEL6) &&
3171 (dev->type != ARPHRD_6LOWPAN) &&
3172 (dev->type != ARPHRD_NONE)) {
3173 /* Alas, we support only Ethernet autoconfiguration. */
3174 return;
3175 }
3176
3177 idev = addrconf_add_dev(dev);
3178 if (IS_ERR(idev))
3179 return;
3180
3181 /* this device type has no EUI support */
3182 if (dev->type == ARPHRD_NONE &&
3183 idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3184 idev->addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3185
3186 addrconf_addr_gen(idev, false);
3187 }
3188
3189 #if IS_ENABLED(CONFIG_IPV6_SIT)
3190 static void addrconf_sit_config(struct net_device *dev)
3191 {
3192 struct inet6_dev *idev;
3193
3194 ASSERT_RTNL();
3195
3196 /*
3197 * Configure the tunnel with one of our IPv4
3198 * addresses... we should configure all of
3199 * our v4 addrs in the tunnel
3200 */
3201
3202 idev = ipv6_find_idev(dev);
3203 if (!idev) {
3204 pr_debug("%s: add_dev failed\n", __func__);
3205 return;
3206 }
3207
3208 if (dev->priv_flags & IFF_ISATAP) {
3209 addrconf_addr_gen(idev, false);
3210 return;
3211 }
3212
3213 sit_add_v4_addrs(idev);
3214
3215 if (dev->flags&IFF_POINTOPOINT)
3216 addrconf_add_mroute(dev);
3217 }
3218 #endif
3219
3220 #if IS_ENABLED(CONFIG_NET_IPGRE)
3221 static void addrconf_gre_config(struct net_device *dev)
3222 {
3223 struct inet6_dev *idev;
3224
3225 ASSERT_RTNL();
3226
3227 idev = ipv6_find_idev(dev);
3228 if (!idev) {
3229 pr_debug("%s: add_dev failed\n", __func__);
3230 return;
3231 }
3232
3233 addrconf_addr_gen(idev, true);
3234 if (dev->flags & IFF_POINTOPOINT)
3235 addrconf_add_mroute(dev);
3236 }
3237 #endif
3238
3239 static int fixup_permanent_addr(struct inet6_dev *idev,
3240 struct inet6_ifaddr *ifp)
3241 {
3242 if (!ifp->rt) {
3243 struct rt6_info *rt;
3244
3245 rt = addrconf_dst_alloc(idev, &ifp->addr, false);
3246 if (unlikely(IS_ERR(rt)))
3247 return PTR_ERR(rt);
3248
3249 ifp->rt = rt;
3250 }
3251
3252 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3253 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3254 idev->dev, 0, 0);
3255 }
3256
3257 addrconf_dad_start(ifp);
3258
3259 return 0;
3260 }
3261
3262 static void addrconf_permanent_addr(struct net_device *dev)
3263 {
3264 struct inet6_ifaddr *ifp, *tmp;
3265 struct inet6_dev *idev;
3266
3267 idev = __in6_dev_get(dev);
3268 if (!idev)
3269 return;
3270
3271 write_lock_bh(&idev->lock);
3272
3273 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3274 if ((ifp->flags & IFA_F_PERMANENT) &&
3275 fixup_permanent_addr(idev, ifp) < 0) {
3276 write_unlock_bh(&idev->lock);
3277 ipv6_del_addr(ifp);
3278 write_lock_bh(&idev->lock);
3279
3280 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3281 idev->dev->name, &ifp->addr);
3282 }
3283 }
3284
3285 write_unlock_bh(&idev->lock);
3286 }
3287
3288 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3289 void *ptr)
3290 {
3291 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3292 struct netdev_notifier_changeupper_info *info;
3293 struct inet6_dev *idev = __in6_dev_get(dev);
3294 int run_pending = 0;
3295 int err;
3296
3297 switch (event) {
3298 case NETDEV_REGISTER:
3299 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3300 idev = ipv6_add_dev(dev);
3301 if (IS_ERR(idev))
3302 return notifier_from_errno(PTR_ERR(idev));
3303 }
3304 break;
3305
3306 case NETDEV_CHANGEMTU:
3307 /* if MTU under IPV6_MIN_MTU stop IPv6 on this interface. */
3308 if (dev->mtu < IPV6_MIN_MTU) {
3309 addrconf_ifdown(dev, 1);
3310 break;
3311 }
3312
3313 if (idev) {
3314 rt6_mtu_change(dev, dev->mtu);
3315 idev->cnf.mtu6 = dev->mtu;
3316 break;
3317 }
3318
3319 /* allocate new idev */
3320 idev = ipv6_add_dev(dev);
3321 if (IS_ERR(idev))
3322 break;
3323
3324 /* device is still not ready */
3325 if (!(idev->if_flags & IF_READY))
3326 break;
3327
3328 run_pending = 1;
3329
3330 /* fall through */
3331
3332 case NETDEV_UP:
3333 case NETDEV_CHANGE:
3334 if (dev->flags & IFF_SLAVE)
3335 break;
3336
3337 if (idev && idev->cnf.disable_ipv6)
3338 break;
3339
3340 if (event == NETDEV_UP) {
3341 /* restore routes for permanent addresses */
3342 addrconf_permanent_addr(dev);
3343
3344 if (!addrconf_qdisc_ok(dev)) {
3345 /* device is not ready yet. */
3346 pr_info("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3347 dev->name);
3348 break;
3349 }
3350
3351 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3352 idev = ipv6_add_dev(dev);
3353
3354 if (!IS_ERR_OR_NULL(idev)) {
3355 idev->if_flags |= IF_READY;
3356 run_pending = 1;
3357 }
3358 } else if (event == NETDEV_CHANGE) {
3359 if (!addrconf_qdisc_ok(dev)) {
3360 /* device is still not ready. */
3361 break;
3362 }
3363
3364 if (idev) {
3365 if (idev->if_flags & IF_READY)
3366 /* device is already configured. */
3367 break;
3368 idev->if_flags |= IF_READY;
3369 }
3370
3371 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3372 dev->name);
3373
3374 run_pending = 1;
3375 }
3376
3377 switch (dev->type) {
3378 #if IS_ENABLED(CONFIG_IPV6_SIT)
3379 case ARPHRD_SIT:
3380 addrconf_sit_config(dev);
3381 break;
3382 #endif
3383 #if IS_ENABLED(CONFIG_NET_IPGRE)
3384 case ARPHRD_IPGRE:
3385 addrconf_gre_config(dev);
3386 break;
3387 #endif
3388 case ARPHRD_LOOPBACK:
3389 init_loopback(dev);
3390 break;
3391
3392 default:
3393 addrconf_dev_config(dev);
3394 break;
3395 }
3396
3397 if (!IS_ERR_OR_NULL(idev)) {
3398 if (run_pending)
3399 addrconf_dad_run(idev);
3400
3401 /*
3402 * If the MTU changed during the interface down,
3403 * when the interface up, the changed MTU must be
3404 * reflected in the idev as well as routers.
3405 */
3406 if (idev->cnf.mtu6 != dev->mtu &&
3407 dev->mtu >= IPV6_MIN_MTU) {
3408 rt6_mtu_change(dev, dev->mtu);
3409 idev->cnf.mtu6 = dev->mtu;
3410 }
3411 idev->tstamp = jiffies;
3412 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3413
3414 /*
3415 * If the changed mtu during down is lower than
3416 * IPV6_MIN_MTU stop IPv6 on this interface.
3417 */
3418 if (dev->mtu < IPV6_MIN_MTU)
3419 addrconf_ifdown(dev, 1);
3420 }
3421 break;
3422
3423 case NETDEV_DOWN:
3424 case NETDEV_UNREGISTER:
3425 /*
3426 * Remove all addresses from this interface.
3427 */
3428 addrconf_ifdown(dev, event != NETDEV_DOWN);
3429 break;
3430
3431 case NETDEV_CHANGENAME:
3432 if (idev) {
3433 snmp6_unregister_dev(idev);
3434 addrconf_sysctl_unregister(idev);
3435 err = addrconf_sysctl_register(idev);
3436 if (err)
3437 return notifier_from_errno(err);
3438 err = snmp6_register_dev(idev);
3439 if (err) {
3440 addrconf_sysctl_unregister(idev);
3441 return notifier_from_errno(err);
3442 }
3443 }
3444 break;
3445
3446 case NETDEV_PRE_TYPE_CHANGE:
3447 case NETDEV_POST_TYPE_CHANGE:
3448 if (idev)
3449 addrconf_type_change(dev, event);
3450 break;
3451
3452 case NETDEV_CHANGEUPPER:
3453 info = ptr;
3454
3455 /* flush all routes if dev is linked to or unlinked from
3456 * an L3 master device (e.g., VRF)
3457 */
3458 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3459 addrconf_ifdown(dev, 0);
3460 }
3461
3462 return NOTIFY_OK;
3463 }
3464
3465 /*
3466 * addrconf module should be notified of a device going up
3467 */
3468 static struct notifier_block ipv6_dev_notf = {
3469 .notifier_call = addrconf_notify,
3470 };
3471
3472 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3473 {
3474 struct inet6_dev *idev;
3475 ASSERT_RTNL();
3476
3477 idev = __in6_dev_get(dev);
3478
3479 if (event == NETDEV_POST_TYPE_CHANGE)
3480 ipv6_mc_remap(idev);
3481 else if (event == NETDEV_PRE_TYPE_CHANGE)
3482 ipv6_mc_unmap(idev);
3483 }
3484
3485 static bool addr_is_local(const struct in6_addr *addr)
3486 {
3487 return ipv6_addr_type(addr) &
3488 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3489 }
3490
3491 static int addrconf_ifdown(struct net_device *dev, int how)
3492 {
3493 struct net *net = dev_net(dev);
3494 struct inet6_dev *idev;
3495 struct inet6_ifaddr *ifa, *tmp;
3496 struct list_head del_list;
3497 int _keep_addr;
3498 bool keep_addr;
3499 int state, i;
3500
3501 ASSERT_RTNL();
3502
3503 rt6_ifdown(net, dev);
3504 neigh_ifdown(&nd_tbl, dev);
3505
3506 idev = __in6_dev_get(dev);
3507 if (!idev)
3508 return -ENODEV;
3509
3510 /*
3511 * Step 1: remove reference to ipv6 device from parent device.
3512 * Do not dev_put!
3513 */
3514 if (how) {
3515 idev->dead = 1;
3516
3517 /* protected by rtnl_lock */
3518 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3519
3520 /* Step 1.5: remove snmp6 entry */
3521 snmp6_unregister_dev(idev);
3522
3523 }
3524
3525 /* aggregate the system setting and interface setting */
3526 _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3527 if (!_keep_addr)
3528 _keep_addr = idev->cnf.keep_addr_on_down;
3529
3530 /* combine the user config with event to determine if permanent
3531 * addresses are to be removed from address hash table
3532 */
3533 keep_addr = !(how || _keep_addr <= 0 || idev->cnf.disable_ipv6);
3534
3535 /* Step 2: clear hash table */
3536 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3537 struct hlist_head *h = &inet6_addr_lst[i];
3538
3539 spin_lock_bh(&addrconf_hash_lock);
3540 restart:
3541 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3542 if (ifa->idev == idev) {
3543 addrconf_del_dad_work(ifa);
3544 /* combined flag + permanent flag decide if
3545 * address is retained on a down event
3546 */
3547 if (!keep_addr ||
3548 !(ifa->flags & IFA_F_PERMANENT) ||
3549 addr_is_local(&ifa->addr)) {
3550 hlist_del_init_rcu(&ifa->addr_lst);
3551 goto restart;
3552 }
3553 }
3554 }
3555 spin_unlock_bh(&addrconf_hash_lock);
3556 }
3557
3558 write_lock_bh(&idev->lock);
3559
3560 addrconf_del_rs_timer(idev);
3561
3562 /* Step 2: clear flags for stateless addrconf */
3563 if (!how)
3564 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3565
3566 if (how && del_timer(&idev->regen_timer))
3567 in6_dev_put(idev);
3568
3569 /* Step 3: clear tempaddr list */
3570 while (!list_empty(&idev->tempaddr_list)) {
3571 ifa = list_first_entry(&idev->tempaddr_list,
3572 struct inet6_ifaddr, tmp_list);
3573 list_del(&ifa->tmp_list);
3574 write_unlock_bh(&idev->lock);
3575 spin_lock_bh(&ifa->lock);
3576
3577 if (ifa->ifpub) {
3578 in6_ifa_put(ifa->ifpub);
3579 ifa->ifpub = NULL;
3580 }
3581 spin_unlock_bh(&ifa->lock);
3582 in6_ifa_put(ifa);
3583 write_lock_bh(&idev->lock);
3584 }
3585
3586 /* re-combine the user config with event to determine if permanent
3587 * addresses are to be removed from the interface list
3588 */
3589 keep_addr = (!how && _keep_addr > 0 && !idev->cnf.disable_ipv6);
3590
3591 INIT_LIST_HEAD(&del_list);
3592 list_for_each_entry_safe(ifa, tmp, &idev->addr_list, if_list) {
3593 struct rt6_info *rt = NULL;
3594
3595 addrconf_del_dad_work(ifa);
3596
3597 write_unlock_bh(&idev->lock);
3598 spin_lock_bh(&ifa->lock);
3599
3600 if (keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3601 !addr_is_local(&ifa->addr)) {
3602 /* set state to skip the notifier below */
3603 state = INET6_IFADDR_STATE_DEAD;
3604 ifa->state = 0;
3605 if (!(ifa->flags & IFA_F_NODAD))
3606 ifa->flags |= IFA_F_TENTATIVE;
3607
3608 rt = ifa->rt;
3609 ifa->rt = NULL;
3610 } else {
3611 state = ifa->state;
3612 ifa->state = INET6_IFADDR_STATE_DEAD;
3613
3614 list_move(&ifa->if_list, &del_list);
3615 }
3616
3617 spin_unlock_bh(&ifa->lock);
3618
3619 if (rt)
3620 ip6_del_rt(rt);
3621
3622 if (state != INET6_IFADDR_STATE_DEAD) {
3623 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3624 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3625 } else {
3626 if (idev->cnf.forwarding)
3627 addrconf_leave_anycast(ifa);
3628 addrconf_leave_solict(ifa->idev, &ifa->addr);
3629 }
3630
3631 write_lock_bh(&idev->lock);
3632 }
3633
3634 write_unlock_bh(&idev->lock);
3635
3636 /* now clean up addresses to be removed */
3637 while (!list_empty(&del_list)) {
3638 ifa = list_first_entry(&del_list,
3639 struct inet6_ifaddr, if_list);
3640 list_del(&ifa->if_list);
3641
3642 in6_ifa_put(ifa);
3643 }
3644
3645 /* Step 5: Discard anycast and multicast list */
3646 if (how) {
3647 ipv6_ac_destroy_dev(idev);
3648 ipv6_mc_destroy_dev(idev);
3649 } else {
3650 ipv6_mc_down(idev);
3651 }
3652
3653 idev->tstamp = jiffies;
3654
3655 /* Last: Shot the device (if unregistered) */
3656 if (how) {
3657 addrconf_sysctl_unregister(idev);
3658 neigh_parms_release(&nd_tbl, idev->nd_parms);
3659 neigh_ifdown(&nd_tbl, dev);
3660 in6_dev_put(idev);
3661 }
3662 return 0;
3663 }
3664
3665 static void addrconf_rs_timer(unsigned long data)
3666 {
3667 struct inet6_dev *idev = (struct inet6_dev *)data;
3668 struct net_device *dev = idev->dev;
3669 struct in6_addr lladdr;
3670
3671 write_lock(&idev->lock);
3672 if (idev->dead || !(idev->if_flags & IF_READY))
3673 goto out;
3674
3675 if (!ipv6_accept_ra(idev))
3676 goto out;
3677
3678 /* Announcement received after solicitation was sent */
3679 if (idev->if_flags & IF_RA_RCVD)
3680 goto out;
3681
3682 if (idev->rs_probes++ < idev->cnf.rtr_solicits) {
3683 write_unlock(&idev->lock);
3684 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3685 ndisc_send_rs(dev, &lladdr,
3686 &in6addr_linklocal_allrouters);
3687 else
3688 goto put;
3689
3690 write_lock(&idev->lock);
3691 /* The wait after the last probe can be shorter */
3692 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3693 idev->cnf.rtr_solicits) ?
3694 idev->cnf.rtr_solicit_delay :
3695 idev->cnf.rtr_solicit_interval);
3696 } else {
3697 /*
3698 * Note: we do not support deprecated "all on-link"
3699 * assumption any longer.
3700 */
3701 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3702 }
3703
3704 out:
3705 write_unlock(&idev->lock);
3706 put:
3707 in6_dev_put(idev);
3708 }
3709
3710 /*
3711 * Duplicate Address Detection
3712 */
3713 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3714 {
3715 unsigned long rand_num;
3716 struct inet6_dev *idev = ifp->idev;
3717
3718 if (ifp->flags & IFA_F_OPTIMISTIC)
3719 rand_num = 0;
3720 else
3721 rand_num = prandom_u32() % (idev->cnf.rtr_solicit_delay ? : 1);
3722
3723 ifp->dad_probes = idev->cnf.dad_transmits;
3724 addrconf_mod_dad_work(ifp, rand_num);
3725 }
3726
3727 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3728 {
3729 struct inet6_dev *idev = ifp->idev;
3730 struct net_device *dev = idev->dev;
3731 bool notify = false;
3732
3733 addrconf_join_solict(dev, &ifp->addr);
3734
3735 prandom_seed((__force u32) ifp->addr.s6_addr32[3]);
3736
3737 read_lock_bh(&idev->lock);
3738 spin_lock(&ifp->lock);
3739 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3740 goto out;
3741
3742 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
3743 idev->cnf.accept_dad < 1 ||
3744 !(ifp->flags&IFA_F_TENTATIVE) ||
3745 ifp->flags & IFA_F_NODAD) {
3746 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3747 spin_unlock(&ifp->lock);
3748 read_unlock_bh(&idev->lock);
3749
3750 addrconf_dad_completed(ifp);
3751 return;
3752 }
3753
3754 if (!(idev->if_flags & IF_READY)) {
3755 spin_unlock(&ifp->lock);
3756 read_unlock_bh(&idev->lock);
3757 /*
3758 * If the device is not ready:
3759 * - keep it tentative if it is a permanent address.
3760 * - otherwise, kill it.
3761 */
3762 in6_ifa_hold(ifp);
3763 addrconf_dad_stop(ifp, 0);
3764 return;
3765 }
3766
3767 /*
3768 * Optimistic nodes can start receiving
3769 * Frames right away
3770 */
3771 if (ifp->flags & IFA_F_OPTIMISTIC) {
3772 ip6_ins_rt(ifp->rt);
3773 if (ipv6_use_optimistic_addr(idev)) {
3774 /* Because optimistic nodes can use this address,
3775 * notify listeners. If DAD fails, RTM_DELADDR is sent.
3776 */
3777 notify = true;
3778 }
3779 }
3780
3781 addrconf_dad_kick(ifp);
3782 out:
3783 spin_unlock(&ifp->lock);
3784 read_unlock_bh(&idev->lock);
3785 if (notify)
3786 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3787 }
3788
3789 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
3790 {
3791 bool begin_dad = false;
3792
3793 spin_lock_bh(&ifp->lock);
3794 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
3795 ifp->state = INET6_IFADDR_STATE_PREDAD;
3796 begin_dad = true;
3797 }
3798 spin_unlock_bh(&ifp->lock);
3799
3800 if (begin_dad)
3801 addrconf_mod_dad_work(ifp, 0);
3802 }
3803
3804 static void addrconf_dad_work(struct work_struct *w)
3805 {
3806 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
3807 struct inet6_ifaddr,
3808 dad_work);
3809 struct inet6_dev *idev = ifp->idev;
3810 struct in6_addr mcaddr;
3811 bool disable_ipv6 = false;
3812
3813 enum {
3814 DAD_PROCESS,
3815 DAD_BEGIN,
3816 DAD_ABORT,
3817 } action = DAD_PROCESS;
3818
3819 rtnl_lock();
3820
3821 spin_lock_bh(&ifp->lock);
3822 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
3823 action = DAD_BEGIN;
3824 ifp->state = INET6_IFADDR_STATE_DAD;
3825 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
3826 action = DAD_ABORT;
3827 ifp->state = INET6_IFADDR_STATE_POSTDAD;
3828
3829 if (idev->cnf.accept_dad > 1 && !idev->cnf.disable_ipv6 &&
3830 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
3831 struct in6_addr addr;
3832
3833 addr.s6_addr32[0] = htonl(0xfe800000);
3834 addr.s6_addr32[1] = 0;
3835
3836 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
3837 ipv6_addr_equal(&ifp->addr, &addr)) {
3838 /* DAD failed for link-local based on MAC */
3839 idev->cnf.disable_ipv6 = 1;
3840
3841 pr_info("%s: IPv6 being disabled!\n",
3842 ifp->idev->dev->name);
3843 disable_ipv6 = true;
3844 }
3845 }
3846 }
3847 spin_unlock_bh(&ifp->lock);
3848
3849 if (action == DAD_BEGIN) {
3850 addrconf_dad_begin(ifp);
3851 goto out;
3852 } else if (action == DAD_ABORT) {
3853 addrconf_dad_stop(ifp, 1);
3854 if (disable_ipv6)
3855 addrconf_ifdown(idev->dev, 0);
3856 goto out;
3857 }
3858
3859 if (!ifp->dad_probes && addrconf_dad_end(ifp))
3860 goto out;
3861
3862 write_lock_bh(&idev->lock);
3863 if (idev->dead || !(idev->if_flags & IF_READY)) {
3864 write_unlock_bh(&idev->lock);
3865 goto out;
3866 }
3867
3868 spin_lock(&ifp->lock);
3869 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
3870 spin_unlock(&ifp->lock);
3871 write_unlock_bh(&idev->lock);
3872 goto out;
3873 }
3874
3875 if (ifp->dad_probes == 0) {
3876 /*
3877 * DAD was successful
3878 */
3879
3880 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
3881 spin_unlock(&ifp->lock);
3882 write_unlock_bh(&idev->lock);
3883
3884 addrconf_dad_completed(ifp);
3885
3886 goto out;
3887 }
3888
3889 ifp->dad_probes--;
3890 addrconf_mod_dad_work(ifp,
3891 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME));
3892 spin_unlock(&ifp->lock);
3893 write_unlock_bh(&idev->lock);
3894
3895 /* send a neighbour solicitation for our addr */
3896 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
3897 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any);
3898 out:
3899 in6_ifa_put(ifp);
3900 rtnl_unlock();
3901 }
3902
3903 /* ifp->idev must be at least read locked */
3904 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
3905 {
3906 struct inet6_ifaddr *ifpiter;
3907 struct inet6_dev *idev = ifp->idev;
3908
3909 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
3910 if (ifpiter->scope > IFA_LINK)
3911 break;
3912 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
3913 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
3914 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
3915 IFA_F_PERMANENT)
3916 return false;
3917 }
3918 return true;
3919 }
3920
3921 static void addrconf_dad_completed(struct inet6_ifaddr *ifp)
3922 {
3923 struct net_device *dev = ifp->idev->dev;
3924 struct in6_addr lladdr;
3925 bool send_rs, send_mld;
3926
3927 addrconf_del_dad_work(ifp);
3928
3929 /*
3930 * Configure the address for reception. Now it is valid.
3931 */
3932
3933 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3934
3935 /* If added prefix is link local and we are prepared to process
3936 router advertisements, start sending router solicitations.
3937 */
3938
3939 read_lock_bh(&ifp->idev->lock);
3940 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
3941 send_rs = send_mld &&
3942 ipv6_accept_ra(ifp->idev) &&
3943 ifp->idev->cnf.rtr_solicits > 0 &&
3944 (dev->flags&IFF_LOOPBACK) == 0;
3945 read_unlock_bh(&ifp->idev->lock);
3946
3947 /* While dad is in progress mld report's source address is in6_addrany.
3948 * Resend with proper ll now.
3949 */
3950 if (send_mld)
3951 ipv6_mc_dad_complete(ifp->idev);
3952
3953 if (send_rs) {
3954 /*
3955 * If a host as already performed a random delay
3956 * [...] as part of DAD [...] there is no need
3957 * to delay again before sending the first RS
3958 */
3959 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3960 return;
3961 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
3962
3963 write_lock_bh(&ifp->idev->lock);
3964 spin_lock(&ifp->lock);
3965 ifp->idev->rs_probes = 1;
3966 ifp->idev->if_flags |= IF_RS_SENT;
3967 addrconf_mod_rs_timer(ifp->idev,
3968 ifp->idev->cnf.rtr_solicit_interval);
3969 spin_unlock(&ifp->lock);
3970 write_unlock_bh(&ifp->idev->lock);
3971 }
3972 }
3973
3974 static void addrconf_dad_run(struct inet6_dev *idev)
3975 {
3976 struct inet6_ifaddr *ifp;
3977
3978 read_lock_bh(&idev->lock);
3979 list_for_each_entry(ifp, &idev->addr_list, if_list) {
3980 spin_lock(&ifp->lock);
3981 if (ifp->flags & IFA_F_TENTATIVE &&
3982 ifp->state == INET6_IFADDR_STATE_DAD)
3983 addrconf_dad_kick(ifp);
3984 spin_unlock(&ifp->lock);
3985 }
3986 read_unlock_bh(&idev->lock);
3987 }
3988
3989 #ifdef CONFIG_PROC_FS
3990 struct if6_iter_state {
3991 struct seq_net_private p;
3992 int bucket;
3993 int offset;
3994 };
3995
3996 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
3997 {
3998 struct inet6_ifaddr *ifa = NULL;
3999 struct if6_iter_state *state = seq->private;
4000 struct net *net = seq_file_net(seq);
4001 int p = 0;
4002
4003 /* initial bucket if pos is 0 */
4004 if (pos == 0) {
4005 state->bucket = 0;
4006 state->offset = 0;
4007 }
4008
4009 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4010 hlist_for_each_entry_rcu_bh(ifa, &inet6_addr_lst[state->bucket],
4011 addr_lst) {
4012 if (!net_eq(dev_net(ifa->idev->dev), net))
4013 continue;
4014 /* sync with offset */
4015 if (p < state->offset) {
4016 p++;
4017 continue;
4018 }
4019 state->offset++;
4020 return ifa;
4021 }
4022
4023 /* prepare for next bucket */
4024 state->offset = 0;
4025 p = 0;
4026 }
4027 return NULL;
4028 }
4029
4030 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4031 struct inet6_ifaddr *ifa)
4032 {
4033 struct if6_iter_state *state = seq->private;
4034 struct net *net = seq_file_net(seq);
4035
4036 hlist_for_each_entry_continue_rcu_bh(ifa, addr_lst) {
4037 if (!net_eq(dev_net(ifa->idev->dev), net))
4038 continue;
4039 state->offset++;
4040 return ifa;
4041 }
4042
4043 while (++state->bucket < IN6_ADDR_HSIZE) {
4044 state->offset = 0;
4045 hlist_for_each_entry_rcu_bh(ifa,
4046 &inet6_addr_lst[state->bucket], addr_lst) {
4047 if (!net_eq(dev_net(ifa->idev->dev), net))
4048 continue;
4049 state->offset++;
4050 return ifa;
4051 }
4052 }
4053
4054 return NULL;
4055 }
4056
4057 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4058 __acquires(rcu_bh)
4059 {
4060 rcu_read_lock_bh();
4061 return if6_get_first(seq, *pos);
4062 }
4063
4064 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4065 {
4066 struct inet6_ifaddr *ifa;
4067
4068 ifa = if6_get_next(seq, v);
4069 ++*pos;
4070 return ifa;
4071 }
4072
4073 static void if6_seq_stop(struct seq_file *seq, void *v)
4074 __releases(rcu_bh)
4075 {
4076 rcu_read_unlock_bh();
4077 }
4078
4079 static int if6_seq_show(struct seq_file *seq, void *v)
4080 {
4081 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4082 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4083 &ifp->addr,
4084 ifp->idev->dev->ifindex,
4085 ifp->prefix_len,
4086 ifp->scope,
4087 (u8) ifp->flags,
4088 ifp->idev->dev->name);
4089 return 0;
4090 }
4091
4092 static const struct seq_operations if6_seq_ops = {
4093 .start = if6_seq_start,
4094 .next = if6_seq_next,
4095 .show = if6_seq_show,
4096 .stop = if6_seq_stop,
4097 };
4098
4099 static int if6_seq_open(struct inode *inode, struct file *file)
4100 {
4101 return seq_open_net(inode, file, &if6_seq_ops,
4102 sizeof(struct if6_iter_state));
4103 }
4104
4105 static const struct file_operations if6_fops = {
4106 .owner = THIS_MODULE,
4107 .open = if6_seq_open,
4108 .read = seq_read,
4109 .llseek = seq_lseek,
4110 .release = seq_release_net,
4111 };
4112
4113 static int __net_init if6_proc_net_init(struct net *net)
4114 {
4115 if (!proc_create("if_inet6", S_IRUGO, net->proc_net, &if6_fops))
4116 return -ENOMEM;
4117 return 0;
4118 }
4119
4120 static void __net_exit if6_proc_net_exit(struct net *net)
4121 {
4122 remove_proc_entry("if_inet6", net->proc_net);
4123 }
4124
4125 static struct pernet_operations if6_proc_net_ops = {
4126 .init = if6_proc_net_init,
4127 .exit = if6_proc_net_exit,
4128 };
4129
4130 int __init if6_proc_init(void)
4131 {
4132 return register_pernet_subsys(&if6_proc_net_ops);
4133 }
4134
4135 void if6_proc_exit(void)
4136 {
4137 unregister_pernet_subsys(&if6_proc_net_ops);
4138 }
4139 #endif /* CONFIG_PROC_FS */
4140
4141 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4142 /* Check if address is a home address configured on any interface. */
4143 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4144 {
4145 int ret = 0;
4146 struct inet6_ifaddr *ifp = NULL;
4147 unsigned int hash = inet6_addr_hash(addr);
4148
4149 rcu_read_lock_bh();
4150 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[hash], addr_lst) {
4151 if (!net_eq(dev_net(ifp->idev->dev), net))
4152 continue;
4153 if (ipv6_addr_equal(&ifp->addr, addr) &&
4154 (ifp->flags & IFA_F_HOMEADDRESS)) {
4155 ret = 1;
4156 break;
4157 }
4158 }
4159 rcu_read_unlock_bh();
4160 return ret;
4161 }
4162 #endif
4163
4164 /*
4165 * Periodic address status verification
4166 */
4167
4168 static void addrconf_verify_rtnl(void)
4169 {
4170 unsigned long now, next, next_sec, next_sched;
4171 struct inet6_ifaddr *ifp;
4172 int i;
4173
4174 ASSERT_RTNL();
4175
4176 rcu_read_lock_bh();
4177 now = jiffies;
4178 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4179
4180 cancel_delayed_work(&addr_chk_work);
4181
4182 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4183 restart:
4184 hlist_for_each_entry_rcu_bh(ifp, &inet6_addr_lst[i], addr_lst) {
4185 unsigned long age;
4186
4187 /* When setting preferred_lft to a value not zero or
4188 * infinity, while valid_lft is infinity
4189 * IFA_F_PERMANENT has a non-infinity life time.
4190 */
4191 if ((ifp->flags & IFA_F_PERMANENT) &&
4192 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4193 continue;
4194
4195 spin_lock(&ifp->lock);
4196 /* We try to batch several events at once. */
4197 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4198
4199 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4200 age >= ifp->valid_lft) {
4201 spin_unlock(&ifp->lock);
4202 in6_ifa_hold(ifp);
4203 ipv6_del_addr(ifp);
4204 goto restart;
4205 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4206 spin_unlock(&ifp->lock);
4207 continue;
4208 } else if (age >= ifp->prefered_lft) {
4209 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4210 int deprecate = 0;
4211
4212 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4213 deprecate = 1;
4214 ifp->flags |= IFA_F_DEPRECATED;
4215 }
4216
4217 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4218 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4219 next = ifp->tstamp + ifp->valid_lft * HZ;
4220
4221 spin_unlock(&ifp->lock);
4222
4223 if (deprecate) {
4224 in6_ifa_hold(ifp);
4225
4226 ipv6_ifa_notify(0, ifp);
4227 in6_ifa_put(ifp);
4228 goto restart;
4229 }
4230 } else if ((ifp->flags&IFA_F_TEMPORARY) &&
4231 !(ifp->flags&IFA_F_TENTATIVE)) {
4232 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4233 ifp->idev->cnf.dad_transmits *
4234 NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME) / HZ;
4235
4236 if (age >= ifp->prefered_lft - regen_advance) {
4237 struct inet6_ifaddr *ifpub = ifp->ifpub;
4238 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4239 next = ifp->tstamp + ifp->prefered_lft * HZ;
4240 if (!ifp->regen_count && ifpub) {
4241 ifp->regen_count++;
4242 in6_ifa_hold(ifp);
4243 in6_ifa_hold(ifpub);
4244 spin_unlock(&ifp->lock);
4245
4246 spin_lock(&ifpub->lock);
4247 ifpub->regen_count = 0;
4248 spin_unlock(&ifpub->lock);
4249 ipv6_create_tempaddr(ifpub, ifp);
4250 in6_ifa_put(ifpub);
4251 in6_ifa_put(ifp);
4252 goto restart;
4253 }
4254 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4255 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4256 spin_unlock(&ifp->lock);
4257 } else {
4258 /* ifp->prefered_lft <= ifp->valid_lft */
4259 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4260 next = ifp->tstamp + ifp->prefered_lft * HZ;
4261 spin_unlock(&ifp->lock);
4262 }
4263 }
4264 }
4265
4266 next_sec = round_jiffies_up(next);
4267 next_sched = next;
4268
4269 /* If rounded timeout is accurate enough, accept it. */
4270 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4271 next_sched = next_sec;
4272
4273 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4274 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4275 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4276
4277 ADBG(KERN_DEBUG "now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4278 now, next, next_sec, next_sched);
4279 mod_delayed_work(addrconf_wq, &addr_chk_work, next_sched - now);
4280 rcu_read_unlock_bh();
4281 }
4282
4283 static void addrconf_verify_work(struct work_struct *w)
4284 {
4285 rtnl_lock();
4286 addrconf_verify_rtnl();
4287 rtnl_unlock();
4288 }
4289
4290 static void addrconf_verify(void)
4291 {
4292 mod_delayed_work(addrconf_wq, &addr_chk_work, 0);
4293 }
4294
4295 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4296 struct in6_addr **peer_pfx)
4297 {
4298 struct in6_addr *pfx = NULL;
4299
4300 *peer_pfx = NULL;
4301
4302 if (addr)
4303 pfx = nla_data(addr);
4304
4305 if (local) {
4306 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4307 *peer_pfx = pfx;
4308 pfx = nla_data(local);
4309 }
4310
4311 return pfx;
4312 }
4313
4314 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4315 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4316 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4317 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4318 [IFA_FLAGS] = { .len = sizeof(u32) },
4319 };
4320
4321 static int
4322 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh)
4323 {
4324 struct net *net = sock_net(skb->sk);
4325 struct ifaddrmsg *ifm;
4326 struct nlattr *tb[IFA_MAX+1];
4327 struct in6_addr *pfx, *peer_pfx;
4328 u32 ifa_flags;
4329 int err;
4330
4331 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
4332 if (err < 0)
4333 return err;
4334
4335 ifm = nlmsg_data(nlh);
4336 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4337 if (!pfx)
4338 return -EINVAL;
4339
4340 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4341
4342 /* We ignore other flags so far. */
4343 ifa_flags &= IFA_F_MANAGETEMPADDR;
4344
4345 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4346 ifm->ifa_prefixlen);
4347 }
4348
4349 static int inet6_addr_modify(struct inet6_ifaddr *ifp, u32 ifa_flags,
4350 u32 prefered_lft, u32 valid_lft)
4351 {
4352 u32 flags;
4353 clock_t expires;
4354 unsigned long timeout;
4355 bool was_managetempaddr;
4356 bool had_prefixroute;
4357
4358 ASSERT_RTNL();
4359
4360 if (!valid_lft || (prefered_lft > valid_lft))
4361 return -EINVAL;
4362
4363 if (ifa_flags & IFA_F_MANAGETEMPADDR &&
4364 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4365 return -EINVAL;
4366
4367 timeout = addrconf_timeout_fixup(valid_lft, HZ);
4368 if (addrconf_finite_timeout(timeout)) {
4369 expires = jiffies_to_clock_t(timeout * HZ);
4370 valid_lft = timeout;
4371 flags = RTF_EXPIRES;
4372 } else {
4373 expires = 0;
4374 flags = 0;
4375 ifa_flags |= IFA_F_PERMANENT;
4376 }
4377
4378 timeout = addrconf_timeout_fixup(prefered_lft, HZ);
4379 if (addrconf_finite_timeout(timeout)) {
4380 if (timeout == 0)
4381 ifa_flags |= IFA_F_DEPRECATED;
4382 prefered_lft = timeout;
4383 }
4384
4385 spin_lock_bh(&ifp->lock);
4386 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4387 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4388 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4389 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4390 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4391 IFA_F_NOPREFIXROUTE);
4392 ifp->flags |= ifa_flags;
4393 ifp->tstamp = jiffies;
4394 ifp->valid_lft = valid_lft;
4395 ifp->prefered_lft = prefered_lft;
4396
4397 spin_unlock_bh(&ifp->lock);
4398 if (!(ifp->flags&IFA_F_TENTATIVE))
4399 ipv6_ifa_notify(0, ifp);
4400
4401 if (!(ifa_flags & IFA_F_NOPREFIXROUTE)) {
4402 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, ifp->idev->dev,
4403 expires, flags);
4404 } else if (had_prefixroute) {
4405 enum cleanup_prefix_rt_t action;
4406 unsigned long rt_expires;
4407
4408 write_lock_bh(&ifp->idev->lock);
4409 action = check_cleanup_prefix_route(ifp, &rt_expires);
4410 write_unlock_bh(&ifp->idev->lock);
4411
4412 if (action != CLEANUP_PREFIX_RT_NOP) {
4413 cleanup_prefix_route(ifp, rt_expires,
4414 action == CLEANUP_PREFIX_RT_DEL);
4415 }
4416 }
4417
4418 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4419 if (was_managetempaddr && !(ifp->flags & IFA_F_MANAGETEMPADDR))
4420 valid_lft = prefered_lft = 0;
4421 manage_tempaddrs(ifp->idev, ifp, valid_lft, prefered_lft,
4422 !was_managetempaddr, jiffies);
4423 }
4424
4425 addrconf_verify_rtnl();
4426
4427 return 0;
4428 }
4429
4430 static int
4431 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
4432 {
4433 struct net *net = sock_net(skb->sk);
4434 struct ifaddrmsg *ifm;
4435 struct nlattr *tb[IFA_MAX+1];
4436 struct in6_addr *pfx, *peer_pfx;
4437 struct inet6_ifaddr *ifa;
4438 struct net_device *dev;
4439 u32 valid_lft = INFINITY_LIFE_TIME, preferred_lft = INFINITY_LIFE_TIME;
4440 u32 ifa_flags;
4441 int err;
4442
4443 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
4444 if (err < 0)
4445 return err;
4446
4447 ifm = nlmsg_data(nlh);
4448 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4449 if (!pfx)
4450 return -EINVAL;
4451
4452 if (tb[IFA_CACHEINFO]) {
4453 struct ifa_cacheinfo *ci;
4454
4455 ci = nla_data(tb[IFA_CACHEINFO]);
4456 valid_lft = ci->ifa_valid;
4457 preferred_lft = ci->ifa_prefered;
4458 } else {
4459 preferred_lft = INFINITY_LIFE_TIME;
4460 valid_lft = INFINITY_LIFE_TIME;
4461 }
4462
4463 dev = __dev_get_by_index(net, ifm->ifa_index);
4464 if (!dev)
4465 return -ENODEV;
4466
4467 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4468
4469 /* We ignore other flags so far. */
4470 ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4471 IFA_F_NOPREFIXROUTE | IFA_F_MCAUTOJOIN;
4472
4473 ifa = ipv6_get_ifaddr(net, pfx, dev, 1);
4474 if (!ifa) {
4475 /*
4476 * It would be best to check for !NLM_F_CREATE here but
4477 * userspace already relies on not having to provide this.
4478 */
4479 return inet6_addr_add(net, ifm->ifa_index, pfx, peer_pfx,
4480 ifm->ifa_prefixlen, ifa_flags,
4481 preferred_lft, valid_lft);
4482 }
4483
4484 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4485 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4486 err = -EEXIST;
4487 else
4488 err = inet6_addr_modify(ifa, ifa_flags, preferred_lft, valid_lft);
4489
4490 in6_ifa_put(ifa);
4491
4492 return err;
4493 }
4494
4495 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4496 u8 scope, int ifindex)
4497 {
4498 struct ifaddrmsg *ifm;
4499
4500 ifm = nlmsg_data(nlh);
4501 ifm->ifa_family = AF_INET6;
4502 ifm->ifa_prefixlen = prefixlen;
4503 ifm->ifa_flags = flags;
4504 ifm->ifa_scope = scope;
4505 ifm->ifa_index = ifindex;
4506 }
4507
4508 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4509 unsigned long tstamp, u32 preferred, u32 valid)
4510 {
4511 struct ifa_cacheinfo ci;
4512
4513 ci.cstamp = cstamp_delta(cstamp);
4514 ci.tstamp = cstamp_delta(tstamp);
4515 ci.ifa_prefered = preferred;
4516 ci.ifa_valid = valid;
4517
4518 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4519 }
4520
4521 static inline int rt_scope(int ifa_scope)
4522 {
4523 if (ifa_scope & IFA_HOST)
4524 return RT_SCOPE_HOST;
4525 else if (ifa_scope & IFA_LINK)
4526 return RT_SCOPE_LINK;
4527 else if (ifa_scope & IFA_SITE)
4528 return RT_SCOPE_SITE;
4529 else
4530 return RT_SCOPE_UNIVERSE;
4531 }
4532
4533 static inline int inet6_ifaddr_msgsize(void)
4534 {
4535 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4536 + nla_total_size(16) /* IFA_LOCAL */
4537 + nla_total_size(16) /* IFA_ADDRESS */
4538 + nla_total_size(sizeof(struct ifa_cacheinfo))
4539 + nla_total_size(4) /* IFA_FLAGS */;
4540 }
4541
4542 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
4543 u32 portid, u32 seq, int event, unsigned int flags)
4544 {
4545 struct nlmsghdr *nlh;
4546 u32 preferred, valid;
4547
4548 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4549 if (!nlh)
4550 return -EMSGSIZE;
4551
4552 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
4553 ifa->idev->dev->ifindex);
4554
4555 if (!((ifa->flags&IFA_F_PERMANENT) &&
4556 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
4557 preferred = ifa->prefered_lft;
4558 valid = ifa->valid_lft;
4559 if (preferred != INFINITY_LIFE_TIME) {
4560 long tval = (jiffies - ifa->tstamp)/HZ;
4561 if (preferred > tval)
4562 preferred -= tval;
4563 else
4564 preferred = 0;
4565 if (valid != INFINITY_LIFE_TIME) {
4566 if (valid > tval)
4567 valid -= tval;
4568 else
4569 valid = 0;
4570 }
4571 }
4572 } else {
4573 preferred = INFINITY_LIFE_TIME;
4574 valid = INFINITY_LIFE_TIME;
4575 }
4576
4577 if (!ipv6_addr_any(&ifa->peer_addr)) {
4578 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
4579 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
4580 goto error;
4581 } else
4582 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
4583 goto error;
4584
4585 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
4586 goto error;
4587
4588 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
4589 goto error;
4590
4591 nlmsg_end(skb, nlh);
4592 return 0;
4593
4594 error:
4595 nlmsg_cancel(skb, nlh);
4596 return -EMSGSIZE;
4597 }
4598
4599 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
4600 u32 portid, u32 seq, int event, u16 flags)
4601 {
4602 struct nlmsghdr *nlh;
4603 u8 scope = RT_SCOPE_UNIVERSE;
4604 int ifindex = ifmca->idev->dev->ifindex;
4605
4606 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
4607 scope = RT_SCOPE_SITE;
4608
4609 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4610 if (!nlh)
4611 return -EMSGSIZE;
4612
4613 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4614 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
4615 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
4616 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4617 nlmsg_cancel(skb, nlh);
4618 return -EMSGSIZE;
4619 }
4620
4621 nlmsg_end(skb, nlh);
4622 return 0;
4623 }
4624
4625 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
4626 u32 portid, u32 seq, int event, unsigned int flags)
4627 {
4628 struct nlmsghdr *nlh;
4629 u8 scope = RT_SCOPE_UNIVERSE;
4630 int ifindex = ifaca->aca_idev->dev->ifindex;
4631
4632 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
4633 scope = RT_SCOPE_SITE;
4634
4635 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct ifaddrmsg), flags);
4636 if (!nlh)
4637 return -EMSGSIZE;
4638
4639 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
4640 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
4641 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
4642 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
4643 nlmsg_cancel(skb, nlh);
4644 return -EMSGSIZE;
4645 }
4646
4647 nlmsg_end(skb, nlh);
4648 return 0;
4649 }
4650
4651 enum addr_type_t {
4652 UNICAST_ADDR,
4653 MULTICAST_ADDR,
4654 ANYCAST_ADDR,
4655 };
4656
4657 /* called with rcu_read_lock() */
4658 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
4659 struct netlink_callback *cb, enum addr_type_t type,
4660 int s_ip_idx, int *p_ip_idx)
4661 {
4662 struct ifmcaddr6 *ifmca;
4663 struct ifacaddr6 *ifaca;
4664 int err = 1;
4665 int ip_idx = *p_ip_idx;
4666
4667 read_lock_bh(&idev->lock);
4668 switch (type) {
4669 case UNICAST_ADDR: {
4670 struct inet6_ifaddr *ifa;
4671
4672 /* unicast address incl. temp addr */
4673 list_for_each_entry(ifa, &idev->addr_list, if_list) {
4674 if (++ip_idx < s_ip_idx)
4675 continue;
4676 err = inet6_fill_ifaddr(skb, ifa,
4677 NETLINK_CB(cb->skb).portid,
4678 cb->nlh->nlmsg_seq,
4679 RTM_NEWADDR,
4680 NLM_F_MULTI);
4681 if (err < 0)
4682 break;
4683 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
4684 }
4685 break;
4686 }
4687 case MULTICAST_ADDR:
4688 /* multicast address */
4689 for (ifmca = idev->mc_list; ifmca;
4690 ifmca = ifmca->next, ip_idx++) {
4691 if (ip_idx < s_ip_idx)
4692 continue;
4693 err = inet6_fill_ifmcaddr(skb, ifmca,
4694 NETLINK_CB(cb->skb).portid,
4695 cb->nlh->nlmsg_seq,
4696 RTM_GETMULTICAST,
4697 NLM_F_MULTI);
4698 if (err < 0)
4699 break;
4700 }
4701 break;
4702 case ANYCAST_ADDR:
4703 /* anycast address */
4704 for (ifaca = idev->ac_list; ifaca;
4705 ifaca = ifaca->aca_next, ip_idx++) {
4706 if (ip_idx < s_ip_idx)
4707 continue;
4708 err = inet6_fill_ifacaddr(skb, ifaca,
4709 NETLINK_CB(cb->skb).portid,
4710 cb->nlh->nlmsg_seq,
4711 RTM_GETANYCAST,
4712 NLM_F_MULTI);
4713 if (err < 0)
4714 break;
4715 }
4716 break;
4717 default:
4718 break;
4719 }
4720 read_unlock_bh(&idev->lock);
4721 *p_ip_idx = ip_idx;
4722 return err;
4723 }
4724
4725 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
4726 enum addr_type_t type)
4727 {
4728 struct net *net = sock_net(skb->sk);
4729 int h, s_h;
4730 int idx, ip_idx;
4731 int s_idx, s_ip_idx;
4732 struct net_device *dev;
4733 struct inet6_dev *idev;
4734 struct hlist_head *head;
4735
4736 s_h = cb->args[0];
4737 s_idx = idx = cb->args[1];
4738 s_ip_idx = ip_idx = cb->args[2];
4739
4740 rcu_read_lock();
4741 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
4742 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
4743 idx = 0;
4744 head = &net->dev_index_head[h];
4745 hlist_for_each_entry_rcu(dev, head, index_hlist) {
4746 if (idx < s_idx)
4747 goto cont;
4748 if (h > s_h || idx > s_idx)
4749 s_ip_idx = 0;
4750 ip_idx = 0;
4751 idev = __in6_dev_get(dev);
4752 if (!idev)
4753 goto cont;
4754
4755 if (in6_dump_addrs(idev, skb, cb, type,
4756 s_ip_idx, &ip_idx) < 0)
4757 goto done;
4758 cont:
4759 idx++;
4760 }
4761 }
4762 done:
4763 rcu_read_unlock();
4764 cb->args[0] = h;
4765 cb->args[1] = idx;
4766 cb->args[2] = ip_idx;
4767
4768 return skb->len;
4769 }
4770
4771 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
4772 {
4773 enum addr_type_t type = UNICAST_ADDR;
4774
4775 return inet6_dump_addr(skb, cb, type);
4776 }
4777
4778 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
4779 {
4780 enum addr_type_t type = MULTICAST_ADDR;
4781
4782 return inet6_dump_addr(skb, cb, type);
4783 }
4784
4785
4786 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
4787 {
4788 enum addr_type_t type = ANYCAST_ADDR;
4789
4790 return inet6_dump_addr(skb, cb, type);
4791 }
4792
4793 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh)
4794 {
4795 struct net *net = sock_net(in_skb->sk);
4796 struct ifaddrmsg *ifm;
4797 struct nlattr *tb[IFA_MAX+1];
4798 struct in6_addr *addr = NULL, *peer;
4799 struct net_device *dev = NULL;
4800 struct inet6_ifaddr *ifa;
4801 struct sk_buff *skb;
4802 int err;
4803
4804 err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, ifa_ipv6_policy);
4805 if (err < 0)
4806 goto errout;
4807
4808 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
4809 if (!addr) {
4810 err = -EINVAL;
4811 goto errout;
4812 }
4813
4814 ifm = nlmsg_data(nlh);
4815 if (ifm->ifa_index)
4816 dev = __dev_get_by_index(net, ifm->ifa_index);
4817
4818 ifa = ipv6_get_ifaddr(net, addr, dev, 1);
4819 if (!ifa) {
4820 err = -EADDRNOTAVAIL;
4821 goto errout;
4822 }
4823
4824 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
4825 if (!skb) {
4826 err = -ENOBUFS;
4827 goto errout_ifa;
4828 }
4829
4830 err = inet6_fill_ifaddr(skb, ifa, NETLINK_CB(in_skb).portid,
4831 nlh->nlmsg_seq, RTM_NEWADDR, 0);
4832 if (err < 0) {
4833 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4834 WARN_ON(err == -EMSGSIZE);
4835 kfree_skb(skb);
4836 goto errout_ifa;
4837 }
4838 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4839 errout_ifa:
4840 in6_ifa_put(ifa);
4841 errout:
4842 return err;
4843 }
4844
4845 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
4846 {
4847 struct sk_buff *skb;
4848 struct net *net = dev_net(ifa->idev->dev);
4849 int err = -ENOBUFS;
4850
4851 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
4852 if (!skb)
4853 goto errout;
4854
4855 err = inet6_fill_ifaddr(skb, ifa, 0, 0, event, 0);
4856 if (err < 0) {
4857 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
4858 WARN_ON(err == -EMSGSIZE);
4859 kfree_skb(skb);
4860 goto errout;
4861 }
4862 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
4863 return;
4864 errout:
4865 if (err < 0)
4866 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
4867 }
4868
4869 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
4870 __s32 *array, int bytes)
4871 {
4872 BUG_ON(bytes < (DEVCONF_MAX * 4));
4873
4874 memset(array, 0, bytes);
4875 array[DEVCONF_FORWARDING] = cnf->forwarding;
4876 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
4877 array[DEVCONF_MTU6] = cnf->mtu6;
4878 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
4879 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
4880 array[DEVCONF_AUTOCONF] = cnf->autoconf;
4881 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
4882 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
4883 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
4884 jiffies_to_msecs(cnf->rtr_solicit_interval);
4885 array[DEVCONF_RTR_SOLICIT_DELAY] =
4886 jiffies_to_msecs(cnf->rtr_solicit_delay);
4887 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
4888 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
4889 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
4890 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
4891 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
4892 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
4893 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
4894 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
4895 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
4896 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
4897 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
4898 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
4899 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
4900 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
4901 #ifdef CONFIG_IPV6_ROUTER_PREF
4902 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
4903 array[DEVCONF_RTR_PROBE_INTERVAL] =
4904 jiffies_to_msecs(cnf->rtr_probe_interval);
4905 #ifdef CONFIG_IPV6_ROUTE_INFO
4906 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
4907 #endif
4908 #endif
4909 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
4910 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
4911 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
4912 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
4913 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
4914 #endif
4915 #ifdef CONFIG_IPV6_MROUTE
4916 array[DEVCONF_MC_FORWARDING] = cnf->mc_forwarding;
4917 #endif
4918 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
4919 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
4920 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
4921 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
4922 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
4923 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
4924 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
4925 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
4926 /* we omit DEVCONF_STABLE_SECRET for now */
4927 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
4928 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
4929 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
4930 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
4931 }
4932
4933 static inline size_t inet6_ifla6_size(void)
4934 {
4935 return nla_total_size(4) /* IFLA_INET6_FLAGS */
4936 + nla_total_size(sizeof(struct ifla_cacheinfo))
4937 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
4938 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
4939 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
4940 + nla_total_size(sizeof(struct in6_addr)); /* IFLA_INET6_TOKEN */
4941 }
4942
4943 static inline size_t inet6_if_nlmsg_size(void)
4944 {
4945 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
4946 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
4947 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
4948 + nla_total_size(4) /* IFLA_MTU */
4949 + nla_total_size(4) /* IFLA_LINK */
4950 + nla_total_size(1) /* IFLA_OPERSTATE */
4951 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
4952 }
4953
4954 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
4955 int items, int bytes)
4956 {
4957 int i;
4958 int pad = bytes - sizeof(u64) * items;
4959 BUG_ON(pad < 0);
4960
4961 /* Use put_unaligned() because stats may not be aligned for u64. */
4962 put_unaligned(items, &stats[0]);
4963 for (i = 1; i < items; i++)
4964 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
4965
4966 memset(&stats[items], 0, pad);
4967 }
4968
4969 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
4970 int bytes, size_t syncpoff)
4971 {
4972 int i, c;
4973 u64 buff[IPSTATS_MIB_MAX];
4974 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
4975
4976 BUG_ON(pad < 0);
4977
4978 memset(buff, 0, sizeof(buff));
4979 buff[0] = IPSTATS_MIB_MAX;
4980
4981 for_each_possible_cpu(c) {
4982 for (i = 1; i < IPSTATS_MIB_MAX; i++)
4983 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
4984 }
4985
4986 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
4987 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
4988 }
4989
4990 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
4991 int bytes)
4992 {
4993 switch (attrtype) {
4994 case IFLA_INET6_STATS:
4995 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
4996 offsetof(struct ipstats_mib, syncp));
4997 break;
4998 case IFLA_INET6_ICMP6STATS:
4999 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, ICMP6_MIB_MAX, bytes);
5000 break;
5001 }
5002 }
5003
5004 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5005 u32 ext_filter_mask)
5006 {
5007 struct nlattr *nla;
5008 struct ifla_cacheinfo ci;
5009
5010 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5011 goto nla_put_failure;
5012 ci.max_reasm_len = IPV6_MAXPLEN;
5013 ci.tstamp = cstamp_delta(idev->tstamp);
5014 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5015 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5016 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5017 goto nla_put_failure;
5018 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5019 if (!nla)
5020 goto nla_put_failure;
5021 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5022
5023 /* XXX - MC not implemented */
5024
5025 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5026 return 0;
5027
5028 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5029 if (!nla)
5030 goto nla_put_failure;
5031 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5032
5033 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5034 if (!nla)
5035 goto nla_put_failure;
5036 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5037
5038 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5039 if (!nla)
5040 goto nla_put_failure;
5041
5042 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode))
5043 goto nla_put_failure;
5044
5045 read_lock_bh(&idev->lock);
5046 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5047 read_unlock_bh(&idev->lock);
5048
5049 return 0;
5050
5051 nla_put_failure:
5052 return -EMSGSIZE;
5053 }
5054
5055 static size_t inet6_get_link_af_size(const struct net_device *dev,
5056 u32 ext_filter_mask)
5057 {
5058 if (!__in6_dev_get(dev))
5059 return 0;
5060
5061 return inet6_ifla6_size();
5062 }
5063
5064 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5065 u32 ext_filter_mask)
5066 {
5067 struct inet6_dev *idev = __in6_dev_get(dev);
5068
5069 if (!idev)
5070 return -ENODATA;
5071
5072 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
5073 return -EMSGSIZE;
5074
5075 return 0;
5076 }
5077
5078 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
5079 {
5080 struct inet6_ifaddr *ifp;
5081 struct net_device *dev = idev->dev;
5082 bool clear_token, update_rs = false;
5083 struct in6_addr ll_addr;
5084
5085 ASSERT_RTNL();
5086
5087 if (!token)
5088 return -EINVAL;
5089 if (dev->flags & (IFF_LOOPBACK | IFF_NOARP))
5090 return -EINVAL;
5091 if (!ipv6_accept_ra(idev))
5092 return -EINVAL;
5093 if (idev->cnf.rtr_solicits <= 0)
5094 return -EINVAL;
5095
5096 write_lock_bh(&idev->lock);
5097
5098 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5099 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5100
5101 write_unlock_bh(&idev->lock);
5102
5103 clear_token = ipv6_addr_any(token);
5104 if (clear_token)
5105 goto update_lft;
5106
5107 if (!idev->dead && (idev->if_flags & IF_READY) &&
5108 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5109 IFA_F_OPTIMISTIC)) {
5110 /* If we're not ready, then normal ifup will take care
5111 * of this. Otherwise, we need to request our rs here.
5112 */
5113 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5114 update_rs = true;
5115 }
5116
5117 update_lft:
5118 write_lock_bh(&idev->lock);
5119
5120 if (update_rs) {
5121 idev->if_flags |= IF_RS_SENT;
5122 idev->rs_probes = 1;
5123 addrconf_mod_rs_timer(idev, idev->cnf.rtr_solicit_interval);
5124 }
5125
5126 /* Well, that's kinda nasty ... */
5127 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5128 spin_lock(&ifp->lock);
5129 if (ifp->tokenized) {
5130 ifp->valid_lft = 0;
5131 ifp->prefered_lft = 0;
5132 }
5133 spin_unlock(&ifp->lock);
5134 }
5135
5136 write_unlock_bh(&idev->lock);
5137 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5138 addrconf_verify_rtnl();
5139 return 0;
5140 }
5141
5142 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5143 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5144 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5145 };
5146
5147 static int inet6_validate_link_af(const struct net_device *dev,
5148 const struct nlattr *nla)
5149 {
5150 struct nlattr *tb[IFLA_INET6_MAX + 1];
5151
5152 if (dev && !__in6_dev_get(dev))
5153 return -EAFNOSUPPORT;
5154
5155 return nla_parse_nested(tb, IFLA_INET6_MAX, nla, inet6_af_policy);
5156 }
5157
5158 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
5159 {
5160 int err = -EINVAL;
5161 struct inet6_dev *idev = __in6_dev_get(dev);
5162 struct nlattr *tb[IFLA_INET6_MAX + 1];
5163
5164 if (!idev)
5165 return -EAFNOSUPPORT;
5166
5167 if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL) < 0)
5168 BUG();
5169
5170 if (tb[IFLA_INET6_TOKEN]) {
5171 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
5172 if (err)
5173 return err;
5174 }
5175
5176 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5177 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5178
5179 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5180 mode != IN6_ADDR_GEN_MODE_NONE &&
5181 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5182 mode != IN6_ADDR_GEN_MODE_RANDOM)
5183 return -EINVAL;
5184
5185 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5186 !idev->cnf.stable_secret.initialized &&
5187 !dev_net(dev)->ipv6.devconf_dflt->stable_secret.initialized)
5188 return -EINVAL;
5189
5190 idev->addr_gen_mode = mode;
5191 err = 0;
5192 }
5193
5194 return err;
5195 }
5196
5197 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5198 u32 portid, u32 seq, int event, unsigned int flags)
5199 {
5200 struct net_device *dev = idev->dev;
5201 struct ifinfomsg *hdr;
5202 struct nlmsghdr *nlh;
5203 void *protoinfo;
5204
5205 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5206 if (!nlh)
5207 return -EMSGSIZE;
5208
5209 hdr = nlmsg_data(nlh);
5210 hdr->ifi_family = AF_INET6;
5211 hdr->__ifi_pad = 0;
5212 hdr->ifi_type = dev->type;
5213 hdr->ifi_index = dev->ifindex;
5214 hdr->ifi_flags = dev_get_flags(dev);
5215 hdr->ifi_change = 0;
5216
5217 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5218 (dev->addr_len &&
5219 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5220 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5221 (dev->ifindex != dev_get_iflink(dev) &&
5222 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5223 nla_put_u8(skb, IFLA_OPERSTATE,
5224 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5225 goto nla_put_failure;
5226 protoinfo = nla_nest_start(skb, IFLA_PROTINFO);
5227 if (!protoinfo)
5228 goto nla_put_failure;
5229
5230 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5231 goto nla_put_failure;
5232
5233 nla_nest_end(skb, protoinfo);
5234 nlmsg_end(skb, nlh);
5235 return 0;
5236
5237 nla_put_failure:
5238 nlmsg_cancel(skb, nlh);
5239 return -EMSGSIZE;
5240 }
5241
5242 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5243 {
5244 struct net *net = sock_net(skb->sk);
5245 int h, s_h;
5246 int idx = 0, s_idx;
5247 struct net_device *dev;
5248 struct inet6_dev *idev;
5249 struct hlist_head *head;
5250
5251 s_h = cb->args[0];
5252 s_idx = cb->args[1];
5253
5254 rcu_read_lock();
5255 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5256 idx = 0;
5257 head = &net->dev_index_head[h];
5258 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5259 if (idx < s_idx)
5260 goto cont;
5261 idev = __in6_dev_get(dev);
5262 if (!idev)
5263 goto cont;
5264 if (inet6_fill_ifinfo(skb, idev,
5265 NETLINK_CB(cb->skb).portid,
5266 cb->nlh->nlmsg_seq,
5267 RTM_NEWLINK, NLM_F_MULTI) < 0)
5268 goto out;
5269 cont:
5270 idx++;
5271 }
5272 }
5273 out:
5274 rcu_read_unlock();
5275 cb->args[1] = idx;
5276 cb->args[0] = h;
5277
5278 return skb->len;
5279 }
5280
5281 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
5282 {
5283 struct sk_buff *skb;
5284 struct net *net = dev_net(idev->dev);
5285 int err = -ENOBUFS;
5286
5287 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
5288 if (!skb)
5289 goto errout;
5290
5291 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
5292 if (err < 0) {
5293 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
5294 WARN_ON(err == -EMSGSIZE);
5295 kfree_skb(skb);
5296 goto errout;
5297 }
5298 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
5299 return;
5300 errout:
5301 if (err < 0)
5302 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
5303 }
5304
5305 static inline size_t inet6_prefix_nlmsg_size(void)
5306 {
5307 return NLMSG_ALIGN(sizeof(struct prefixmsg))
5308 + nla_total_size(sizeof(struct in6_addr))
5309 + nla_total_size(sizeof(struct prefix_cacheinfo));
5310 }
5311
5312 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
5313 struct prefix_info *pinfo, u32 portid, u32 seq,
5314 int event, unsigned int flags)
5315 {
5316 struct prefixmsg *pmsg;
5317 struct nlmsghdr *nlh;
5318 struct prefix_cacheinfo ci;
5319
5320 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
5321 if (!nlh)
5322 return -EMSGSIZE;
5323
5324 pmsg = nlmsg_data(nlh);
5325 pmsg->prefix_family = AF_INET6;
5326 pmsg->prefix_pad1 = 0;
5327 pmsg->prefix_pad2 = 0;
5328 pmsg->prefix_ifindex = idev->dev->ifindex;
5329 pmsg->prefix_len = pinfo->prefix_len;
5330 pmsg->prefix_type = pinfo->type;
5331 pmsg->prefix_pad3 = 0;
5332 pmsg->prefix_flags = 0;
5333 if (pinfo->onlink)
5334 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
5335 if (pinfo->autoconf)
5336 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
5337
5338 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
5339 goto nla_put_failure;
5340 ci.preferred_time = ntohl(pinfo->prefered);
5341 ci.valid_time = ntohl(pinfo->valid);
5342 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
5343 goto nla_put_failure;
5344 nlmsg_end(skb, nlh);
5345 return 0;
5346
5347 nla_put_failure:
5348 nlmsg_cancel(skb, nlh);
5349 return -EMSGSIZE;
5350 }
5351
5352 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
5353 struct prefix_info *pinfo)
5354 {
5355 struct sk_buff *skb;
5356 struct net *net = dev_net(idev->dev);
5357 int err = -ENOBUFS;
5358
5359 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
5360 if (!skb)
5361 goto errout;
5362
5363 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
5364 if (err < 0) {
5365 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
5366 WARN_ON(err == -EMSGSIZE);
5367 kfree_skb(skb);
5368 goto errout;
5369 }
5370 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
5371 return;
5372 errout:
5373 if (err < 0)
5374 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
5375 }
5376
5377 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5378 {
5379 struct net *net = dev_net(ifp->idev->dev);
5380
5381 if (event)
5382 ASSERT_RTNL();
5383
5384 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
5385
5386 switch (event) {
5387 case RTM_NEWADDR:
5388 /*
5389 * If the address was optimistic
5390 * we inserted the route at the start of
5391 * our DAD process, so we don't need
5392 * to do it again
5393 */
5394 if (!(ifp->rt->rt6i_node))
5395 ip6_ins_rt(ifp->rt);
5396 if (ifp->idev->cnf.forwarding)
5397 addrconf_join_anycast(ifp);
5398 if (!ipv6_addr_any(&ifp->peer_addr))
5399 addrconf_prefix_route(&ifp->peer_addr, 128,
5400 ifp->idev->dev, 0, 0);
5401 break;
5402 case RTM_DELADDR:
5403 if (ifp->idev->cnf.forwarding)
5404 addrconf_leave_anycast(ifp);
5405 addrconf_leave_solict(ifp->idev, &ifp->addr);
5406 if (!ipv6_addr_any(&ifp->peer_addr)) {
5407 struct rt6_info *rt;
5408
5409 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
5410 ifp->idev->dev, 0, 0);
5411 if (rt)
5412 ip6_del_rt(rt);
5413 }
5414 if (ifp->rt) {
5415 dst_hold(&ifp->rt->dst);
5416 ip6_del_rt(ifp->rt);
5417 }
5418 rt_genid_bump_ipv6(net);
5419 break;
5420 }
5421 atomic_inc(&net->ipv6.dev_addr_genid);
5422 }
5423
5424 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
5425 {
5426 rcu_read_lock_bh();
5427 if (likely(ifp->idev->dead == 0))
5428 __ipv6_ifa_notify(event, ifp);
5429 rcu_read_unlock_bh();
5430 }
5431
5432 #ifdef CONFIG_SYSCTL
5433
5434 static
5435 int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
5436 void __user *buffer, size_t *lenp, loff_t *ppos)
5437 {
5438 int *valp = ctl->data;
5439 int val = *valp;
5440 loff_t pos = *ppos;
5441 struct ctl_table lctl;
5442 int ret;
5443
5444 /*
5445 * ctl->data points to idev->cnf.forwarding, we should
5446 * not modify it until we get the rtnl lock.
5447 */
5448 lctl = *ctl;
5449 lctl.data = &val;
5450
5451 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5452
5453 if (write)
5454 ret = addrconf_fixup_forwarding(ctl, valp, val);
5455 if (ret)
5456 *ppos = pos;
5457 return ret;
5458 }
5459
5460 static
5461 int addrconf_sysctl_hop_limit(struct ctl_table *ctl, int write,
5462 void __user *buffer, size_t *lenp, loff_t *ppos)
5463 {
5464 struct ctl_table lctl;
5465 int min_hl = 1, max_hl = 255;
5466
5467 lctl = *ctl;
5468 lctl.extra1 = &min_hl;
5469 lctl.extra2 = &max_hl;
5470
5471 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
5472 }
5473
5474 static
5475 int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
5476 void __user *buffer, size_t *lenp, loff_t *ppos)
5477 {
5478 struct inet6_dev *idev = ctl->extra1;
5479 int min_mtu = IPV6_MIN_MTU;
5480 struct ctl_table lctl;
5481
5482 lctl = *ctl;
5483 lctl.extra1 = &min_mtu;
5484 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
5485
5486 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
5487 }
5488
5489 static void dev_disable_change(struct inet6_dev *idev)
5490 {
5491 struct netdev_notifier_info info;
5492
5493 if (!idev || !idev->dev)
5494 return;
5495
5496 netdev_notifier_info_init(&info, idev->dev);
5497 if (idev->cnf.disable_ipv6)
5498 addrconf_notify(NULL, NETDEV_DOWN, &info);
5499 else
5500 addrconf_notify(NULL, NETDEV_UP, &info);
5501 }
5502
5503 static void addrconf_disable_change(struct net *net, __s32 newf)
5504 {
5505 struct net_device *dev;
5506 struct inet6_dev *idev;
5507
5508 rcu_read_lock();
5509 for_each_netdev_rcu(net, dev) {
5510 idev = __in6_dev_get(dev);
5511 if (idev) {
5512 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
5513 idev->cnf.disable_ipv6 = newf;
5514 if (changed)
5515 dev_disable_change(idev);
5516 }
5517 }
5518 rcu_read_unlock();
5519 }
5520
5521 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
5522 {
5523 struct net *net;
5524 int old;
5525
5526 if (!rtnl_trylock())
5527 return restart_syscall();
5528
5529 net = (struct net *)table->extra2;
5530 old = *p;
5531 *p = newf;
5532
5533 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
5534 rtnl_unlock();
5535 return 0;
5536 }
5537
5538 if (p == &net->ipv6.devconf_all->disable_ipv6) {
5539 net->ipv6.devconf_dflt->disable_ipv6 = newf;
5540 addrconf_disable_change(net, newf);
5541 } else if ((!newf) ^ (!old))
5542 dev_disable_change((struct inet6_dev *)table->extra1);
5543
5544 rtnl_unlock();
5545 return 0;
5546 }
5547
5548 static
5549 int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
5550 void __user *buffer, size_t *lenp, loff_t *ppos)
5551 {
5552 int *valp = ctl->data;
5553 int val = *valp;
5554 loff_t pos = *ppos;
5555 struct ctl_table lctl;
5556 int ret;
5557
5558 /*
5559 * ctl->data points to idev->cnf.disable_ipv6, we should
5560 * not modify it until we get the rtnl lock.
5561 */
5562 lctl = *ctl;
5563 lctl.data = &val;
5564
5565 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5566
5567 if (write)
5568 ret = addrconf_disable_ipv6(ctl, valp, val);
5569 if (ret)
5570 *ppos = pos;
5571 return ret;
5572 }
5573
5574 static
5575 int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
5576 void __user *buffer, size_t *lenp, loff_t *ppos)
5577 {
5578 int *valp = ctl->data;
5579 int ret;
5580 int old, new;
5581
5582 old = *valp;
5583 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
5584 new = *valp;
5585
5586 if (write && old != new) {
5587 struct net *net = ctl->extra2;
5588
5589 if (!rtnl_trylock())
5590 return restart_syscall();
5591
5592 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
5593 inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
5594 NETCONFA_IFINDEX_DEFAULT,
5595 net->ipv6.devconf_dflt);
5596 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
5597 inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
5598 NETCONFA_IFINDEX_ALL,
5599 net->ipv6.devconf_all);
5600 else {
5601 struct inet6_dev *idev = ctl->extra1;
5602
5603 inet6_netconf_notify_devconf(net, NETCONFA_PROXY_NEIGH,
5604 idev->dev->ifindex,
5605 &idev->cnf);
5606 }
5607 rtnl_unlock();
5608 }
5609
5610 return ret;
5611 }
5612
5613 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
5614 void __user *buffer, size_t *lenp,
5615 loff_t *ppos)
5616 {
5617 int err;
5618 struct in6_addr addr;
5619 char str[IPV6_MAX_STRLEN];
5620 struct ctl_table lctl = *ctl;
5621 struct net *net = ctl->extra2;
5622 struct ipv6_stable_secret *secret = ctl->data;
5623
5624 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
5625 return -EIO;
5626
5627 lctl.maxlen = IPV6_MAX_STRLEN;
5628 lctl.data = str;
5629
5630 if (!rtnl_trylock())
5631 return restart_syscall();
5632
5633 if (!write && !secret->initialized) {
5634 err = -EIO;
5635 goto out;
5636 }
5637
5638 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
5639 if (err >= sizeof(str)) {
5640 err = -EIO;
5641 goto out;
5642 }
5643
5644 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
5645 if (err || !write)
5646 goto out;
5647
5648 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
5649 err = -EIO;
5650 goto out;
5651 }
5652
5653 secret->initialized = true;
5654 secret->secret = addr;
5655
5656 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
5657 struct net_device *dev;
5658
5659 for_each_netdev(net, dev) {
5660 struct inet6_dev *idev = __in6_dev_get(dev);
5661
5662 if (idev) {
5663 idev->addr_gen_mode =
5664 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5665 }
5666 }
5667 } else {
5668 struct inet6_dev *idev = ctl->extra1;
5669
5670 idev->addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
5671 }
5672
5673 out:
5674 rtnl_unlock();
5675
5676 return err;
5677 }
5678
5679 static
5680 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
5681 int write,
5682 void __user *buffer,
5683 size_t *lenp,
5684 loff_t *ppos)
5685 {
5686 int *valp = ctl->data;
5687 int val = *valp;
5688 loff_t pos = *ppos;
5689 struct ctl_table lctl;
5690 int ret;
5691
5692 /* ctl->data points to idev->cnf.ignore_routes_when_linkdown
5693 * we should not modify it until we get the rtnl lock.
5694 */
5695 lctl = *ctl;
5696 lctl.data = &val;
5697
5698 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
5699
5700 if (write)
5701 ret = addrconf_fixup_linkdown(ctl, valp, val);
5702 if (ret)
5703 *ppos = pos;
5704 return ret;
5705 }
5706
5707 static const struct ctl_table addrconf_sysctl[] = {
5708 {
5709 .procname = "forwarding",
5710 .data = &ipv6_devconf.forwarding,
5711 .maxlen = sizeof(int),
5712 .mode = 0644,
5713 .proc_handler = addrconf_sysctl_forward,
5714 },
5715 {
5716 .procname = "hop_limit",
5717 .data = &ipv6_devconf.hop_limit,
5718 .maxlen = sizeof(int),
5719 .mode = 0644,
5720 .proc_handler = addrconf_sysctl_hop_limit,
5721 },
5722 {
5723 .procname = "mtu",
5724 .data = &ipv6_devconf.mtu6,
5725 .maxlen = sizeof(int),
5726 .mode = 0644,
5727 .proc_handler = addrconf_sysctl_mtu,
5728 },
5729 {
5730 .procname = "accept_ra",
5731 .data = &ipv6_devconf.accept_ra,
5732 .maxlen = sizeof(int),
5733 .mode = 0644,
5734 .proc_handler = proc_dointvec,
5735 },
5736 {
5737 .procname = "accept_redirects",
5738 .data = &ipv6_devconf.accept_redirects,
5739 .maxlen = sizeof(int),
5740 .mode = 0644,
5741 .proc_handler = proc_dointvec,
5742 },
5743 {
5744 .procname = "autoconf",
5745 .data = &ipv6_devconf.autoconf,
5746 .maxlen = sizeof(int),
5747 .mode = 0644,
5748 .proc_handler = proc_dointvec,
5749 },
5750 {
5751 .procname = "dad_transmits",
5752 .data = &ipv6_devconf.dad_transmits,
5753 .maxlen = sizeof(int),
5754 .mode = 0644,
5755 .proc_handler = proc_dointvec,
5756 },
5757 {
5758 .procname = "router_solicitations",
5759 .data = &ipv6_devconf.rtr_solicits,
5760 .maxlen = sizeof(int),
5761 .mode = 0644,
5762 .proc_handler = proc_dointvec,
5763 },
5764 {
5765 .procname = "router_solicitation_interval",
5766 .data = &ipv6_devconf.rtr_solicit_interval,
5767 .maxlen = sizeof(int),
5768 .mode = 0644,
5769 .proc_handler = proc_dointvec_jiffies,
5770 },
5771 {
5772 .procname = "router_solicitation_delay",
5773 .data = &ipv6_devconf.rtr_solicit_delay,
5774 .maxlen = sizeof(int),
5775 .mode = 0644,
5776 .proc_handler = proc_dointvec_jiffies,
5777 },
5778 {
5779 .procname = "force_mld_version",
5780 .data = &ipv6_devconf.force_mld_version,
5781 .maxlen = sizeof(int),
5782 .mode = 0644,
5783 .proc_handler = proc_dointvec,
5784 },
5785 {
5786 .procname = "mldv1_unsolicited_report_interval",
5787 .data =
5788 &ipv6_devconf.mldv1_unsolicited_report_interval,
5789 .maxlen = sizeof(int),
5790 .mode = 0644,
5791 .proc_handler = proc_dointvec_ms_jiffies,
5792 },
5793 {
5794 .procname = "mldv2_unsolicited_report_interval",
5795 .data =
5796 &ipv6_devconf.mldv2_unsolicited_report_interval,
5797 .maxlen = sizeof(int),
5798 .mode = 0644,
5799 .proc_handler = proc_dointvec_ms_jiffies,
5800 },
5801 {
5802 .procname = "use_tempaddr",
5803 .data = &ipv6_devconf.use_tempaddr,
5804 .maxlen = sizeof(int),
5805 .mode = 0644,
5806 .proc_handler = proc_dointvec,
5807 },
5808 {
5809 .procname = "temp_valid_lft",
5810 .data = &ipv6_devconf.temp_valid_lft,
5811 .maxlen = sizeof(int),
5812 .mode = 0644,
5813 .proc_handler = proc_dointvec,
5814 },
5815 {
5816 .procname = "temp_prefered_lft",
5817 .data = &ipv6_devconf.temp_prefered_lft,
5818 .maxlen = sizeof(int),
5819 .mode = 0644,
5820 .proc_handler = proc_dointvec,
5821 },
5822 {
5823 .procname = "regen_max_retry",
5824 .data = &ipv6_devconf.regen_max_retry,
5825 .maxlen = sizeof(int),
5826 .mode = 0644,
5827 .proc_handler = proc_dointvec,
5828 },
5829 {
5830 .procname = "max_desync_factor",
5831 .data = &ipv6_devconf.max_desync_factor,
5832 .maxlen = sizeof(int),
5833 .mode = 0644,
5834 .proc_handler = proc_dointvec,
5835 },
5836 {
5837 .procname = "max_addresses",
5838 .data = &ipv6_devconf.max_addresses,
5839 .maxlen = sizeof(int),
5840 .mode = 0644,
5841 .proc_handler = proc_dointvec,
5842 },
5843 {
5844 .procname = "accept_ra_defrtr",
5845 .data = &ipv6_devconf.accept_ra_defrtr,
5846 .maxlen = sizeof(int),
5847 .mode = 0644,
5848 .proc_handler = proc_dointvec,
5849 },
5850 {
5851 .procname = "accept_ra_min_hop_limit",
5852 .data = &ipv6_devconf.accept_ra_min_hop_limit,
5853 .maxlen = sizeof(int),
5854 .mode = 0644,
5855 .proc_handler = proc_dointvec,
5856 },
5857 {
5858 .procname = "accept_ra_pinfo",
5859 .data = &ipv6_devconf.accept_ra_pinfo,
5860 .maxlen = sizeof(int),
5861 .mode = 0644,
5862 .proc_handler = proc_dointvec,
5863 },
5864 #ifdef CONFIG_IPV6_ROUTER_PREF
5865 {
5866 .procname = "accept_ra_rtr_pref",
5867 .data = &ipv6_devconf.accept_ra_rtr_pref,
5868 .maxlen = sizeof(int),
5869 .mode = 0644,
5870 .proc_handler = proc_dointvec,
5871 },
5872 {
5873 .procname = "router_probe_interval",
5874 .data = &ipv6_devconf.rtr_probe_interval,
5875 .maxlen = sizeof(int),
5876 .mode = 0644,
5877 .proc_handler = proc_dointvec_jiffies,
5878 },
5879 #ifdef CONFIG_IPV6_ROUTE_INFO
5880 {
5881 .procname = "accept_ra_rt_info_max_plen",
5882 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
5883 .maxlen = sizeof(int),
5884 .mode = 0644,
5885 .proc_handler = proc_dointvec,
5886 },
5887 #endif
5888 #endif
5889 {
5890 .procname = "proxy_ndp",
5891 .data = &ipv6_devconf.proxy_ndp,
5892 .maxlen = sizeof(int),
5893 .mode = 0644,
5894 .proc_handler = addrconf_sysctl_proxy_ndp,
5895 },
5896 {
5897 .procname = "accept_source_route",
5898 .data = &ipv6_devconf.accept_source_route,
5899 .maxlen = sizeof(int),
5900 .mode = 0644,
5901 .proc_handler = proc_dointvec,
5902 },
5903 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5904 {
5905 .procname = "optimistic_dad",
5906 .data = &ipv6_devconf.optimistic_dad,
5907 .maxlen = sizeof(int),
5908 .mode = 0644,
5909 .proc_handler = proc_dointvec,
5910 },
5911 {
5912 .procname = "use_optimistic",
5913 .data = &ipv6_devconf.use_optimistic,
5914 .maxlen = sizeof(int),
5915 .mode = 0644,
5916 .proc_handler = proc_dointvec,
5917 },
5918 #endif
5919 #ifdef CONFIG_IPV6_MROUTE
5920 {
5921 .procname = "mc_forwarding",
5922 .data = &ipv6_devconf.mc_forwarding,
5923 .maxlen = sizeof(int),
5924 .mode = 0444,
5925 .proc_handler = proc_dointvec,
5926 },
5927 #endif
5928 {
5929 .procname = "disable_ipv6",
5930 .data = &ipv6_devconf.disable_ipv6,
5931 .maxlen = sizeof(int),
5932 .mode = 0644,
5933 .proc_handler = addrconf_sysctl_disable,
5934 },
5935 {
5936 .procname = "accept_dad",
5937 .data = &ipv6_devconf.accept_dad,
5938 .maxlen = sizeof(int),
5939 .mode = 0644,
5940 .proc_handler = proc_dointvec,
5941 },
5942 {
5943 .procname = "force_tllao",
5944 .data = &ipv6_devconf.force_tllao,
5945 .maxlen = sizeof(int),
5946 .mode = 0644,
5947 .proc_handler = proc_dointvec
5948 },
5949 {
5950 .procname = "ndisc_notify",
5951 .data = &ipv6_devconf.ndisc_notify,
5952 .maxlen = sizeof(int),
5953 .mode = 0644,
5954 .proc_handler = proc_dointvec
5955 },
5956 {
5957 .procname = "suppress_frag_ndisc",
5958 .data = &ipv6_devconf.suppress_frag_ndisc,
5959 .maxlen = sizeof(int),
5960 .mode = 0644,
5961 .proc_handler = proc_dointvec
5962 },
5963 {
5964 .procname = "accept_ra_from_local",
5965 .data = &ipv6_devconf.accept_ra_from_local,
5966 .maxlen = sizeof(int),
5967 .mode = 0644,
5968 .proc_handler = proc_dointvec,
5969 },
5970 {
5971 .procname = "accept_ra_mtu",
5972 .data = &ipv6_devconf.accept_ra_mtu,
5973 .maxlen = sizeof(int),
5974 .mode = 0644,
5975 .proc_handler = proc_dointvec,
5976 },
5977 {
5978 .procname = "stable_secret",
5979 .data = &ipv6_devconf.stable_secret,
5980 .maxlen = IPV6_MAX_STRLEN,
5981 .mode = 0600,
5982 .proc_handler = addrconf_sysctl_stable_secret,
5983 },
5984 {
5985 .procname = "use_oif_addrs_only",
5986 .data = &ipv6_devconf.use_oif_addrs_only,
5987 .maxlen = sizeof(int),
5988 .mode = 0644,
5989 .proc_handler = proc_dointvec,
5990 },
5991 {
5992 .procname = "ignore_routes_with_linkdown",
5993 .data = &ipv6_devconf.ignore_routes_with_linkdown,
5994 .maxlen = sizeof(int),
5995 .mode = 0644,
5996 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
5997 },
5998 {
5999 .procname = "drop_unicast_in_l2_multicast",
6000 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6001 .maxlen = sizeof(int),
6002 .mode = 0644,
6003 .proc_handler = proc_dointvec,
6004 },
6005 {
6006 .procname = "drop_unsolicited_na",
6007 .data = &ipv6_devconf.drop_unsolicited_na,
6008 .maxlen = sizeof(int),
6009 .mode = 0644,
6010 .proc_handler = proc_dointvec,
6011 },
6012 {
6013 .procname = "keep_addr_on_down",
6014 .data = &ipv6_devconf.keep_addr_on_down,
6015 .maxlen = sizeof(int),
6016 .mode = 0644,
6017 .proc_handler = proc_dointvec,
6018
6019 },
6020 {
6021 /* sentinel */
6022 }
6023 };
6024
6025 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
6026 struct inet6_dev *idev, struct ipv6_devconf *p)
6027 {
6028 int i;
6029 struct ctl_table *table;
6030 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
6031
6032 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL);
6033 if (!table)
6034 goto out;
6035
6036 for (i = 0; table[i].data; i++) {
6037 table[i].data += (char *)p - (char *)&ipv6_devconf;
6038 table[i].extra1 = idev; /* embedded; no ref */
6039 table[i].extra2 = net;
6040 }
6041
6042 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
6043
6044 p->sysctl_header = register_net_sysctl(net, path, table);
6045 if (!p->sysctl_header)
6046 goto free;
6047
6048 return 0;
6049
6050 free:
6051 kfree(table);
6052 out:
6053 return -ENOBUFS;
6054 }
6055
6056 static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
6057 {
6058 struct ctl_table *table;
6059
6060 if (!p->sysctl_header)
6061 return;
6062
6063 table = p->sysctl_header->ctl_table_arg;
6064 unregister_net_sysctl_table(p->sysctl_header);
6065 p->sysctl_header = NULL;
6066 kfree(table);
6067 }
6068
6069 static int addrconf_sysctl_register(struct inet6_dev *idev)
6070 {
6071 int err;
6072
6073 if (!sysctl_dev_name_is_allowed(idev->dev->name))
6074 return -EINVAL;
6075
6076 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
6077 &ndisc_ifinfo_sysctl_change);
6078 if (err)
6079 return err;
6080 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
6081 idev, &idev->cnf);
6082 if (err)
6083 neigh_sysctl_unregister(idev->nd_parms);
6084
6085 return err;
6086 }
6087
6088 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
6089 {
6090 __addrconf_sysctl_unregister(&idev->cnf);
6091 neigh_sysctl_unregister(idev->nd_parms);
6092 }
6093
6094
6095 #endif
6096
6097 static int __net_init addrconf_init_net(struct net *net)
6098 {
6099 int err = -ENOMEM;
6100 struct ipv6_devconf *all, *dflt;
6101
6102 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
6103 if (!all)
6104 goto err_alloc_all;
6105
6106 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
6107 if (!dflt)
6108 goto err_alloc_dflt;
6109
6110 /* these will be inherited by all namespaces */
6111 dflt->autoconf = ipv6_defaults.autoconf;
6112 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
6113
6114 dflt->stable_secret.initialized = false;
6115 all->stable_secret.initialized = false;
6116
6117 net->ipv6.devconf_all = all;
6118 net->ipv6.devconf_dflt = dflt;
6119
6120 #ifdef CONFIG_SYSCTL
6121 err = __addrconf_sysctl_register(net, "all", NULL, all);
6122 if (err < 0)
6123 goto err_reg_all;
6124
6125 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
6126 if (err < 0)
6127 goto err_reg_dflt;
6128 #endif
6129 return 0;
6130
6131 #ifdef CONFIG_SYSCTL
6132 err_reg_dflt:
6133 __addrconf_sysctl_unregister(all);
6134 err_reg_all:
6135 kfree(dflt);
6136 #endif
6137 err_alloc_dflt:
6138 kfree(all);
6139 err_alloc_all:
6140 return err;
6141 }
6142
6143 static void __net_exit addrconf_exit_net(struct net *net)
6144 {
6145 #ifdef CONFIG_SYSCTL
6146 __addrconf_sysctl_unregister(net->ipv6.devconf_dflt);
6147 __addrconf_sysctl_unregister(net->ipv6.devconf_all);
6148 #endif
6149 kfree(net->ipv6.devconf_dflt);
6150 kfree(net->ipv6.devconf_all);
6151 }
6152
6153 static struct pernet_operations addrconf_ops = {
6154 .init = addrconf_init_net,
6155 .exit = addrconf_exit_net,
6156 };
6157
6158 static struct rtnl_af_ops inet6_ops __read_mostly = {
6159 .family = AF_INET6,
6160 .fill_link_af = inet6_fill_link_af,
6161 .get_link_af_size = inet6_get_link_af_size,
6162 .validate_link_af = inet6_validate_link_af,
6163 .set_link_af = inet6_set_link_af,
6164 };
6165
6166 /*
6167 * Init / cleanup code
6168 */
6169
6170 int __init addrconf_init(void)
6171 {
6172 struct inet6_dev *idev;
6173 int i, err;
6174
6175 err = ipv6_addr_label_init();
6176 if (err < 0) {
6177 pr_crit("%s: cannot initialize default policy table: %d\n",
6178 __func__, err);
6179 goto out;
6180 }
6181
6182 err = register_pernet_subsys(&addrconf_ops);
6183 if (err < 0)
6184 goto out_addrlabel;
6185
6186 addrconf_wq = create_workqueue("ipv6_addrconf");
6187 if (!addrconf_wq) {
6188 err = -ENOMEM;
6189 goto out_nowq;
6190 }
6191
6192 /* The addrconf netdev notifier requires that loopback_dev
6193 * has it's ipv6 private information allocated and setup
6194 * before it can bring up and give link-local addresses
6195 * to other devices which are up.
6196 *
6197 * Unfortunately, loopback_dev is not necessarily the first
6198 * entry in the global dev_base list of net devices. In fact,
6199 * it is likely to be the very last entry on that list.
6200 * So this causes the notifier registry below to try and
6201 * give link-local addresses to all devices besides loopback_dev
6202 * first, then loopback_dev, which cases all the non-loopback_dev
6203 * devices to fail to get a link-local address.
6204 *
6205 * So, as a temporary fix, allocate the ipv6 structure for
6206 * loopback_dev first by hand.
6207 * Longer term, all of the dependencies ipv6 has upon the loopback
6208 * device and it being up should be removed.
6209 */
6210 rtnl_lock();
6211 idev = ipv6_add_dev(init_net.loopback_dev);
6212 rtnl_unlock();
6213 if (IS_ERR(idev)) {
6214 err = PTR_ERR(idev);
6215 goto errlo;
6216 }
6217
6218 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6219 INIT_HLIST_HEAD(&inet6_addr_lst[i]);
6220
6221 register_netdevice_notifier(&ipv6_dev_notf);
6222
6223 addrconf_verify();
6224
6225 rtnl_af_register(&inet6_ops);
6226
6227 err = __rtnl_register(PF_INET6, RTM_GETLINK, NULL, inet6_dump_ifinfo,
6228 NULL);
6229 if (err < 0)
6230 goto errout;
6231
6232 /* Only the first call to __rtnl_register can fail */
6233 __rtnl_register(PF_INET6, RTM_NEWADDR, inet6_rtm_newaddr, NULL, NULL);
6234 __rtnl_register(PF_INET6, RTM_DELADDR, inet6_rtm_deladdr, NULL, NULL);
6235 __rtnl_register(PF_INET6, RTM_GETADDR, inet6_rtm_getaddr,
6236 inet6_dump_ifaddr, NULL);
6237 __rtnl_register(PF_INET6, RTM_GETMULTICAST, NULL,
6238 inet6_dump_ifmcaddr, NULL);
6239 __rtnl_register(PF_INET6, RTM_GETANYCAST, NULL,
6240 inet6_dump_ifacaddr, NULL);
6241 __rtnl_register(PF_INET6, RTM_GETNETCONF, inet6_netconf_get_devconf,
6242 inet6_netconf_dump_devconf, NULL);
6243
6244 ipv6_addr_label_rtnl_register();
6245
6246 return 0;
6247 errout:
6248 rtnl_af_unregister(&inet6_ops);
6249 unregister_netdevice_notifier(&ipv6_dev_notf);
6250 errlo:
6251 destroy_workqueue(addrconf_wq);
6252 out_nowq:
6253 unregister_pernet_subsys(&addrconf_ops);
6254 out_addrlabel:
6255 ipv6_addr_label_cleanup();
6256 out:
6257 return err;
6258 }
6259
6260 void addrconf_cleanup(void)
6261 {
6262 struct net_device *dev;
6263 int i;
6264
6265 unregister_netdevice_notifier(&ipv6_dev_notf);
6266 unregister_pernet_subsys(&addrconf_ops);
6267 ipv6_addr_label_cleanup();
6268
6269 rtnl_lock();
6270
6271 __rtnl_af_unregister(&inet6_ops);
6272
6273 /* clean dev list */
6274 for_each_netdev(&init_net, dev) {
6275 if (__in6_dev_get(dev) == NULL)
6276 continue;
6277 addrconf_ifdown(dev, 1);
6278 }
6279 addrconf_ifdown(init_net.loopback_dev, 2);
6280
6281 /*
6282 * Check hash table.
6283 */
6284 spin_lock_bh(&addrconf_hash_lock);
6285 for (i = 0; i < IN6_ADDR_HSIZE; i++)
6286 WARN_ON(!hlist_empty(&inet6_addr_lst[i]));
6287 spin_unlock_bh(&addrconf_hash_lock);
6288 cancel_delayed_work(&addr_chk_work);
6289 rtnl_unlock();
6290
6291 destroy_workqueue(addrconf_wq);
6292 }
This page took 0.165291 seconds and 5 git commands to generate.