1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * IPv6 Address [auto]configuration
4 * Linux INET6 implementation
14 * Janos Farkas : delete timer on ifdown
16 * Andi Kleen : kill double kfree on module
18 * Maciej W. Rozycki : FDDI support
19 * sekiya@USAGI : Don't send too many RS
21 * yoshfuji@USAGI : Fixed interval between DAD
23 * YOSHIFUJI Hideaki @USAGI : improved accuracy of
24 * address validation timer.
25 * YOSHIFUJI Hideaki @USAGI : Privacy Extensions (RFC3041)
27 * Yuji SEKIYA @USAGI : Don't assign a same IPv6
28 * address on a same interface.
29 * YOSHIFUJI Hideaki @USAGI : ARCnet support
30 * YOSHIFUJI Hideaki @USAGI : convert /proc/net/if_inet6 to
32 * YOSHIFUJI Hideaki @USAGI : improved source address
33 * selection; consider scope,
37 #define pr_fmt(fmt) "IPv6: " fmt
39 #include <linux/errno.h>
40 #include <linux/types.h>
41 #include <linux/kernel.h>
42 #include <linux/sched/signal.h>
43 #include <linux/socket.h>
44 #include <linux/sockios.h>
45 #include <linux/net.h>
46 #include <linux/inet.h>
47 #include <linux/in6.h>
48 #include <linux/netdevice.h>
49 #include <linux/if_addr.h>
50 #include <linux/if_arp.h>
51 #include <linux/if_arcnet.h>
52 #include <linux/if_infiniband.h>
53 #include <linux/route.h>
54 #include <linux/inetdevice.h>
55 #include <linux/init.h>
56 #include <linux/slab.h>
58 #include <linux/sysctl.h>
60 #include <linux/capability.h>
61 #include <linux/delay.h>
62 #include <linux/notifier.h>
63 #include <linux/string.h>
64 #include <linux/hash.h>
66 #include <net/net_namespace.h>
70 #include <net/6lowpan.h>
71 #include <net/firewire.h>
73 #include <net/protocol.h>
74 #include <net/ndisc.h>
75 #include <net/ip6_route.h>
76 #include <net/addrconf.h>
79 #include <net/netlink.h>
80 #include <net/pkt_sched.h>
81 #include <net/l3mdev.h>
82 #include <linux/if_tunnel.h>
83 #include <linux/rtnetlink.h>
84 #include <linux/netconf.h>
85 #include <linux/random.h>
86 #include <linux/uaccess.h>
87 #include <asm/unaligned.h>
89 #include <linux/proc_fs.h>
90 #include <linux/seq_file.h>
91 #include <linux/export.h>
92 #include <linux/ioam6.h>
94 #define INFINITY_LIFE_TIME 0xFFFFFFFF
96 #define IPV6_MAX_STRLEN \
97 sizeof("ffff:ffff:ffff:ffff:ffff:ffff:255.255.255.255")
99 static inline u32 cstamp_delta(unsigned long cstamp)
101 return (cstamp - INITIAL_JIFFIES) * 100UL / HZ;
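/*
 * The two helpers below implement the RFC 3315, section 14 randomised
 * exponential backoff used when retransmitting router solicitations:
 * the first timeout is the configured initial interval scaled by
 * 0.9..1.1, and each later timeout is roughly doubled (1.9..2.1) until
 * it is capped near the maximum interval (again scaled by 0.9..1.1).
 *
 * Sketch of the retransmit pattern (solicit_again() is a hypothetical
 * placeholder, not a kernel function):
 *
 *	s32 rt = rfc3315_s14_backoff_init(idev->cnf.rtr_solicit_interval);
 *
 *	while (solicit_again())
 *		rt = rfc3315_s14_backoff_update(rt,
 *				idev->cnf.rtr_solicit_max_interval);
 */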
104 static inline s32 rfc3315_s14_backoff_init(s32 irt)
106 /* multiply 'initial retransmission time' by 0.9 .. 1.1 */
107 u64 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)irt;
108 do_div(tmp, 1000000);
112 static inline s32 rfc3315_s14_backoff_update(s32 rt, s32 mrt)
114 /* multiply 'retransmission timeout' by 1.9 .. 2.1 */
115 u64 tmp = get_random_u32_inclusive(1900000, 2100000) * (u64)rt;
116 do_div(tmp, 1000000);
117 if ((s32)tmp > mrt) {
118 /* multiply 'maximum retransmission time' by 0.9 .. 1.1 */
119 tmp = get_random_u32_inclusive(900000, 1100000) * (u64)mrt;
120 do_div(tmp, 1000000);
126 static int addrconf_sysctl_register(struct inet6_dev *idev);
127 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
129 static inline int addrconf_sysctl_register(struct inet6_dev *idev)
134 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
139 static void ipv6_gen_rnd_iid(struct in6_addr *addr);
141 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev);
142 static int ipv6_count_addresses(const struct inet6_dev *idev);
143 static int ipv6_generate_stable_address(struct in6_addr *addr,
145 const struct inet6_dev *idev);
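/*
 * Size of the per-namespace hash table (net->ipv6.inet6_addr_lst) that
 * holds every configured IPv6 address; inet6_addr_hash() below folds
 * the address and the namespace into an IN6_ADDR_HSIZE_SHIFT-bit
 * bucket index, i.e. 256 buckets.
 */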
147 #define IN6_ADDR_HSIZE_SHIFT 8
148 #define IN6_ADDR_HSIZE (1 << IN6_ADDR_HSIZE_SHIFT)
150 static void addrconf_verify(struct net *net);
151 static void addrconf_verify_rtnl(struct net *net);
153 static struct workqueue_struct *addrconf_wq;
155 static void addrconf_join_anycast(struct inet6_ifaddr *ifp);
156 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp);
158 static void addrconf_type_change(struct net_device *dev,
159 unsigned long event);
160 static int addrconf_ifdown(struct net_device *dev, bool unregister);
162 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
164 const struct net_device *dev,
165 u32 flags, u32 noflags,
168 static void addrconf_dad_start(struct inet6_ifaddr *ifp);
169 static void addrconf_dad_work(struct work_struct *w);
170 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
172 static void addrconf_dad_run(struct inet6_dev *idev, bool restart);
173 static void addrconf_rs_timer(struct timer_list *t);
174 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
175 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifa);
177 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
178 struct prefix_info *pinfo);
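/*
 * Compile-time defaults for per-interface configuration: ipv6_devconf
 * seeds the namespace-wide "all" settings while ipv6_devconf_dflt
 * seeds the "default" settings that are copied into every new
 * inet6_dev (see the memcpy() of devconf_dflt in ipv6_add_dev()).
 */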
180 static struct ipv6_devconf ipv6_devconf __read_mostly = {
182 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
183 .mtu6 = IPV6_MIN_MTU,
185 .accept_redirects = 1,
187 .force_mld_version = 0,
188 .mldv1_unsolicited_report_interval = 10 * HZ,
189 .mldv2_unsolicited_report_interval = HZ,
191 .rtr_solicits = MAX_RTR_SOLICITATIONS,
192 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
193 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
194 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
196 .temp_valid_lft = TEMP_VALID_LIFETIME,
197 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
198 .regen_max_retry = REGEN_MAX_RETRY,
199 .max_desync_factor = MAX_DESYNC_FACTOR,
200 .max_addresses = IPV6_MAX_ADDRESSES,
201 .accept_ra_defrtr = 1,
202 .ra_defrtr_metric = IP6_RT_PRIO_USER,
203 .accept_ra_from_local = 0,
204 .accept_ra_min_hop_limit= 1,
205 .accept_ra_pinfo = 1,
206 #ifdef CONFIG_IPV6_ROUTER_PREF
207 .accept_ra_rtr_pref = 1,
208 .rtr_probe_interval = 60 * HZ,
209 #ifdef CONFIG_IPV6_ROUTE_INFO
210 .accept_ra_rt_info_min_plen = 0,
211 .accept_ra_rt_info_max_plen = 0,
215 .accept_source_route = 0, /* we do not accept RH0 by default. */
218 .suppress_frag_ndisc = 1,
221 .initialized = false,
223 .use_oif_addrs_only = 0,
224 .ignore_routes_with_linkdown = 0,
225 .keep_addr_on_down = 0,
227 #ifdef CONFIG_IPV6_SEG6_HMAC
228 .seg6_require_hmac = 0,
231 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
233 .rpl_seg_enabled = 0,
235 .ioam6_id = IOAM6_DEFAULT_IF_ID,
236 .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
237 .ndisc_evict_nocarrier = 1,
240 static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
242 .hop_limit = IPV6_DEFAULT_HOPLIMIT,
243 .mtu6 = IPV6_MIN_MTU,
245 .accept_redirects = 1,
247 .force_mld_version = 0,
248 .mldv1_unsolicited_report_interval = 10 * HZ,
249 .mldv2_unsolicited_report_interval = HZ,
251 .rtr_solicits = MAX_RTR_SOLICITATIONS,
252 .rtr_solicit_interval = RTR_SOLICITATION_INTERVAL,
253 .rtr_solicit_max_interval = RTR_SOLICITATION_MAX_INTERVAL,
254 .rtr_solicit_delay = MAX_RTR_SOLICITATION_DELAY,
256 .temp_valid_lft = TEMP_VALID_LIFETIME,
257 .temp_prefered_lft = TEMP_PREFERRED_LIFETIME,
258 .regen_max_retry = REGEN_MAX_RETRY,
259 .max_desync_factor = MAX_DESYNC_FACTOR,
260 .max_addresses = IPV6_MAX_ADDRESSES,
261 .accept_ra_defrtr = 1,
262 .ra_defrtr_metric = IP6_RT_PRIO_USER,
263 .accept_ra_from_local = 0,
264 .accept_ra_min_hop_limit= 1,
265 .accept_ra_pinfo = 1,
266 #ifdef CONFIG_IPV6_ROUTER_PREF
267 .accept_ra_rtr_pref = 1,
268 .rtr_probe_interval = 60 * HZ,
269 #ifdef CONFIG_IPV6_ROUTE_INFO
270 .accept_ra_rt_info_min_plen = 0,
271 .accept_ra_rt_info_max_plen = 0,
275 .accept_source_route = 0, /* we do not accept RH0 by default. */
278 .suppress_frag_ndisc = 1,
281 .initialized = false,
283 .use_oif_addrs_only = 0,
284 .ignore_routes_with_linkdown = 0,
285 .keep_addr_on_down = 0,
287 #ifdef CONFIG_IPV6_SEG6_HMAC
288 .seg6_require_hmac = 0,
291 .addr_gen_mode = IN6_ADDR_GEN_MODE_EUI64,
293 .rpl_seg_enabled = 0,
295 .ioam6_id = IOAM6_DEFAULT_IF_ID,
296 .ioam6_id_wide = IOAM6_DEFAULT_IF_ID_WIDE,
297 .ndisc_evict_nocarrier = 1,
300 /* Check if link is ready: is it up and is a valid qdisc available */
301 static inline bool addrconf_link_ready(const struct net_device *dev)
303 return netif_oper_up(dev) && !qdisc_tx_is_noop(dev);
306 static void addrconf_del_rs_timer(struct inet6_dev *idev)
308 if (del_timer(&idev->rs_timer))
312 static void addrconf_del_dad_work(struct inet6_ifaddr *ifp)
314 if (cancel_delayed_work(&ifp->dad_work))
318 static void addrconf_mod_rs_timer(struct inet6_dev *idev,
321 if (!timer_pending(&idev->rs_timer))
323 mod_timer(&idev->rs_timer, jiffies + when);
326 static void addrconf_mod_dad_work(struct inet6_ifaddr *ifp,
330 if (mod_delayed_work(addrconf_wq, &ifp->dad_work, delay))
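/*
 * snmp6_alloc_dev() allocates the per-device statistics blocks: a
 * per-CPU ipstats_mib for the IPv6 counters plus the ICMPv6 and
 * ICMPv6 per-message MIBs.  On failure everything already allocated
 * is freed again and a negative errno is returned.
 */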
334 static int snmp6_alloc_dev(struct inet6_dev *idev)
338 idev->stats.ipv6 = alloc_percpu_gfp(struct ipstats_mib, GFP_KERNEL_ACCOUNT);
339 if (!idev->stats.ipv6)
342 for_each_possible_cpu(i) {
343 struct ipstats_mib *addrconf_stats;
344 addrconf_stats = per_cpu_ptr(idev->stats.ipv6, i);
345 u64_stats_init(&addrconf_stats->syncp);
349 idev->stats.icmpv6dev = kzalloc(sizeof(struct icmpv6_mib_device),
351 if (!idev->stats.icmpv6dev)
353 idev->stats.icmpv6msgdev = kzalloc(sizeof(struct icmpv6msg_mib_device),
355 if (!idev->stats.icmpv6msgdev)
361 kfree(idev->stats.icmpv6dev);
363 free_percpu(idev->stats.ipv6);
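/*
 * ipv6_add_dev() creates and initialises the inet6_dev for a network
 * device: it rejects devices below IPV6_MIN_MTU (except
 * blackhole_netdev), copies the namespace default devconf, allocates
 * neighbour parameters and SNMP counters, takes a tracked reference on
 * the device, registers sysctl/procfs entries, publishes dev->ip6_ptr
 * under RCU and joins the all-nodes (and, when forwarding, the
 * all-routers) multicast groups.  It returns an ERR_PTR() on failure.
 */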
368 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
370 struct inet6_dev *ndev;
375 if (dev->mtu < IPV6_MIN_MTU && dev != blackhole_netdev)
376 return ERR_PTR(-EINVAL);
378 ndev = kzalloc(sizeof(*ndev), GFP_KERNEL_ACCOUNT);
382 rwlock_init(&ndev->lock);
384 INIT_LIST_HEAD(&ndev->addr_list);
385 timer_setup(&ndev->rs_timer, addrconf_rs_timer, 0);
386 memcpy(&ndev->cnf, dev_net(dev)->ipv6.devconf_dflt, sizeof(ndev->cnf));
388 if (ndev->cnf.stable_secret.initialized)
389 ndev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
391 ndev->cnf.mtu6 = dev->mtu;
393 ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
394 if (!ndev->nd_parms) {
398 if (ndev->cnf.forwarding)
399 dev_disable_lro(dev);
400 /* We refer to the device */
401 netdev_hold(dev, &ndev->dev_tracker, GFP_KERNEL);
403 if (snmp6_alloc_dev(ndev) < 0) {
404 netdev_dbg(dev, "%s: cannot allocate memory for statistics\n",
406 neigh_parms_release(&nd_tbl, ndev->nd_parms);
407 netdev_put(dev, &ndev->dev_tracker);
412 if (dev != blackhole_netdev) {
413 if (snmp6_register_dev(ndev) < 0) {
414 netdev_dbg(dev, "%s: cannot create /proc/net/dev_snmp6/%s\n",
415 __func__, dev->name);
419 /* One reference from device. */
420 refcount_set(&ndev->refcnt, 1);
422 if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
423 ndev->cnf.accept_dad = -1;
425 #if IS_ENABLED(CONFIG_IPV6_SIT)
426 if (dev->type == ARPHRD_SIT && (dev->priv_flags & IFF_ISATAP)) {
427 pr_info("%s: Disabled Multicast RS\n", dev->name);
428 ndev->cnf.rtr_solicits = 0;
432 INIT_LIST_HEAD(&ndev->tempaddr_list);
433 ndev->desync_factor = U32_MAX;
434 if ((dev->flags&IFF_LOOPBACK) ||
435 dev->type == ARPHRD_TUNNEL ||
436 dev->type == ARPHRD_TUNNEL6 ||
437 dev->type == ARPHRD_SIT ||
438 dev->type == ARPHRD_NONE) {
439 ndev->cnf.use_tempaddr = -1;
442 ndev->token = in6addr_any;
444 if (netif_running(dev) && addrconf_link_ready(dev))
445 ndev->if_flags |= IF_READY;
447 ipv6_mc_init_dev(ndev);
448 ndev->tstamp = jiffies;
449 if (dev != blackhole_netdev) {
450 err = addrconf_sysctl_register(ndev);
452 ipv6_mc_destroy_dev(ndev);
453 snmp6_unregister_dev(ndev);
457 /* protected by rtnl_lock */
458 rcu_assign_pointer(dev->ip6_ptr, ndev);
460 if (dev != blackhole_netdev) {
461 /* Join interface-local all-node multicast group */
462 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allnodes);
464 /* Join all-node multicast group */
465 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allnodes);
467 /* Join all-router multicast group if forwarding is set */
468 if (ndev->cnf.forwarding && (dev->flags & IFF_MULTICAST))
469 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
474 neigh_parms_release(&nd_tbl, ndev->nd_parms);
476 in6_dev_finish_destroy(ndev);
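/*
 * ipv6_find_idev() returns the existing inet6_dev for the device,
 * creating one via ipv6_add_dev() when necessary; it is expected to
 * run under RTNL.
 */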
480 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
482 struct inet6_dev *idev;
486 idev = __in6_dev_get(dev);
488 idev = ipv6_add_dev(dev);
493 if (dev->flags&IFF_UP)
498 static int inet6_netconf_msgsize_devconf(int type)
500 int size = NLMSG_ALIGN(sizeof(struct netconfmsg))
501 + nla_total_size(4); /* NETCONFA_IFINDEX */
504 if (type == NETCONFA_ALL)
507 if (all || type == NETCONFA_FORWARDING)
508 size += nla_total_size(4);
509 #ifdef CONFIG_IPV6_MROUTE
510 if (all || type == NETCONFA_MC_FORWARDING)
511 size += nla_total_size(4);
513 if (all || type == NETCONFA_PROXY_NEIGH)
514 size += nla_total_size(4);
516 if (all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN)
517 size += nla_total_size(4);
522 static int inet6_netconf_fill_devconf(struct sk_buff *skb, int ifindex,
523 struct ipv6_devconf *devconf, u32 portid,
524 u32 seq, int event, unsigned int flags,
527 struct nlmsghdr *nlh;
528 struct netconfmsg *ncm;
531 nlh = nlmsg_put(skb, portid, seq, event, sizeof(struct netconfmsg),
536 if (type == NETCONFA_ALL)
539 ncm = nlmsg_data(nlh);
540 ncm->ncm_family = AF_INET6;
542 if (nla_put_s32(skb, NETCONFA_IFINDEX, ifindex) < 0)
543 goto nla_put_failure;
548 if ((all || type == NETCONFA_FORWARDING) &&
549 nla_put_s32(skb, NETCONFA_FORWARDING, devconf->forwarding) < 0)
550 goto nla_put_failure;
551 #ifdef CONFIG_IPV6_MROUTE
552 if ((all || type == NETCONFA_MC_FORWARDING) &&
553 nla_put_s32(skb, NETCONFA_MC_FORWARDING,
554 atomic_read(&devconf->mc_forwarding)) < 0)
555 goto nla_put_failure;
557 if ((all || type == NETCONFA_PROXY_NEIGH) &&
558 nla_put_s32(skb, NETCONFA_PROXY_NEIGH, devconf->proxy_ndp) < 0)
559 goto nla_put_failure;
561 if ((all || type == NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN) &&
562 nla_put_s32(skb, NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
563 devconf->ignore_routes_with_linkdown) < 0)
564 goto nla_put_failure;
571 nlmsg_cancel(skb, nlh);
575 void inet6_netconf_notify_devconf(struct net *net, int event, int type,
576 int ifindex, struct ipv6_devconf *devconf)
581 skb = nlmsg_new(inet6_netconf_msgsize_devconf(type), GFP_KERNEL);
585 err = inet6_netconf_fill_devconf(skb, ifindex, devconf, 0, 0,
588 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
589 WARN_ON(err == -EMSGSIZE);
593 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_NETCONF, NULL, GFP_KERNEL);
596 rtnl_set_sk_err(net, RTNLGRP_IPV6_NETCONF, err);
599 static const struct nla_policy devconf_ipv6_policy[NETCONFA_MAX+1] = {
600 [NETCONFA_IFINDEX] = { .len = sizeof(int) },
601 [NETCONFA_FORWARDING] = { .len = sizeof(int) },
602 [NETCONFA_PROXY_NEIGH] = { .len = sizeof(int) },
603 [NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN] = { .len = sizeof(int) },
606 static int inet6_netconf_valid_get_req(struct sk_buff *skb,
607 const struct nlmsghdr *nlh,
609 struct netlink_ext_ack *extack)
613 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(struct netconfmsg))) {
614 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf get request");
618 if (!netlink_strict_get_check(skb))
619 return nlmsg_parse_deprecated(nlh, sizeof(struct netconfmsg),
621 devconf_ipv6_policy, extack);
623 err = nlmsg_parse_deprecated_strict(nlh, sizeof(struct netconfmsg),
625 devconf_ipv6_policy, extack);
629 for (i = 0; i <= NETCONFA_MAX; i++) {
634 case NETCONFA_IFINDEX:
637 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in netconf get request");
645 static int inet6_netconf_get_devconf(struct sk_buff *in_skb,
646 struct nlmsghdr *nlh,
647 struct netlink_ext_ack *extack)
649 struct net *net = sock_net(in_skb->sk);
650 struct nlattr *tb[NETCONFA_MAX+1];
651 struct inet6_dev *in6_dev = NULL;
652 struct net_device *dev = NULL;
654 struct ipv6_devconf *devconf;
658 err = inet6_netconf_valid_get_req(in_skb, nlh, tb, extack);
662 if (!tb[NETCONFA_IFINDEX])
666 ifindex = nla_get_s32(tb[NETCONFA_IFINDEX]);
668 case NETCONFA_IFINDEX_ALL:
669 devconf = net->ipv6.devconf_all;
671 case NETCONFA_IFINDEX_DEFAULT:
672 devconf = net->ipv6.devconf_dflt;
675 dev = dev_get_by_index(net, ifindex);
678 in6_dev = in6_dev_get(dev);
681 devconf = &in6_dev->cnf;
686 skb = nlmsg_new(inet6_netconf_msgsize_devconf(NETCONFA_ALL), GFP_KERNEL);
690 err = inet6_netconf_fill_devconf(skb, ifindex, devconf,
691 NETLINK_CB(in_skb).portid,
692 nlh->nlmsg_seq, RTM_NEWNETCONF, 0,
695 /* -EMSGSIZE implies BUG in inet6_netconf_msgsize_devconf() */
696 WARN_ON(err == -EMSGSIZE);
700 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
703 in6_dev_put(in6_dev);
708 static int inet6_netconf_dump_devconf(struct sk_buff *skb,
709 struct netlink_callback *cb)
711 const struct nlmsghdr *nlh = cb->nlh;
712 struct net *net = sock_net(skb->sk);
715 struct net_device *dev;
716 struct inet6_dev *idev;
717 struct hlist_head *head;
719 if (cb->strict_check) {
720 struct netlink_ext_ack *extack = cb->extack;
721 struct netconfmsg *ncm;
723 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ncm))) {
724 NL_SET_ERR_MSG_MOD(extack, "Invalid header for netconf dump request");
728 if (nlmsg_attrlen(nlh, sizeof(*ncm))) {
729 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header in netconf dump request");
735 s_idx = idx = cb->args[1];
737 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
739 head = &net->dev_index_head[h];
741 cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
743 hlist_for_each_entry_rcu(dev, head, index_hlist) {
746 idev = __in6_dev_get(dev);
750 if (inet6_netconf_fill_devconf(skb, dev->ifindex,
752 NETLINK_CB(cb->skb).portid,
760 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
766 if (h == NETDEV_HASHENTRIES) {
767 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_ALL,
768 net->ipv6.devconf_all,
769 NETLINK_CB(cb->skb).portid,
771 RTM_NEWNETCONF, NLM_F_MULTI,
777 if (h == NETDEV_HASHENTRIES + 1) {
778 if (inet6_netconf_fill_devconf(skb, NETCONFA_IFINDEX_DEFAULT,
779 net->ipv6.devconf_dflt,
780 NETLINK_CB(cb->skb).portid,
782 RTM_NEWNETCONF, NLM_F_MULTI,
796 static void dev_forward_change(struct inet6_dev *idev)
798 struct net_device *dev;
799 struct inet6_ifaddr *ifa;
800 LIST_HEAD(tmp_addr_list);
805 if (idev->cnf.forwarding)
806 dev_disable_lro(dev);
807 if (dev->flags & IFF_MULTICAST) {
808 if (idev->cnf.forwarding) {
809 ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
810 ipv6_dev_mc_inc(dev, &in6addr_interfacelocal_allrouters);
811 ipv6_dev_mc_inc(dev, &in6addr_sitelocal_allrouters);
813 ipv6_dev_mc_dec(dev, &in6addr_linklocal_allrouters);
814 ipv6_dev_mc_dec(dev, &in6addr_interfacelocal_allrouters);
815 ipv6_dev_mc_dec(dev, &in6addr_sitelocal_allrouters);
819 read_lock_bh(&idev->lock);
820 list_for_each_entry(ifa, &idev->addr_list, if_list) {
821 if (ifa->flags&IFA_F_TENTATIVE)
823 list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
825 read_unlock_bh(&idev->lock);
827 while (!list_empty(&tmp_addr_list)) {
828 ifa = list_first_entry(&tmp_addr_list,
829 struct inet6_ifaddr, if_list_aux);
830 list_del(&ifa->if_list_aux);
831 if (idev->cnf.forwarding)
832 addrconf_join_anycast(ifa);
834 addrconf_leave_anycast(ifa);
837 inet6_netconf_notify_devconf(dev_net(dev), RTM_NEWNETCONF,
839 dev->ifindex, &idev->cnf);
843 static void addrconf_forward_change(struct net *net, __s32 newf)
845 struct net_device *dev;
846 struct inet6_dev *idev;
848 for_each_netdev(net, dev) {
849 idev = __in6_dev_get(dev);
851 int changed = (!idev->cnf.forwarding) ^ (!newf);
852 idev->cnf.forwarding = newf;
854 dev_forward_change(idev);
859 static int addrconf_fixup_forwarding(struct ctl_table *table, int *p, int newf)
865 return restart_syscall();
867 net = (struct net *)table->extra2;
871 if (p == &net->ipv6.devconf_dflt->forwarding) {
872 if ((!newf) ^ (!old))
873 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
875 NETCONFA_IFINDEX_DEFAULT,
876 net->ipv6.devconf_dflt);
881 if (p == &net->ipv6.devconf_all->forwarding) {
882 int old_dflt = net->ipv6.devconf_dflt->forwarding;
884 net->ipv6.devconf_dflt->forwarding = newf;
885 if ((!newf) ^ (!old_dflt))
886 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
888 NETCONFA_IFINDEX_DEFAULT,
889 net->ipv6.devconf_dflt);
891 addrconf_forward_change(net, newf);
892 if ((!newf) ^ (!old))
893 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
895 NETCONFA_IFINDEX_ALL,
896 net->ipv6.devconf_all);
897 } else if ((!newf) ^ (!old))
898 dev_forward_change((struct inet6_dev *)table->extra1);
902 rt6_purge_dflt_routers(net);
906 static void addrconf_linkdown_change(struct net *net, __s32 newf)
908 struct net_device *dev;
909 struct inet6_dev *idev;
911 for_each_netdev(net, dev) {
912 idev = __in6_dev_get(dev);
914 int changed = (!idev->cnf.ignore_routes_with_linkdown) ^ (!newf);
916 idev->cnf.ignore_routes_with_linkdown = newf;
918 inet6_netconf_notify_devconf(dev_net(dev),
920 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
927 static int addrconf_fixup_linkdown(struct ctl_table *table, int *p, int newf)
933 return restart_syscall();
935 net = (struct net *)table->extra2;
939 if (p == &net->ipv6.devconf_dflt->ignore_routes_with_linkdown) {
940 if ((!newf) ^ (!old))
941 inet6_netconf_notify_devconf(net,
943 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
944 NETCONFA_IFINDEX_DEFAULT,
945 net->ipv6.devconf_dflt);
950 if (p == &net->ipv6.devconf_all->ignore_routes_with_linkdown) {
951 net->ipv6.devconf_dflt->ignore_routes_with_linkdown = newf;
952 addrconf_linkdown_change(net, newf);
953 if ((!newf) ^ (!old))
954 inet6_netconf_notify_devconf(net,
956 NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
957 NETCONFA_IFINDEX_ALL,
958 net->ipv6.devconf_all);
967 /* Nobody refers to this ifaddr, destroy it */
968 void inet6_ifa_finish_destroy(struct inet6_ifaddr *ifp)
970 WARN_ON(!hlist_unhashed(&ifp->addr_lst));
972 #ifdef NET_REFCNT_DEBUG
973 pr_debug("%s\n", __func__);
976 in6_dev_put(ifp->idev);
978 if (cancel_delayed_work(&ifp->dad_work))
979 pr_notice("delayed DAD work was pending while freeing ifa=%p\n",
982 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
983 pr_warn("Freeing alive inet6 address %p\n", ifp);
991 ipv6_link_dev_addr(struct inet6_dev *idev, struct inet6_ifaddr *ifp)
994 int ifp_scope = ipv6_addr_src_scope(&ifp->addr);
997 * Each device address list is sorted in order of scope -
998 * global before linklocal.
1000 list_for_each(p, &idev->addr_list) {
1001 struct inet6_ifaddr *ifa
1002 = list_entry(p, struct inet6_ifaddr, if_list);
1003 if (ifp_scope >= ipv6_addr_src_scope(&ifa->addr))
1007 list_add_tail_rcu(&ifp->if_list, p);
1010 static u32 inet6_addr_hash(const struct net *net, const struct in6_addr *addr)
1012 u32 val = ipv6_addr_hash(addr) ^ net_hash_mix(net);
1014 return hash_32(val, IN6_ADDR_HSIZE_SHIFT);
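/*
 * ipv6_chk_same_addr() walks a single hash bucket looking for an
 * identical address (optionally restricted to @dev); it is used by
 * ipv6_add_addr_hash() below to reject duplicate assignments.
 */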
1017 static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,
1018 struct net_device *dev, unsigned int hash)
1020 struct inet6_ifaddr *ifp;
1022 hlist_for_each_entry(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1023 if (ipv6_addr_equal(&ifp->addr, addr)) {
1024 if (!dev || ifp->idev->dev == dev)
1031 static int ipv6_add_addr_hash(struct net_device *dev, struct inet6_ifaddr *ifa)
1033 struct net *net = dev_net(dev);
1034 unsigned int hash = inet6_addr_hash(net, &ifa->addr);
1037 spin_lock(&net->ipv6.addrconf_hash_lock);
1039 /* Ignore adding duplicate addresses on an interface */
1040 if (ipv6_chk_same_addr(net, &ifa->addr, dev, hash)) {
1041 netdev_dbg(dev, "ipv6_add_addr: already assigned\n");
1044 hlist_add_head_rcu(&ifa->addr_lst, &net->ipv6.inet6_addr_lst[hash]);
1047 spin_unlock(&net->ipv6.addrconf_hash_lock);
1052 /* On success it returns ifp with increased reference count */
1054 static struct inet6_ifaddr *
1055 ipv6_add_addr(struct inet6_dev *idev, struct ifa6_config *cfg,
1056 bool can_block, struct netlink_ext_ack *extack)
1058 gfp_t gfp_flags = can_block ? GFP_KERNEL : GFP_ATOMIC;
1059 int addr_type = ipv6_addr_type(cfg->pfx);
1060 struct net *net = dev_net(idev->dev);
1061 struct inet6_ifaddr *ifa = NULL;
1062 struct fib6_info *f6i = NULL;
1065 if (addr_type == IPV6_ADDR_ANY ||
1066 (addr_type & IPV6_ADDR_MULTICAST &&
1067 !(cfg->ifa_flags & IFA_F_MCAUTOJOIN)) ||
1068 (!(idev->dev->flags & IFF_LOOPBACK) &&
1069 !netif_is_l3_master(idev->dev) &&
1070 addr_type & IPV6_ADDR_LOOPBACK))
1071 return ERR_PTR(-EADDRNOTAVAIL);
1074 err = -ENODEV; /*XXX*/
1078 if (idev->cnf.disable_ipv6) {
1083 /* validator notifier needs to be blocking;
1084 * do not call in atomic context
1087 struct in6_validator_info i6vi = {
1088 .i6vi_addr = *cfg->pfx,
1093 err = inet6addr_validator_notifier_call_chain(NETDEV_UP, &i6vi);
1094 err = notifier_to_errno(err);
1099 ifa = kzalloc(sizeof(*ifa), gfp_flags | __GFP_ACCOUNT);
1105 f6i = addrconf_f6i_alloc(net, idev, cfg->pfx, false, gfp_flags);
1112 neigh_parms_data_state_setall(idev->nd_parms);
1114 ifa->addr = *cfg->pfx;
1116 ifa->peer_addr = *cfg->peer_pfx;
1118 spin_lock_init(&ifa->lock);
1119 INIT_DELAYED_WORK(&ifa->dad_work, addrconf_dad_work);
1120 INIT_HLIST_NODE(&ifa->addr_lst);
1121 ifa->scope = cfg->scope;
1122 ifa->prefix_len = cfg->plen;
1123 ifa->rt_priority = cfg->rt_priority;
1124 ifa->flags = cfg->ifa_flags;
1125 ifa->ifa_proto = cfg->ifa_proto;
1126 /* No need to add the TENTATIVE flag for addresses with NODAD */
1127 if (!(cfg->ifa_flags & IFA_F_NODAD))
1128 ifa->flags |= IFA_F_TENTATIVE;
1129 ifa->valid_lft = cfg->valid_lft;
1130 ifa->prefered_lft = cfg->preferred_lft;
1131 ifa->cstamp = ifa->tstamp = jiffies;
1132 ifa->tokenized = false;
1140 refcount_set(&ifa->refcnt, 1);
1144 err = ipv6_add_addr_hash(idev->dev, ifa);
1146 rcu_read_unlock_bh();
1150 write_lock(&idev->lock);
1152 /* Add to inet6_dev unicast addr list. */
1153 ipv6_link_dev_addr(idev, ifa);
1155 if (ifa->flags&IFA_F_TEMPORARY) {
1156 list_add(&ifa->tmp_list, &idev->tempaddr_list);
1161 write_unlock(&idev->lock);
1163 rcu_read_unlock_bh();
1165 inet6addr_notifier_call_chain(NETDEV_UP, ifa);
1167 if (unlikely(err < 0)) {
1168 fib6_info_release(f6i);
1172 in6_dev_put(ifa->idev);
1181 enum cleanup_prefix_rt_t {
1182 CLEANUP_PREFIX_RT_NOP, /* no cleanup action for prefix route */
1183 CLEANUP_PREFIX_RT_DEL, /* delete the prefix route */
1184 CLEANUP_PREFIX_RT_EXPIRE, /* update the lifetime of the prefix route */
1188 * Check, whether the prefix for ifp would still need a prefix route
1189 * after deleting ifp. The function returns one of the CLEANUP_PREFIX_RT_*
1192 * 1) we don't purge prefix if address was not permanent.
1193 * prefix is managed by its own lifetime.
1194 * 2) we also don't purge, if the address was IFA_F_NOPREFIXROUTE.
1195 * 3) if there are no addresses, delete prefix.
1196 * 4) if there are still other permanent address(es),
1197 * corresponding prefix is still permanent.
1198 * 5) if there are still other addresses with IFA_F_NOPREFIXROUTE,
1199 * don't purge the prefix, assume user space is managing it.
1200 * 6) otherwise, update prefix lifetime to the
1201 * longest valid lifetime among the corresponding
1202 * addresses on the device.
1203 * Note: subsequent RA will update lifetime.
1205 static enum cleanup_prefix_rt_t
1206 check_cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long *expires)
1208 struct inet6_ifaddr *ifa;
1209 struct inet6_dev *idev = ifp->idev;
1210 unsigned long lifetime;
1211 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_DEL;
1215 list_for_each_entry(ifa, &idev->addr_list, if_list) {
1218 if (ifa->prefix_len != ifp->prefix_len ||
1219 !ipv6_prefix_equal(&ifa->addr, &ifp->addr,
1222 if (ifa->flags & (IFA_F_PERMANENT | IFA_F_NOPREFIXROUTE))
1223 return CLEANUP_PREFIX_RT_NOP;
1225 action = CLEANUP_PREFIX_RT_EXPIRE;
1227 spin_lock(&ifa->lock);
1229 lifetime = addrconf_timeout_fixup(ifa->valid_lft, HZ);
1231 * Note: Because this address is
1232 * not permanent, lifetime <
1233 * LONG_MAX / HZ here.
1235 if (time_before(*expires, ifa->tstamp + lifetime * HZ))
1236 *expires = ifa->tstamp + lifetime * HZ;
1237 spin_unlock(&ifa->lock);
1244 cleanup_prefix_route(struct inet6_ifaddr *ifp, unsigned long expires,
1245 bool del_rt, bool del_peer)
1247 struct fib6_info *f6i;
1249 f6i = addrconf_get_prefix_route(del_peer ? &ifp->peer_addr : &ifp->addr,
1251 ifp->idev->dev, 0, RTF_DEFAULT, true);
1254 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
1256 if (!(f6i->fib6_flags & RTF_EXPIRES))
1257 fib6_set_expires(f6i, expires);
1258 fib6_info_release(f6i);
1264 /* This function wants to get referenced ifp and releases it before return */
1266 static void ipv6_del_addr(struct inet6_ifaddr *ifp)
1268 enum cleanup_prefix_rt_t action = CLEANUP_PREFIX_RT_NOP;
1269 struct net *net = dev_net(ifp->idev->dev);
1270 unsigned long expires;
1275 spin_lock_bh(&ifp->lock);
1277 ifp->state = INET6_IFADDR_STATE_DEAD;
1278 spin_unlock_bh(&ifp->lock);
1280 if (state == INET6_IFADDR_STATE_DEAD)
1283 spin_lock_bh(&net->ipv6.addrconf_hash_lock);
1284 hlist_del_init_rcu(&ifp->addr_lst);
1285 spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
1287 write_lock_bh(&ifp->idev->lock);
1289 if (ifp->flags&IFA_F_TEMPORARY) {
1290 list_del(&ifp->tmp_list);
1292 in6_ifa_put(ifp->ifpub);
1298 if (ifp->flags & IFA_F_PERMANENT && !(ifp->flags & IFA_F_NOPREFIXROUTE))
1299 action = check_cleanup_prefix_route(ifp, &expires);
1301 list_del_rcu(&ifp->if_list);
1304 write_unlock_bh(&ifp->idev->lock);
1306 addrconf_del_dad_work(ifp);
1308 ipv6_ifa_notify(RTM_DELADDR, ifp);
1310 inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);
1312 if (action != CLEANUP_PREFIX_RT_NOP) {
1313 cleanup_prefix_route(ifp, expires,
1314 action == CLEANUP_PREFIX_RT_DEL, false);
1317 /* clean up prefsrc entries */
1318 rt6_remove_prefsrc(ifp);
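/*
 * ipv6_create_tempaddr() derives an RFC 4941 temporary (privacy)
 * address from public address @ifp: it keeps the 64-bit prefix,
 * generates a random interface identifier, clamps the valid/preferred
 * lifetimes to temp_valid_lft and temp_prefered_lft minus the
 * per-device desync factor, gives up when the preferred lifetime would
 * not exceed the regeneration advance, and otherwise installs the
 * address and starts DAD on it.
 */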
1323 static int ipv6_create_tempaddr(struct inet6_ifaddr *ifp, bool block)
1325 struct inet6_dev *idev = ifp->idev;
1326 unsigned long tmp_tstamp, age;
1327 unsigned long regen_advance;
1328 unsigned long now = jiffies;
1329 s32 cnf_temp_preferred_lft;
1330 struct inet6_ifaddr *ift;
1331 struct ifa6_config cfg;
1332 long max_desync_factor;
1333 struct in6_addr addr;
1336 write_lock_bh(&idev->lock);
1340 if (idev->cnf.use_tempaddr <= 0) {
1341 write_unlock_bh(&idev->lock);
1342 pr_info("%s: use_tempaddr is disabled\n", __func__);
1347 spin_lock_bh(&ifp->lock);
1348 if (ifp->regen_count++ >= idev->cnf.regen_max_retry) {
1349 idev->cnf.use_tempaddr = -1; /*XXX*/
1350 spin_unlock_bh(&ifp->lock);
1351 write_unlock_bh(&idev->lock);
1352 pr_warn("%s: regeneration time exceeded - disabled temporary address support\n",
1359 memcpy(addr.s6_addr, ifp->addr.s6_addr, 8);
1360 ipv6_gen_rnd_iid(&addr);
1362 age = (now - ifp->tstamp) / HZ;
1364 regen_advance = idev->cnf.regen_max_retry *
1365 idev->cnf.dad_transmits *
1366 max(NEIGH_VAR(idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
1368 /* recalculate max_desync_factor each time and update
1369 * idev->desync_factor if it's larger
1371 cnf_temp_preferred_lft = READ_ONCE(idev->cnf.temp_prefered_lft);
1372 max_desync_factor = min_t(__u32,
1373 idev->cnf.max_desync_factor,
1374 cnf_temp_preferred_lft - regen_advance);
1376 if (unlikely(idev->desync_factor > max_desync_factor)) {
1377 if (max_desync_factor > 0) {
1378 get_random_bytes(&idev->desync_factor,
1379 sizeof(idev->desync_factor));
1380 idev->desync_factor %= max_desync_factor;
1382 idev->desync_factor = 0;
1386 memset(&cfg, 0, sizeof(cfg));
1387 cfg.valid_lft = min_t(__u32, ifp->valid_lft,
1388 idev->cnf.temp_valid_lft + age);
1389 cfg.preferred_lft = cnf_temp_preferred_lft + age - idev->desync_factor;
1390 cfg.preferred_lft = min_t(__u32, ifp->prefered_lft, cfg.preferred_lft);
1392 cfg.plen = ifp->prefix_len;
1393 tmp_tstamp = ifp->tstamp;
1394 spin_unlock_bh(&ifp->lock);
1396 write_unlock_bh(&idev->lock);
1398 /* A temporary address is created only if this calculated Preferred
1399 * Lifetime is greater than REGEN_ADVANCE time units. In particular,
1400 * an implementation must not create a temporary address with a zero
1401 * Preferred Lifetime.
1402 * Use age calculation as in addrconf_verify to avoid unnecessary
1403 * temporary addresses being generated.
1405 age = (now - tmp_tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
1406 if (cfg.preferred_lft <= regen_advance + age) {
1413 cfg.ifa_flags = IFA_F_TEMPORARY;
1414 /* set in addrconf_prefix_rcv() */
1415 if (ifp->flags & IFA_F_OPTIMISTIC)
1416 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
1419 cfg.scope = ipv6_addr_scope(cfg.pfx);
1421 ift = ipv6_add_addr(idev, &cfg, block, NULL);
1425 pr_info("%s: retry temporary address regeneration\n", __func__);
1426 write_lock_bh(&idev->lock);
1430 spin_lock_bh(&ift->lock);
1433 ift->tstamp = tmp_tstamp;
1434 spin_unlock_bh(&ift->lock);
1436 addrconf_dad_start(ift);
1444 * Choose an appropriate source address (RFC3484)
1447 IPV6_SADDR_RULE_INIT = 0,
1448 IPV6_SADDR_RULE_LOCAL,
1449 IPV6_SADDR_RULE_SCOPE,
1450 IPV6_SADDR_RULE_PREFERRED,
1451 #ifdef CONFIG_IPV6_MIP6
1452 IPV6_SADDR_RULE_HOA,
1454 IPV6_SADDR_RULE_OIF,
1455 IPV6_SADDR_RULE_LABEL,
1456 IPV6_SADDR_RULE_PRIVACY,
1457 IPV6_SADDR_RULE_ORCHID,
1458 IPV6_SADDR_RULE_PREFIX,
1459 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1460 IPV6_SADDR_RULE_NOT_OPTIMISTIC,
1465 struct ipv6_saddr_score {
1468 struct inet6_ifaddr *ifa;
1469 DECLARE_BITMAP(scorebits, IPV6_SADDR_RULE_MAX);
1474 struct ipv6_saddr_dst {
1475 const struct in6_addr *addr;
1482 static inline int ipv6_saddr_preferred(int type)
1484 if (type & (IPV6_ADDR_MAPPED|IPV6_ADDR_COMPATv4|IPV6_ADDR_LOOPBACK))
1489 static bool ipv6_use_optimistic_addr(struct net *net,
1490 struct inet6_dev *idev)
1492 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1495 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1497 if (!net->ipv6.devconf_all->use_optimistic && !idev->cnf.use_optimistic)
1506 static bool ipv6_allow_optimistic_dad(struct net *net,
1507 struct inet6_dev *idev)
1509 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1512 if (!net->ipv6.devconf_all->optimistic_dad && !idev->cnf.optimistic_dad)
1521 static int ipv6_get_saddr_eval(struct net *net,
1522 struct ipv6_saddr_score *score,
1523 struct ipv6_saddr_dst *dst,
1528 if (i <= score->rule) {
1530 case IPV6_SADDR_RULE_SCOPE:
1531 ret = score->scopedist;
1533 case IPV6_SADDR_RULE_PREFIX:
1534 ret = score->matchlen;
1537 ret = !!test_bit(i, score->scorebits);
1543 case IPV6_SADDR_RULE_INIT:
1544 /* Rule 0: remember if hiscore is not ready yet */
1547 case IPV6_SADDR_RULE_LOCAL:
1548 /* Rule 1: Prefer same address */
1549 ret = ipv6_addr_equal(&score->ifa->addr, dst->addr);
1551 case IPV6_SADDR_RULE_SCOPE:
1552 /* Rule 2: Prefer appropriate scope
1557 * ---+--+-+---> scope
1559 * | d is scope of the destination.
1561 * | \ <- smaller scope is better if
1562 * B-15 | \ if scope is enough for destination.
1563 * | ret = B - scope (-1 <= scope >= d <= 15).
1565 * |/ <- greater is better
1566 * -C / if scope is not enough for destination.
1567 * /| ret = scope - C (-1 <= d < scope <= 15).
1569 * d - C - 1 < B -15 (for all -1 <= d <= 15).
1570 * C > d + 14 - B >= 15 + 14 - B = 29 - B.
1571 * Assume B = 0 and we get C > 29.
1573 ret = __ipv6_addr_src_scope(score->addr_type);
1574 if (ret >= dst->scope)
1577 ret -= 128; /* 30 is enough */
1578 score->scopedist = ret;
1580 case IPV6_SADDR_RULE_PREFERRED:
1582 /* Rule 3: Avoid deprecated and optimistic addresses */
1583 u8 avoid = IFA_F_DEPRECATED;
1585 if (!ipv6_use_optimistic_addr(net, score->ifa->idev))
1586 avoid |= IFA_F_OPTIMISTIC;
1587 ret = ipv6_saddr_preferred(score->addr_type) ||
1588 !(score->ifa->flags & avoid);
1591 #ifdef CONFIG_IPV6_MIP6
1592 case IPV6_SADDR_RULE_HOA:
1594 /* Rule 4: Prefer home address */
1595 int prefhome = !(dst->prefs & IPV6_PREFER_SRC_COA);
1596 ret = !(score->ifa->flags & IFA_F_HOMEADDRESS) ^ prefhome;
1600 case IPV6_SADDR_RULE_OIF:
1601 /* Rule 5: Prefer outgoing interface */
1602 ret = (!dst->ifindex ||
1603 dst->ifindex == score->ifa->idev->dev->ifindex);
1605 case IPV6_SADDR_RULE_LABEL:
1606 /* Rule 6: Prefer matching label */
1607 ret = ipv6_addr_label(net,
1608 &score->ifa->addr, score->addr_type,
1609 score->ifa->idev->dev->ifindex) == dst->label;
1611 case IPV6_SADDR_RULE_PRIVACY:
1613 /* Rule 7: Prefer public address
1614 * Note: prefer temporary address if use_tempaddr >= 2
1616 int preftmp = dst->prefs & (IPV6_PREFER_SRC_PUBLIC|IPV6_PREFER_SRC_TMP) ?
1617 !!(dst->prefs & IPV6_PREFER_SRC_TMP) :
1618 score->ifa->idev->cnf.use_tempaddr >= 2;
1619 ret = (!(score->ifa->flags & IFA_F_TEMPORARY)) ^ preftmp;
1622 case IPV6_SADDR_RULE_ORCHID:
1623 /* Rule 8-: Prefer ORCHID vs ORCHID or
1624 * non-ORCHID vs non-ORCHID
1626 ret = !(ipv6_addr_orchid(&score->ifa->addr) ^
1627 ipv6_addr_orchid(dst->addr));
1629 case IPV6_SADDR_RULE_PREFIX:
1630 /* Rule 8: Use longest matching prefix */
1631 ret = ipv6_addr_diff(&score->ifa->addr, dst->addr);
1632 if (ret > score->ifa->prefix_len)
1633 ret = score->ifa->prefix_len;
1634 score->matchlen = ret;
1636 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
1637 case IPV6_SADDR_RULE_NOT_OPTIMISTIC:
1638 /* Optimistic addresses still have lower precedence than other
1639 * preferred addresses.
1641 ret = !(score->ifa->flags & IFA_F_OPTIMISTIC);
1649 __set_bit(i, score->scorebits);
1655 static int __ipv6_dev_get_saddr(struct net *net,
1656 struct ipv6_saddr_dst *dst,
1657 struct inet6_dev *idev,
1658 struct ipv6_saddr_score *scores,
1661 struct ipv6_saddr_score *score = &scores[1 - hiscore_idx], *hiscore = &scores[hiscore_idx];
1663 list_for_each_entry_rcu(score->ifa, &idev->addr_list, if_list) {
1667 * - Tentative Address (RFC2462 section 5.4)
1668 * - A tentative address is not considered
1669 * "assigned to an interface" in the traditional
1670 * sense, unless it is also flagged as optimistic.
1671 * - Candidate Source Address (section 4)
1672 * - In any case, anycast addresses, multicast
1673 * addresses, and the unspecified address MUST
1674 * NOT be included in a candidate set.
1676 if ((score->ifa->flags & IFA_F_TENTATIVE) &&
1677 (!(score->ifa->flags & IFA_F_OPTIMISTIC)))
1680 score->addr_type = __ipv6_addr_type(&score->ifa->addr);
1682 if (unlikely(score->addr_type == IPV6_ADDR_ANY ||
1683 score->addr_type & IPV6_ADDR_MULTICAST)) {
1684 net_dbg_ratelimited("ADDRCONF: unspecified / multicast address assigned as unicast address on %s",
1690 bitmap_zero(score->scorebits, IPV6_SADDR_RULE_MAX);
1692 for (i = 0; i < IPV6_SADDR_RULE_MAX; i++) {
1693 int minihiscore, miniscore;
1695 minihiscore = ipv6_get_saddr_eval(net, hiscore, dst, i);
1696 miniscore = ipv6_get_saddr_eval(net, score, dst, i);
1698 if (minihiscore > miniscore) {
1699 if (i == IPV6_SADDR_RULE_SCOPE &&
1700 score->scopedist > 0) {
1703 * each remaining entry
1704 * has too small (not enough)
1705 * scope, because ifa entries
1706 * are sorted by their scope
1712 } else if (minihiscore < miniscore) {
1713 swap(hiscore, score);
1714 hiscore_idx = 1 - hiscore_idx;
1716 /* restore our iterator */
1717 score->ifa = hiscore->ifa;
1727 static int ipv6_get_saddr_master(struct net *net,
1728 const struct net_device *dst_dev,
1729 const struct net_device *master,
1730 struct ipv6_saddr_dst *dst,
1731 struct ipv6_saddr_score *scores,
1734 struct inet6_dev *idev;
1736 idev = __in6_dev_get(dst_dev);
1738 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1739 scores, hiscore_idx);
1741 idev = __in6_dev_get(master);
1743 hiscore_idx = __ipv6_dev_get_saddr(net, dst, idev,
1744 scores, hiscore_idx);
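/*
 * ipv6_dev_get_saddr() performs RFC 6724 source address selection: it
 * builds a destination descriptor (scope, label, outgoing ifindex) and
 * scores candidate addresses with __ipv6_dev_get_saddr().  Only the
 * outgoing interface is considered for multicast or link-local
 * destinations, or when use_oif_addrs_only is set; otherwise all
 * interfaces in the same l3mdev (VRF) domain are scanned.  It returns
 * -EADDRNOTAVAIL when no candidate address exists.
 *
 * Illustrative call (sketch, not a specific call site):
 *
 *	struct in6_addr saddr;
 *	int err = ipv6_dev_get_saddr(net, dst_dev, &daddr, prefs, &saddr);
 */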
1749 int ipv6_dev_get_saddr(struct net *net, const struct net_device *dst_dev,
1750 const struct in6_addr *daddr, unsigned int prefs,
1751 struct in6_addr *saddr)
1753 struct ipv6_saddr_score scores[2], *hiscore;
1754 struct ipv6_saddr_dst dst;
1755 struct inet6_dev *idev;
1756 struct net_device *dev;
1758 bool use_oif_addr = false;
1759 int hiscore_idx = 0;
1762 dst_type = __ipv6_addr_type(daddr);
1764 dst.ifindex = dst_dev ? dst_dev->ifindex : 0;
1765 dst.scope = __ipv6_addr_src_scope(dst_type);
1766 dst.label = ipv6_addr_label(net, daddr, dst_type, dst.ifindex);
1769 scores[hiscore_idx].rule = -1;
1770 scores[hiscore_idx].ifa = NULL;
1774 /* Candidate Source Address (section 4)
1775 * - multicast and link-local destination address,
1776 * the set of candidate source address MUST only
1777 * include addresses assigned to interfaces
1778 * belonging to the same link as the outgoing
1780 * (- For site-local destination addresses, the
1781 * set of candidate source addresses MUST only
1782 * include addresses assigned to interfaces
1783 * belonging to the same site as the outgoing
1785 * - "It is RECOMMENDED that the candidate source addresses
1786 * be the set of unicast addresses assigned to the
1787 * interface that will be used to send to the destination
1788 * (the 'outgoing' interface)." (RFC 6724)
1791 idev = __in6_dev_get(dst_dev);
1792 if ((dst_type & IPV6_ADDR_MULTICAST) ||
1793 dst.scope <= IPV6_ADDR_SCOPE_LINKLOCAL ||
1794 (idev && idev->cnf.use_oif_addrs_only)) {
1795 use_oif_addr = true;
1801 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1803 const struct net_device *master;
1806 /* if dst_dev exists and is enslaved to an L3 device, then
1807 * prefer addresses from dst_dev and then the master over
1808 * any other enslaved devices in the L3 domain.
1810 master = l3mdev_master_dev_rcu(dst_dev);
1812 master_idx = master->ifindex;
1814 hiscore_idx = ipv6_get_saddr_master(net, dst_dev,
1816 scores, hiscore_idx);
1818 if (scores[hiscore_idx].ifa)
1822 for_each_netdev_rcu(net, dev) {
1823 /* only consider addresses on devices in the
1826 if (l3mdev_master_ifindex_rcu(dev) != master_idx)
1828 idev = __in6_dev_get(dev);
1831 hiscore_idx = __ipv6_dev_get_saddr(net, &dst, idev, scores, hiscore_idx);
1836 hiscore = &scores[hiscore_idx];
1838 ret = -EADDRNOTAVAIL;
1840 *saddr = hiscore->ifa->addr;
1845 EXPORT_SYMBOL(ipv6_dev_get_saddr);
1847 static int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr,
1850 struct inet6_ifaddr *ifp;
1851 int err = -EADDRNOTAVAIL;
1853 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
1854 if (ifp->scope > IFA_LINK)
1856 if (ifp->scope == IFA_LINK &&
1857 !(ifp->flags & banned_flags)) {
1866 int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
1869 struct inet6_dev *idev;
1870 int err = -EADDRNOTAVAIL;
1873 idev = __in6_dev_get(dev);
1875 read_lock_bh(&idev->lock);
1876 err = __ipv6_get_lladdr(idev, addr, banned_flags);
1877 read_unlock_bh(&idev->lock);
1883 static int ipv6_count_addresses(const struct inet6_dev *idev)
1885 const struct inet6_ifaddr *ifp;
1889 list_for_each_entry_rcu(ifp, &idev->addr_list, if_list)
1895 int ipv6_chk_addr(struct net *net, const struct in6_addr *addr,
1896 const struct net_device *dev, int strict)
1898 return ipv6_chk_addr_and_flags(net, addr, dev, !dev,
1899 strict, IFA_F_TENTATIVE);
1901 EXPORT_SYMBOL(ipv6_chk_addr);
1903 /* device argument is used to find the L3 domain of interest. If
1904 * skip_dev_check is set, then the ifp device is not checked against
1905 * the passed in dev argument. So the 2 cases for addresses checks are:
1906 * 1. does the address exist in the L3 domain that dev is part of
1907 * (skip_dev_check = true), or
1909 * 2. does the address exist on the specific device
1910 * (skip_dev_check = false)
1912 static struct net_device *
1913 __ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1914 const struct net_device *dev, bool skip_dev_check,
1915 int strict, u32 banned_flags)
1917 unsigned int hash = inet6_addr_hash(net, addr);
1918 struct net_device *l3mdev, *ndev;
1919 struct inet6_ifaddr *ifp;
1924 l3mdev = l3mdev_master_dev_rcu(dev);
1928 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
1929 ndev = ifp->idev->dev;
1931 if (l3mdev_master_dev_rcu(ndev) != l3mdev)
1934 /* Decouple optimistic from tentative for evaluation here.
1935 * Ban optimistic addresses explicitly, when required.
1937 ifp_flags = (ifp->flags&IFA_F_OPTIMISTIC)
1938 ? (ifp->flags&~IFA_F_TENTATIVE)
1940 if (ipv6_addr_equal(&ifp->addr, addr) &&
1941 !(ifp_flags&banned_flags) &&
1942 (!dev || ndev == dev ||
1943 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict))) {
1953 int ipv6_chk_addr_and_flags(struct net *net, const struct in6_addr *addr,
1954 const struct net_device *dev, bool skip_dev_check,
1955 int strict, u32 banned_flags)
1957 return __ipv6_chk_addr_and_flags(net, addr, dev, skip_dev_check,
1958 strict, banned_flags) ? 1 : 0;
1960 EXPORT_SYMBOL(ipv6_chk_addr_and_flags);
1963 /* Compares an address/prefix_len with addresses on device @dev.
1964 * If one is found it returns true.
1966 bool ipv6_chk_custom_prefix(const struct in6_addr *addr,
1967 const unsigned int prefix_len, struct net_device *dev)
1969 const struct inet6_ifaddr *ifa;
1970 const struct inet6_dev *idev;
1974 idev = __in6_dev_get(dev);
1976 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1977 ret = ipv6_prefix_equal(addr, &ifa->addr, prefix_len);
1986 EXPORT_SYMBOL(ipv6_chk_custom_prefix);
1988 int ipv6_chk_prefix(const struct in6_addr *addr, struct net_device *dev)
1990 const struct inet6_ifaddr *ifa;
1991 const struct inet6_dev *idev;
1996 idev = __in6_dev_get(dev);
1998 list_for_each_entry_rcu(ifa, &idev->addr_list, if_list) {
1999 onlink = ipv6_prefix_equal(addr, &ifa->addr,
2008 EXPORT_SYMBOL(ipv6_chk_prefix);
2011 * ipv6_dev_find - find the first device with a given source address.
2012 * @net: the net namespace
2013 * @addr: the source address
2014 * @dev: used to find the L3 domain of interest
2016 * The caller should be protected by RCU, or RTNL.
2018 struct net_device *ipv6_dev_find(struct net *net, const struct in6_addr *addr,
2019 struct net_device *dev)
2021 return __ipv6_chk_addr_and_flags(net, addr, dev, !dev, 1,
2024 EXPORT_SYMBOL(ipv6_dev_find);
2026 struct inet6_ifaddr *ipv6_get_ifaddr(struct net *net, const struct in6_addr *addr,
2027 struct net_device *dev, int strict)
2029 unsigned int hash = inet6_addr_hash(net, addr);
2030 struct inet6_ifaddr *ifp, *result = NULL;
2033 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
2034 if (ipv6_addr_equal(&ifp->addr, addr)) {
2035 if (!dev || ifp->idev->dev == dev ||
2036 !(ifp->scope&(IFA_LINK|IFA_HOST) || strict)) {
2048 /* Gets referenced address, destroys ifaddr */
2050 static void addrconf_dad_stop(struct inet6_ifaddr *ifp, int dad_failed)
2053 ifp->flags |= IFA_F_DADFAILED;
2055 if (ifp->flags&IFA_F_TEMPORARY) {
2056 struct inet6_ifaddr *ifpub;
2057 spin_lock_bh(&ifp->lock);
2060 in6_ifa_hold(ifpub);
2061 spin_unlock_bh(&ifp->lock);
2062 ipv6_create_tempaddr(ifpub, true);
2065 spin_unlock_bh(&ifp->lock);
2068 } else if (ifp->flags&IFA_F_PERMANENT || !dad_failed) {
2069 spin_lock_bh(&ifp->lock);
2070 addrconf_del_dad_work(ifp);
2071 ifp->flags |= IFA_F_TENTATIVE;
2073 ifp->flags &= ~IFA_F_OPTIMISTIC;
2074 spin_unlock_bh(&ifp->lock);
2076 ipv6_ifa_notify(0, ifp);
2083 static int addrconf_dad_end(struct inet6_ifaddr *ifp)
2087 spin_lock_bh(&ifp->lock);
2088 if (ifp->state == INET6_IFADDR_STATE_DAD) {
2089 ifp->state = INET6_IFADDR_STATE_POSTDAD;
2092 spin_unlock_bh(&ifp->lock);
2097 void addrconf_dad_failure(struct sk_buff *skb, struct inet6_ifaddr *ifp)
2099 struct inet6_dev *idev = ifp->idev;
2100 struct net *net = dev_net(idev->dev);
2102 if (addrconf_dad_end(ifp)) {
2107 net_info_ratelimited("%s: IPv6 duplicate address %pI6c used by %pM detected!\n",
2108 ifp->idev->dev->name, &ifp->addr, eth_hdr(skb)->h_source);
2110 spin_lock_bh(&ifp->lock);
2112 if (ifp->flags & IFA_F_STABLE_PRIVACY) {
2113 struct in6_addr new_addr;
2114 struct inet6_ifaddr *ifp2;
2115 int retries = ifp->stable_privacy_retry + 1;
2116 struct ifa6_config cfg = {
2118 .plen = ifp->prefix_len,
2119 .ifa_flags = ifp->flags,
2120 .valid_lft = ifp->valid_lft,
2121 .preferred_lft = ifp->prefered_lft,
2122 .scope = ifp->scope,
2125 if (retries > net->ipv6.sysctl.idgen_retries) {
2126 net_info_ratelimited("%s: privacy stable address generation failed because of DAD conflicts!\n",
2127 ifp->idev->dev->name);
2131 new_addr = ifp->addr;
2132 if (ipv6_generate_stable_address(&new_addr, retries,
2136 spin_unlock_bh(&ifp->lock);
2138 if (idev->cnf.max_addresses &&
2139 ipv6_count_addresses(idev) >=
2140 idev->cnf.max_addresses)
2143 net_info_ratelimited("%s: generating new stable privacy address because of DAD conflict\n",
2144 ifp->idev->dev->name);
2146 ifp2 = ipv6_add_addr(idev, &cfg, false, NULL);
2150 spin_lock_bh(&ifp2->lock);
2151 ifp2->stable_privacy_retry = retries;
2152 ifp2->state = INET6_IFADDR_STATE_PREDAD;
2153 spin_unlock_bh(&ifp2->lock);
2155 addrconf_mod_dad_work(ifp2, net->ipv6.sysctl.idgen_delay);
2158 spin_lock_bh(&ifp->lock);
2162 /* transition from _POSTDAD to _ERRDAD */
2163 ifp->state = INET6_IFADDR_STATE_ERRDAD;
2164 spin_unlock_bh(&ifp->lock);
2166 addrconf_mod_dad_work(ifp, 0);
2170 /* Join to solicited addr multicast group.
2171 * caller must hold RTNL */
2172 void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr)
2174 struct in6_addr maddr;
2176 if (dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2179 addrconf_addr_solict_mult(addr, &maddr);
2180 ipv6_dev_mc_inc(dev, &maddr);
2183 /* caller must hold RTNL */
2184 void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr)
2186 struct in6_addr maddr;
2188 if (idev->dev->flags&(IFF_LOOPBACK|IFF_NOARP))
2191 addrconf_addr_solict_mult(addr, &maddr);
2192 __ipv6_dev_mc_dec(idev, &maddr);
2195 /* caller must hold RTNL */
2196 static void addrconf_join_anycast(struct inet6_ifaddr *ifp)
2198 struct in6_addr addr;
2200 if (ifp->prefix_len >= 127) /* RFC 6164 */
2202 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2203 if (ipv6_addr_any(&addr))
2205 __ipv6_dev_ac_inc(ifp->idev, &addr);
2208 /* caller must hold RTNL */
2209 static void addrconf_leave_anycast(struct inet6_ifaddr *ifp)
2211 struct in6_addr addr;
2213 if (ifp->prefix_len >= 127) /* RFC 6164 */
2215 ipv6_addr_prefix(&addr, &ifp->addr, ifp->prefix_len);
2216 if (ipv6_addr_any(&addr))
2218 __ipv6_dev_ac_dec(ifp->idev, &addr);
2221 static int addrconf_ifid_6lowpan(u8 *eui, struct net_device *dev)
2223 switch (dev->addr_len) {
2225 memcpy(eui, dev->dev_addr, 3);
2228 memcpy(eui + 5, dev->dev_addr + 3, 3);
2230 case EUI64_ADDR_LEN:
2231 memcpy(eui, dev->dev_addr, EUI64_ADDR_LEN);
2241 static int addrconf_ifid_ieee1394(u8 *eui, struct net_device *dev)
2243 const union fwnet_hwaddr *ha;
2245 if (dev->addr_len != FWNET_ALEN)
2248 ha = (const union fwnet_hwaddr *)dev->dev_addr;
2250 memcpy(eui, &ha->uc.uniq_id, sizeof(ha->uc.uniq_id));
2255 static int addrconf_ifid_arcnet(u8 *eui, struct net_device *dev)
2257 /* XXX: inherit EUI-64 from other interface -- yoshfuji */
2258 if (dev->addr_len != ARCNET_ALEN)
2261 eui[7] = *(u8 *)dev->dev_addr;
2265 static int addrconf_ifid_infiniband(u8 *eui, struct net_device *dev)
2267 if (dev->addr_len != INFINIBAND_ALEN)
2269 memcpy(eui, dev->dev_addr + 12, 8);
2274 static int __ipv6_isatap_ifid(u8 *eui, __be32 addr)
2278 eui[0] = (ipv4_is_zeronet(addr) || ipv4_is_private_10(addr) ||
2279 ipv4_is_loopback(addr) || ipv4_is_linklocal_169(addr) ||
2280 ipv4_is_private_172(addr) || ipv4_is_test_192(addr) ||
2281 ipv4_is_anycast_6to4(addr) || ipv4_is_private_192(addr) ||
2282 ipv4_is_test_198(addr) || ipv4_is_multicast(addr) ||
2283 ipv4_is_lbcast(addr)) ? 0x00 : 0x02;
2287 memcpy(eui + 4, &addr, 4);
2291 static int addrconf_ifid_sit(u8 *eui, struct net_device *dev)
2293 if (dev->priv_flags & IFF_ISATAP)
2294 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2298 static int addrconf_ifid_gre(u8 *eui, struct net_device *dev)
2300 return __ipv6_isatap_ifid(eui, *(__be32 *)dev->dev_addr);
2303 static int addrconf_ifid_ip6tnl(u8 *eui, struct net_device *dev)
2305 memcpy(eui, dev->perm_addr, 3);
2306 memcpy(eui + 5, dev->perm_addr + 3, 3);
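/*
 * ipv6_generate_eui64() dispatches on dev->type to the link-specific
 * interface-identifier helpers above (Ethernet, ARCnet, InfiniBand,
 * SIT/ISATAP, GRE, 6LoWPAN, IEEE 1394, IPv6 tunnels), writing the
 * EUI-64 into @eui on success and returning a negative value for
 * unsupported link types.
 */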
2313 static int ipv6_generate_eui64(u8 *eui, struct net_device *dev)
2315 switch (dev->type) {
2318 return addrconf_ifid_eui48(eui, dev);
2320 return addrconf_ifid_arcnet(eui, dev);
2321 case ARPHRD_INFINIBAND:
2322 return addrconf_ifid_infiniband(eui, dev);
2324 return addrconf_ifid_sit(eui, dev);
2327 return addrconf_ifid_gre(eui, dev);
2328 case ARPHRD_6LOWPAN:
2329 return addrconf_ifid_6lowpan(eui, dev);
2330 case ARPHRD_IEEE1394:
2331 return addrconf_ifid_ieee1394(eui, dev);
2332 case ARPHRD_TUNNEL6:
2335 return addrconf_ifid_ip6tnl(eui, dev);
2340 static int ipv6_inherit_eui64(u8 *eui, struct inet6_dev *idev)
2343 struct inet6_ifaddr *ifp;
2345 read_lock_bh(&idev->lock);
2346 list_for_each_entry_reverse(ifp, &idev->addr_list, if_list) {
2347 if (ifp->scope > IFA_LINK)
2349 if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
2350 memcpy(eui, ifp->addr.s6_addr+8, 8);
2355 read_unlock_bh(&idev->lock);
2359 /* Generation of a randomized Interface Identifier
2360 * draft-ietf-6man-rfc4941bis, Section 3.3.1
2363 static void ipv6_gen_rnd_iid(struct in6_addr *addr)
2366 get_random_bytes(&addr->s6_addr[8], 8);
2368 /* <draft-ietf-6man-rfc4941bis-08.txt>, Section 3.3.1:
2369 * check if generated address is not inappropriate:
2371 * - Reserved IPv6 Interface Identifiers
2372 * - XXX: already assigned to an address on the device
2375 /* Subnet-router anycast: 0000:0000:0000:0000 */
2376 if (!(addr->s6_addr32[2] | addr->s6_addr32[3]))
2379 /* IANA Ethernet block: 0200:5EFF:FE00:0000-0200:5EFF:FE00:5212
2380 * Proxy Mobile IPv6: 0200:5EFF:FE00:5213
2381 * IANA Ethernet block: 0200:5EFF:FE00:5214-0200:5EFF:FEFF:FFFF
2383 if (ntohl(addr->s6_addr32[2]) == 0x02005eff &&
2384 (ntohl(addr->s6_addr32[3]) & 0xff000000) == 0xfe000000)
2387 /* Reserved subnet anycast addresses */
2388 if (ntohl(addr->s6_addr32[2]) == 0xfdffffff &&
2389 ntohl(addr->s6_addr32[3]) >= 0xffffff80)
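/*
 * addrconf_prefix_route() installs a kernel-originated (RTPROT_KERNEL)
 * on-link route for the prefix on @dev in the l3mdev FIB table (or
 * RT6_TABLE_PREFIX), with the requested metric, expiry and flags.
 */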
2398 addrconf_prefix_route(struct in6_addr *pfx, int plen, u32 metric,
2399 struct net_device *dev, unsigned long expires,
2400 u32 flags, gfp_t gfp_flags)
2402 struct fib6_config cfg = {
2403 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX,
2404 .fc_metric = metric ? : IP6_RT_PRIO_ADDRCONF,
2405 .fc_ifindex = dev->ifindex,
2406 .fc_expires = expires,
2408 .fc_flags = RTF_UP | flags,
2409 .fc_nlinfo.nl_net = dev_net(dev),
2410 .fc_protocol = RTPROT_KERNEL,
2411 .fc_type = RTN_UNICAST,
2416 /* Prevent useless cloning on PtP SIT.
2417 This is done here on the expectation that the whole
2418 class of non-broadcast devices does not need cloning.
2420 #if IS_ENABLED(CONFIG_IPV6_SIT)
2421 if (dev->type == ARPHRD_SIT && (dev->flags & IFF_POINTOPOINT))
2422 cfg.fc_flags |= RTF_NONEXTHOP;
2425 ip6_route_add(&cfg, gfp_flags, NULL);
2429 static struct fib6_info *addrconf_get_prefix_route(const struct in6_addr *pfx,
2431 const struct net_device *dev,
2432 u32 flags, u32 noflags,
2435 struct fib6_node *fn;
2436 struct fib6_info *rt = NULL;
2437 struct fib6_table *table;
2438 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_PREFIX;
2440 table = fib6_get_table(dev_net(dev), tb_id);
2445 fn = fib6_locate(&table->tb6_root, pfx, plen, NULL, 0, true);
2449 for_each_fib6_node_rt_rcu(fn) {
2450 /* prefix routes only use builtin fib6_nh */
2454 if (rt->fib6_nh->fib_nh_dev->ifindex != dev->ifindex)
2456 if (no_gw && rt->fib6_nh->fib_nh_gw_family)
2458 if ((rt->fib6_flags & flags) != flags)
2460 if ((rt->fib6_flags & noflags) != 0)
2462 if (!fib6_info_hold_safe(rt))
2472 /* Create "default" multicast route to the interface */
2474 static void addrconf_add_mroute(struct net_device *dev)
2476 struct fib6_config cfg = {
2477 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_LOCAL,
2478 .fc_metric = IP6_RT_PRIO_ADDRCONF,
2479 .fc_ifindex = dev->ifindex,
2482 .fc_type = RTN_MULTICAST,
2483 .fc_nlinfo.nl_net = dev_net(dev),
2484 .fc_protocol = RTPROT_KERNEL,
2487 ipv6_addr_set(&cfg.fc_dst, htonl(0xFF000000), 0, 0, 0);
2489 ip6_route_add(&cfg, GFP_KERNEL, NULL);
2492 static struct inet6_dev *addrconf_add_dev(struct net_device *dev)
2494 struct inet6_dev *idev;
2498 idev = ipv6_find_idev(dev);
2502 if (idev->cnf.disable_ipv6)
2503 return ERR_PTR(-EACCES);
2505 /* Add default multicast route */
2506 if (!(dev->flags & IFF_LOOPBACK) && !netif_is_l3_master(dev))
2507 addrconf_add_mroute(dev);
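/*
 * manage_tempaddrs() runs when the lifetimes of public address @ifp
 * change: it clamps the lifetimes of the temporary addresses derived
 * from it as required by RFC 4941 section 3.3, and creates a fresh
 * temporary address when @create is set or none currently exists.
 */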
2512 static void manage_tempaddrs(struct inet6_dev *idev,
2513 struct inet6_ifaddr *ifp,
2514 __u32 valid_lft, __u32 prefered_lft,
2515 bool create, unsigned long now)
2518 struct inet6_ifaddr *ift;
2520 read_lock_bh(&idev->lock);
2521 /* update all temporary addresses in the list */
2522 list_for_each_entry(ift, &idev->tempaddr_list, tmp_list) {
2523 int age, max_valid, max_prefered;
2525 if (ifp != ift->ifpub)
2528 /* RFC 4941 section 3.3:
2529 * If a received option will extend the lifetime of a public
2530 * address, the lifetimes of temporary addresses should
2531 * be extended, subject to the overall constraint that no
2532 * temporary addresses should ever remain "valid" or "preferred"
2533 * for a time longer than (TEMP_VALID_LIFETIME) or
2534 * (TEMP_PREFERRED_LIFETIME - DESYNC_FACTOR), respectively.
2536 age = (now - ift->cstamp) / HZ;
2537 max_valid = idev->cnf.temp_valid_lft - age;
2541 max_prefered = idev->cnf.temp_prefered_lft -
2542 idev->desync_factor - age;
2543 if (max_prefered < 0)
2546 if (valid_lft > max_valid)
2547 valid_lft = max_valid;
2549 if (prefered_lft > max_prefered)
2550 prefered_lft = max_prefered;
2552 spin_lock(&ift->lock);
2554 ift->valid_lft = valid_lft;
2555 ift->prefered_lft = prefered_lft;
2557 if (prefered_lft > 0)
2558 ift->flags &= ~IFA_F_DEPRECATED;
2560 spin_unlock(&ift->lock);
2561 if (!(flags&IFA_F_TENTATIVE))
2562 ipv6_ifa_notify(0, ift);
2565 if ((create || list_empty(&idev->tempaddr_list)) &&
2566 idev->cnf.use_tempaddr > 0) {
2567 /* When a new public address is created as described
2568 * in [ADDRCONF], also create a new temporary address.
2569 * Also create a temporary address if temporary addresses are
2570 * enabled but none currently exists.
2572 read_unlock_bh(&idev->lock);
2573 ipv6_create_tempaddr(ifp, false);
2575 read_unlock_bh(&idev->lock);
2579 static bool is_addr_mode_generate_stable(struct inet6_dev *idev)
2581 return idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY ||
2582 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_RANDOM;
2585 int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
2586 const struct prefix_info *pinfo,
2587 struct inet6_dev *in6_dev,
2588 const struct in6_addr *addr, int addr_type,
2589 u32 addr_flags, bool sllao, bool tokenized,
2590 __u32 valid_lft, u32 prefered_lft)
2592 struct inet6_ifaddr *ifp = ipv6_get_ifaddr(net, addr, dev, 1);
2593 int create = 0, update_lft = 0;
2595 if (!ifp && valid_lft) {
2596 int max_addresses = in6_dev->cnf.max_addresses;
2597 struct ifa6_config cfg = {
2599 .plen = pinfo->prefix_len,
2600 .ifa_flags = addr_flags,
2601 .valid_lft = valid_lft,
2602 .preferred_lft = prefered_lft,
2603 .scope = addr_type & IPV6_ADDR_SCOPE_MASK,
2604 .ifa_proto = IFAPROT_KERNEL_RA
2607 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
2608 if ((net->ipv6.devconf_all->optimistic_dad ||
2609 in6_dev->cnf.optimistic_dad) &&
2610 !net->ipv6.devconf_all->forwarding && sllao)
2611 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
2614 /* Do not allow creation of too many autoconfigured
2615 * addresses; this would be too easy a way to crash the kernel.
2617 if (!max_addresses ||
2618 ipv6_count_addresses(in6_dev) < max_addresses)
2619 ifp = ipv6_add_addr(in6_dev, &cfg, false, NULL);
2621 if (IS_ERR_OR_NULL(ifp))
2625 spin_lock_bh(&ifp->lock);
2626 ifp->flags |= IFA_F_MANAGETEMPADDR;
2627 ifp->cstamp = jiffies;
2628 ifp->tokenized = tokenized;
2629 spin_unlock_bh(&ifp->lock);
2630 addrconf_dad_start(ifp);
2638 /* update lifetime (RFC2462 5.5.3 e) */
2639 spin_lock_bh(&ifp->lock);
2641 if (ifp->valid_lft > (now - ifp->tstamp) / HZ)
2642 stored_lft = ifp->valid_lft - (now - ifp->tstamp) / HZ;
2645 if (!create && stored_lft) {
2646 const u32 minimum_lft = min_t(u32,
2647 stored_lft, MIN_VALID_LIFETIME);
2648 valid_lft = max(valid_lft, minimum_lft);
2650 /* RFC4862 Section 5.5.3e:
2651 * "Note that the preferred lifetime of the
2652 * corresponding address is always reset to
2653 * the Preferred Lifetime in the received
2654 * Prefix Information option, regardless of
2655 * whether the valid lifetime is also reset or
2656 * ignored."
2658 * So we should always update prefered_lft here.
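* Illustrative numbers for the clamp above, assuming MIN_VALID_LIFETIME
* of two hours (7200 s): with 5000 s of stored lifetime remaining and an
* RA advertising valid_lft = 60 s, minimum_lft = min(5000, 7200) = 5000
* and valid_lft = max(60, 5000) = 5000 s, so an unauthenticated RA cannot
* shorten the valid lifetime below the RFC 4862 limit.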
2664 ifp->valid_lft = valid_lft;
2665 ifp->prefered_lft = prefered_lft;
2668 ifp->flags &= ~IFA_F_DEPRECATED;
2669 spin_unlock_bh(&ifp->lock);
2671 if (!(flags&IFA_F_TENTATIVE))
2672 ipv6_ifa_notify(0, ifp);
2674 spin_unlock_bh(&ifp->lock);
2676 manage_tempaddrs(in6_dev, ifp, valid_lft, prefered_lft,
2680 addrconf_verify(net);
2685 EXPORT_SYMBOL_GPL(addrconf_prefix_rcv_add_addr);
2687 void addrconf_prefix_rcv(struct net_device *dev, u8 *opt, int len, bool sllao)
2689 struct prefix_info *pinfo;
2694 struct inet6_dev *in6_dev;
2695 struct net *net = dev_net(dev);
2697 pinfo = (struct prefix_info *) opt;
2699 if (len < sizeof(struct prefix_info)) {
2700 netdev_dbg(dev, "addrconf: prefix option too short\n");
2705 * Validation checks ([ADDRCONF], page 19)
2708 addr_type = ipv6_addr_type(&pinfo->prefix);
2710 if (addr_type & (IPV6_ADDR_MULTICAST|IPV6_ADDR_LINKLOCAL))
2713 valid_lft = ntohl(pinfo->valid);
2714 prefered_lft = ntohl(pinfo->prefered);
2716 if (prefered_lft > valid_lft) {
2717 net_warn_ratelimited("addrconf: prefix option has invalid lifetime\n");
2721 in6_dev = in6_dev_get(dev);
2724 net_dbg_ratelimited("addrconf: device %s not configured\n",
2730 * Two things going on here:
2731 * 1) Add routes for on-link prefixes
2732 * 2) Configure prefixes with the auto flag set
2735 if (pinfo->onlink) {
2736 struct fib6_info *rt;
2737 unsigned long rt_expires;
2739 /* Avoid arithmetic overflow. Really, we could
2740 * save rt_expires in seconds, likely valid_lft,
2741 * but that would require division in fib gc, which is not good.
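* Illustrative arithmetic: with HZ = 1000 on a 32-bit host, a valid_lft
* of 5,000,000 s converted directly to jiffies would need
* 5e6 * 1000 > 2^32, which is why the lifetime is run through
* addrconf_timeout_fixup() below instead of being multiplied directly.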
2745 rt_expires = addrconf_timeout_fixup(valid_lft, HZ);
2747 rt_expires = addrconf_timeout_fixup(valid_lft, USER_HZ);
2749 if (addrconf_finite_timeout(rt_expires))
2752 rt = addrconf_get_prefix_route(&pinfo->prefix,
2755 RTF_ADDRCONF | RTF_PREFIX_RT,
2759 /* Autoconf prefix route */
2760 if (valid_lft == 0) {
2761 ip6_del_rt(net, rt, false);
2763 } else if (addrconf_finite_timeout(rt_expires)) {
2765 fib6_set_expires(rt, jiffies + rt_expires);
2767 fib6_clean_expires(rt);
2769 } else if (valid_lft) {
2770 clock_t expires = 0;
2771 int flags = RTF_ADDRCONF | RTF_PREFIX_RT;
2772 if (addrconf_finite_timeout(rt_expires)) {
2774 flags |= RTF_EXPIRES;
2775 expires = jiffies_to_clock_t(rt_expires);
2777 addrconf_prefix_route(&pinfo->prefix, pinfo->prefix_len,
2778 0, dev, expires, flags,
2781 fib6_info_release(rt);
2784 /* Try to figure out our local address for this prefix */
2786 if (pinfo->autoconf && in6_dev->cnf.autoconf) {
2787 struct in6_addr addr;
2788 bool tokenized = false, dev_addr_generated = false;
2790 if (pinfo->prefix_len == 64) {
2791 memcpy(&addr, &pinfo->prefix, 8);
2793 if (!ipv6_addr_any(&in6_dev->token)) {
2794 read_lock_bh(&in6_dev->lock);
2795 memcpy(addr.s6_addr + 8,
2796 in6_dev->token.s6_addr + 8, 8);
2797 read_unlock_bh(&in6_dev->lock);
2799 } else if (is_addr_mode_generate_stable(in6_dev) &&
2800 !ipv6_generate_stable_address(&addr, 0,
2802 addr_flags |= IFA_F_STABLE_PRIVACY;
2804 } else if (ipv6_generate_eui64(addr.s6_addr + 8, dev) &&
2805 ipv6_inherit_eui64(addr.s6_addr + 8, in6_dev)) {
2808 dev_addr_generated = true;
2812 net_dbg_ratelimited("IPv6 addrconf: prefix with wrong length %d\n",
2817 err = addrconf_prefix_rcv_add_addr(net, dev, pinfo, in6_dev,
2820 tokenized, valid_lft,
2825 /* Ignore the error case here because the previous prefix add addr
2826 * succeeded; a notification will be sent for it.
2828 ndisc_ops_prefix_rcv_add_addr(net, dev, pinfo, in6_dev, &addr,
2829 addr_type, addr_flags, sllao,
2830 tokenized, valid_lft,
2832 dev_addr_generated);
2834 inet6_prefix_notify(RTM_NEWPREFIX, in6_dev, pinfo);
2836 in6_dev_put(in6_dev);
2839 static int addrconf_set_sit_dstaddr(struct net *net, struct net_device *dev,
2840 struct in6_ifreq *ireq)
2842 struct ip_tunnel_parm p = { };
2845 if (!(ipv6_addr_type(&ireq->ifr6_addr) & IPV6_ADDR_COMPATv4))
2846 return -EADDRNOTAVAIL;
2848 p.iph.daddr = ireq->ifr6_addr.s6_addr32[3];
2851 p.iph.protocol = IPPROTO_IPV6;
2854 if (!dev->netdev_ops->ndo_tunnel_ctl)
2856 err = dev->netdev_ops->ndo_tunnel_ctl(dev, &p, SIOCADDTUNNEL);
2860 dev = __dev_get_by_name(net, p.name);
2863 return dev_open(dev, NULL);
2867 * Set destination address.
2868 * Special case for SIT interfaces, where we create a new "virtual" device.
2871 int addrconf_set_dstaddr(struct net *net, void __user *arg)
2873 struct net_device *dev;
2874 struct in6_ifreq ireq;
2877 if (!IS_ENABLED(CONFIG_IPV6_SIT))
2879 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
2883 dev = __dev_get_by_index(net, ireq.ifr6_ifindex);
2884 if (dev && dev->type == ARPHRD_SIT)
2885 err = addrconf_set_sit_dstaddr(net, dev, &ireq);
2890 static int ipv6_mc_config(struct sock *sk, bool join,
2891 const struct in6_addr *addr, int ifindex)
2899 ret = ipv6_sock_mc_join(sk, ifindex, addr);
2901 ret = ipv6_sock_mc_drop(sk, ifindex, addr);
2908 * Manual configuration of address on an interface
2910 static int inet6_addr_add(struct net *net, int ifindex,
2911 struct ifa6_config *cfg,
2912 struct netlink_ext_ack *extack)
2914 struct inet6_ifaddr *ifp;
2915 struct inet6_dev *idev;
2916 struct net_device *dev;
2917 unsigned long timeout;
2923 if (cfg->plen > 128)
2926 /* check the lifetime */
2927 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
2930 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR && cfg->plen != 64)
2933 dev = __dev_get_by_index(net, ifindex);
2937 idev = addrconf_add_dev(dev);
2939 return PTR_ERR(idev);
2941 if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2942 int ret = ipv6_mc_config(net->ipv6.mc_autojoin_sk,
2943 true, cfg->pfx, ifindex);
2949 cfg->scope = ipv6_addr_scope(cfg->pfx);
2951 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
2952 if (addrconf_finite_timeout(timeout)) {
2953 expires = jiffies_to_clock_t(timeout * HZ);
2954 cfg->valid_lft = timeout;
2955 flags = RTF_EXPIRES;
2959 cfg->ifa_flags |= IFA_F_PERMANENT;
2962 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
2963 if (addrconf_finite_timeout(timeout)) {
2965 cfg->ifa_flags |= IFA_F_DEPRECATED;
2966 cfg->preferred_lft = timeout;
2969 ifp = ipv6_add_addr(idev, cfg, true, extack);
2971 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
2972 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
2973 ifp->rt_priority, dev, expires,
2977 /* Send a netlink notification if DAD is enabled and
2978 * optimistic flag is not set
2980 if (!(ifp->flags & (IFA_F_OPTIMISTIC | IFA_F_NODAD)))
2981 ipv6_ifa_notify(0, ifp);
2983 * Note that section 3.1 of RFC 4429 indicates
2984 * that the Optimistic flag should not be set for
2985 * manually configured addresses
2987 addrconf_dad_start(ifp);
2988 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR)
2989 manage_tempaddrs(idev, ifp, cfg->valid_lft,
2990 cfg->preferred_lft, true, jiffies);
2992 addrconf_verify_rtnl(net);
2994 } else if (cfg->ifa_flags & IFA_F_MCAUTOJOIN) {
2995 ipv6_mc_config(net->ipv6.mc_autojoin_sk, false,
2999 return PTR_ERR(ifp);
3002 static int inet6_addr_del(struct net *net, int ifindex, u32 ifa_flags,
3003 const struct in6_addr *pfx, unsigned int plen)
3005 struct inet6_ifaddr *ifp;
3006 struct inet6_dev *idev;
3007 struct net_device *dev;
3012 dev = __dev_get_by_index(net, ifindex);
3016 idev = __in6_dev_get(dev);
3020 read_lock_bh(&idev->lock);
3021 list_for_each_entry(ifp, &idev->addr_list, if_list) {
3022 if (ifp->prefix_len == plen &&
3023 ipv6_addr_equal(pfx, &ifp->addr)) {
3025 read_unlock_bh(&idev->lock);
3027 if (!(ifp->flags & IFA_F_TEMPORARY) &&
3028 (ifa_flags & IFA_F_MANAGETEMPADDR))
3029 manage_tempaddrs(idev, ifp, 0, 0, false,
3032 addrconf_verify_rtnl(net);
3033 if (ipv6_addr_is_multicast(pfx)) {
3034 ipv6_mc_config(net->ipv6.mc_autojoin_sk,
3035 false, pfx, dev->ifindex);
3040 read_unlock_bh(&idev->lock);
3041 return -EADDRNOTAVAIL;
3045 int addrconf_add_ifaddr(struct net *net, void __user *arg)
3047 struct ifa6_config cfg = {
3048 .ifa_flags = IFA_F_PERMANENT,
3049 .preferred_lft = INFINITY_LIFE_TIME,
3050 .valid_lft = INFINITY_LIFE_TIME,
3052 struct in6_ifreq ireq;
3055 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3058 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3061 cfg.pfx = &ireq.ifr6_addr;
3062 cfg.plen = ireq.ifr6_prefixlen;
3065 err = inet6_addr_add(net, ireq.ifr6_ifindex, &cfg, NULL);
3070 int addrconf_del_ifaddr(struct net *net, void __user *arg)
3072 struct in6_ifreq ireq;
3075 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
3078 if (copy_from_user(&ireq, arg, sizeof(struct in6_ifreq)))
3082 err = inet6_addr_del(net, ireq.ifr6_ifindex, 0, &ireq.ifr6_addr,
3083 ireq.ifr6_prefixlen);
3088 static void add_addr(struct inet6_dev *idev, const struct in6_addr *addr,
3089 int plen, int scope, u8 proto)
3091 struct inet6_ifaddr *ifp;
3092 struct ifa6_config cfg = {
3095 .ifa_flags = IFA_F_PERMANENT,
3096 .valid_lft = INFINITY_LIFE_TIME,
3097 .preferred_lft = INFINITY_LIFE_TIME,
3102 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3104 spin_lock_bh(&ifp->lock);
3105 ifp->flags &= ~IFA_F_TENTATIVE;
3106 spin_unlock_bh(&ifp->lock);
3107 rt_genid_bump_ipv6(dev_net(idev->dev));
3108 ipv6_ifa_notify(RTM_NEWADDR, ifp);
3113 #if IS_ENABLED(CONFIG_IPV6_SIT) || IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3114 static void add_v4_addrs(struct inet6_dev *idev)
3116 struct in6_addr addr;
3117 struct net_device *dev;
3118 struct net *net = dev_net(idev->dev);
3119 int scope, plen, offset = 0;
3124 memset(&addr, 0, sizeof(struct in6_addr));
3125 /* In the case of IP6GRE the dev_addr is an IPv6 address, so use only its last 4 bytes */
3126 if (idev->dev->addr_len == sizeof(struct in6_addr))
3127 offset = sizeof(struct in6_addr) - 4;
3128 memcpy(&addr.s6_addr32[3], idev->dev->dev_addr + offset, 4);
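/* Illustration (using the documentation address 192.0.2.1): on a plain
 * SIT tunnel this yields the v4-compatible address ::192.0.2.1, while on
 * a point-to-point link the fe80:: prefix set below yields the
 * link-local fe80::192.0.2.1 (a sketch of the intended mapping).
 */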
3130 if (idev->dev->flags&IFF_POINTOPOINT) {
3131 if (idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_NONE)
3134 addr.s6_addr32[0] = htonl(0xfe800000);
3138 scope = IPV6_ADDR_COMPATv4;
3140 pflags |= RTF_NONEXTHOP;
3143 if (addr.s6_addr32[3]) {
3144 add_addr(idev, &addr, plen, scope, IFAPROT_UNSPEC);
3145 addrconf_prefix_route(&addr, plen, 0, idev->dev, 0, pflags,
3150 for_each_netdev(net, dev) {
3151 struct in_device *in_dev = __in_dev_get_rtnl(dev);
3152 if (in_dev && (dev->flags & IFF_UP)) {
3153 struct in_ifaddr *ifa;
3156 in_dev_for_each_ifa_rtnl(ifa, in_dev) {
3157 addr.s6_addr32[3] = ifa->ifa_local;
3159 if (ifa->ifa_scope == RT_SCOPE_LINK)
3161 if (ifa->ifa_scope >= RT_SCOPE_HOST) {
3162 if (idev->dev->flags&IFF_POINTOPOINT)
3167 add_addr(idev, &addr, plen, flag,
3169 addrconf_prefix_route(&addr, plen, 0, idev->dev,
3170 0, pflags, GFP_KERNEL);
3177 static void init_loopback(struct net_device *dev)
3179 struct inet6_dev *idev;
3185 idev = ipv6_find_idev(dev);
3187 pr_debug("%s: add_dev failed\n", __func__);
3191 add_addr(idev, &in6addr_loopback, 128, IFA_HOST, IFAPROT_KERNEL_LO);
3194 void addrconf_add_linklocal(struct inet6_dev *idev,
3195 const struct in6_addr *addr, u32 flags)
3197 struct ifa6_config cfg = {
3200 .ifa_flags = flags | IFA_F_PERMANENT,
3201 .valid_lft = INFINITY_LIFE_TIME,
3202 .preferred_lft = INFINITY_LIFE_TIME,
3204 .ifa_proto = IFAPROT_KERNEL_LL
3206 struct inet6_ifaddr *ifp;
3208 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
3209 if ((dev_net(idev->dev)->ipv6.devconf_all->optimistic_dad ||
3210 idev->cnf.optimistic_dad) &&
3211 !dev_net(idev->dev)->ipv6.devconf_all->forwarding)
3212 cfg.ifa_flags |= IFA_F_OPTIMISTIC;
3215 ifp = ipv6_add_addr(idev, &cfg, true, NULL);
3217 addrconf_prefix_route(&ifp->addr, ifp->prefix_len, 0, idev->dev,
3219 addrconf_dad_start(ifp);
3223 EXPORT_SYMBOL_GPL(addrconf_add_linklocal);
3225 static bool ipv6_reserved_interfaceid(struct in6_addr address)
3227 if ((address.s6_addr32[2] | address.s6_addr32[3]) == 0)
3230 if (address.s6_addr32[2] == htonl(0x02005eff) &&
3231 ((address.s6_addr32[3] & htonl(0xfe000000)) == htonl(0xfe000000)))
3234 if (address.s6_addr32[2] == htonl(0xfdffffff) &&
3235 ((address.s6_addr32[3] & htonl(0xffffff80)) == htonl(0xffffff80)))
3241 static int ipv6_generate_stable_address(struct in6_addr *address,
3243 const struct inet6_dev *idev)
3245 static DEFINE_SPINLOCK(lock);
3246 static __u32 digest[SHA1_DIGEST_WORDS];
3247 static __u32 workspace[SHA1_WORKSPACE_WORDS];
3250 char __data[SHA1_BLOCK_SIZE];
3252 struct in6_addr secret;
3254 unsigned char hwaddr[MAX_ADDR_LEN];
3259 struct in6_addr secret;
3260 struct in6_addr temp;
3261 struct net *net = dev_net(idev->dev);
3263 BUILD_BUG_ON(sizeof(data.__data) != sizeof(data));
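/* Sketch of the derivation (in the spirit of RFC 7217): the interface
 * identifier is a pseudo-random function of the prefix, the permanent
 * hardware address, a retry counter (dad_count) and a secret, computed
 * below with a single SHA-1 transform; the first two 32-bit words of the
 * digest become the interface identifier (address bits 64..127).
 */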
3265 if (idev->cnf.stable_secret.initialized)
3266 secret = idev->cnf.stable_secret.secret;
3267 else if (net->ipv6.devconf_dflt->stable_secret.initialized)
3268 secret = net->ipv6.devconf_dflt->stable_secret.secret;
3273 spin_lock_bh(&lock);
3276 memset(&data, 0, sizeof(data));
3277 memset(workspace, 0, sizeof(workspace));
3278 memcpy(data.hwaddr, idev->dev->perm_addr, idev->dev->addr_len);
3279 data.prefix[0] = address->s6_addr32[0];
3280 data.prefix[1] = address->s6_addr32[1];
3281 data.secret = secret;
3282 data.dad_count = dad_count;
3284 sha1_transform(digest, data.__data, workspace);
3287 temp.s6_addr32[2] = (__force __be32)digest[0];
3288 temp.s6_addr32[3] = (__force __be32)digest[1];
3290 spin_unlock_bh(&lock);
3292 if (ipv6_reserved_interfaceid(temp)) {
3294 if (dad_count > dev_net(idev->dev)->ipv6.sysctl.idgen_retries)
3303 static void ipv6_gen_mode_random_init(struct inet6_dev *idev)
3305 struct ipv6_stable_secret *s = &idev->cnf.stable_secret;
3309 s = &idev->cnf.stable_secret;
3310 get_random_bytes(&s->secret, sizeof(s->secret));
3311 s->initialized = true;
3314 static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
3316 struct in6_addr addr;
3318 /* no link local addresses on L3 master devices */
3319 if (netif_is_l3_master(idev->dev))
3322 /* no link local addresses on devices flagged as slaves */
3323 if (idev->dev->priv_flags & IFF_NO_ADDRCONF)
3326 ipv6_addr_set(&addr, htonl(0xFE800000), 0, 0, 0);
3328 switch (idev->cnf.addr_gen_mode) {
3329 case IN6_ADDR_GEN_MODE_RANDOM:
3330 ipv6_gen_mode_random_init(idev);
3332 case IN6_ADDR_GEN_MODE_STABLE_PRIVACY:
3333 if (!ipv6_generate_stable_address(&addr, 0, idev))
3334 addrconf_add_linklocal(idev, &addr,
3335 IFA_F_STABLE_PRIVACY);
3336 else if (prefix_route)
3337 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3340 case IN6_ADDR_GEN_MODE_EUI64:
3341 /* addrconf_add_linklocal also adds a prefix_route and we
3342 * only need to care about prefix routes if ipv6_generate_eui64
3343 * couldn't generate one.
3345 if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
3346 addrconf_add_linklocal(idev, &addr, 0);
3347 else if (prefix_route)
3348 addrconf_prefix_route(&addr, 64, 0, idev->dev,
3351 case IN6_ADDR_GEN_MODE_NONE:
3353 /* will not add any link local address */
3358 static void addrconf_dev_config(struct net_device *dev)
3360 struct inet6_dev *idev;
3364 if ((dev->type != ARPHRD_ETHER) &&
3365 (dev->type != ARPHRD_FDDI) &&
3366 (dev->type != ARPHRD_ARCNET) &&
3367 (dev->type != ARPHRD_INFINIBAND) &&
3368 (dev->type != ARPHRD_IEEE1394) &&
3369 (dev->type != ARPHRD_TUNNEL6) &&
3370 (dev->type != ARPHRD_6LOWPAN) &&
3371 (dev->type != ARPHRD_TUNNEL) &&
3372 (dev->type != ARPHRD_NONE) &&
3373 (dev->type != ARPHRD_RAWIP)) {
3374 /* Alas, we support only Ethernet autoconfiguration. */
3375 idev = __in6_dev_get(dev);
3376 if (!IS_ERR_OR_NULL(idev) && dev->flags & IFF_UP &&
3377 dev->flags & IFF_MULTICAST)
3382 idev = addrconf_add_dev(dev);
3386 /* this device type has no EUI support */
3387 if (dev->type == ARPHRD_NONE &&
3388 idev->cnf.addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64)
3389 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_RANDOM;
3391 addrconf_addr_gen(idev, false);
3394 #if IS_ENABLED(CONFIG_IPV6_SIT)
3395 static void addrconf_sit_config(struct net_device *dev)
3397 struct inet6_dev *idev;
3402 * Configure the tunnel with one of our IPv4
3403 * addresses... we should configure all of
3404 * our v4 addrs in the tunnel
3407 idev = ipv6_find_idev(dev);
3409 pr_debug("%s: add_dev failed\n", __func__);
3413 if (dev->priv_flags & IFF_ISATAP) {
3414 addrconf_addr_gen(idev, false);
3420 if (dev->flags&IFF_POINTOPOINT)
3421 addrconf_add_mroute(dev);
3425 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3426 static void addrconf_gre_config(struct net_device *dev)
3428 struct inet6_dev *idev;
3432 idev = ipv6_find_idev(dev);
3434 pr_debug("%s: add_dev failed\n", __func__);
3438 if (dev->type == ARPHRD_ETHER) {
3439 addrconf_addr_gen(idev, true);
3445 if (dev->flags & IFF_POINTOPOINT)
3446 addrconf_add_mroute(dev);
3450 static int fixup_permanent_addr(struct net *net,
3451 struct inet6_dev *idev,
3452 struct inet6_ifaddr *ifp)
3454 /* !fib6_node means the host route was removed from the
3455 * FIB, for example, if 'lo' device is taken down. In that
3456 * case regenerate the host route.
3458 if (!ifp->rt || !ifp->rt->fib6_node) {
3459 struct fib6_info *f6i, *prev;
3461 f6i = addrconf_f6i_alloc(net, idev, &ifp->addr, false,
3464 return PTR_ERR(f6i);
3466 /* ifp->rt can be accessed outside of rtnl */
3467 spin_lock(&ifp->lock);
3470 spin_unlock(&ifp->lock);
3472 fib6_info_release(prev);
3475 if (!(ifp->flags & IFA_F_NOPREFIXROUTE)) {
3476 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
3477 ifp->rt_priority, idev->dev, 0, 0,
3481 if (ifp->state == INET6_IFADDR_STATE_PREDAD)
3482 addrconf_dad_start(ifp);
3487 static void addrconf_permanent_addr(struct net *net, struct net_device *dev)
3489 struct inet6_ifaddr *ifp, *tmp;
3490 struct inet6_dev *idev;
3492 idev = __in6_dev_get(dev);
3496 write_lock_bh(&idev->lock);
3498 list_for_each_entry_safe(ifp, tmp, &idev->addr_list, if_list) {
3499 if ((ifp->flags & IFA_F_PERMANENT) &&
3500 fixup_permanent_addr(net, idev, ifp) < 0) {
3501 write_unlock_bh(&idev->lock);
3504 write_lock_bh(&idev->lock);
3506 net_info_ratelimited("%s: Failed to add prefix route for address %pI6c; dropping\n",
3507 idev->dev->name, &ifp->addr);
3511 write_unlock_bh(&idev->lock);
3514 static int addrconf_notify(struct notifier_block *this, unsigned long event,
3517 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
3518 struct netdev_notifier_change_info *change_info;
3519 struct netdev_notifier_changeupper_info *info;
3520 struct inet6_dev *idev = __in6_dev_get(dev);
3521 struct net *net = dev_net(dev);
3522 int run_pending = 0;
3526 case NETDEV_REGISTER:
3527 if (!idev && dev->mtu >= IPV6_MIN_MTU) {
3528 idev = ipv6_add_dev(dev);
3530 return notifier_from_errno(PTR_ERR(idev));
3534 case NETDEV_CHANGEMTU:
3535 /* if the MTU drops under IPV6_MIN_MTU, stop IPv6 on this interface. */
3536 if (dev->mtu < IPV6_MIN_MTU) {
3537 addrconf_ifdown(dev, dev != net->loopback_dev);
3542 rt6_mtu_change(dev, dev->mtu);
3543 idev->cnf.mtu6 = dev->mtu;
3547 /* allocate new idev */
3548 idev = ipv6_add_dev(dev);
3552 /* device is still not ready */
3553 if (!(idev->if_flags & IF_READY))
3560 if (idev && idev->cnf.disable_ipv6)
3563 if (dev->priv_flags & IFF_NO_ADDRCONF) {
3564 if (event == NETDEV_UP && !IS_ERR_OR_NULL(idev) &&
3565 dev->flags & IFF_UP && dev->flags & IFF_MULTICAST)
3570 if (event == NETDEV_UP) {
3571 /* restore routes for permanent addresses */
3572 addrconf_permanent_addr(net, dev);
3574 if (!addrconf_link_ready(dev)) {
3575 /* device is not ready yet. */
3576 pr_debug("ADDRCONF(NETDEV_UP): %s: link is not ready\n",
3581 if (!idev && dev->mtu >= IPV6_MIN_MTU)
3582 idev = ipv6_add_dev(dev);
3584 if (!IS_ERR_OR_NULL(idev)) {
3585 idev->if_flags |= IF_READY;
3588 } else if (event == NETDEV_CHANGE) {
3589 if (!addrconf_link_ready(dev)) {
3590 /* device is still not ready. */
3591 rt6_sync_down_dev(dev, event);
3595 if (!IS_ERR_OR_NULL(idev)) {
3596 if (idev->if_flags & IF_READY) {
3597 /* device is already configured -
3598 * but resend MLD reports, we might
3599 * have roamed and need to update
3600 * multicast snooping switches
3604 if (change_info->flags_changed & IFF_NOARP)
3605 addrconf_dad_run(idev, true);
3606 rt6_sync_up(dev, RTNH_F_LINKDOWN);
3609 idev->if_flags |= IF_READY;
3612 pr_info("ADDRCONF(NETDEV_CHANGE): %s: link becomes ready\n",
3618 switch (dev->type) {
3619 #if IS_ENABLED(CONFIG_IPV6_SIT)
3621 addrconf_sit_config(dev);
3624 #if IS_ENABLED(CONFIG_NET_IPGRE) || IS_ENABLED(CONFIG_IPV6_GRE)
3627 addrconf_gre_config(dev);
3630 case ARPHRD_LOOPBACK:
3635 addrconf_dev_config(dev);
3639 if (!IS_ERR_OR_NULL(idev)) {
3641 addrconf_dad_run(idev, false);
3643 /* Device has an address by now */
3644 rt6_sync_up(dev, RTNH_F_DEAD);
3647 * If the MTU changed while the interface was down,
3648 * the new MTU must be reflected in the idev as well
3649 * as in the routes when the interface comes back up.
3651 if (idev->cnf.mtu6 != dev->mtu &&
3652 dev->mtu >= IPV6_MIN_MTU) {
3653 rt6_mtu_change(dev, dev->mtu);
3654 idev->cnf.mtu6 = dev->mtu;
3656 idev->tstamp = jiffies;
3657 inet6_ifinfo_notify(RTM_NEWLINK, idev);
3660 * If the MTU changed while the interface was down and is now
3661 * lower than IPV6_MIN_MTU, stop IPv6 on this interface.
3663 if (dev->mtu < IPV6_MIN_MTU)
3664 addrconf_ifdown(dev, dev != net->loopback_dev);
3669 case NETDEV_UNREGISTER:
3671 * Remove all addresses from this interface.
3673 addrconf_ifdown(dev, event != NETDEV_DOWN);
3676 case NETDEV_CHANGENAME:
3678 snmp6_unregister_dev(idev);
3679 addrconf_sysctl_unregister(idev);
3680 err = addrconf_sysctl_register(idev);
3682 return notifier_from_errno(err);
3683 err = snmp6_register_dev(idev);
3685 addrconf_sysctl_unregister(idev);
3686 return notifier_from_errno(err);
3691 case NETDEV_PRE_TYPE_CHANGE:
3692 case NETDEV_POST_TYPE_CHANGE:
3694 addrconf_type_change(dev, event);
3697 case NETDEV_CHANGEUPPER:
3700 /* flush all routes if dev is linked to or unlinked from
3701 * an L3 master device (e.g., VRF)
3703 if (info->upper_dev && netif_is_l3_master(info->upper_dev))
3704 addrconf_ifdown(dev, false);
3711 * addrconf module should be notified of a device going up
3713 static struct notifier_block ipv6_dev_notf = {
3714 .notifier_call = addrconf_notify,
3715 .priority = ADDRCONF_NOTIFY_PRIORITY,
3718 static void addrconf_type_change(struct net_device *dev, unsigned long event)
3720 struct inet6_dev *idev;
3723 idev = __in6_dev_get(dev);
3725 if (event == NETDEV_POST_TYPE_CHANGE)
3726 ipv6_mc_remap(idev);
3727 else if (event == NETDEV_PRE_TYPE_CHANGE)
3728 ipv6_mc_unmap(idev);
3731 static bool addr_is_local(const struct in6_addr *addr)
3733 return ipv6_addr_type(addr) &
3734 (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
3737 static int addrconf_ifdown(struct net_device *dev, bool unregister)
3739 unsigned long event = unregister ? NETDEV_UNREGISTER : NETDEV_DOWN;
3740 struct net *net = dev_net(dev);
3741 struct inet6_dev *idev;
3742 struct inet6_ifaddr *ifa;
3743 LIST_HEAD(tmp_addr_list);
3744 bool keep_addr = false;
3750 rt6_disable_ip(dev, event);
3752 idev = __in6_dev_get(dev);
3757 * Step 1: remove reference to ipv6 device from parent device.
3763 /* protected by rtnl_lock */
3764 RCU_INIT_POINTER(dev->ip6_ptr, NULL);
3766 /* Step 1.5: remove snmp6 entry */
3767 snmp6_unregister_dev(idev);
3771 /* combine the user config with event to determine if permanent
3772 * addresses are to be removed from address hash table
3774 if (!unregister && !idev->cnf.disable_ipv6) {
3775 /* aggregate the system setting and interface setting */
3776 int _keep_addr = net->ipv6.devconf_all->keep_addr_on_down;
3779 _keep_addr = idev->cnf.keep_addr_on_down;
3781 keep_addr = (_keep_addr > 0);
3784 /* Step 2: clear hash table */
3785 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
3786 struct hlist_head *h = &net->ipv6.inet6_addr_lst[i];
3788 spin_lock_bh(&net->ipv6.addrconf_hash_lock);
3790 hlist_for_each_entry_rcu(ifa, h, addr_lst) {
3791 if (ifa->idev == idev) {
3792 addrconf_del_dad_work(ifa);
3793 /* combined flag + permanent flag decide if
3794 * address is retained on a down event
3797 !(ifa->flags & IFA_F_PERMANENT) ||
3798 addr_is_local(&ifa->addr)) {
3799 hlist_del_init_rcu(&ifa->addr_lst);
3804 spin_unlock_bh(&net->ipv6.addrconf_hash_lock);
3807 write_lock_bh(&idev->lock);
3809 addrconf_del_rs_timer(idev);
3811 /* Step 2: clear flags for stateless addrconf and repeated-down detection
3814 was_ready = idev->if_flags & IF_READY;
3816 idev->if_flags &= ~(IF_RS_SENT|IF_RA_RCVD|IF_READY);
3818 /* Step 3: clear tempaddr list */
3819 while (!list_empty(&idev->tempaddr_list)) {
3820 ifa = list_first_entry(&idev->tempaddr_list,
3821 struct inet6_ifaddr, tmp_list);
3822 list_del(&ifa->tmp_list);
3823 write_unlock_bh(&idev->lock);
3824 spin_lock_bh(&ifa->lock);
3827 in6_ifa_put(ifa->ifpub);
3830 spin_unlock_bh(&ifa->lock);
3832 write_lock_bh(&idev->lock);
3835 list_for_each_entry(ifa, &idev->addr_list, if_list)
3836 list_add_tail(&ifa->if_list_aux, &tmp_addr_list);
3837 write_unlock_bh(&idev->lock);
3839 while (!list_empty(&tmp_addr_list)) {
3840 struct fib6_info *rt = NULL;
3843 ifa = list_first_entry(&tmp_addr_list,
3844 struct inet6_ifaddr, if_list_aux);
3845 list_del(&ifa->if_list_aux);
3847 addrconf_del_dad_work(ifa);
3849 keep = keep_addr && (ifa->flags & IFA_F_PERMANENT) &&
3850 !addr_is_local(&ifa->addr);
3852 spin_lock_bh(&ifa->lock);
3855 /* set state to skip the notifier below */
3856 state = INET6_IFADDR_STATE_DEAD;
3857 ifa->state = INET6_IFADDR_STATE_PREDAD;
3858 if (!(ifa->flags & IFA_F_NODAD))
3859 ifa->flags |= IFA_F_TENTATIVE;
3865 ifa->state = INET6_IFADDR_STATE_DEAD;
3868 spin_unlock_bh(&ifa->lock);
3871 ip6_del_rt(net, rt, false);
3873 if (state != INET6_IFADDR_STATE_DEAD) {
3874 __ipv6_ifa_notify(RTM_DELADDR, ifa);
3875 inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);
3877 if (idev->cnf.forwarding)
3878 addrconf_leave_anycast(ifa);
3879 addrconf_leave_solict(ifa->idev, &ifa->addr);
3883 write_lock_bh(&idev->lock);
3884 list_del_rcu(&ifa->if_list);
3885 write_unlock_bh(&idev->lock);
3890 /* Step 5: Discard anycast and multicast list */
3892 ipv6_ac_destroy_dev(idev);
3893 ipv6_mc_destroy_dev(idev);
3894 } else if (was_ready) {
3898 idev->tstamp = jiffies;
3901 /* Last: shoot the device (if it is being unregistered) */
3903 addrconf_sysctl_unregister(idev);
3904 neigh_parms_release(&nd_tbl, idev->nd_parms);
3905 neigh_ifdown(&nd_tbl, dev);
3911 static void addrconf_rs_timer(struct timer_list *t)
3913 struct inet6_dev *idev = from_timer(idev, t, rs_timer);
3914 struct net_device *dev = idev->dev;
3915 struct in6_addr lladdr;
3917 write_lock(&idev->lock);
3918 if (idev->dead || !(idev->if_flags & IF_READY))
3921 if (!ipv6_accept_ra(idev))
3924 /* Announcement received after solicitation was sent */
3925 if (idev->if_flags & IF_RA_RCVD)
3928 if (idev->rs_probes++ < idev->cnf.rtr_solicits || idev->cnf.rtr_solicits < 0) {
3929 write_unlock(&idev->lock);
3930 if (!ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
3931 ndisc_send_rs(dev, &lladdr,
3932 &in6addr_linklocal_allrouters);
3936 write_lock(&idev->lock);
3937 idev->rs_interval = rfc3315_s14_backoff_update(
3938 idev->rs_interval, idev->cnf.rtr_solicit_max_interval);
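/* Illustration of the resulting schedule (a sketch, not exact values):
 * starting from an rtr_solicit_interval of about 4 s, each unanswered
 * solicitation roughly doubles the interval (1.9x-2.1x) until it would
 * exceed rtr_solicit_max_interval, after which the interval is held near
 * that maximum.
 */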
3939 /* The wait after the last probe can be shorter */
3940 addrconf_mod_rs_timer(idev, (idev->rs_probes ==
3941 idev->cnf.rtr_solicits) ?
3942 idev->cnf.rtr_solicit_delay :
3946 * Note: we no longer support the deprecated
3947 * "all on-link" assumption.
3949 pr_debug("%s: no IPv6 routers present\n", idev->dev->name);
3953 write_unlock(&idev->lock);
3959 * Duplicate Address Detection
3961 static void addrconf_dad_kick(struct inet6_ifaddr *ifp)
3963 unsigned long rand_num;
3964 struct inet6_dev *idev = ifp->idev;
3967 if (ifp->flags & IFA_F_OPTIMISTIC)
3970 rand_num = get_random_u32_below(idev->cnf.rtr_solicit_delay ? : 1);
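/* Enhanced DAD (RFC 7527): a random 6-byte nonce is carried in the DAD
 * Neighbor Solicitations so that a probe looped back by the link can be
 * distinguished from a genuine duplicate on another node.
 */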
3973 if (idev->cnf.enhanced_dad ||
3974 dev_net(idev->dev)->ipv6.devconf_all->enhanced_dad) {
3976 get_random_bytes(&nonce, 6);
3979 ifp->dad_nonce = nonce;
3980 ifp->dad_probes = idev->cnf.dad_transmits;
3981 addrconf_mod_dad_work(ifp, rand_num);
3984 static void addrconf_dad_begin(struct inet6_ifaddr *ifp)
3986 struct inet6_dev *idev = ifp->idev;
3987 struct net_device *dev = idev->dev;
3988 bool bump_id, notify = false;
3991 addrconf_join_solict(dev, &ifp->addr);
3993 read_lock_bh(&idev->lock);
3994 spin_lock(&ifp->lock);
3995 if (ifp->state == INET6_IFADDR_STATE_DEAD)
3999 if (dev->flags&(IFF_NOARP|IFF_LOOPBACK) ||
4000 (net->ipv6.devconf_all->accept_dad < 1 &&
4001 idev->cnf.accept_dad < 1) ||
4002 !(ifp->flags&IFA_F_TENTATIVE) ||
4003 ifp->flags & IFA_F_NODAD) {
4004 bool send_na = false;
4006 if (ifp->flags & IFA_F_TENTATIVE &&
4007 !(ifp->flags & IFA_F_OPTIMISTIC))
4009 bump_id = ifp->flags & IFA_F_TENTATIVE;
4010 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4011 spin_unlock(&ifp->lock);
4012 read_unlock_bh(&idev->lock);
4014 addrconf_dad_completed(ifp, bump_id, send_na);
4018 if (!(idev->if_flags & IF_READY)) {
4019 spin_unlock(&ifp->lock);
4020 read_unlock_bh(&idev->lock);
4022 * If the device is not ready:
4023 * - keep it tentative if it is a permanent address.
4024 * - otherwise, kill it.
4027 addrconf_dad_stop(ifp, 0);
4032 * Optimistic nodes can start receiving
4035 if (ifp->flags & IFA_F_OPTIMISTIC) {
4036 ip6_ins_rt(net, ifp->rt);
4037 if (ipv6_use_optimistic_addr(net, idev)) {
4038 /* Because optimistic nodes can use this address,
4039 * notify listeners. If DAD fails, RTM_DELADDR is sent.
4045 addrconf_dad_kick(ifp);
4047 spin_unlock(&ifp->lock);
4048 read_unlock_bh(&idev->lock);
4050 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4053 static void addrconf_dad_start(struct inet6_ifaddr *ifp)
4055 bool begin_dad = false;
4057 spin_lock_bh(&ifp->lock);
4058 if (ifp->state != INET6_IFADDR_STATE_DEAD) {
4059 ifp->state = INET6_IFADDR_STATE_PREDAD;
4062 spin_unlock_bh(&ifp->lock);
4065 addrconf_mod_dad_work(ifp, 0);
4068 static void addrconf_dad_work(struct work_struct *w)
4070 struct inet6_ifaddr *ifp = container_of(to_delayed_work(w),
4071 struct inet6_ifaddr,
4073 struct inet6_dev *idev = ifp->idev;
4074 bool bump_id, disable_ipv6 = false;
4075 struct in6_addr mcaddr;
4081 } action = DAD_PROCESS;
4085 spin_lock_bh(&ifp->lock);
4086 if (ifp->state == INET6_IFADDR_STATE_PREDAD) {
4088 ifp->state = INET6_IFADDR_STATE_DAD;
4089 } else if (ifp->state == INET6_IFADDR_STATE_ERRDAD) {
4091 ifp->state = INET6_IFADDR_STATE_POSTDAD;
4093 if ((dev_net(idev->dev)->ipv6.devconf_all->accept_dad > 1 ||
4094 idev->cnf.accept_dad > 1) &&
4095 !idev->cnf.disable_ipv6 &&
4096 !(ifp->flags & IFA_F_STABLE_PRIVACY)) {
4097 struct in6_addr addr;
4099 addr.s6_addr32[0] = htonl(0xfe800000);
4100 addr.s6_addr32[1] = 0;
4102 if (!ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) &&
4103 ipv6_addr_equal(&ifp->addr, &addr)) {
4104 /* DAD failed for link-local based on MAC */
4105 idev->cnf.disable_ipv6 = 1;
4107 pr_info("%s: IPv6 being disabled!\n",
4108 ifp->idev->dev->name);
4109 disable_ipv6 = true;
4113 spin_unlock_bh(&ifp->lock);
4115 if (action == DAD_BEGIN) {
4116 addrconf_dad_begin(ifp);
4118 } else if (action == DAD_ABORT) {
4120 addrconf_dad_stop(ifp, 1);
4122 addrconf_ifdown(idev->dev, false);
4126 if (!ifp->dad_probes && addrconf_dad_end(ifp))
4129 write_lock_bh(&idev->lock);
4130 if (idev->dead || !(idev->if_flags & IF_READY)) {
4131 write_unlock_bh(&idev->lock);
4135 spin_lock(&ifp->lock);
4136 if (ifp->state == INET6_IFADDR_STATE_DEAD) {
4137 spin_unlock(&ifp->lock);
4138 write_unlock_bh(&idev->lock);
4142 if (ifp->dad_probes == 0) {
4143 bool send_na = false;
4146 * DAD was successful
4149 if (ifp->flags & IFA_F_TENTATIVE &&
4150 !(ifp->flags & IFA_F_OPTIMISTIC))
4152 bump_id = ifp->flags & IFA_F_TENTATIVE;
4153 ifp->flags &= ~(IFA_F_TENTATIVE|IFA_F_OPTIMISTIC|IFA_F_DADFAILED);
4154 spin_unlock(&ifp->lock);
4155 write_unlock_bh(&idev->lock);
4157 addrconf_dad_completed(ifp, bump_id, send_na);
4163 addrconf_mod_dad_work(ifp,
4164 max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME),
4166 spin_unlock(&ifp->lock);
4167 write_unlock_bh(&idev->lock);
4169 /* send a neighbour solicitation for our addr */
4170 addrconf_addr_solict_mult(&ifp->addr, &mcaddr);
4171 ndisc_send_ns(ifp->idev->dev, &ifp->addr, &mcaddr, &in6addr_any,
4178 /* ifp->idev must be at least read locked */
4179 static bool ipv6_lonely_lladdr(struct inet6_ifaddr *ifp)
4181 struct inet6_ifaddr *ifpiter;
4182 struct inet6_dev *idev = ifp->idev;
4184 list_for_each_entry_reverse(ifpiter, &idev->addr_list, if_list) {
4185 if (ifpiter->scope > IFA_LINK)
4187 if (ifp != ifpiter && ifpiter->scope == IFA_LINK &&
4188 (ifpiter->flags & (IFA_F_PERMANENT|IFA_F_TENTATIVE|
4189 IFA_F_OPTIMISTIC|IFA_F_DADFAILED)) ==
4196 static void addrconf_dad_completed(struct inet6_ifaddr *ifp, bool bump_id,
4199 struct net_device *dev = ifp->idev->dev;
4200 struct in6_addr lladdr;
4201 bool send_rs, send_mld;
4203 addrconf_del_dad_work(ifp);
4206 * Configure the address for reception. Now it is valid.
4209 ipv6_ifa_notify(RTM_NEWADDR, ifp);
4211 /* If the added prefix is link-local and we are prepared to process
4212 router advertisements, start sending router solicitations.
4215 read_lock_bh(&ifp->idev->lock);
4216 send_mld = ifp->scope == IFA_LINK && ipv6_lonely_lladdr(ifp);
4217 send_rs = send_mld &&
4218 ipv6_accept_ra(ifp->idev) &&
4219 ifp->idev->cnf.rtr_solicits != 0 &&
4220 (dev->flags & IFF_LOOPBACK) == 0 &&
4221 (dev->type != ARPHRD_TUNNEL);
4222 read_unlock_bh(&ifp->idev->lock);
4224 /* While DAD is in progress the MLD report's source address is in6addr_any.
4225 * Resend it with the proper link-local source address now.
4228 ipv6_mc_dad_complete(ifp->idev);
4230 /* send unsolicited NA if enabled */
4232 (ifp->idev->cnf.ndisc_notify ||
4233 dev_net(dev)->ipv6.devconf_all->ndisc_notify)) {
4234 ndisc_send_na(dev, &in6addr_linklocal_allnodes, &ifp->addr,
4235 /*router=*/ !!ifp->idev->cnf.forwarding,
4236 /*solicited=*/ false, /*override=*/ true,
4242 * If a host has already performed a random delay
4243 * [...] as part of DAD [...] there is no need
4244 * to delay again before sending the first RS
4246 if (ipv6_get_lladdr(dev, &lladdr, IFA_F_TENTATIVE))
4248 ndisc_send_rs(dev, &lladdr, &in6addr_linklocal_allrouters);
4250 write_lock_bh(&ifp->idev->lock);
4251 spin_lock(&ifp->lock);
4252 ifp->idev->rs_interval = rfc3315_s14_backoff_init(
4253 ifp->idev->cnf.rtr_solicit_interval);
4254 ifp->idev->rs_probes = 1;
4255 ifp->idev->if_flags |= IF_RS_SENT;
4256 addrconf_mod_rs_timer(ifp->idev, ifp->idev->rs_interval);
4257 spin_unlock(&ifp->lock);
4258 write_unlock_bh(&ifp->idev->lock);
4262 rt_genid_bump_ipv6(dev_net(dev));
4264 /* Make sure that a new temporary address will be created
4265 * before this temporary address becomes deprecated.
4267 if (ifp->flags & IFA_F_TEMPORARY)
4268 addrconf_verify_rtnl(dev_net(dev));
4271 static void addrconf_dad_run(struct inet6_dev *idev, bool restart)
4273 struct inet6_ifaddr *ifp;
4275 read_lock_bh(&idev->lock);
4276 list_for_each_entry(ifp, &idev->addr_list, if_list) {
4277 spin_lock(&ifp->lock);
4278 if ((ifp->flags & IFA_F_TENTATIVE &&
4279 ifp->state == INET6_IFADDR_STATE_DAD) || restart) {
4281 ifp->state = INET6_IFADDR_STATE_PREDAD;
4282 addrconf_dad_kick(ifp);
4284 spin_unlock(&ifp->lock);
4286 read_unlock_bh(&idev->lock);
4289 #ifdef CONFIG_PROC_FS
4290 struct if6_iter_state {
4291 struct seq_net_private p;
4296 static struct inet6_ifaddr *if6_get_first(struct seq_file *seq, loff_t pos)
4298 struct if6_iter_state *state = seq->private;
4299 struct net *net = seq_file_net(seq);
4300 struct inet6_ifaddr *ifa = NULL;
4303 /* initial bucket if pos is 0 */
4309 for (; state->bucket < IN6_ADDR_HSIZE; ++state->bucket) {
4310 hlist_for_each_entry_rcu(ifa, &net->ipv6.inet6_addr_lst[state->bucket],
4312 /* sync with offset */
4313 if (p < state->offset) {
4320 /* prepare for next bucket */
4327 static struct inet6_ifaddr *if6_get_next(struct seq_file *seq,
4328 struct inet6_ifaddr *ifa)
4330 struct if6_iter_state *state = seq->private;
4331 struct net *net = seq_file_net(seq);
4333 hlist_for_each_entry_continue_rcu(ifa, addr_lst) {
4339 while (++state->bucket < IN6_ADDR_HSIZE) {
4340 hlist_for_each_entry_rcu(ifa,
4341 &net->ipv6.inet6_addr_lst[state->bucket], addr_lst) {
4349 static void *if6_seq_start(struct seq_file *seq, loff_t *pos)
4353 return if6_get_first(seq, *pos);
4356 static void *if6_seq_next(struct seq_file *seq, void *v, loff_t *pos)
4358 struct inet6_ifaddr *ifa;
4360 ifa = if6_get_next(seq, v);
4365 static void if6_seq_stop(struct seq_file *seq, void *v)
4371 static int if6_seq_show(struct seq_file *seq, void *v)
4373 struct inet6_ifaddr *ifp = (struct inet6_ifaddr *)v;
4374 seq_printf(seq, "%pi6 %02x %02x %02x %02x %8s\n",
4376 ifp->idev->dev->ifindex,
4380 ifp->idev->dev->name);
4384 static const struct seq_operations if6_seq_ops = {
4385 .start = if6_seq_start,
4386 .next = if6_seq_next,
4387 .show = if6_seq_show,
4388 .stop = if6_seq_stop,
4391 static int __net_init if6_proc_net_init(struct net *net)
4393 if (!proc_create_net("if_inet6", 0444, net->proc_net, &if6_seq_ops,
4394 sizeof(struct if6_iter_state)))
4399 static void __net_exit if6_proc_net_exit(struct net *net)
4401 remove_proc_entry("if_inet6", net->proc_net);
4404 static struct pernet_operations if6_proc_net_ops = {
4405 .init = if6_proc_net_init,
4406 .exit = if6_proc_net_exit,
4409 int __init if6_proc_init(void)
4411 return register_pernet_subsys(&if6_proc_net_ops);
4414 void if6_proc_exit(void)
4416 unregister_pernet_subsys(&if6_proc_net_ops);
4418 #endif /* CONFIG_PROC_FS */
4420 #if IS_ENABLED(CONFIG_IPV6_MIP6)
4421 /* Check if address is a home address configured on any interface. */
4422 int ipv6_chk_home_addr(struct net *net, const struct in6_addr *addr)
4424 unsigned int hash = inet6_addr_hash(net, addr);
4425 struct inet6_ifaddr *ifp = NULL;
4429 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4430 if (ipv6_addr_equal(&ifp->addr, addr) &&
4431 (ifp->flags & IFA_F_HOMEADDRESS)) {
4441 /* RFC6554 describes an algorithm to avoid loops in segment routing by
4442 * checking whether the segment list contains any local interface address:
4446 * To detect loops in the SRH, a router MUST determine if the SRH
4447 * includes multiple addresses assigned to any interface on that router.
4448 * If such addresses appear more than once and are separated by at least
4449 * one address not assigned to that router, the SRH is treated as a loop.
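*
* Illustration (hypothetical segment list): with A and B assigned to this
* router and X not, the list [A, B, X, A] reaches the final A with more
* than one local address already counted and a non-local address
* separating them, so the check below reports a loop.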
4451 int ipv6_chk_rpl_srh_loop(struct net *net, const struct in6_addr *segs,
4452 unsigned char nsegs)
4454 const struct in6_addr *addr;
4455 int i, ret = 0, found = 0;
4456 struct inet6_ifaddr *ifp;
4457 bool separated = false;
4462 for (i = 0; i < nsegs; i++) {
4464 hash = inet6_addr_hash(net, addr);
4467 hlist_for_each_entry_rcu(ifp, &net->ipv6.inet6_addr_lst[hash], addr_lst) {
4469 if (ipv6_addr_equal(&ifp->addr, addr)) {
4476 if (found > 1 && separated) {
4493 * Periodic address status verification
4496 static void addrconf_verify_rtnl(struct net *net)
4498 unsigned long now, next, next_sec, next_sched;
4499 struct inet6_ifaddr *ifp;
4506 next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY);
4508 cancel_delayed_work(&net->ipv6.addr_chk_work);
4510 for (i = 0; i < IN6_ADDR_HSIZE; i++) {
4512 hlist_for_each_entry_rcu_bh(ifp, &net->ipv6.inet6_addr_lst[i], addr_lst) {
4515 /* When preferred_lft is set to a value that is neither zero nor
4516 * infinity while valid_lft is infinity, an IFA_F_PERMANENT
4517 * address has a non-infinite preferred lifetime.
4519 if ((ifp->flags & IFA_F_PERMANENT) &&
4520 (ifp->prefered_lft == INFINITY_LIFE_TIME))
4523 spin_lock(&ifp->lock);
4524 /* We try to batch several events at once. */
4525 age = (now - ifp->tstamp + ADDRCONF_TIMER_FUZZ_MINUS) / HZ;
4527 if ((ifp->flags&IFA_F_TEMPORARY) &&
4528 !(ifp->flags&IFA_F_TENTATIVE) &&
4529 ifp->prefered_lft != INFINITY_LIFE_TIME &&
4530 !ifp->regen_count && ifp->ifpub) {
4531 /* This is a non-regenerated temporary addr. */
4533 unsigned long regen_advance = ifp->idev->cnf.regen_max_retry *
4534 ifp->idev->cnf.dad_transmits *
4535 max(NEIGH_VAR(ifp->idev->nd_parms, RETRANS_TIME), HZ/100) / HZ;
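/* e.g. with the default regen_max_retry of 3, dad_transmits of 1 and a
 * 1 s retransmit timer, regen_advance is 3 s: a replacement temporary
 * address is created at least that long before this one would be
 * deprecated (illustrative defaults).
 */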
4537 if (age + regen_advance >= ifp->prefered_lft) {
4538 struct inet6_ifaddr *ifpub = ifp->ifpub;
4539 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4540 next = ifp->tstamp + ifp->prefered_lft * HZ;
4544 in6_ifa_hold(ifpub);
4545 spin_unlock(&ifp->lock);
4547 spin_lock(&ifpub->lock);
4548 ifpub->regen_count = 0;
4549 spin_unlock(&ifpub->lock);
4550 rcu_read_unlock_bh();
4551 ipv6_create_tempaddr(ifpub, true);
4556 } else if (time_before(ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ, next))
4557 next = ifp->tstamp + ifp->prefered_lft * HZ - regen_advance * HZ;
4560 if (ifp->valid_lft != INFINITY_LIFE_TIME &&
4561 age >= ifp->valid_lft) {
4562 spin_unlock(&ifp->lock);
4564 rcu_read_unlock_bh();
4568 } else if (ifp->prefered_lft == INFINITY_LIFE_TIME) {
4569 spin_unlock(&ifp->lock);
4571 } else if (age >= ifp->prefered_lft) {
4572 /* jiffies - ifp->tstamp > age >= ifp->prefered_lft */
4575 if (!(ifp->flags&IFA_F_DEPRECATED)) {
4577 ifp->flags |= IFA_F_DEPRECATED;
4580 if ((ifp->valid_lft != INFINITY_LIFE_TIME) &&
4581 (time_before(ifp->tstamp + ifp->valid_lft * HZ, next)))
4582 next = ifp->tstamp + ifp->valid_lft * HZ;
4584 spin_unlock(&ifp->lock);
4589 ipv6_ifa_notify(0, ifp);
4594 /* ifp->prefered_lft <= ifp->valid_lft */
4595 if (time_before(ifp->tstamp + ifp->prefered_lft * HZ, next))
4596 next = ifp->tstamp + ifp->prefered_lft * HZ;
4597 spin_unlock(&ifp->lock);
4602 next_sec = round_jiffies_up(next);
4605 /* If rounded timeout is accurate enough, accept it. */
4606 if (time_before(next_sec, next + ADDRCONF_TIMER_FUZZ))
4607 next_sched = next_sec;
4609 /* And minimum interval is ADDRCONF_TIMER_FUZZ_MAX. */
4610 if (time_before(next_sched, jiffies + ADDRCONF_TIMER_FUZZ_MAX))
4611 next_sched = jiffies + ADDRCONF_TIMER_FUZZ_MAX;
4613 pr_debug("now = %lu, schedule = %lu, rounded schedule = %lu => %lu\n",
4614 now, next, next_sec, next_sched);
4615 mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, next_sched - now);
4616 rcu_read_unlock_bh();
4619 static void addrconf_verify_work(struct work_struct *w)
4621 struct net *net = container_of(to_delayed_work(w), struct net,
4622 ipv6.addr_chk_work);
4625 addrconf_verify_rtnl(net);
4629 static void addrconf_verify(struct net *net)
4631 mod_delayed_work(addrconf_wq, &net->ipv6.addr_chk_work, 0);
4634 static struct in6_addr *extract_addr(struct nlattr *addr, struct nlattr *local,
4635 struct in6_addr **peer_pfx)
4637 struct in6_addr *pfx = NULL;
4642 pfx = nla_data(addr);
4645 if (pfx && nla_memcmp(local, pfx, sizeof(*pfx)))
4647 pfx = nla_data(local);
4653 static const struct nla_policy ifa_ipv6_policy[IFA_MAX+1] = {
4654 [IFA_ADDRESS] = { .len = sizeof(struct in6_addr) },
4655 [IFA_LOCAL] = { .len = sizeof(struct in6_addr) },
4656 [IFA_CACHEINFO] = { .len = sizeof(struct ifa_cacheinfo) },
4657 [IFA_FLAGS] = { .len = sizeof(u32) },
4658 [IFA_RT_PRIORITY] = { .len = sizeof(u32) },
4659 [IFA_TARGET_NETNSID] = { .type = NLA_S32 },
4660 [IFA_PROTO] = { .type = NLA_U8 },
4664 inet6_rtm_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh,
4665 struct netlink_ext_ack *extack)
4667 struct net *net = sock_net(skb->sk);
4668 struct ifaddrmsg *ifm;
4669 struct nlattr *tb[IFA_MAX+1];
4670 struct in6_addr *pfx, *peer_pfx;
4674 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4675 ifa_ipv6_policy, extack);
4679 ifm = nlmsg_data(nlh);
4680 pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4684 ifa_flags = tb[IFA_FLAGS] ? nla_get_u32(tb[IFA_FLAGS]) : ifm->ifa_flags;
4686 /* We ignore other flags so far. */
4687 ifa_flags &= IFA_F_MANAGETEMPADDR;
4689 return inet6_addr_del(net, ifm->ifa_index, ifa_flags, pfx,
4690 ifm->ifa_prefixlen);
4693 static int modify_prefix_route(struct inet6_ifaddr *ifp,
4694 unsigned long expires, u32 flags,
4697 struct fib6_info *f6i;
4700 f6i = addrconf_get_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4702 ifp->idev->dev, 0, RTF_DEFAULT, true);
4706 prio = ifp->rt_priority ? : IP6_RT_PRIO_ADDRCONF;
4707 if (f6i->fib6_metric != prio) {
4708 /* delete old one */
4709 ip6_del_rt(dev_net(ifp->idev->dev), f6i, false);
4712 addrconf_prefix_route(modify_peer ? &ifp->peer_addr : &ifp->addr,
4714 ifp->rt_priority, ifp->idev->dev,
4715 expires, flags, GFP_KERNEL);
4718 fib6_clean_expires(f6i);
4720 fib6_set_expires(f6i, expires);
4722 fib6_info_release(f6i);
4728 static int inet6_addr_modify(struct net *net, struct inet6_ifaddr *ifp,
4729 struct ifa6_config *cfg)
4733 unsigned long timeout;
4734 bool was_managetempaddr;
4735 bool had_prefixroute;
4736 bool new_peer = false;
4740 if (!cfg->valid_lft || cfg->preferred_lft > cfg->valid_lft)
4743 if (cfg->ifa_flags & IFA_F_MANAGETEMPADDR &&
4744 (ifp->flags & IFA_F_TEMPORARY || ifp->prefix_len != 64))
4747 if (!(ifp->flags & IFA_F_TENTATIVE) || ifp->flags & IFA_F_DADFAILED)
4748 cfg->ifa_flags &= ~IFA_F_OPTIMISTIC;
4750 timeout = addrconf_timeout_fixup(cfg->valid_lft, HZ);
4751 if (addrconf_finite_timeout(timeout)) {
4752 expires = jiffies_to_clock_t(timeout * HZ);
4753 cfg->valid_lft = timeout;
4754 flags = RTF_EXPIRES;
4758 cfg->ifa_flags |= IFA_F_PERMANENT;
4761 timeout = addrconf_timeout_fixup(cfg->preferred_lft, HZ);
4762 if (addrconf_finite_timeout(timeout)) {
4764 cfg->ifa_flags |= IFA_F_DEPRECATED;
4765 cfg->preferred_lft = timeout;
4768 if (cfg->peer_pfx &&
4769 memcmp(&ifp->peer_addr, cfg->peer_pfx, sizeof(struct in6_addr))) {
4770 if (!ipv6_addr_any(&ifp->peer_addr))
4771 cleanup_prefix_route(ifp, expires, true, true);
4775 spin_lock_bh(&ifp->lock);
4776 was_managetempaddr = ifp->flags & IFA_F_MANAGETEMPADDR;
4777 had_prefixroute = ifp->flags & IFA_F_PERMANENT &&
4778 !(ifp->flags & IFA_F_NOPREFIXROUTE);
4779 ifp->flags &= ~(IFA_F_DEPRECATED | IFA_F_PERMANENT | IFA_F_NODAD |
4780 IFA_F_HOMEADDRESS | IFA_F_MANAGETEMPADDR |
4781 IFA_F_NOPREFIXROUTE);
4782 ifp->flags |= cfg->ifa_flags;
4783 ifp->tstamp = jiffies;
4784 ifp->valid_lft = cfg->valid_lft;
4785 ifp->prefered_lft = cfg->preferred_lft;
4786 ifp->ifa_proto = cfg->ifa_proto;
4788 if (cfg->rt_priority && cfg->rt_priority != ifp->rt_priority)
4789 ifp->rt_priority = cfg->rt_priority;
4792 ifp->peer_addr = *cfg->peer_pfx;
4794 spin_unlock_bh(&ifp->lock);
4795 if (!(ifp->flags&IFA_F_TENTATIVE))
4796 ipv6_ifa_notify(0, ifp);
4798 if (!(cfg->ifa_flags & IFA_F_NOPREFIXROUTE)) {
4801 if (had_prefixroute)
4802 rc = modify_prefix_route(ifp, expires, flags, false);
4804 /* prefix route could have been deleted; if so restore it */
4805 if (rc == -ENOENT) {
4806 addrconf_prefix_route(&ifp->addr, ifp->prefix_len,
4807 ifp->rt_priority, ifp->idev->dev,
4808 expires, flags, GFP_KERNEL);
4811 if (had_prefixroute && !ipv6_addr_any(&ifp->peer_addr))
4812 rc = modify_prefix_route(ifp, expires, flags, true);
4814 if (rc == -ENOENT && !ipv6_addr_any(&ifp->peer_addr)) {
4815 addrconf_prefix_route(&ifp->peer_addr, ifp->prefix_len,
4816 ifp->rt_priority, ifp->idev->dev,
4817 expires, flags, GFP_KERNEL);
4819 } else if (had_prefixroute) {
4820 enum cleanup_prefix_rt_t action;
4821 unsigned long rt_expires;
4823 write_lock_bh(&ifp->idev->lock);
4824 action = check_cleanup_prefix_route(ifp, &rt_expires);
4825 write_unlock_bh(&ifp->idev->lock);
4827 if (action != CLEANUP_PREFIX_RT_NOP) {
4828 cleanup_prefix_route(ifp, rt_expires,
4829 action == CLEANUP_PREFIX_RT_DEL, false);
4833 if (was_managetempaddr || ifp->flags & IFA_F_MANAGETEMPADDR) {
4834 if (was_managetempaddr &&
4835 !(ifp->flags & IFA_F_MANAGETEMPADDR)) {
4837 cfg->preferred_lft = 0;
4839 manage_tempaddrs(ifp->idev, ifp, cfg->valid_lft,
4840 cfg->preferred_lft, !was_managetempaddr,
4844 addrconf_verify_rtnl(net);
4850 inet6_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh,
4851 struct netlink_ext_ack *extack)
4853 struct net *net = sock_net(skb->sk);
4854 struct ifaddrmsg *ifm;
4855 struct nlattr *tb[IFA_MAX+1];
4856 struct in6_addr *peer_pfx;
4857 struct inet6_ifaddr *ifa;
4858 struct net_device *dev;
4859 struct inet6_dev *idev;
4860 struct ifa6_config cfg;
4863 err = nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
4864 ifa_ipv6_policy, extack);
4868 memset(&cfg, 0, sizeof(cfg));
4870 ifm = nlmsg_data(nlh);
4871 cfg.pfx = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer_pfx);
4875 cfg.peer_pfx = peer_pfx;
4876 cfg.plen = ifm->ifa_prefixlen;
4877 if (tb[IFA_RT_PRIORITY])
4878 cfg.rt_priority = nla_get_u32(tb[IFA_RT_PRIORITY]);
4881 cfg.ifa_proto = nla_get_u8(tb[IFA_PROTO]);
4883 cfg.valid_lft = INFINITY_LIFE_TIME;
4884 cfg.preferred_lft = INFINITY_LIFE_TIME;
4886 if (tb[IFA_CACHEINFO]) {
4887 struct ifa_cacheinfo *ci;
4889 ci = nla_data(tb[IFA_CACHEINFO]);
4890 cfg.valid_lft = ci->ifa_valid;
4891 cfg.preferred_lft = ci->ifa_prefered;
4894 dev = __dev_get_by_index(net, ifm->ifa_index);
4899 cfg.ifa_flags = nla_get_u32(tb[IFA_FLAGS]);
4901 cfg.ifa_flags = ifm->ifa_flags;
4903 /* We ignore other flags so far. */
4904 cfg.ifa_flags &= IFA_F_NODAD | IFA_F_HOMEADDRESS |
4905 IFA_F_MANAGETEMPADDR | IFA_F_NOPREFIXROUTE |
4906 IFA_F_MCAUTOJOIN | IFA_F_OPTIMISTIC;
4908 idev = ipv6_find_idev(dev);
4910 return PTR_ERR(idev);
4912 if (!ipv6_allow_optimistic_dad(net, idev))
4913 cfg.ifa_flags &= ~IFA_F_OPTIMISTIC;
4915 if (cfg.ifa_flags & IFA_F_NODAD &&
4916 cfg.ifa_flags & IFA_F_OPTIMISTIC) {
4917 NL_SET_ERR_MSG(extack, "IFA_F_NODAD and IFA_F_OPTIMISTIC are mutually exclusive");
4921 ifa = ipv6_get_ifaddr(net, cfg.pfx, dev, 1);
4924 * It would be best to check for !NLM_F_CREATE here but
4925 * userspace already relies on not having to provide this.
4927 return inet6_addr_add(net, ifm->ifa_index, &cfg, extack);
4930 if (nlh->nlmsg_flags & NLM_F_EXCL ||
4931 !(nlh->nlmsg_flags & NLM_F_REPLACE))
4934 err = inet6_addr_modify(net, ifa, &cfg);
4941 static void put_ifaddrmsg(struct nlmsghdr *nlh, u8 prefixlen, u32 flags,
4942 u8 scope, int ifindex)
4944 struct ifaddrmsg *ifm;
4946 ifm = nlmsg_data(nlh);
4947 ifm->ifa_family = AF_INET6;
4948 ifm->ifa_prefixlen = prefixlen;
4949 ifm->ifa_flags = flags;
4950 ifm->ifa_scope = scope;
4951 ifm->ifa_index = ifindex;
4954 static int put_cacheinfo(struct sk_buff *skb, unsigned long cstamp,
4955 unsigned long tstamp, u32 preferred, u32 valid)
4957 struct ifa_cacheinfo ci;
4959 ci.cstamp = cstamp_delta(cstamp);
4960 ci.tstamp = cstamp_delta(tstamp);
4961 ci.ifa_prefered = preferred;
4962 ci.ifa_valid = valid;
4964 return nla_put(skb, IFA_CACHEINFO, sizeof(ci), &ci);
4967 static inline int rt_scope(int ifa_scope)
4969 if (ifa_scope & IFA_HOST)
4970 return RT_SCOPE_HOST;
4971 else if (ifa_scope & IFA_LINK)
4972 return RT_SCOPE_LINK;
4973 else if (ifa_scope & IFA_SITE)
4974 return RT_SCOPE_SITE;
4976 return RT_SCOPE_UNIVERSE;
4979 static inline int inet6_ifaddr_msgsize(void)
4981 return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
4982 + nla_total_size(16) /* IFA_LOCAL */
4983 + nla_total_size(16) /* IFA_ADDRESS */
4984 + nla_total_size(sizeof(struct ifa_cacheinfo))
4985 + nla_total_size(4) /* IFA_FLAGS */
4986 + nla_total_size(1) /* IFA_PROTO */
4987 + nla_total_size(4) /* IFA_RT_PRIORITY */;
4996 struct inet6_fill_args {
5003 enum addr_type_t type;
5006 static int inet6_fill_ifaddr(struct sk_buff *skb, struct inet6_ifaddr *ifa,
5007 struct inet6_fill_args *args)
5009 struct nlmsghdr *nlh;
5010 u32 preferred, valid;
5012 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5013 sizeof(struct ifaddrmsg), args->flags);
5017 put_ifaddrmsg(nlh, ifa->prefix_len, ifa->flags, rt_scope(ifa->scope),
5018 ifa->idev->dev->ifindex);
5020 if (args->netnsid >= 0 &&
5021 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid))
5024 spin_lock_bh(&ifa->lock);
5025 if (!((ifa->flags&IFA_F_PERMANENT) &&
5026 (ifa->prefered_lft == INFINITY_LIFE_TIME))) {
5027 preferred = ifa->prefered_lft;
5028 valid = ifa->valid_lft;
5029 if (preferred != INFINITY_LIFE_TIME) {
5030 long tval = (jiffies - ifa->tstamp)/HZ;
5031 if (preferred > tval)
5035 if (valid != INFINITY_LIFE_TIME) {
5043 preferred = INFINITY_LIFE_TIME;
5044 valid = INFINITY_LIFE_TIME;
5046 spin_unlock_bh(&ifa->lock);
5048 if (!ipv6_addr_any(&ifa->peer_addr)) {
5049 if (nla_put_in6_addr(skb, IFA_LOCAL, &ifa->addr) < 0 ||
5050 nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->peer_addr) < 0)
5053 if (nla_put_in6_addr(skb, IFA_ADDRESS, &ifa->addr) < 0)
5056 if (ifa->rt_priority &&
5057 nla_put_u32(skb, IFA_RT_PRIORITY, ifa->rt_priority))
5060 if (put_cacheinfo(skb, ifa->cstamp, ifa->tstamp, preferred, valid) < 0)
5063 if (nla_put_u32(skb, IFA_FLAGS, ifa->flags) < 0)
5066 if (ifa->ifa_proto &&
5067 nla_put_u8(skb, IFA_PROTO, ifa->ifa_proto))
5070 nlmsg_end(skb, nlh);
5074 nlmsg_cancel(skb, nlh);
5078 static int inet6_fill_ifmcaddr(struct sk_buff *skb, struct ifmcaddr6 *ifmca,
5079 struct inet6_fill_args *args)
5081 struct nlmsghdr *nlh;
5082 u8 scope = RT_SCOPE_UNIVERSE;
5083 int ifindex = ifmca->idev->dev->ifindex;
5085 if (ipv6_addr_scope(&ifmca->mca_addr) & IFA_SITE)
5086 scope = RT_SCOPE_SITE;
5088 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5089 sizeof(struct ifaddrmsg), args->flags);
5093 if (args->netnsid >= 0 &&
5094 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5095 nlmsg_cancel(skb, nlh);
5099 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5100 if (nla_put_in6_addr(skb, IFA_MULTICAST, &ifmca->mca_addr) < 0 ||
5101 put_cacheinfo(skb, ifmca->mca_cstamp, ifmca->mca_tstamp,
5102 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5103 nlmsg_cancel(skb, nlh);
5107 nlmsg_end(skb, nlh);
5111 static int inet6_fill_ifacaddr(struct sk_buff *skb, struct ifacaddr6 *ifaca,
5112 struct inet6_fill_args *args)
5114 struct net_device *dev = fib6_info_nh_dev(ifaca->aca_rt);
5115 int ifindex = dev ? dev->ifindex : 1;
5116 struct nlmsghdr *nlh;
5117 u8 scope = RT_SCOPE_UNIVERSE;
5119 if (ipv6_addr_scope(&ifaca->aca_addr) & IFA_SITE)
5120 scope = RT_SCOPE_SITE;
5122 nlh = nlmsg_put(skb, args->portid, args->seq, args->event,
5123 sizeof(struct ifaddrmsg), args->flags);
5127 if (args->netnsid >= 0 &&
5128 nla_put_s32(skb, IFA_TARGET_NETNSID, args->netnsid)) {
5129 nlmsg_cancel(skb, nlh);
5133 put_ifaddrmsg(nlh, 128, IFA_F_PERMANENT, scope, ifindex);
5134 if (nla_put_in6_addr(skb, IFA_ANYCAST, &ifaca->aca_addr) < 0 ||
5135 put_cacheinfo(skb, ifaca->aca_cstamp, ifaca->aca_tstamp,
5136 INFINITY_LIFE_TIME, INFINITY_LIFE_TIME) < 0) {
5137 nlmsg_cancel(skb, nlh);
5141 nlmsg_end(skb, nlh);
5145 /* called with rcu_read_lock() */
5146 static int in6_dump_addrs(struct inet6_dev *idev, struct sk_buff *skb,
5147 struct netlink_callback *cb, int s_ip_idx,
5148 struct inet6_fill_args *fillargs)
5150 struct ifmcaddr6 *ifmca;
5151 struct ifacaddr6 *ifaca;
5155 read_lock_bh(&idev->lock);
5156 switch (fillargs->type) {
5157 case UNICAST_ADDR: {
5158 struct inet6_ifaddr *ifa;
5159 fillargs->event = RTM_NEWADDR;
5161 /* unicast address incl. temp addr */
5162 list_for_each_entry(ifa, &idev->addr_list, if_list) {
5163 if (ip_idx < s_ip_idx)
5165 err = inet6_fill_ifaddr(skb, ifa, fillargs);
5168 nl_dump_check_consistent(cb, nlmsg_hdr(skb));
5174 case MULTICAST_ADDR:
5175 read_unlock_bh(&idev->lock);
5176 fillargs->event = RTM_GETMULTICAST;
5178 /* multicast address */
5179 for (ifmca = rtnl_dereference(idev->mc_list);
5181 ifmca = rtnl_dereference(ifmca->next), ip_idx++) {
5182 if (ip_idx < s_ip_idx)
5184 err = inet6_fill_ifmcaddr(skb, ifmca, fillargs);
5188 read_lock_bh(&idev->lock);
5191 fillargs->event = RTM_GETANYCAST;
5192 /* anycast address */
5193 for (ifaca = idev->ac_list; ifaca;
5194 ifaca = ifaca->aca_next, ip_idx++) {
5195 if (ip_idx < s_ip_idx)
5197 err = inet6_fill_ifacaddr(skb, ifaca, fillargs);
5205 read_unlock_bh(&idev->lock);
5206 cb->args[2] = ip_idx;
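/* Strict-mode validation of an RTM_GETADDR dump request: reject non-zero
 * header fields and unsupported attributes, and honour IFA_TARGET_NETNSID.
 */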
5210 static int inet6_valid_dump_ifaddr_req(const struct nlmsghdr *nlh,
5211 struct inet6_fill_args *fillargs,
5212 struct net **tgt_net, struct sock *sk,
5213 struct netlink_callback *cb)
5215 struct netlink_ext_ack *extack = cb->extack;
5216 struct nlattr *tb[IFA_MAX+1];
5217 struct ifaddrmsg *ifm;
5220 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5221 NL_SET_ERR_MSG_MOD(extack, "Invalid header for address dump request");
5225 ifm = nlmsg_data(nlh);
5226 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5227 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for address dump request");
5231 fillargs->ifindex = ifm->ifa_index;
5232 if (fillargs->ifindex) {
5233 cb->answer_flags |= NLM_F_DUMP_FILTERED;
5234 fillargs->flags |= NLM_F_DUMP_FILTERED;
5237 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5238 ifa_ipv6_policy, extack);
5242 for (i = 0; i <= IFA_MAX; ++i) {
5246 if (i == IFA_TARGET_NETNSID) {
5249 fillargs->netnsid = nla_get_s32(tb[i]);
5250 net = rtnl_get_net_ns_capable(sk, fillargs->netnsid);
5252 fillargs->netnsid = -1;
5253 NL_SET_ERR_MSG_MOD(extack, "Invalid target network namespace id");
5254 return PTR_ERR(net);
5258 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in dump request");
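/* Common dump loop for RTM_GETADDR/RTM_GETMULTICAST/RTM_GETANYCAST:
 * either dump a single interface (ifindex filter) or walk the device
 * hash table, emitting addresses of the requested type and saving the
 * iteration state in cb->args[].
 */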
5266 static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
5267 enum addr_type_t type)
5269 const struct nlmsghdr *nlh = cb->nlh;
5270 struct inet6_fill_args fillargs = {
5271 .portid = NETLINK_CB(cb->skb).portid,
5272 .seq = cb->nlh->nlmsg_seq,
5273 .flags = NLM_F_MULTI,
5277 struct net *tgt_net = sock_net(skb->sk);
5278 int idx, s_idx, s_ip_idx;
5280 struct net_device *dev;
5281 struct inet6_dev *idev;
5282 struct hlist_head *head;
5286 s_idx = idx = cb->args[1];
5287 s_ip_idx = cb->args[2];
5289 if (cb->strict_check) {
5290 err = inet6_valid_dump_ifaddr_req(nlh, &fillargs, &tgt_net,
5296 if (fillargs.ifindex) {
5297 dev = __dev_get_by_index(tgt_net, fillargs.ifindex);
5302 idev = __in6_dev_get(dev);
5304 err = in6_dump_addrs(idev, skb, cb, s_ip_idx,
5314 cb->seq = atomic_read(&tgt_net->ipv6.dev_addr_genid) ^ tgt_net->dev_base_seq;
5315 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
5317 head = &tgt_net->dev_index_head[h];
5318 hlist_for_each_entry_rcu(dev, head, index_hlist) {
5321 if (h > s_h || idx > s_idx)
5323 idev = __in6_dev_get(dev);
5327 if (in6_dump_addrs(idev, skb, cb, s_ip_idx,
5339 if (fillargs.netnsid >= 0)
5342 return skb->len ? : err;
5345 static int inet6_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
5347 enum addr_type_t type = UNICAST_ADDR;
5349 return inet6_dump_addr(skb, cb, type);
5352 static int inet6_dump_ifmcaddr(struct sk_buff *skb, struct netlink_callback *cb)
5354 enum addr_type_t type = MULTICAST_ADDR;
5356 return inet6_dump_addr(skb, cb, type);
5360 static int inet6_dump_ifacaddr(struct sk_buff *skb, struct netlink_callback *cb)
5362 enum addr_type_t type = ANYCAST_ADDR;
5364 return inet6_dump_addr(skb, cb, type);
5367 static int inet6_rtm_valid_getaddr_req(struct sk_buff *skb,
5368 const struct nlmsghdr *nlh,
5370 struct netlink_ext_ack *extack)
5372 struct ifaddrmsg *ifm;
5375 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5376 NL_SET_ERR_MSG_MOD(extack, "Invalid header for get address request");
5380 if (!netlink_strict_get_check(skb))
5381 return nlmsg_parse_deprecated(nlh, sizeof(*ifm), tb, IFA_MAX,
5382 ifa_ipv6_policy, extack);
5384 ifm = nlmsg_data(nlh);
5385 if (ifm->ifa_prefixlen || ifm->ifa_flags || ifm->ifa_scope) {
5386 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for get address request");
5390 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*ifm), tb, IFA_MAX,
5391 ifa_ipv6_policy, extack);
5395 for (i = 0; i <= IFA_MAX; i++) {
5400 case IFA_TARGET_NETNSID:
5405 NL_SET_ERR_MSG_MOD(extack, "Unsupported attribute in get address request");
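/* RTM_GETADDR doit handler: look up a single address, optionally in the
 * netns named by IFA_TARGET_NETNSID, and unicast the reply back to the
 * requester.
 */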
5413 static int inet6_rtm_getaddr(struct sk_buff *in_skb, struct nlmsghdr *nlh,
5414 struct netlink_ext_ack *extack)
5416 struct net *tgt_net = sock_net(in_skb->sk);
5417 struct inet6_fill_args fillargs = {
5418 .portid = NETLINK_CB(in_skb).portid,
5419 .seq = nlh->nlmsg_seq,
5420 .event = RTM_NEWADDR,
5424 struct ifaddrmsg *ifm;
5425 struct nlattr *tb[IFA_MAX+1];
5426 struct in6_addr *addr = NULL, *peer;
5427 struct net_device *dev = NULL;
5428 struct inet6_ifaddr *ifa;
5429 struct sk_buff *skb;
5432 err = inet6_rtm_valid_getaddr_req(in_skb, nlh, tb, extack);
5436 if (tb[IFA_TARGET_NETNSID]) {
5437 fillargs.netnsid = nla_get_s32(tb[IFA_TARGET_NETNSID]);
5439 tgt_net = rtnl_get_net_ns_capable(NETLINK_CB(in_skb).sk,
5441 if (IS_ERR(tgt_net))
5442 return PTR_ERR(tgt_net);
5445 addr = extract_addr(tb[IFA_ADDRESS], tb[IFA_LOCAL], &peer);
5449 ifm = nlmsg_data(nlh);
5451 dev = dev_get_by_index(tgt_net, ifm->ifa_index);
5453 ifa = ipv6_get_ifaddr(tgt_net, addr, dev, 1);
5455 err = -EADDRNOTAVAIL;
5459 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_KERNEL);
5465 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5467 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5468 WARN_ON(err == -EMSGSIZE);
5472 err = rtnl_unicast(skb, tgt_net, NETLINK_CB(in_skb).portid);
5477 if (fillargs.netnsid >= 0)
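/* Broadcast an address event to RTNLGRP_IPV6_IFADDR listeners; failures
 * are reported through rtnl_set_sk_err().
 */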
5483 static void inet6_ifa_notify(int event, struct inet6_ifaddr *ifa)
5485 struct sk_buff *skb;
5486 struct net *net = dev_net(ifa->idev->dev);
5487 struct inet6_fill_args fillargs = {
5496 skb = nlmsg_new(inet6_ifaddr_msgsize(), GFP_ATOMIC);
5500 err = inet6_fill_ifaddr(skb, ifa, &fillargs);
5502 /* -EMSGSIZE implies BUG in inet6_ifaddr_msgsize() */
5503 WARN_ON(err == -EMSGSIZE);
5507 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFADDR, NULL, GFP_ATOMIC);
5511 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFADDR, err);
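/* Flatten a struct ipv6_devconf into the s32 array exported as
 * IFLA_INET6_CONF; the caller must provide at least DEVCONF_MAX entries.
 */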
5514 static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
5515 __s32 *array, int bytes)
5517 BUG_ON(bytes < (DEVCONF_MAX * 4));
5519 memset(array, 0, bytes);
5520 array[DEVCONF_FORWARDING] = cnf->forwarding;
5521 array[DEVCONF_HOPLIMIT] = cnf->hop_limit;
5522 array[DEVCONF_MTU6] = cnf->mtu6;
5523 array[DEVCONF_ACCEPT_RA] = cnf->accept_ra;
5524 array[DEVCONF_ACCEPT_REDIRECTS] = cnf->accept_redirects;
5525 array[DEVCONF_AUTOCONF] = cnf->autoconf;
5526 array[DEVCONF_DAD_TRANSMITS] = cnf->dad_transmits;
5527 array[DEVCONF_RTR_SOLICITS] = cnf->rtr_solicits;
5528 array[DEVCONF_RTR_SOLICIT_INTERVAL] =
5529 jiffies_to_msecs(cnf->rtr_solicit_interval);
5530 array[DEVCONF_RTR_SOLICIT_MAX_INTERVAL] =
5531 jiffies_to_msecs(cnf->rtr_solicit_max_interval);
5532 array[DEVCONF_RTR_SOLICIT_DELAY] =
5533 jiffies_to_msecs(cnf->rtr_solicit_delay);
5534 array[DEVCONF_FORCE_MLD_VERSION] = cnf->force_mld_version;
5535 array[DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL] =
5536 jiffies_to_msecs(cnf->mldv1_unsolicited_report_interval);
5537 array[DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL] =
5538 jiffies_to_msecs(cnf->mldv2_unsolicited_report_interval);
5539 array[DEVCONF_USE_TEMPADDR] = cnf->use_tempaddr;
5540 array[DEVCONF_TEMP_VALID_LFT] = cnf->temp_valid_lft;
5541 array[DEVCONF_TEMP_PREFERED_LFT] = cnf->temp_prefered_lft;
5542 array[DEVCONF_REGEN_MAX_RETRY] = cnf->regen_max_retry;
5543 array[DEVCONF_MAX_DESYNC_FACTOR] = cnf->max_desync_factor;
5544 array[DEVCONF_MAX_ADDRESSES] = cnf->max_addresses;
5545 array[DEVCONF_ACCEPT_RA_DEFRTR] = cnf->accept_ra_defrtr;
5546 array[DEVCONF_RA_DEFRTR_METRIC] = cnf->ra_defrtr_metric;
5547 array[DEVCONF_ACCEPT_RA_MIN_HOP_LIMIT] = cnf->accept_ra_min_hop_limit;
5548 array[DEVCONF_ACCEPT_RA_PINFO] = cnf->accept_ra_pinfo;
5549 #ifdef CONFIG_IPV6_ROUTER_PREF
5550 array[DEVCONF_ACCEPT_RA_RTR_PREF] = cnf->accept_ra_rtr_pref;
5551 array[DEVCONF_RTR_PROBE_INTERVAL] =
5552 jiffies_to_msecs(cnf->rtr_probe_interval);
5553 #ifdef CONFIG_IPV6_ROUTE_INFO
5554 array[DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN] = cnf->accept_ra_rt_info_min_plen;
5555 array[DEVCONF_ACCEPT_RA_RT_INFO_MAX_PLEN] = cnf->accept_ra_rt_info_max_plen;
5558 array[DEVCONF_PROXY_NDP] = cnf->proxy_ndp;
5559 array[DEVCONF_ACCEPT_SOURCE_ROUTE] = cnf->accept_source_route;
5560 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
5561 array[DEVCONF_OPTIMISTIC_DAD] = cnf->optimistic_dad;
5562 array[DEVCONF_USE_OPTIMISTIC] = cnf->use_optimistic;
5564 #ifdef CONFIG_IPV6_MROUTE
5565 array[DEVCONF_MC_FORWARDING] = atomic_read(&cnf->mc_forwarding);
5567 array[DEVCONF_DISABLE_IPV6] = cnf->disable_ipv6;
5568 array[DEVCONF_ACCEPT_DAD] = cnf->accept_dad;
5569 array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
5570 array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
5571 array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
5572 array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
5573 array[DEVCONF_ACCEPT_RA_MTU] = cnf->accept_ra_mtu;
5574 array[DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN] = cnf->ignore_routes_with_linkdown;
5575 /* we omit DEVCONF_STABLE_SECRET for now */
5576 array[DEVCONF_USE_OIF_ADDRS_ONLY] = cnf->use_oif_addrs_only;
5577 array[DEVCONF_DROP_UNICAST_IN_L2_MULTICAST] = cnf->drop_unicast_in_l2_multicast;
5578 array[DEVCONF_DROP_UNSOLICITED_NA] = cnf->drop_unsolicited_na;
5579 array[DEVCONF_KEEP_ADDR_ON_DOWN] = cnf->keep_addr_on_down;
5580 array[DEVCONF_SEG6_ENABLED] = cnf->seg6_enabled;
5581 #ifdef CONFIG_IPV6_SEG6_HMAC
5582 array[DEVCONF_SEG6_REQUIRE_HMAC] = cnf->seg6_require_hmac;
5584 array[DEVCONF_ENHANCED_DAD] = cnf->enhanced_dad;
5585 array[DEVCONF_ADDR_GEN_MODE] = cnf->addr_gen_mode;
5586 array[DEVCONF_DISABLE_POLICY] = cnf->disable_policy;
5587 array[DEVCONF_NDISC_TCLASS] = cnf->ndisc_tclass;
5588 array[DEVCONF_RPL_SEG_ENABLED] = cnf->rpl_seg_enabled;
5589 array[DEVCONF_IOAM6_ENABLED] = cnf->ioam6_enabled;
5590 array[DEVCONF_IOAM6_ID] = cnf->ioam6_id;
5591 array[DEVCONF_IOAM6_ID_WIDE] = cnf->ioam6_id_wide;
5592 array[DEVCONF_NDISC_EVICT_NOCARRIER] = cnf->ndisc_evict_nocarrier;
5593 array[DEVCONF_ACCEPT_UNTRACKED_NA] = cnf->accept_untracked_na;
5596 static inline size_t inet6_ifla6_size(void)
5598 return nla_total_size(4) /* IFLA_INET6_FLAGS */
5599 + nla_total_size(sizeof(struct ifla_cacheinfo))
5600 + nla_total_size(DEVCONF_MAX * 4) /* IFLA_INET6_CONF */
5601 + nla_total_size(IPSTATS_MIB_MAX * 8) /* IFLA_INET6_STATS */
5602 + nla_total_size(ICMP6_MIB_MAX * 8) /* IFLA_INET6_ICMP6STATS */
5603 + nla_total_size(sizeof(struct in6_addr)) /* IFLA_INET6_TOKEN */
5604 + nla_total_size(1) /* IFLA_INET6_ADDR_GEN_MODE */
5605 + nla_total_size(4) /* IFLA_INET6_RA_MTU */
5609 static inline size_t inet6_if_nlmsg_size(void)
5611 return NLMSG_ALIGN(sizeof(struct ifinfomsg))
5612 + nla_total_size(IFNAMSIZ) /* IFLA_IFNAME */
5613 + nla_total_size(MAX_ADDR_LEN) /* IFLA_ADDRESS */
5614 + nla_total_size(4) /* IFLA_MTU */
5615 + nla_total_size(4) /* IFLA_LINK */
5616 + nla_total_size(1) /* IFLA_OPERSTATE */
5617 + nla_total_size(inet6_ifla6_size()); /* IFLA_PROTINFO */
5620 static inline void __snmp6_fill_statsdev(u64 *stats, atomic_long_t *mib,
5624 int pad = bytes - sizeof(u64) * ICMP6_MIB_MAX;
5627 /* Use put_unaligned() because stats may not be aligned for u64. */
5628 put_unaligned(ICMP6_MIB_MAX, &stats[0]);
5629 for (i = 1; i < ICMP6_MIB_MAX; i++)
5630 put_unaligned(atomic_long_read(&mib[i]), &stats[i]);
5632 memset(&stats[ICMP6_MIB_MAX], 0, pad);
5635 static inline void __snmp6_fill_stats64(u64 *stats, void __percpu *mib,
5636 int bytes, size_t syncpoff)
5639 u64 buff[IPSTATS_MIB_MAX];
5640 int pad = bytes - sizeof(u64) * IPSTATS_MIB_MAX;
5644 memset(buff, 0, sizeof(buff));
5645 buff[0] = IPSTATS_MIB_MAX;
5647 for_each_possible_cpu(c) {
5648 for (i = 1; i < IPSTATS_MIB_MAX; i++)
5649 buff[i] += snmp_get_cpu_field64(mib, c, i, syncpoff);
5652 memcpy(stats, buff, IPSTATS_MIB_MAX * sizeof(u64));
5653 memset(&stats[IPSTATS_MIB_MAX], 0, pad);
5656 static void snmp6_fill_stats(u64 *stats, struct inet6_dev *idev, int attrtype,
5660 case IFLA_INET6_STATS:
5661 __snmp6_fill_stats64(stats, idev->stats.ipv6, bytes,
5662 offsetof(struct ipstats_mib, syncp));
5664 case IFLA_INET6_ICMP6STATS:
5665 __snmp6_fill_statsdev(stats, idev->stats.icmpv6dev->mibs, bytes);
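/* Emit the IFLA_INET6_* payload for one device: flags, cacheinfo, the
 * devconf array, SNMP/ICMP6 statistics, the interface token and the
 * address generation mode.
 */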
5670 static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev,
5671 u32 ext_filter_mask)
5674 struct ifla_cacheinfo ci;
5676 if (nla_put_u32(skb, IFLA_INET6_FLAGS, idev->if_flags))
5677 goto nla_put_failure;
5678 ci.max_reasm_len = IPV6_MAXPLEN;
5679 ci.tstamp = cstamp_delta(idev->tstamp);
5680 ci.reachable_time = jiffies_to_msecs(idev->nd_parms->reachable_time);
5681 ci.retrans_time = jiffies_to_msecs(NEIGH_VAR(idev->nd_parms, RETRANS_TIME));
5682 if (nla_put(skb, IFLA_INET6_CACHEINFO, sizeof(ci), &ci))
5683 goto nla_put_failure;
5684 nla = nla_reserve(skb, IFLA_INET6_CONF, DEVCONF_MAX * sizeof(s32));
5686 goto nla_put_failure;
5687 ipv6_store_devconf(&idev->cnf, nla_data(nla), nla_len(nla));
5689 /* XXX - MC not implemented */
5691 if (ext_filter_mask & RTEXT_FILTER_SKIP_STATS)
5694 nla = nla_reserve(skb, IFLA_INET6_STATS, IPSTATS_MIB_MAX * sizeof(u64));
5696 goto nla_put_failure;
5697 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_STATS, nla_len(nla));
5699 nla = nla_reserve(skb, IFLA_INET6_ICMP6STATS, ICMP6_MIB_MAX * sizeof(u64));
5701 goto nla_put_failure;
5702 snmp6_fill_stats(nla_data(nla), idev, IFLA_INET6_ICMP6STATS, nla_len(nla));
5704 nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
5706 goto nla_put_failure;
5707 read_lock_bh(&idev->lock);
5708 memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
5709 read_unlock_bh(&idev->lock);
5711 if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->cnf.addr_gen_mode))
5712 goto nla_put_failure;
5715 nla_put_u32(skb, IFLA_INET6_RA_MTU, idev->ra_mtu))
5716 goto nla_put_failure;
5724 static size_t inet6_get_link_af_size(const struct net_device *dev,
5725 u32 ext_filter_mask)
5727 if (!__in6_dev_get(dev))
5730 return inet6_ifla6_size();
5733 static int inet6_fill_link_af(struct sk_buff *skb, const struct net_device *dev,
5734 u32 ext_filter_mask)
5736 struct inet6_dev *idev = __in6_dev_get(dev);
5741 if (inet6_fill_ifla6_attrs(skb, idev, ext_filter_mask) < 0)
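/* Install (or clear) the interface identifier supplied via
 * IFLA_INET6_TOKEN and re-trigger router solicitation so that tokenized
 * addresses are regenerated from the next router advertisement.
 */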
5747 static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token,
5748 struct netlink_ext_ack *extack)
5750 struct inet6_ifaddr *ifp;
5751 struct net_device *dev = idev->dev;
5752 bool clear_token, update_rs = false;
5753 struct in6_addr ll_addr;
5760 if (dev->flags & IFF_LOOPBACK) {
5761 NL_SET_ERR_MSG_MOD(extack, "Device is loopback");
5765 if (dev->flags & IFF_NOARP) {
5766 NL_SET_ERR_MSG_MOD(extack,
5767 "Device does not do neighbour discovery");
5771 if (!ipv6_accept_ra(idev)) {
5772 NL_SET_ERR_MSG_MOD(extack,
5773 "Router advertisement is disabled on device");
5777 if (idev->cnf.rtr_solicits == 0) {
5778 NL_SET_ERR_MSG(extack,
5779 "Router solicitation is disabled on device");
5783 write_lock_bh(&idev->lock);
5785 BUILD_BUG_ON(sizeof(token->s6_addr) != 16);
5786 memcpy(idev->token.s6_addr + 8, token->s6_addr + 8, 8);
5788 write_unlock_bh(&idev->lock);
5790 clear_token = ipv6_addr_any(token);
5794 if (!idev->dead && (idev->if_flags & IF_READY) &&
5795 !ipv6_get_lladdr(dev, &ll_addr, IFA_F_TENTATIVE |
5796 IFA_F_OPTIMISTIC)) {
5797 /* If we're not ready, then normal ifup will take care
5798 * of this. Otherwise, we need to request our rs here. */
5800 ndisc_send_rs(dev, &ll_addr, &in6addr_linklocal_allrouters);
5805 write_lock_bh(&idev->lock);
5808 idev->if_flags |= IF_RS_SENT;
5809 idev->rs_interval = rfc3315_s14_backoff_init(
5810 idev->cnf.rtr_solicit_interval);
5811 idev->rs_probes = 1;
5812 addrconf_mod_rs_timer(idev, idev->rs_interval);
5815 /* Well, that's kinda nasty ... */
5816 list_for_each_entry(ifp, &idev->addr_list, if_list) {
5817 spin_lock(&ifp->lock);
5818 if (ifp->tokenized) {
5820 ifp->prefered_lft = 0;
5822 spin_unlock(&ifp->lock);
5825 write_unlock_bh(&idev->lock);
5826 inet6_ifinfo_notify(RTM_NEWLINK, idev);
5827 addrconf_verify_rtnl(dev_net(dev));
5831 static const struct nla_policy inet6_af_policy[IFLA_INET6_MAX + 1] = {
5832 [IFLA_INET6_ADDR_GEN_MODE] = { .type = NLA_U8 },
5833 [IFLA_INET6_TOKEN] = { .len = sizeof(struct in6_addr) },
5834 [IFLA_INET6_RA_MTU] = { .type = NLA_REJECT,
5836 "IFLA_INET6_RA_MTU can not be set" },
5839 static int check_addr_gen_mode(int mode)
5841 if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
5842 mode != IN6_ADDR_GEN_MODE_NONE &&
5843 mode != IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5844 mode != IN6_ADDR_GEN_MODE_RANDOM)
5849 static int check_stable_privacy(struct inet6_dev *idev, struct net *net,
5852 if (mode == IN6_ADDR_GEN_MODE_STABLE_PRIVACY &&
5853 !idev->cnf.stable_secret.initialized &&
5854 !net->ipv6.devconf_dflt->stable_secret.initialized)
5859 static int inet6_validate_link_af(const struct net_device *dev,
5860 const struct nlattr *nla,
5861 struct netlink_ext_ack *extack)
5863 struct nlattr *tb[IFLA_INET6_MAX + 1];
5864 struct inet6_dev *idev = NULL;
5868 idev = __in6_dev_get(dev);
5870 return -EAFNOSUPPORT;
5873 err = nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla,
5874 inet6_af_policy, extack);
5878 if (!tb[IFLA_INET6_TOKEN] && !tb[IFLA_INET6_ADDR_GEN_MODE])
5881 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5882 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5884 if (check_addr_gen_mode(mode) < 0)
5886 if (dev && check_stable_privacy(idev, dev_net(dev), mode) < 0)
5893 static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla,
5894 struct netlink_ext_ack *extack)
5896 struct inet6_dev *idev = __in6_dev_get(dev);
5897 struct nlattr *tb[IFLA_INET6_MAX + 1];
5901 return -EAFNOSUPPORT;
5903 if (nla_parse_nested_deprecated(tb, IFLA_INET6_MAX, nla, NULL, NULL) < 0)
5906 if (tb[IFLA_INET6_TOKEN]) {
5907 err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]),
5913 if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
5914 u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
5916 idev->cnf.addr_gen_mode = mode;
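/* Build an AF_INET6 RTM_NEWLINK message for one device, including the
 * nested IFLA_PROTINFO payload filled by inet6_fill_ifla6_attrs().
 */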
5922 static int inet6_fill_ifinfo(struct sk_buff *skb, struct inet6_dev *idev,
5923 u32 portid, u32 seq, int event, unsigned int flags)
5925 struct net_device *dev = idev->dev;
5926 struct ifinfomsg *hdr;
5927 struct nlmsghdr *nlh;
5930 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*hdr), flags);
5934 hdr = nlmsg_data(nlh);
5935 hdr->ifi_family = AF_INET6;
5937 hdr->ifi_type = dev->type;
5938 hdr->ifi_index = dev->ifindex;
5939 hdr->ifi_flags = dev_get_flags(dev);
5940 hdr->ifi_change = 0;
5942 if (nla_put_string(skb, IFLA_IFNAME, dev->name) ||
5944 nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr)) ||
5945 nla_put_u32(skb, IFLA_MTU, dev->mtu) ||
5946 (dev->ifindex != dev_get_iflink(dev) &&
5947 nla_put_u32(skb, IFLA_LINK, dev_get_iflink(dev))) ||
5948 nla_put_u8(skb, IFLA_OPERSTATE,
5949 netif_running(dev) ? dev->operstate : IF_OPER_DOWN))
5950 goto nla_put_failure;
5951 protoinfo = nla_nest_start_noflag(skb, IFLA_PROTINFO);
5953 goto nla_put_failure;
5955 if (inet6_fill_ifla6_attrs(skb, idev, 0) < 0)
5956 goto nla_put_failure;
5958 nla_nest_end(skb, protoinfo);
5959 nlmsg_end(skb, nlh);
5963 nlmsg_cancel(skb, nlh);
5967 static int inet6_valid_dump_ifinfo(const struct nlmsghdr *nlh,
5968 struct netlink_ext_ack *extack)
5970 struct ifinfomsg *ifm;
5972 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*ifm))) {
5973 NL_SET_ERR_MSG_MOD(extack, "Invalid header for link dump request");
5977 if (nlmsg_attrlen(nlh, sizeof(*ifm))) {
5978 NL_SET_ERR_MSG_MOD(extack, "Invalid data after header");
5982 ifm = nlmsg_data(nlh);
5983 if (ifm->__ifi_pad || ifm->ifi_type || ifm->ifi_flags ||
5984 ifm->ifi_change || ifm->ifi_index) {
5985 NL_SET_ERR_MSG_MOD(extack, "Invalid values in header for dump request");
5992 static int inet6_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)
5994 struct net *net = sock_net(skb->sk);
5997 struct net_device *dev;
5998 struct inet6_dev *idev;
5999 struct hlist_head *head;
6001 /* only requests using strict checking can pass data to
6002 * influence the dump */
6004 if (cb->strict_check) {
6005 int err = inet6_valid_dump_ifinfo(cb->nlh, cb->extack);
6012 s_idx = cb->args[1];
6015 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
6017 head = &net->dev_index_head[h];
6018 hlist_for_each_entry_rcu(dev, head, index_hlist) {
6021 idev = __in6_dev_get(dev);
6024 if (inet6_fill_ifinfo(skb, idev,
6025 NETLINK_CB(cb->skb).portid,
6027 RTM_NEWLINK, NLM_F_MULTI) < 0)
6041 void inet6_ifinfo_notify(int event, struct inet6_dev *idev)
6043 struct sk_buff *skb;
6044 struct net *net = dev_net(idev->dev);
6047 skb = nlmsg_new(inet6_if_nlmsg_size(), GFP_ATOMIC);
6051 err = inet6_fill_ifinfo(skb, idev, 0, 0, event, 0);
6053 /* -EMSGSIZE implies BUG in inet6_if_nlmsg_size() */
6054 WARN_ON(err == -EMSGSIZE);
6058 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_IFINFO, NULL, GFP_ATOMIC);
6062 rtnl_set_sk_err(net, RTNLGRP_IPV6_IFINFO, err);
6065 static inline size_t inet6_prefix_nlmsg_size(void)
6067 return NLMSG_ALIGN(sizeof(struct prefixmsg))
6068 + nla_total_size(sizeof(struct in6_addr))
6069 + nla_total_size(sizeof(struct prefix_cacheinfo));
6072 static int inet6_fill_prefix(struct sk_buff *skb, struct inet6_dev *idev,
6073 struct prefix_info *pinfo, u32 portid, u32 seq,
6074 int event, unsigned int flags)
6076 struct prefixmsg *pmsg;
6077 struct nlmsghdr *nlh;
6078 struct prefix_cacheinfo ci;
6080 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*pmsg), flags);
6084 pmsg = nlmsg_data(nlh);
6085 pmsg->prefix_family = AF_INET6;
6086 pmsg->prefix_pad1 = 0;
6087 pmsg->prefix_pad2 = 0;
6088 pmsg->prefix_ifindex = idev->dev->ifindex;
6089 pmsg->prefix_len = pinfo->prefix_len;
6090 pmsg->prefix_type = pinfo->type;
6091 pmsg->prefix_pad3 = 0;
6092 pmsg->prefix_flags = 0;
6094 pmsg->prefix_flags |= IF_PREFIX_ONLINK;
6095 if (pinfo->autoconf)
6096 pmsg->prefix_flags |= IF_PREFIX_AUTOCONF;
6098 if (nla_put(skb, PREFIX_ADDRESS, sizeof(pinfo->prefix), &pinfo->prefix))
6099 goto nla_put_failure;
6100 ci.preferred_time = ntohl(pinfo->prefered);
6101 ci.valid_time = ntohl(pinfo->valid);
6102 if (nla_put(skb, PREFIX_CACHEINFO, sizeof(ci), &ci))
6103 goto nla_put_failure;
6104 nlmsg_end(skb, nlh);
6108 nlmsg_cancel(skb, nlh);
6112 static void inet6_prefix_notify(int event, struct inet6_dev *idev,
6113 struct prefix_info *pinfo)
6115 struct sk_buff *skb;
6116 struct net *net = dev_net(idev->dev);
6119 skb = nlmsg_new(inet6_prefix_nlmsg_size(), GFP_ATOMIC);
6123 err = inet6_fill_prefix(skb, idev, pinfo, 0, 0, event, 0);
6125 /* -EMSGSIZE implies BUG in inet6_prefix_nlmsg_size() */
6126 WARN_ON(err == -EMSGSIZE);
6130 rtnl_notify(skb, net, 0, RTNLGRP_IPV6_PREFIX, NULL, GFP_ATOMIC);
6134 rtnl_set_sk_err(net, RTNLGRP_IPV6_PREFIX, err);
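/* Core add/delete notification for an address: send the netlink event
 * and keep the host route, anycast membership and solicited-node
 * multicast membership consistent with the address state.
 */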
6137 static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6139 struct net *net = dev_net(ifp->idev->dev);
6144 inet6_ifa_notify(event ? : RTM_NEWADDR, ifp);
6149 /* If the address was optimistic we inserted the route at the
6150 * start of our DAD process, so we don't need to do it again.
6151 * If the device was taken down in the middle of the DAD
6152 * cycle there is a race where we could get here without a
6153 * host route, so nothing to insert. That will be fixed when
6154 * the device is brought up. */
6156 if (ifp->rt && !rcu_access_pointer(ifp->rt->fib6_node)) {
6157 ip6_ins_rt(net, ifp->rt);
6158 } else if (!ifp->rt && (ifp->idev->dev->flags & IFF_UP)) {
6159 pr_warn("BUG: Address %pI6c on device %s is missing its host route.\n",
6160 &ifp->addr, ifp->idev->dev->name);
6163 if (ifp->idev->cnf.forwarding)
6164 addrconf_join_anycast(ifp);
6165 if (!ipv6_addr_any(&ifp->peer_addr))
6166 addrconf_prefix_route(&ifp->peer_addr, 128,
6167 ifp->rt_priority, ifp->idev->dev,
6171 if (ifp->idev->cnf.forwarding)
6172 addrconf_leave_anycast(ifp);
6173 addrconf_leave_solict(ifp->idev, &ifp->addr);
6174 if (!ipv6_addr_any(&ifp->peer_addr)) {
6175 struct fib6_info *rt;
6177 rt = addrconf_get_prefix_route(&ifp->peer_addr, 128,
6178 ifp->idev->dev, 0, 0,
6181 ip6_del_rt(net, rt, false);
6184 ip6_del_rt(net, ifp->rt, false);
6187 rt_genid_bump_ipv6(net);
6190 atomic_inc(&net->ipv6.dev_addr_genid);
6193 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
6195 if (likely(ifp->idev->dead == 0))
6196 __ipv6_ifa_notify(event, ifp);
6199 #ifdef CONFIG_SYSCTL
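/* sysctl handler for "forwarding": run proc_dointvec() on a private copy
 * of the table so the real value is only committed under RTNL by
 * addrconf_fixup_forwarding().
 */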
6201 static int addrconf_sysctl_forward(struct ctl_table *ctl, int write,
6202 void *buffer, size_t *lenp, loff_t *ppos)
6204 int *valp = ctl->data;
6207 struct ctl_table lctl;
6211 /* ctl->data points to idev->cnf.forwarding, we should
6212 * not modify it until we get the rtnl lock. */
6217 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6220 ret = addrconf_fixup_forwarding(ctl, valp, val);
6226 static int addrconf_sysctl_mtu(struct ctl_table *ctl, int write,
6227 void *buffer, size_t *lenp, loff_t *ppos)
6229 struct inet6_dev *idev = ctl->extra1;
6230 int min_mtu = IPV6_MIN_MTU;
6231 struct ctl_table lctl;
6234 lctl.extra1 = &min_mtu;
6235 lctl.extra2 = idev ? &idev->dev->mtu : NULL;
6237 return proc_dointvec_minmax(&lctl, write, buffer, lenp, ppos);
6240 static void dev_disable_change(struct inet6_dev *idev)
6242 struct netdev_notifier_info info;
6244 if (!idev || !idev->dev)
6247 netdev_notifier_info_init(&info, idev->dev);
6248 if (idev->cnf.disable_ipv6)
6249 addrconf_notify(NULL, NETDEV_DOWN, &info);
6251 addrconf_notify(NULL, NETDEV_UP, &info);
6254 static void addrconf_disable_change(struct net *net, __s32 newf)
6256 struct net_device *dev;
6257 struct inet6_dev *idev;
6259 for_each_netdev(net, dev) {
6260 idev = __in6_dev_get(dev);
6262 int changed = (!idev->cnf.disable_ipv6) ^ (!newf);
6263 idev->cnf.disable_ipv6 = newf;
6265 dev_disable_change(idev);
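/* Apply a change of the disable_ipv6 sysctl to the affected scope (all
 * devices, the default template, or a single interface); takes RTNL and
 * restarts the syscall if the lock is contended.
 */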
6270 static int addrconf_disable_ipv6(struct ctl_table *table, int *p, int newf)
6275 if (!rtnl_trylock())
6276 return restart_syscall();
6278 net = (struct net *)table->extra2;
6282 if (p == &net->ipv6.devconf_dflt->disable_ipv6) {
6287 if (p == &net->ipv6.devconf_all->disable_ipv6) {
6288 net->ipv6.devconf_dflt->disable_ipv6 = newf;
6289 addrconf_disable_change(net, newf);
6290 } else if ((!newf) ^ (!old))
6291 dev_disable_change((struct inet6_dev *)table->extra1);
6297 static int addrconf_sysctl_disable(struct ctl_table *ctl, int write,
6298 void *buffer, size_t *lenp, loff_t *ppos)
6300 int *valp = ctl->data;
6303 struct ctl_table lctl;
6307 /* ctl->data points to idev->cnf.disable_ipv6, we should
6308 * not modify it until we get the rtnl lock. */
6313 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6316 ret = addrconf_disable_ipv6(ctl, valp, val);
6322 static int addrconf_sysctl_proxy_ndp(struct ctl_table *ctl, int write,
6323 void *buffer, size_t *lenp, loff_t *ppos)
6325 int *valp = ctl->data;
6330 ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
6333 if (write && old != new) {
6334 struct net *net = ctl->extra2;
6336 if (!rtnl_trylock())
6337 return restart_syscall();
6339 if (valp == &net->ipv6.devconf_dflt->proxy_ndp)
6340 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6341 NETCONFA_PROXY_NEIGH,
6342 NETCONFA_IFINDEX_DEFAULT,
6343 net->ipv6.devconf_dflt);
6344 else if (valp == &net->ipv6.devconf_all->proxy_ndp)
6345 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6346 NETCONFA_PROXY_NEIGH,
6347 NETCONFA_IFINDEX_ALL,
6348 net->ipv6.devconf_all);
6350 struct inet6_dev *idev = ctl->extra1;
6352 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF,
6353 NETCONFA_PROXY_NEIGH,
6363 static int addrconf_sysctl_addr_gen_mode(struct ctl_table *ctl, int write,
6364 void *buffer, size_t *lenp,
6369 struct inet6_dev *idev = (struct inet6_dev *)ctl->extra1;
6370 struct net *net = (struct net *)ctl->extra2;
6371 struct ctl_table tmp = {
6373 .maxlen = sizeof(new_val),
6377 if (!rtnl_trylock())
6378 return restart_syscall();
6380 new_val = *((u32 *)ctl->data);
6382 ret = proc_douintvec(&tmp, write, buffer, lenp, ppos);
6387 if (check_addr_gen_mode(new_val) < 0) {
6393 if (check_stable_privacy(idev, net, new_val) < 0) {
6398 if (idev->cnf.addr_gen_mode != new_val) {
6399 idev->cnf.addr_gen_mode = new_val;
6400 addrconf_dev_config(idev->dev);
6402 } else if (&net->ipv6.devconf_all->addr_gen_mode == ctl->data) {
6403 struct net_device *dev;
6405 net->ipv6.devconf_dflt->addr_gen_mode = new_val;
6406 for_each_netdev(net, dev) {
6407 idev = __in6_dev_get(dev);
6409 idev->cnf.addr_gen_mode != new_val) {
6410 idev->cnf.addr_gen_mode = new_val;
6411 addrconf_dev_config(idev->dev);
6416 *((u32 *)ctl->data) = new_val;
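/* sysctl handler for stable_secret: the value is written as an IPv6
 * address string, parsed with in6_pton(), and the affected device(s)
 * have their addr_gen_mode switched to stable-privacy generation.
 */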
6425 static int addrconf_sysctl_stable_secret(struct ctl_table *ctl, int write,
6426 void *buffer, size_t *lenp,
6430 struct in6_addr addr;
6431 char str[IPV6_MAX_STRLEN];
6432 struct ctl_table lctl = *ctl;
6433 struct net *net = ctl->extra2;
6434 struct ipv6_stable_secret *secret = ctl->data;
6436 if (&net->ipv6.devconf_all->stable_secret == ctl->data)
6439 lctl.maxlen = IPV6_MAX_STRLEN;
6442 if (!rtnl_trylock())
6443 return restart_syscall();
6445 if (!write && !secret->initialized) {
6450 err = snprintf(str, sizeof(str), "%pI6", &secret->secret);
6451 if (err >= sizeof(str)) {
6456 err = proc_dostring(&lctl, write, buffer, lenp, ppos);
6460 if (in6_pton(str, -1, addr.in6_u.u6_addr8, -1, NULL) != 1) {
6465 secret->initialized = true;
6466 secret->secret = addr;
6468 if (&net->ipv6.devconf_dflt->stable_secret == ctl->data) {
6469 struct net_device *dev;
6471 for_each_netdev(net, dev) {
6472 struct inet6_dev *idev = __in6_dev_get(dev);
6475 idev->cnf.addr_gen_mode =
6476 IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6480 struct inet6_dev *idev = ctl->extra1;
6482 idev->cnf.addr_gen_mode = IN6_ADDR_GEN_MODE_STABLE_PRIVACY;
6492 int addrconf_sysctl_ignore_routes_with_linkdown(struct ctl_table *ctl,
6493 int write, void *buffer,
6497 int *valp = ctl->data;
6500 struct ctl_table lctl;
6503 /* ctl->data points to idev->cnf.ignore_routes_with_linkdown;
6504 * we should not modify it until we get the rtnl lock. */
6509 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6512 ret = addrconf_fixup_linkdown(ctl, valp, val);
6519 void addrconf_set_nopolicy(struct rt6_info *rt, int action)
6523 rt->dst.flags |= DST_NOPOLICY;
6525 rt->dst.flags &= ~DST_NOPOLICY;
6530 void addrconf_disable_policy_idev(struct inet6_dev *idev, int val)
6532 struct inet6_ifaddr *ifa;
6534 read_lock_bh(&idev->lock);
6535 list_for_each_entry(ifa, &idev->addr_list, if_list) {
6536 spin_lock(&ifa->lock);
6538 /* host routes only use builtin fib6_nh */
6539 struct fib6_nh *nh = ifa->rt->fib6_nh;
6543 ifa->rt->dst_nopolicy = val ? true : false;
6544 if (nh->rt6i_pcpu) {
6545 for_each_possible_cpu(cpu) {
6546 struct rt6_info **rtp;
6548 rtp = per_cpu_ptr(nh->rt6i_pcpu, cpu);
6549 addrconf_set_nopolicy(*rtp, val);
6554 spin_unlock(&ifa->lock);
6556 read_unlock_bh(&idev->lock);
6560 int addrconf_disable_policy(struct ctl_table *ctl, int *valp, int val)
6562 struct inet6_dev *idev;
6565 if (!rtnl_trylock())
6566 return restart_syscall();
6570 net = (struct net *)ctl->extra2;
6571 if (valp == &net->ipv6.devconf_dflt->disable_policy) {
6576 if (valp == &net->ipv6.devconf_all->disable_policy) {
6577 struct net_device *dev;
6579 for_each_netdev(net, dev) {
6580 idev = __in6_dev_get(dev);
6582 addrconf_disable_policy_idev(idev, val);
6585 idev = (struct inet6_dev *)ctl->extra1;
6586 addrconf_disable_policy_idev(idev, val);
6593 static int addrconf_sysctl_disable_policy(struct ctl_table *ctl, int write,
6594 void *buffer, size_t *lenp, loff_t *ppos)
6596 int *valp = ctl->data;
6599 struct ctl_table lctl;
6604 ret = proc_dointvec(&lctl, write, buffer, lenp, ppos);
6606 if (write && (*valp != val))
6607 ret = addrconf_disable_policy(ctl, valp, val);
6615 static int minus_one = -1;
6616 static const int two_five_five = 255;
6617 static u32 ioam6_if_id_max = U16_MAX;
6619 static const struct ctl_table addrconf_sysctl[] = {
6621 .procname = "forwarding",
6622 .data = &ipv6_devconf.forwarding,
6623 .maxlen = sizeof(int),
6625 .proc_handler = addrconf_sysctl_forward,
6628 .procname = "hop_limit",
6629 .data = &ipv6_devconf.hop_limit,
6630 .maxlen = sizeof(int),
6632 .proc_handler = proc_dointvec_minmax,
6633 .extra1 = (void *)SYSCTL_ONE,
6634 .extra2 = (void *)&two_five_five,
6638 .data = &ipv6_devconf.mtu6,
6639 .maxlen = sizeof(int),
6641 .proc_handler = addrconf_sysctl_mtu,
6644 .procname = "accept_ra",
6645 .data = &ipv6_devconf.accept_ra,
6646 .maxlen = sizeof(int),
6648 .proc_handler = proc_dointvec,
6651 .procname = "accept_redirects",
6652 .data = &ipv6_devconf.accept_redirects,
6653 .maxlen = sizeof(int),
6655 .proc_handler = proc_dointvec,
6658 .procname = "autoconf",
6659 .data = &ipv6_devconf.autoconf,
6660 .maxlen = sizeof(int),
6662 .proc_handler = proc_dointvec,
6665 .procname = "dad_transmits",
6666 .data = &ipv6_devconf.dad_transmits,
6667 .maxlen = sizeof(int),
6669 .proc_handler = proc_dointvec,
6672 .procname = "router_solicitations",
6673 .data = &ipv6_devconf.rtr_solicits,
6674 .maxlen = sizeof(int),
6676 .proc_handler = proc_dointvec_minmax,
6677 .extra1 = &minus_one,
6680 .procname = "router_solicitation_interval",
6681 .data = &ipv6_devconf.rtr_solicit_interval,
6682 .maxlen = sizeof(int),
6684 .proc_handler = proc_dointvec_jiffies,
6687 .procname = "router_solicitation_max_interval",
6688 .data = &ipv6_devconf.rtr_solicit_max_interval,
6689 .maxlen = sizeof(int),
6691 .proc_handler = proc_dointvec_jiffies,
6694 .procname = "router_solicitation_delay",
6695 .data = &ipv6_devconf.rtr_solicit_delay,
6696 .maxlen = sizeof(int),
6698 .proc_handler = proc_dointvec_jiffies,
6701 .procname = "force_mld_version",
6702 .data = &ipv6_devconf.force_mld_version,
6703 .maxlen = sizeof(int),
6705 .proc_handler = proc_dointvec,
6708 .procname = "mldv1_unsolicited_report_interval",
6710 &ipv6_devconf.mldv1_unsolicited_report_interval,
6711 .maxlen = sizeof(int),
6713 .proc_handler = proc_dointvec_ms_jiffies,
6716 .procname = "mldv2_unsolicited_report_interval",
6718 &ipv6_devconf.mldv2_unsolicited_report_interval,
6719 .maxlen = sizeof(int),
6721 .proc_handler = proc_dointvec_ms_jiffies,
6724 .procname = "use_tempaddr",
6725 .data = &ipv6_devconf.use_tempaddr,
6726 .maxlen = sizeof(int),
6728 .proc_handler = proc_dointvec,
6731 .procname = "temp_valid_lft",
6732 .data = &ipv6_devconf.temp_valid_lft,
6733 .maxlen = sizeof(int),
6735 .proc_handler = proc_dointvec,
6738 .procname = "temp_prefered_lft",
6739 .data = &ipv6_devconf.temp_prefered_lft,
6740 .maxlen = sizeof(int),
6742 .proc_handler = proc_dointvec,
6745 .procname = "regen_max_retry",
6746 .data = &ipv6_devconf.regen_max_retry,
6747 .maxlen = sizeof(int),
6749 .proc_handler = proc_dointvec,
6752 .procname = "max_desync_factor",
6753 .data = &ipv6_devconf.max_desync_factor,
6754 .maxlen = sizeof(int),
6756 .proc_handler = proc_dointvec,
6759 .procname = "max_addresses",
6760 .data = &ipv6_devconf.max_addresses,
6761 .maxlen = sizeof(int),
6763 .proc_handler = proc_dointvec,
6766 .procname = "accept_ra_defrtr",
6767 .data = &ipv6_devconf.accept_ra_defrtr,
6768 .maxlen = sizeof(int),
6770 .proc_handler = proc_dointvec,
6773 .procname = "ra_defrtr_metric",
6774 .data = &ipv6_devconf.ra_defrtr_metric,
6775 .maxlen = sizeof(u32),
6777 .proc_handler = proc_douintvec_minmax,
6778 .extra1 = (void *)SYSCTL_ONE,
6781 .procname = "accept_ra_min_hop_limit",
6782 .data = &ipv6_devconf.accept_ra_min_hop_limit,
6783 .maxlen = sizeof(int),
6785 .proc_handler = proc_dointvec,
6788 .procname = "accept_ra_pinfo",
6789 .data = &ipv6_devconf.accept_ra_pinfo,
6790 .maxlen = sizeof(int),
6792 .proc_handler = proc_dointvec,
6794 #ifdef CONFIG_IPV6_ROUTER_PREF
6796 .procname = "accept_ra_rtr_pref",
6797 .data = &ipv6_devconf.accept_ra_rtr_pref,
6798 .maxlen = sizeof(int),
6800 .proc_handler = proc_dointvec,
6803 .procname = "router_probe_interval",
6804 .data = &ipv6_devconf.rtr_probe_interval,
6805 .maxlen = sizeof(int),
6807 .proc_handler = proc_dointvec_jiffies,
6809 #ifdef CONFIG_IPV6_ROUTE_INFO
6811 .procname = "accept_ra_rt_info_min_plen",
6812 .data = &ipv6_devconf.accept_ra_rt_info_min_plen,
6813 .maxlen = sizeof(int),
6815 .proc_handler = proc_dointvec,
6818 .procname = "accept_ra_rt_info_max_plen",
6819 .data = &ipv6_devconf.accept_ra_rt_info_max_plen,
6820 .maxlen = sizeof(int),
6822 .proc_handler = proc_dointvec,
6827 .procname = "proxy_ndp",
6828 .data = &ipv6_devconf.proxy_ndp,
6829 .maxlen = sizeof(int),
6831 .proc_handler = addrconf_sysctl_proxy_ndp,
6834 .procname = "accept_source_route",
6835 .data = &ipv6_devconf.accept_source_route,
6836 .maxlen = sizeof(int),
6838 .proc_handler = proc_dointvec,
6840 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
6842 .procname = "optimistic_dad",
6843 .data = &ipv6_devconf.optimistic_dad,
6844 .maxlen = sizeof(int),
6846 .proc_handler = proc_dointvec,
6849 .procname = "use_optimistic",
6850 .data = &ipv6_devconf.use_optimistic,
6851 .maxlen = sizeof(int),
6853 .proc_handler = proc_dointvec,
6856 #ifdef CONFIG_IPV6_MROUTE
6858 .procname = "mc_forwarding",
6859 .data = &ipv6_devconf.mc_forwarding,
6860 .maxlen = sizeof(int),
6862 .proc_handler = proc_dointvec,
6866 .procname = "disable_ipv6",
6867 .data = &ipv6_devconf.disable_ipv6,
6868 .maxlen = sizeof(int),
6870 .proc_handler = addrconf_sysctl_disable,
6873 .procname = "accept_dad",
6874 .data = &ipv6_devconf.accept_dad,
6875 .maxlen = sizeof(int),
6877 .proc_handler = proc_dointvec,
6880 .procname = "force_tllao",
6881 .data = &ipv6_devconf.force_tllao,
6882 .maxlen = sizeof(int),
6884 .proc_handler = proc_dointvec
6887 .procname = "ndisc_notify",
6888 .data = &ipv6_devconf.ndisc_notify,
6889 .maxlen = sizeof(int),
6891 .proc_handler = proc_dointvec
6894 .procname = "suppress_frag_ndisc",
6895 .data = &ipv6_devconf.suppress_frag_ndisc,
6896 .maxlen = sizeof(int),
6898 .proc_handler = proc_dointvec
6901 .procname = "accept_ra_from_local",
6902 .data = &ipv6_devconf.accept_ra_from_local,
6903 .maxlen = sizeof(int),
6905 .proc_handler = proc_dointvec,
6908 .procname = "accept_ra_mtu",
6909 .data = &ipv6_devconf.accept_ra_mtu,
6910 .maxlen = sizeof(int),
6912 .proc_handler = proc_dointvec,
6915 .procname = "stable_secret",
6916 .data = &ipv6_devconf.stable_secret,
6917 .maxlen = IPV6_MAX_STRLEN,
6919 .proc_handler = addrconf_sysctl_stable_secret,
6922 .procname = "use_oif_addrs_only",
6923 .data = &ipv6_devconf.use_oif_addrs_only,
6924 .maxlen = sizeof(int),
6926 .proc_handler = proc_dointvec,
6929 .procname = "ignore_routes_with_linkdown",
6930 .data = &ipv6_devconf.ignore_routes_with_linkdown,
6931 .maxlen = sizeof(int),
6933 .proc_handler = addrconf_sysctl_ignore_routes_with_linkdown,
6936 .procname = "drop_unicast_in_l2_multicast",
6937 .data = &ipv6_devconf.drop_unicast_in_l2_multicast,
6938 .maxlen = sizeof(int),
6940 .proc_handler = proc_dointvec,
6943 .procname = "drop_unsolicited_na",
6944 .data = &ipv6_devconf.drop_unsolicited_na,
6945 .maxlen = sizeof(int),
6947 .proc_handler = proc_dointvec,
6950 .procname = "keep_addr_on_down",
6951 .data = &ipv6_devconf.keep_addr_on_down,
6952 .maxlen = sizeof(int),
6954 .proc_handler = proc_dointvec,
6958 .procname = "seg6_enabled",
6959 .data = &ipv6_devconf.seg6_enabled,
6960 .maxlen = sizeof(int),
6962 .proc_handler = proc_dointvec,
6964 #ifdef CONFIG_IPV6_SEG6_HMAC
6966 .procname = "seg6_require_hmac",
6967 .data = &ipv6_devconf.seg6_require_hmac,
6968 .maxlen = sizeof(int),
6970 .proc_handler = proc_dointvec,
6974 .procname = "enhanced_dad",
6975 .data = &ipv6_devconf.enhanced_dad,
6976 .maxlen = sizeof(int),
6978 .proc_handler = proc_dointvec,
6981 .procname = "addr_gen_mode",
6982 .data = &ipv6_devconf.addr_gen_mode,
6983 .maxlen = sizeof(int),
6985 .proc_handler = addrconf_sysctl_addr_gen_mode,
6988 .procname = "disable_policy",
6989 .data = &ipv6_devconf.disable_policy,
6990 .maxlen = sizeof(int),
6992 .proc_handler = addrconf_sysctl_disable_policy,
6995 .procname = "ndisc_tclass",
6996 .data = &ipv6_devconf.ndisc_tclass,
6997 .maxlen = sizeof(int),
6999 .proc_handler = proc_dointvec_minmax,
7000 .extra1 = (void *)SYSCTL_ZERO,
7001 .extra2 = (void *)&two_five_five,
7004 .procname = "rpl_seg_enabled",
7005 .data = &ipv6_devconf.rpl_seg_enabled,
7006 .maxlen = sizeof(int),
7008 .proc_handler = proc_dointvec,
7011 .procname = "ioam6_enabled",
7012 .data = &ipv6_devconf.ioam6_enabled,
7013 .maxlen = sizeof(u8),
7015 .proc_handler = proc_dou8vec_minmax,
7016 .extra1 = (void *)SYSCTL_ZERO,
7017 .extra2 = (void *)SYSCTL_ONE,
7020 .procname = "ioam6_id",
7021 .data = &ipv6_devconf.ioam6_id,
7022 .maxlen = sizeof(u32),
7024 .proc_handler = proc_douintvec_minmax,
7025 .extra1 = (void *)SYSCTL_ZERO,
7026 .extra2 = (void *)&ioam6_if_id_max,
7029 .procname = "ioam6_id_wide",
7030 .data = &ipv6_devconf.ioam6_id_wide,
7031 .maxlen = sizeof(u32),
7033 .proc_handler = proc_douintvec,
7036 .procname = "ndisc_evict_nocarrier",
7037 .data = &ipv6_devconf.ndisc_evict_nocarrier,
7038 .maxlen = sizeof(u8),
7040 .proc_handler = proc_dou8vec_minmax,
7041 .extra1 = (void *)SYSCTL_ZERO,
7042 .extra2 = (void *)SYSCTL_ONE,
7045 .procname = "accept_untracked_na",
7046 .data = &ipv6_devconf.accept_untracked_na,
7047 .maxlen = sizeof(int),
7049 .proc_handler = proc_dointvec_minmax,
7050 .extra1 = SYSCTL_ZERO,
7051 .extra2 = SYSCTL_TWO,
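/* Register a copy of the addrconf sysctl table for one devconf instance
 * ("all", "default" or a real device) under net/ipv6/conf/<name> and
 * announce it via an RTM_NEWNETCONF notification.
 */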
7058 static int __addrconf_sysctl_register(struct net *net, char *dev_name,
7059 struct inet6_dev *idev, struct ipv6_devconf *p)
7062 struct ctl_table *table;
7063 char path[sizeof("net/ipv6/conf/") + IFNAMSIZ];
7065 table = kmemdup(addrconf_sysctl, sizeof(addrconf_sysctl), GFP_KERNEL_ACCOUNT);
7069 for (i = 0; table[i].data; i++) {
7070 table[i].data += (char *)p - (char *)&ipv6_devconf;
7071 /* If one of these is already set, then it is not safe to
7072 * overwrite either of them: this makes proc_dointvec_minmax
7075 if (!table[i].extra1 && !table[i].extra2) {
7076 table[i].extra1 = idev; /* embedded; no ref */
7077 table[i].extra2 = net;
7081 snprintf(path, sizeof(path), "net/ipv6/conf/%s", dev_name);
7083 p->sysctl_header = register_net_sysctl(net, path, table);
7084 if (!p->sysctl_header)
7087 if (!strcmp(dev_name, "all"))
7088 ifindex = NETCONFA_IFINDEX_ALL;
7089 else if (!strcmp(dev_name, "default"))
7090 ifindex = NETCONFA_IFINDEX_DEFAULT;
7092 ifindex = idev->dev->ifindex;
7093 inet6_netconf_notify_devconf(net, RTM_NEWNETCONF, NETCONFA_ALL,
7103 static void __addrconf_sysctl_unregister(struct net *net,
7104 struct ipv6_devconf *p, int ifindex)
7106 struct ctl_table *table;
7108 if (!p->sysctl_header)
7111 table = p->sysctl_header->ctl_table_arg;
7112 unregister_net_sysctl_table(p->sysctl_header);
7113 p->sysctl_header = NULL;
7116 inet6_netconf_notify_devconf(net, RTM_DELNETCONF, 0, ifindex, NULL);
7119 static int addrconf_sysctl_register(struct inet6_dev *idev)
7123 if (!sysctl_dev_name_is_allowed(idev->dev->name))
7126 err = neigh_sysctl_register(idev->dev, idev->nd_parms,
7127 &ndisc_ifinfo_sysctl_change);
7130 err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
7133 neigh_sysctl_unregister(idev->nd_parms);
7138 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
7140 __addrconf_sysctl_unregister(dev_net(idev->dev), &idev->cnf,
7141 idev->dev->ifindex);
7142 neigh_sysctl_unregister(idev->nd_parms);
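/* Per-namespace init: allocate the address hash table and the "all" and
 * "default" devconf copies, inheriting values according to
 * net_inherit_devconf(), then register their sysctls.
 */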
7148 static int __net_init addrconf_init_net(struct net *net)
7151 struct ipv6_devconf *all, *dflt;
7153 spin_lock_init(&net->ipv6.addrconf_hash_lock);
7154 INIT_DEFERRABLE_WORK(&net->ipv6.addr_chk_work, addrconf_verify_work);
7155 net->ipv6.inet6_addr_lst = kcalloc(IN6_ADDR_HSIZE,
7156 sizeof(struct hlist_head),
7158 if (!net->ipv6.inet6_addr_lst)
7159 goto err_alloc_addr;
7161 all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL);
7165 dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL);
7167 goto err_alloc_dflt;
7169 if (!net_eq(net, &init_net)) {
7170 switch (net_inherit_devconf()) {
7171 case 1: /* copy from init_net */
7172 memcpy(all, init_net.ipv6.devconf_all,
7173 sizeof(ipv6_devconf));
7174 memcpy(dflt, init_net.ipv6.devconf_dflt,
7175 sizeof(ipv6_devconf_dflt));
7177 case 3: /* copy from the current netns */
7178 memcpy(all, current->nsproxy->net_ns->ipv6.devconf_all,
7179 sizeof(ipv6_devconf));
7181 current->nsproxy->net_ns->ipv6.devconf_dflt,
7182 sizeof(ipv6_devconf_dflt));
7186 /* use compiled values */
7191 /* these will be inherited by all namespaces */
7192 dflt->autoconf = ipv6_defaults.autoconf;
7193 dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;
7195 dflt->stable_secret.initialized = false;
7196 all->stable_secret.initialized = false;
7198 net->ipv6.devconf_all = all;
7199 net->ipv6.devconf_dflt = dflt;
7201 #ifdef CONFIG_SYSCTL
7202 err = __addrconf_sysctl_register(net, "all", NULL, all);
7206 err = __addrconf_sysctl_register(net, "default", NULL, dflt);
7212 #ifdef CONFIG_SYSCTL
7214 __addrconf_sysctl_unregister(net, all, NETCONFA_IFINDEX_ALL);
7217 net->ipv6.devconf_dflt = NULL;
7221 net->ipv6.devconf_all = NULL;
7223 kfree(net->ipv6.inet6_addr_lst);
7228 static void __net_exit addrconf_exit_net(struct net *net)
7232 #ifdef CONFIG_SYSCTL
7233 __addrconf_sysctl_unregister(net, net->ipv6.devconf_dflt,
7234 NETCONFA_IFINDEX_DEFAULT);
7235 __addrconf_sysctl_unregister(net, net->ipv6.devconf_all,
7236 NETCONFA_IFINDEX_ALL);
7238 kfree(net->ipv6.devconf_dflt);
7239 net->ipv6.devconf_dflt = NULL;
7240 kfree(net->ipv6.devconf_all);
7241 net->ipv6.devconf_all = NULL;
7243 cancel_delayed_work_sync(&net->ipv6.addr_chk_work);
7245 /* Check hash table, then free it. */
7247 for (i = 0; i < IN6_ADDR_HSIZE; i++)
7248 WARN_ON_ONCE(!hlist_empty(&net->ipv6.inet6_addr_lst[i]));
7250 kfree(net->ipv6.inet6_addr_lst);
7251 net->ipv6.inet6_addr_lst = NULL;
7254 static struct pernet_operations addrconf_ops = {
7255 .init = addrconf_init_net,
7256 .exit = addrconf_exit_net,
7259 static struct rtnl_af_ops inet6_ops __read_mostly = {
7261 .fill_link_af = inet6_fill_link_af,
7262 .get_link_af_size = inet6_get_link_af_size,
7263 .validate_link_af = inet6_validate_link_af,
7264 .set_link_af = inet6_set_link_af,
7268 /* Init / cleanup code */
7271 int __init addrconf_init(void)
7273 struct inet6_dev *idev;
7276 err = ipv6_addr_label_init();
7278 pr_crit("%s: cannot initialize default policy table: %d\n",
7283 err = register_pernet_subsys(&addrconf_ops);
7287 addrconf_wq = create_workqueue("ipv6_addrconf");
7294 idev = ipv6_add_dev(blackhole_netdev);
7297 err = PTR_ERR(idev);
7301 ip6_route_init_special_entries();
7303 register_netdevice_notifier(&ipv6_dev_notf);
7305 addrconf_verify(&init_net);
7307 rtnl_af_register(&inet6_ops);
7309 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETLINK,
7310 NULL, inet6_dump_ifinfo, 0);
7314 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWADDR,
7315 inet6_rtm_newaddr, NULL, 0);
7318 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELADDR,
7319 inet6_rtm_deladdr, NULL, 0);
7322 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETADDR,
7323 inet6_rtm_getaddr, inet6_dump_ifaddr,
7324 RTNL_FLAG_DOIT_UNLOCKED);
7327 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETMULTICAST,
7328 NULL, inet6_dump_ifmcaddr, 0);
7331 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETANYCAST,
7332 NULL, inet6_dump_ifacaddr, 0);
7335 err = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETNETCONF,
7336 inet6_netconf_get_devconf,
7337 inet6_netconf_dump_devconf,
7338 RTNL_FLAG_DOIT_UNLOCKED);
7341 err = ipv6_addr_label_rtnl_register();
7347 rtnl_unregister_all(PF_INET6);
7348 rtnl_af_unregister(&inet6_ops);
7349 unregister_netdevice_notifier(&ipv6_dev_notf);
7351 destroy_workqueue(addrconf_wq);
7353 unregister_pernet_subsys(&addrconf_ops);
7355 ipv6_addr_label_cleanup();
7360 void addrconf_cleanup(void)
7362 struct net_device *dev;
7364 unregister_netdevice_notifier(&ipv6_dev_notf);
7365 unregister_pernet_subsys(&addrconf_ops);
7366 ipv6_addr_label_cleanup();
7368 rtnl_af_unregister(&inet6_ops);
7372 /* clean dev list */
7373 for_each_netdev(&init_net, dev) {
7374 if (__in6_dev_get(dev) == NULL)
7376 addrconf_ifdown(dev, true);
7378 addrconf_ifdown(init_net.loopback_dev, true);
7382 destroy_workqueue(addrconf_wq);