/*
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *	Fixed routing subtrees.
 */

#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <linux/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>
#include <linux/uaccess.h>
#include <linux/sysctl.h>

enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static void rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct rt6_info *rt);
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
					   unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
#endif

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);

static void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

static void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}
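
/*
 * Illustrative note (not part of the original file): the pattern above
 * stores the chosen per-cpu bucket in the entry itself, because the
 * context that later removes the entry may run on a different CPU and
 * must lock the bucket the entry actually lives on. A minimal sketch of
 * the same idea, with hypothetical names:
 */
#if 0	/* example only */
struct example_item {
	struct list_head	node;
	struct uncached_list	*bucket;	/* remembered at add time */
};

static void example_item_add(struct example_item *it)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	it->bucket = ul;
	spin_lock_bh(&ul->lock);
	list_add_tail(&it->node, &ul->head);
	spin_unlock_bh(&ul->lock);
}

static void example_item_del(struct example_item *it)
{
	/* lock the remembered bucket, not this CPU's bucket */
	spin_lock_bh(&it->bucket->lock);
	list_del(&it->node);
	spin_unlock_bh(&it->bucket->lock);
}
#endif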
static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
	return dst_metrics_write_ptr(rt->dst.from);
}

static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (rt->rt6i_flags & RTF_PCPU)
		return rt6_pcpu_cow_metrics(rt);
	else if (rt->rt6i_flags & RTF_CACHE)
		return NULL;
	else
		return dst_cow_metrics_generic(dst, old);
}

static inline const void *choose_neigh_daddr(struct rt6_info *rt,
					     struct sk_buff *skb,
					     const void *daddr)
{
	struct in6_addr *p = &rt->rt6i_gateway;

	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
					  struct sk_buff *skb,
					  const void *daddr)
{
	struct rt6_info *rt = (struct rt6_info *) dst;
	struct neighbour *n;

	daddr = choose_neigh_daddr(rt, skb, daddr);
	n = __ipv6_neigh_lookup(dst->dev, daddr);
	if (n)
		return n;
	return neigh_create(&nd_tbl, daddr, dst->dev);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}

static struct dst_ops ip6_dst_ops_template = {
	.family = AF_INET6,
	.gc = ip6_dst_gc,
	.gc_thresh = 1024,
	.check = ip6_dst_check,
	.default_advmss = ip6_default_advmss,
	.mtu = ip6_mtu,
	.cow_metrics = ipv6_cow_metrics,
	.destroy = ip6_dst_destroy,
	.ifdown = ip6_dst_ifdown,
	.negative_advice = ip6_negative_advice,
	.link_failure = ip6_link_failure,
	.update_pmtu = ip6_rt_update_pmtu,
	.redirect = rt6_do_redirect,
	.local_out = __ip6_local_out,
	.neigh_lookup = ip6_neigh_lookup,
	.confirm_neigh = ip6_confirm_neigh,
};

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
	.family = AF_INET6,
	.destroy = ip6_dst_destroy,
	.check = ip6_dst_check,
	.mtu = ip6_blackhole_mtu,
	.default_advmss = ip6_default_advmss,
	.update_pmtu = ip6_rt_blackhole_update_pmtu,
	.redirect = ip6_rt_blackhole_redirect,
	.cow_metrics = dst_cow_metrics_generic,
	.neigh_lookup = ip6_neigh_lookup,
};

static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#endif

static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_siblings);
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
					struct net_device *dev,
					int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt)
		rt6_info_init(rt);

	return rt;
}

struct rt6_info *ip6_dst_alloc(struct net *net,
			       struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);

	if (rt) {
		rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
		if (rt->rt6i_pcpu) {
			int cpu;

			for_each_possible_cpu(cpu) {
				struct rt6_info **p;

				p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
				/* no one shares rt */
				*p = NULL;
			}
		} else {
			dst_release_immediate(&rt->dst);
			return NULL;
		}
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);

static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct dst_entry *from = dst->from;
	struct inet6_dev *idev;

	dst_destroy_metrics_generic(dst);
	free_percpu(rt->rt6i_pcpu);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}

	dst->from = NULL;
	dst_release(from);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (dev != loopback_dev) {
		if (idev && idev->dev == dev) {
			struct inet6_dev *loopback_idev =
				in6_dev_get(loopback_dev);
			if (loopback_idev) {
				rt->rt6i_idev = loopback_idev;
				in6_dev_put(idev);
			}
		}
	}
}

static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (rt->dst.from) {
		return rt6_check_expired((struct rt6_info *) rt->dst.from);
	}
	return false;
}

/* Multipath route selection:
 *   Hash based function using packet header and flowlabel.
 * Adapted from fib_info_hashfn()
 */
static int rt6_info_hash_nhsfn(unsigned int candidate_count,
			       const struct flowi6 *fl6)
{
	return get_hash_from_flowi6(fl6) % candidate_count;
}
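
/*
 * Illustrative sketch (not part of the original file): with one matched
 * route plus N siblings, the modulo above maps every flow to a stable
 * index in [0, N + 1), so all packets of a flow keep using the same
 * nexthop while distinct flows spread across the equal-cost routes.
 * Hypothetical standalone form of the same arithmetic:
 */
#if 0	/* example only */
static unsigned int example_pick_path(u32 flow_hash, unsigned int npaths)
{
	/* same hash -> same path; different hashes spread evenly */
	return flow_hash % npaths;
}
#endif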
static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
					     struct flowi6 *fl6, int oif,
					     int strict)
{
	struct rt6_info *sibling, *next_sibling;
	int route_chosen;

	route_chosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
	/* Don't change the route, if route_chosen == 0
	 * (the siblings list does not include ourselves)
	 */
	if (route_chosen)
		list_for_each_entry_safe(sibling, next_sibling,
				&match->rt6i_siblings, rt6i_siblings) {
			route_chosen--;
			if (route_chosen == 0) {
				if (rt6_score_route(sibling, oif, strict) < 0)
					break;
				match = sibling;
				break;
			}
		}
	return match;
}

/*
 *	Route lookup. Any table->tb6_lock is implied.
 */

static inline struct rt6_info *rt6_device_match(struct net *net,
						struct rt6_info *rt,
						const struct in6_addr *saddr,
						int oif,
						int flags)
{
	struct rt6_info *local = NULL;
	struct rt6_info *sprt;

	if (!oif && ipv6_addr_any(saddr))
		goto out;

	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
		struct net_device *dev = sprt->dst.dev;

		if (oif) {
			if (dev->ifindex == oif)
				return sprt;
			if (dev->flags & IFF_LOOPBACK) {
				if (!sprt->rt6i_idev ||
				    sprt->rt6i_idev->dev->ifindex != oif) {
					if (flags & RT6_LOOKUP_F_IFACE)
						continue;
					if (local &&
					    local->rt6i_idev->dev->ifindex == oif)
						continue;
				}
				local = sprt;
			}
		} else {
			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))
				return sprt;
		}
	}

	if (oif) {
		if (local)
			return local;

		if (flags & RT6_LOOKUP_F_IFACE)
			return net->ipv6.ip6_null_entry;
	}
out:
	return rt;
}

#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}

static void rt6_probe(struct rt6_info *rt)
{
	struct __rt6_probe_work *work;
	struct neighbour *neigh;
	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
		return;
	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		work = NULL;
		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated +
			       rt->rt6i_idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = rt->rt6i_gateway;
		dev_hold(rt->dst.dev);
		work->dev = rt->dst.dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
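
/*
 * Illustrative sketch (not part of the original file): the rate limit
 * above is the usual jiffies idiom - act only once more than @interval
 * has passed since the last event. Hypothetical standalone form:
 */
#if 0	/* example only */
static bool example_probe_due(unsigned long last_update,
			      unsigned long interval)
{
	/* true once the quiet period since @last_update has elapsed */
	return time_after(jiffies, last_update + interval);
}
#endif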
#else
static inline void rt6_probe(struct rt6_info *rt)
{
}
#endif

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
	struct net_device *dev = rt->dst.dev;

	if (!oif || dev->ifindex == oif)
		return 2;
	if ((dev->flags & IFF_LOOPBACK) &&
	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
		return 1;
	return 0;
}

static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
	struct neighbour *neigh;
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;

	if (rt->rt6i_flags & RTF_NONEXTHOP ||
	    !(rt->rt6i_flags & RTF_GATEWAY))
		return RT6_NUD_SUCCEED;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}

static int rt6_score_route(struct rt6_info *rt, int oif,
			   int strict)
{
	int m;

	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);
		if (n < 0)
			return n;
	}
	return m;
}

static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
				   int *mpri, struct rt6_info *match,
				   bool *do_rr)
{
	int m;
	bool match_do_rr = false;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *dev = rt->dst.dev;

	if (dev && !netif_carrier_ok(dev) &&
	    idev->cnf.ignore_routes_with_linkdown &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	if (rt6_check_expired(rt))
		goto out;

	m = rt6_score_route(rt, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(rt);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		match = rt;
	}
out:
	return match;
}

static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
				     struct rt6_info *rr_head,
				     u32 metric, int oif, int strict,
				     bool *do_rr)
{
	struct rt6_info *rt, *match, *cont;
	int mpri = -1;

	match = NULL;
	cont = NULL;
	for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	if (match || !cont)
		return match;

	for (rt = cont; rt; rt = rt->dst.rt6_next)
		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	return match;
}

static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
{
	struct rt6_info *match, *rt0;
	struct net *net;
	bool do_rr = false;

	rt0 = fn->rr_ptr;
	if (!rt0)
		fn->rr_ptr = rt0 = fn->leaf;

	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
			     &do_rr);

	if (do_rr) {
		struct rt6_info *next = rt0->dst.rt6_next;

		/* no entries matched; do round-robin */
		if (!next || next->rt6i_metric != rt0->rt6i_metric)
			next = fn->leaf;

		if (next != rt0)
			fn->rr_ptr = next;
	}

	net = dev_net(rt0->dst.dev);
	return match ? match : net->ipv6.ip6_null_entry;
}
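
/*
 * Illustrative sketch (not part of the original file): rr_ptr only ever
 * advances within the run of routes sharing the head's metric, wrapping
 * back to the leaf, so successive lookups cycle through the equal-cost
 * candidates when none of them scored. Hypothetical form of the wrap:
 */
#if 0	/* example only */
static struct rt6_info *example_rr_advance(struct fib6_node *fn,
					   struct rt6_info *rt0)
{
	struct rt6_info *next = rt0->dst.rt6_next;

	/* leaving the equal-metric run means wrapping to the start */
	if (!next || next->rt6i_metric != rt0->rt6i_metric)
		next = fn->leaf;
	return next;
}
#endif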
static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}

#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct rt6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->rt6i_flags = RTF_ROUTEINFO |
				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			rt6_clean_expires(rt);
		else
			rt6_set_expires(rt, jiffies + HZ * lifetime);

		ip6_rt_put(rt);
	}
	return 0;
}
#endif
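
/*
 * Illustrative sketch (not part of the original file): per RFC 4191 the
 * Route Information Option length is counted in units of 8 octets, so
 * the option carries (length - 1) * 8 octets of prefix. The sanity
 * checks above reduce to the following predicate (hypothetical form):
 */
#if 0	/* example only */
static bool example_rinfo_len_ok(u8 length, u8 prefix_len)
{
	if (length > 3 || prefix_len > 128)
		return false;
	if (prefix_len > 64)
		return length >= 2;	/* mirrors the check above */
	if (prefix_len > 0)
		return length >= 1;
	return true;
}
#endif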
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = fn->parent;
		if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
			fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}

static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn;
	struct rt6_info *rt;

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	rt = fn->leaf;
	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}
	dst_use(&rt->dst, jiffies);
	read_unlock_bh(&table->tb6_lock);

	trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   int flags)
{
	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);

struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);

/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
			struct mx6_config *mxc,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
	write_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct rt6_info *rt)
{
	struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
	struct mx6_config mxc = { .mx = NULL, };

	/* Hold dst to account for the reference from the fib6 tree */
	dst_hold(&rt->dst);
	return __ip6_ins_rt(rt, &info, &mxc, NULL);
}

static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = (struct rt6_info *)ort->dst.from;

	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);

	if (!rt)
		return NULL;

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_metric = 0;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(ort)) {
		if (ort->rt6i_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}

static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt;

	pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
				  rt->dst.dev, rt->dst.flags);
	if (!pcpu_rt)
		return NULL;
	ip6_rt_copy_init(pcpu_rt, rt);
	pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

/* It should be called with read_lock_bh(&tb6_lock) acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(rt->rt6i_pcpu);
	pcpu_rt = *p;

	if (pcpu_rt) {
		dst_hold(&pcpu_rt->dst);
		rt6_dst_from_metrics_check(pcpu_rt);
	}
	return pcpu_rt;
}

static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
	struct fib6_table *table = rt->rt6i_table;
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
		struct net *net = dev_net(rt->dst.dev);

		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	read_lock_bh(&table->tb6_lock);
	if (rt->rt6i_pcpu) {
		p = this_cpu_ptr(rt->rt6i_pcpu);
		prev = cmpxchg(p, NULL, pcpu_rt);
		if (prev) {
			/* If someone did it before us, return prev instead */
			dst_release_immediate(&pcpu_rt->dst);
			pcpu_rt = prev;
		}
	} else {
		/* rt has been removed from the fib6 tree
		 * before we have a chance to acquire the read_lock.
		 * In this case, don't bother to create a pcpu rt
		 * since rt is going away anyway.  The next
		 * dst_check() will trigger a re-lookup.
		 */
		dst_release_immediate(&pcpu_rt->dst);
		pcpu_rt = rt;
	}
	dst_hold(&pcpu_rt->dst);
	rt6_dst_from_metrics_check(pcpu_rt);
	read_unlock_bh(&table->tb6_lock);
	return pcpu_rt;
}
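
/*
 * Illustrative sketch (not part of the original file): the cmpxchg above
 * is the lock-free "install once" idiom - publish the new object only if
 * the slot is still NULL; a non-NULL return value means another context
 * won the race and its object must be used instead. Hypothetical form:
 */
#if 0	/* example only */
static struct rt6_info *example_install_once(struct rt6_info **slot,
					     struct rt6_info *candidate)
{
	struct rt6_info *prev = cmpxchg(slot, NULL, candidate);

	return prev ? prev : candidate;	/* loser must drop its candidate */
}
#endif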
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6, int flags)
{
	struct fib6_node *fn, *saved_fn;
	struct rt6_info *rt;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	read_lock_bh(&table->tb6_lock);

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt = rt6_select(fn, oif, strict);
	if (rt->rt6i_nsiblings)
		rt = rt6_multipath_select(rt, fl6, oif, strict);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		rt6_dst_from_metrics_check(rt);

		trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(rt->rt6i_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look up the route here.
		 */

		struct rt6_info *uncached_rt;

		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
		dst_release(&rt->dst);

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
			rt6_uncached_list_add(uncached_rt);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

		trace_fib6_table_lookup(net, uncached_rt, table->tb6_id, fl6);
		return uncached_rt;

	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		rt->dst.lastuse = jiffies;
		pcpu_rt = rt6_get_pcpu_route(rt);

		if (pcpu_rt) {
			read_unlock_bh(&table->tb6_lock);
		} else {
			/* We have to do the read_unlock first
			 * because rt6_make_pcpu_route() may trigger
			 * ip6_dst_gc() which will take the write_lock.
			 */
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);
			pcpu_rt = rt6_make_pcpu_route(rt);
			dst_release(&rt->dst);
		}

		trace_fib6_table_lookup(net, pcpu_rt, table->tb6_id, fl6);
		return pcpu_rt;
	}
}
EXPORT_SYMBOL_GPL(ip6_pol_route);

static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
					    struct flowi6 *fl6, int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6, int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);

void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;
	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
}

static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);
}

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (rt6_need_strict(&fl6->daddr)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);

struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_NONE, 0);
	if (rt) {
		rt6_info_init(rt);

		new = &rt->dst;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}

/*
 *	Destination cache support functions
 */

static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
	if (rt->dst.from &&
	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
		dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);
}

static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
	if (!__rt6_check_expired(rt) &&
	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
		return &rt->dst;
	else
		return NULL;
}

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rt6_info *rt;

	rt = (struct rt6_info *) dst;

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	rt6_dst_from_metrics_check(rt);

	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->dst.from))
		return rt6_dst_from_check(rt, cookie);
	else
		return rt6_check(rt, cookie);
}

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (rt6_check_expired(rt)) {
				ip6_del_rt(rt);
				dst = NULL;
			}
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}

static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (dst_hold_safe(&rt->dst))
				ip6_del_rt(rt);
		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
			rt->rt6i_node->fn_sernum = -1;
		}
	}
}

static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	rt->rt6i_flags |= RTF_MODIFIED;
	rt->rt6i_pmtu = mtu;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	return !(rt->rt6i_flags & RTF_CACHE) &&
		(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
}

static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (rt6->rt6i_flags & RTF_LOCAL)
		return;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
	} else if (daddr) {
		struct rt6_info *nrt6;

		nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);

			/* ip6_ins_rt(nrt6) will bump the
			 * rt6->rt6i_node->fn_sernum
			 * which will fail the next rt6_check() and
			 * invalidate the sk->sk_dst_cache.
			 */
			ip6_ins_rt(nrt6);
			/* Release the reference taken in
			 * ip6_rt_cache_alloc()
			 */
			dst_release(&nrt6->dst);
		}
	}
}
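
/*
 * Illustrative sketch (not part of the original file): a PMTU report is
 * acted upon only after clamping it up to IPV6_MIN_MTU (1280), and only
 * if it would actually shrink the current path MTU. Hypothetical form:
 */
#if 0	/* example only */
static bool example_pmtu_usable(u32 reported_mtu, u32 cur_mtu)
{
	u32 mtu = max_t(u32, reported_mtu, IPV6_MIN_MTU);

	return mtu < cur_mtu;	/* growth or no change is ignored */
}
#endif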
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
}

void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	struct dst_entry *dst;

	ip6_update_pmtu(skb, sock_net(sk), mtu,
			sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);

/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};

static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt6_check_expired(rt))
			continue;
		if (rt->dst.error)
			break;
		if (!(rt->rt6i_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
			continue;
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))
			continue;
		break;
	}

	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	else if (rt->dst.error) {
		rt = net->ipv6.ip6_null_entry;
		goto out;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	dst_hold(&rt->dst);

	read_unlock_bh(&table->tb6_lock);

	trace_fib6_table_lookup(net, rt, table->tb6_id, fl6);
	return rt;
};

static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct in6_addr *gateway)
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6,
				flags, __ip6_route_redirect);
}

void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = msg->dest;
	fl6.saddr = iph->daddr;
	fl6.flowi6_uid = sock_net_uid(net, NULL);

	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);

static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}
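
/*
 * Illustrative worked example (not part of the original file): with a
 * 1500-byte MTU the computation above yields 1500 - 40 (IPv6 header)
 * - 20 (TCP header) = 1440 bytes of advertised MSS, subject to the two
 * clamps. Hypothetical standalone form:
 */
#if 0	/* example only */
static unsigned int example_advmss(unsigned int mtu, unsigned int min_advmss)
{
	unsigned int mss = mtu - sizeof(struct ipv6hdr) - sizeof(struct tcphdr);

	if (mss < min_advmss)
		mss = min_advmss;
	if (mss > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mss = IPV6_MAXPLEN;	/* "any MSS, rely on pmtu discovery" */
	return mss;
}
#endif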
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	const struct rt6_info *rt = (const struct rt6_info *)dst;
	unsigned int mtu = rt->rt6i_pmtu;
	struct inet6_dev *idev;

	if (mtu)
		goto out;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}

struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.output = ip6_output;
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_ifdown() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}

static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout>>1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire>>rt_elasticity;
	return entries > rt_max_size;
}
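
/*
 * Illustrative note (not part of the original file): the final statement
 * above decays the GC aggressiveness geometrically; each pass keeps a
 * (1 - 2^-elasticity) fraction of ip6_rt_gc_expire. Hypothetical form:
 */
#if 0	/* example only */
static int example_gc_decay(int expire, int elasticity)
{
	/* e.g. elasticity 9: expire shrinks by roughly 0.2% per call */
	return expire - (expire >> elasticity);
}
#endif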
static int ip6_convert_metrics(struct mx6_config *mxc,
			       const struct fib6_config *cfg)
{
	bool ecn_ca = false;
	struct nlattr *nla;
	int remaining;
	u32 *mp;

	if (!cfg->fc_mx)
		return 0;

	mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
	if (unlikely(!mp))
		return -ENOMEM;

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);
		u32 val;

		if (!type)
			continue;
		if (unlikely(type > RTAX_MAX))
			goto err;

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(tmp, &ecn_ca);
			if (val == TCP_CA_UNSPEC)
				goto err;
		} else {
			val = nla_get_u32(nla);
		}
		if (type == RTAX_HOPLIMIT && val > 255)
			val = 255;
		if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
			goto err;

		mp[type - 1] = val;
		__set_bit(type - 1, mxc->mx_valid);
	}

	if (ecn_ca) {
		__set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
		mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
	}

	mxc->mx = mp;
	return 0;
err:
	kfree(mp);
	return -EINVAL;
}
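
/*
 * Illustrative sketch (not part of the original file): metrics travel as
 * a fixed RTAX_MAX-sized value array plus a validity bitmap, both indexed
 * by (type - 1). Hypothetical helper showing the pairing used above:
 */
#if 0	/* example only */
static void example_set_metric(u32 *mx, unsigned long *mx_valid,
			       int type, u32 val)
{
	mx[type - 1] = val;		/* value slot */
	__set_bit(type - 1, mx_valid);	/* mark the slot as present */
}
#endif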
static struct rt6_info *ip6_nh_lookup_table(struct net *net,
					    struct fib6_config *cfg,
					    const struct in6_addr *gw_addr)
{
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
	struct fib6_table *table;
	struct rt6_info *rt;
	int flags = RT6_LOOKUP_F_IFACE | RT6_LOOKUP_F_IGNORE_LINKSTATE;

	table = fib6_get_table(net, cfg->fc_table);
	if (!table)
		return NULL;

	if (!ipv6_addr_any(&cfg->fc_prefsrc))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, flags);

	/* if table lookup failed, fall back to full lookup */
	if (rt == net->ipv6.ip6_null_entry) {
		ip6_rt_put(rt);
		rt = NULL;
	}

	return rt;
}

static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
					      struct netlink_ext_ack *extack)
{
	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;
	int addr_type;
	int err = -EINVAL;

	/* RTF_PCPU is an internal flag; can not be set by userspace */
	if (cfg->fc_flags & RTF_PCPU) {
		NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
		goto out;
	}

	if (cfg->fc_dst_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid prefix length");
		goto out;
	}
	if (cfg->fc_src_len > 128) {
		NL_SET_ERR_MSG(extack, "Invalid source address length");
		goto out;
	}
#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len) {
		NL_SET_ERR_MSG(extack,
			       "Specifying source address requires IPV6_SUBTREES to be enabled");
		goto out;
	}
#endif
	if (cfg->fc_ifindex) {
		err = -ENODEV;
		dev = dev_get_by_index(net, cfg->fc_ifindex);
		if (!dev)
			goto out;
		idev = in6_dev_get(dev);
		if (!idev)
			goto out;
	}

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	err = -ENOBUFS;
	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}

	if (!table)
		goto out;

	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);

	if (!rt) {
		err = -ENOMEM;
		goto out;
	}

	if (cfg->fc_flags & RTF_EXPIRES)
		rt6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));
	else
		rt6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	if (addr_type & IPV6_ADDR_MULTICAST)
		rt->dst.input = ip6_mc_input;
	else if (cfg->fc_flags & RTF_LOCAL)
		rt->dst.input = ip6_input;
	else
		rt->dst.input = ip6_forward;

	rt->dst.output = ip6_output;

	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

		err = lwtunnel_build_state(cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate, extack);
		if (err)
			goto out;
		rt->dst.lwtstate = lwtstate_get(lwtstate);
		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_output = rt->dst.output;
			rt->dst.output = lwtunnel_output;
		}
		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_input = rt->dst.input;
			rt->dst.input = lwtunnel_input;
		}
	}

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif

	rt->rt6i_metric = cfg->fc_metric;

	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {
			if (dev) {
				dev_put(dev);
				in6_dev_put(idev);
			}
			dev = net->loopback_dev;
			dev_hold(dev);
			idev = in6_dev_get(dev);
			if (!idev) {
				err = -ENODEV;
				goto out;
			}
		}
		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		switch (cfg->fc_type) {
		case RTN_BLACKHOLE:
			rt->dst.error = -EINVAL;
			rt->dst.output = dst_discard_out;
			rt->dst.input = dst_discard;
			break;
		case RTN_PROHIBIT:
			rt->dst.error = -EACCES;
			rt->dst.output = ip6_pkt_prohibit_out;
			rt->dst.input = ip6_pkt_prohibit;
			break;
		case RTN_THROW:
		case RTN_UNREACHABLE:
		default:
			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
					: (cfg->fc_type == RTN_UNREACHABLE)
					? -EHOSTUNREACH : -ENETUNREACH;
			rt->dst.output = ip6_pkt_discard_out;
			rt->dst.input = ip6_pkt_discard;
			break;
		}
		goto install_route;
	}

	if (cfg->fc_flags & RTF_GATEWAY) {
		const struct in6_addr *gw_addr;
		int gwa_type;

		gw_addr = &cfg->fc_gateway;
		gwa_type = ipv6_addr_type(gw_addr);

		/* if gw_addr is local we will fail to detect this in case
		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
		 * will return already-added prefix route via interface that
		 * prefix route was assigned to, which might be non-loopback.
		 */
		err = -EINVAL;
		if (ipv6_chk_addr_and_flags(net, gw_addr,
					    gwa_type & IPV6_ADDR_LINKLOCAL ?
					    dev : NULL, 0, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid gateway address");
			goto out;
		}
		rt->rt6i_gateway = *gw_addr;

		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
			struct rt6_info *grt = NULL;

			/* IPv6 strictly inhibits using non-link-local
			 * addresses as nexthop address.
			 * Otherwise, router will not be able to send redirects.
			 * It is very good, but in some (rare!) circumstances
			 * (SIT, PtP, NBMA NOARP links) it is handy to allow
			 * some exceptions. --ANK
			 * We allow IPv4-mapped nexthops to support RFC4798-type
			 * addressing
			 */
			if (!(gwa_type & (IPV6_ADDR_UNICAST |
					  IPV6_ADDR_MAPPED))) {
				NL_SET_ERR_MSG(extack,
					       "Invalid gateway address");
				goto out;
			}

			if (cfg->fc_table) {
				grt = ip6_nh_lookup_table(net, cfg, gw_addr);

				if (grt) {
					if (grt->rt6i_flags & RTF_GATEWAY ||
					    (dev && dev != grt->dst.dev)) {
						ip6_rt_put(grt);
						grt = NULL;
					}
				}
			}

			if (!grt)
				grt = rt6_lookup(net, gw_addr, NULL,
						 cfg->fc_ifindex, 1);

			err = -EHOSTUNREACH;
			if (!grt)
				goto out;
			if (dev) {
				if (dev != grt->dst.dev) {
					ip6_rt_put(grt);
					goto out;
				}
			} else {
				dev = grt->dst.dev;
				idev = grt->rt6i_idev;
				dev_hold(dev);
				in6_dev_hold(grt->rt6i_idev);
			}
			if (!(grt->rt6i_flags & RTF_GATEWAY))
				err = 0;
			ip6_rt_put(grt);

			if (err)
				goto out;
		}
		err = -EINVAL;
		if (!dev) {
			NL_SET_ERR_MSG(extack, "Egress device not specified");
			goto out;
		} else if (dev->flags & IFF_LOOPBACK) {
			NL_SET_ERR_MSG(extack,
				       "Egress device can not be loopback device for this route");
			goto out;
		}
	}

	err = -ENODEV;
	if (!dev)
		goto out;

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
			NL_SET_ERR_MSG(extack, "Invalid source address");
			err = -EINVAL;
			goto out;
		}
		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
		rt->rt6i_prefsrc.plen = 128;
	} else
		rt->rt6i_prefsrc.plen = 0;

	rt->rt6i_flags = cfg->fc_flags;

install_route:
	rt->dst.dev = dev;
	rt->rt6i_idev = idev;
	rt->rt6i_table = table;

	cfg->fc_nlinfo.nl_net = dev_net(dev);

	return rt;
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);
	if (rt)
		dst_release_immediate(&rt->dst);

	return ERR_PTR(err);
}

int ip6_route_add(struct fib6_config *cfg,
		  struct netlink_ext_ack *extack)
{
	struct mx6_config mxc = { .mx = NULL, };
	struct rt6_info *rt;
	int err;

	rt = ip6_route_info_create(cfg, extack);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto out;
	}

	err = ip6_convert_metrics(&mxc, cfg);
	if (err)
		goto out;

	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);

	kfree(mxc.mx);

	return err;
out:
	if (rt)
		dst_release_immediate(&rt->dst);

	return err;
}

static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
{
	int err;
	struct fib6_table *table;
	struct net *net = dev_net(rt->dst.dev);

	if (rt == net->ipv6.ip6_null_entry) {
		err = -ENOENT;
		goto out;
	}

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_del(rt, info);
	write_unlock_bh(&table->tb6_lock);

out:
	ip6_rt_put(rt);
	return err;
}

int ip6_del_rt(struct rt6_info *rt)
{
	struct nl_info info = {
		.nl_net = dev_net(rt->dst.dev),
	};
	return __ip6_del_rt(rt, &info);
}

static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
{
	struct nl_info *info = &cfg->fc_nlinfo;
	struct net *net = info->nl_net;
	struct sk_buff *skb = NULL;
	struct fib6_table *table;
	int err = -ENOENT;

	if (rt == net->ipv6.ip6_null_entry)
		goto out_put;
	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);

	if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
		struct rt6_info *sibling, *next_sibling;

		/* prefer to send a single notification with all hops */
		skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
		if (skb) {
			u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;

			if (rt6_fill_node(net, skb, rt,
					  NULL, NULL, 0, RTM_DELROUTE,
					  info->portid, seq, 0) < 0) {
				kfree_skb(skb);
				skb = NULL;
			} else
				info->skip_notify = 1;
		}

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings,
					 rt6i_siblings) {
			err = fib6_del(sibling, info);
			if (err)
				goto out_unlock;
		}
	}

	err = fib6_del(rt, info);
out_unlock:
	write_unlock_bh(&table->tb6_lock);
out_put:
	ip6_rt_put(rt);

	if (skb) {
		rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
			    info->nlh, gfp_any());
	}
	return err;
}

static int ip6_route_del(struct fib6_config *cfg,
			 struct netlink_ext_ack *extack)
{
	struct fib6_table *table;
	struct fib6_node *fn;
	struct rt6_info *rt;
	int err = -ESRCH;

	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
		return err;
	}

	read_lock_bh(&table->tb6_lock);

	fn = fib6_locate(&table->tb6_root,
			 &cfg->fc_dst, cfg->fc_dst_len,
			 &cfg->fc_src, cfg->fc_src_len);

	if (fn) {
		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
			if ((rt->rt6i_flags & RTF_CACHE) &&
			    !(cfg->fc_flags & RTF_CACHE))
				continue;
			if (cfg->fc_ifindex &&
			    (!rt->dst.dev ||
			     rt->dst.dev->ifindex != cfg->fc_ifindex))
				continue;
			if (cfg->fc_flags & RTF_GATEWAY &&
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
				continue;
			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
				continue;
			if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
				continue;
			dst_hold(&rt->dst);
			read_unlock_bh(&table->tb6_lock);

			/* if gateway was specified only delete the one hop */
			if (cfg->fc_flags & RTF_GATEWAY)
				return __ip6_del_rt(rt, &cfg->fc_nlinfo);

			return __ip6_del_rt_siblings(rt, cfg);
		}
	}
	read_unlock_bh(&table->tb6_lock);

	return err;
}

static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct netevent_redirect netevent;
	struct rt6_info *rt, *nrt = NULL;
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;
	struct rd_msg *msg;
	int optlen, on_link;
	u8 *lladdr;

	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

	if (optlen < 0) {
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
		return;
	}

	msg = (struct rd_msg *)icmp6_hdr(skb);

	if (ipv6_addr_is_multicast(&msg->dest)) {
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
		return;
	}

	on_link = 0;
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
		on_link = 1;
	} else if (ipv6_addr_type(&msg->target) !=
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
		return;
	}

	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */

	if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
		return;
	}

	lladdr = NULL;
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}

	rt = (struct rt6_info *) dst;
	if (rt->rt6i_flags & RTF_REJECT) {
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
		return;
	}

	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
	dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);

	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
	if (!neigh)
		return;

	/*
	 *	We have finally decided to accept it.
	 */

	ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
				     NEIGH_UPDATE_F_ISROUTER)),
		     NDISC_REDIRECT, &ndopts);

	nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
	if (!nrt)
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	if (ip6_ins_rt(nrt))
		goto out_release;

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	netevent.daddr = &msg->dest;
	netevent.neigh = neigh;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

	if (rt->rt6i_flags & RTF_CACHE) {
		rt = (struct rt6_info *) dst_clone(&rt->dst);
		ip6_del_rt(rt);
	}

out_release:
	/* Release the reference taken in
	 * ip6_rt_cache_alloc()
	 */
	dst_release(&nrt->dst);

out:
	neigh_release(neigh);
}
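
/*
 * Illustrative sketch (not part of the original file): per RFC 4861 a
 * redirect either announces that the destination is itself on-link
 * (target == destination) or names a better first-hop router, which must
 * be a link-local unicast address. Hypothetical form of the on-link test
 * performed above:
 */
#if 0	/* example only */
static bool example_redirect_is_on_link(const struct in6_addr *dest,
					const struct in6_addr *target)
{
	return ipv6_addr_equal(dest, target);
}
#endif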
2381 * Misc support functions
2384 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
2386 BUG_ON(from->dst.from);
2388 rt->rt6i_flags &= ~RTF_EXPIRES;
2389 dst_hold(&from->dst);
2390 rt->dst.from = &from->dst;
2391 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
2394 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
2396 rt->dst.input = ort->dst.input;
2397 rt->dst.output = ort->dst.output;
2398 rt->rt6i_dst = ort->rt6i_dst;
2399 rt->dst.error = ort->dst.error;
2400 rt->rt6i_idev = ort->rt6i_idev;
2402 in6_dev_hold(rt->rt6i_idev);
2403 rt->dst.lastuse = jiffies;
2404 rt->rt6i_gateway = ort->rt6i_gateway;
2405 rt->rt6i_flags = ort->rt6i_flags;
2406 rt6_set_from(rt, ort);
2407 rt->rt6i_metric = ort->rt6i_metric;
2408 #ifdef CONFIG_IPV6_SUBTREES
2409 rt->rt6i_src = ort->rt6i_src;
2411 rt->rt6i_prefsrc = ort->rt6i_prefsrc;
2412 rt->rt6i_table = ort->rt6i_table;
2413 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
2416 #ifdef CONFIG_IPV6_ROUTE_INFO
2417 static struct rt6_info *rt6_get_route_info(struct net *net,
2418 const struct in6_addr *prefix, int prefixlen,
2419 const struct in6_addr *gwaddr,
2420 struct net_device *dev)
2422 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
2423 int ifindex = dev->ifindex;
2424 struct fib6_node *fn;
2425 struct rt6_info *rt = NULL;
2426 struct fib6_table *table;
2428 table = fib6_get_table(net, tb_id);
2432 read_lock_bh(&table->tb6_lock);
2433 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);
2437 for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
2438 if (rt->dst.dev->ifindex != ifindex)
2440 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
2442 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
2448 read_unlock_bh(&table->tb6_lock);
2452 static struct rt6_info *rt6_add_route_info(struct net *net,
2453 const struct in6_addr *prefix, int prefixlen,
2454 const struct in6_addr *gwaddr,
2455 struct net_device *dev,
2458 struct fib6_config cfg = {
2459 .fc_metric = IP6_RT_PRIO_USER,
2460 .fc_ifindex = dev->ifindex,
2461 .fc_dst_len = prefixlen,
2462 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
2463 RTF_UP | RTF_PREF(pref),
2464 .fc_nlinfo.portid = 0,
2465 .fc_nlinfo.nlh = NULL,
2466 .fc_nlinfo.nl_net = net,
2469 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
2470 cfg.fc_dst = *prefix;
2471 cfg.fc_gateway = *gwaddr;
2473 /* We should treat it as a default route if prefix length is 0. */
2475 cfg.fc_flags |= RTF_DEFAULT;
2477 ip6_route_add(&cfg, NULL);
2479 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
2483 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
2485 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
2486 struct rt6_info *rt;
2487 struct fib6_table *table;
2489 table = fib6_get_table(dev_net(dev), tb_id);
2493 read_lock_bh(&table->tb6_lock);
2494 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2495 if (dev == rt->dst.dev &&
2496 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
2497 ipv6_addr_equal(&rt->rt6i_gateway, addr))
2502 read_unlock_bh(&table->tb6_lock);
2506 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
2507 struct net_device *dev,
2510 struct fib6_config cfg = {
2511 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
2512 .fc_metric = IP6_RT_PRIO_USER,
2513 .fc_ifindex = dev->ifindex,
2514 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
2515 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
2516 .fc_nlinfo.portid = 0,
2517 .fc_nlinfo.nlh = NULL,
2518 .fc_nlinfo.nl_net = dev_net(dev),
2521 cfg.fc_gateway = *gwaddr;
2523 if (!ip6_route_add(&cfg, NULL)) {
2524 struct fib6_table *table;
2526 table = fib6_get_table(dev_net(dev), cfg.fc_table);
2528 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
2531 return rt6_get_dflt_router(gwaddr, dev);
2534 static void __rt6_purge_dflt_routers(struct fib6_table *table)
2536 struct rt6_info *rt;
2539 read_lock_bh(&table->tb6_lock);
2540 for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
2541 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
2542 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
2544 read_unlock_bh(&table->tb6_lock);
2549 read_unlock_bh(&table->tb6_lock);
2551 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
2554 void rt6_purge_dflt_routers(struct net *net)
2556 struct fib6_table *table;
2557 struct hlist_head *head;
2562 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
2563 head = &net->ipv6.fib_table_hash[h];
2564 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
2565 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
2566 __rt6_purge_dflt_routers(table);
2573 static void rtmsg_to_fib6_config(struct net *net,
2574 struct in6_rtmsg *rtmsg,
2575 struct fib6_config *cfg)
2577 memset(cfg, 0, sizeof(*cfg));
2579 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
2581 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
2582 cfg->fc_metric = rtmsg->rtmsg_metric;
2583 cfg->fc_expires = rtmsg->rtmsg_info;
2584 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
2585 cfg->fc_src_len = rtmsg->rtmsg_src_len;
2586 cfg->fc_flags = rtmsg->rtmsg_flags;
2588 cfg->fc_nlinfo.nl_net = net;
2590 cfg->fc_dst = rtmsg->rtmsg_dst;
2591 cfg->fc_src = rtmsg->rtmsg_src;
2592 cfg->fc_gateway = rtmsg->rtmsg_gateway;
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
{
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;
	int err;

	switch (cmd) {
	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
			return -EPERM;
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
			err = ip6_route_add(&cfg, NULL);
			break;
		case SIOCDELRT:
			err = ip6_route_del(&cfg, NULL);
			break;
		default:
			err = -EINVAL;
		}
		rtnl_unlock();

		return err;
	}

	return -EINVAL;
}
/*
 *	Drop the packet on the floor
 */

static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
{
	int type;
	struct dst_entry *dst = skb_dst(skb);

	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
				      IPSTATS_MIB_INADDRERRORS);
			break;
		}
		/* FALLTHROUGH */
	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
			      ipstats_mib_noroutes);
		break;
	}
	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
	kfree_skb(skb);
	return 0;
}
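/* The four handlers below back the "no route" and "administratively
 * prohibited" template dst entries (wired up as dst.input/dst.output of
 * ip6_null_entry and ip6_prohibit_entry elsewhere in this file): each one
 * accounts the failed lookup and drops the packet with the matching
 * ICMPv6 destination-unreachable code.
 */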
static int ip6_pkt_discard(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
}

static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
}

static int ip6_pkt_prohibit(struct sk_buff *skb)
{
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
}

static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
}
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */

struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
				    const struct in6_addr *addr,
				    bool anycast)
{
	u32 tb_id;
	struct net *net = dev_net(idev->dev);
	struct net_device *dev = net->loopback_dev;
	struct rt6_info *rt;

	/* use L3 Master device as loopback for host routes if device
	 * is enslaved and address is not link local or multicast
	 */
	if (!rt6_need_strict(addr))
		dev = l3mdev_master_dev_rcu(idev->dev) ? : dev;

	rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
	if (!rt)
		return ERR_PTR(-ENOMEM);

	in6_dev_hold(idev);

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_idev = idev;

	rt->rt6i_protocol = RTPROT_KERNEL;
	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
	if (anycast)
		rt->rt6i_flags |= RTF_ANYCAST;
	else
		rt->rt6i_flags |= RTF_LOCAL;

	rt->rt6i_gateway  = *addr;
	rt->rt6i_dst.addr = *addr;
	rt->rt6i_dst.plen = 128;
	tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
	rt->rt6i_table = fib6_get_table(net, tb_id);

	return rt;
}
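/* The resulting host route (plen 128) lands in RT6_TABLE_LOCAL, or in the
 * device's l3mdev table when it is enslaved to an L3 master device, so
 * that VRF-local addresses resolve within their own table.
 */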
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;
	struct net *net;
	struct in6_addr *addr;
};

static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
{
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

	if (((void *)rt->dst.dev == dev || !dev) &&
	    rt != net->ipv6.ip6_null_entry &&
	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
		/* remove prefsrc entry */
		rt->rt6i_prefsrc.plen = 0;
	}
	return 0;
}

void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
{
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
}
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

/* Remove routers and update dst entries when a gateway turns into a host. */
static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
{
	struct in6_addr *gateway = (struct in6_addr *)arg;

	if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
	     ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
	    ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
		return -1;
	}
	return 0;
}

void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
{
	fib6_clean_all(net, fib6_clean_tohost, gateway);
}
struct arg_dev_net {
	struct net_device *dev;
	struct net *net;
};

/* called with write lock held for table with rt */
static int fib6_ifdown(struct rt6_info *rt, void *arg)
{
	const struct arg_dev_net *adn = arg;
	const struct net_device *dev = adn->dev;

	if ((rt->dst.dev == dev || !dev) &&
	    rt != adn->net->ipv6.ip6_null_entry &&
	    (rt->rt6i_nsiblings == 0 ||
	     (dev && netdev_unregistering(dev)) ||
	     !rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
		return -1;

	return 0;
}

void rt6_ifdown(struct net *net, struct net_device *dev)
{
	struct arg_dev_net adn = {
		.dev = dev,
		.net = net,
	};

	fib6_clean_all(net, fib6_ifdown, &adn);
	rt6_uncached_list_flush_dev(net, dev);
}
struct rt6_mtu_change_arg {
	struct net_device *dev;
	unsigned int mtu;
};

static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;

	/* In IPv6, PMTU discovery is not optional,
	   so the RTAX_MTU lock cannot disable it.
	   We still use this lock to block changes
	   caused by addrconf/ndisc.
	*/

	idev = __in6_dev_get(arg->dev);
	if (!idev)
		return 0;
	/* For an administrative MTU increase, there is no way to discover
	   an IPv6 PMTU increase, so the PMTU increase should be updated
	   here. Since RFC 1981 doesn't include administrative MTU
	   increases, updating the PMTU on increase is a MUST (e.g. for
	   jumbo frames).

	   If the new MTU is less than the route PMTU, this new MTU will be
	   the lowest MTU in the path; update the route PMTU to reflect the
	   PMTU decrease. If the new MTU is greater than the route PMTU, and
	   the old MTU is the lowest MTU in the path, update the route PMTU
	   to reflect the increase. In this case, if the other nodes' MTU is
	   also the lowest MTU in the path, a Packet Too Big message will
	   lead to PMTU discovery.
	*/
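	/* Illustrative example (not in the original source): lowering a
	 * device MTU from 1500 to 1280 clamps every unlocked route MTU on
	 * that device down to 1280; raising it back to 1500 lifts only the
	 * routes whose MTU still equals the old device MTU, and any smaller
	 * bottleneck elsewhere on the path is then rediscovered via a
	 * Packet Too Big message.
	 */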
	if (rt->dst.dev == arg->dev &&
	    dst_metric_raw(&rt->dst, RTAX_MTU) &&
	    !dst_metric_locked(&rt->dst, RTAX_MTU)) {
		if (rt->rt6i_flags & RTF_CACHE) {
			/* For RTF_CACHE with rt6i_pmtu == 0
			 * (i.e. a redirected route),
			 * the metrics of its rt->dst.from have already
			 * been updated.
			 */
			if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
				rt->rt6i_pmtu = arg->mtu;
		} else if (dst_mtu(&rt->dst) >= arg->mtu ||
			   (dst_mtu(&rt->dst) < arg->mtu &&
			    dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
			dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
		}
	}
	return 0;
}

void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
{
	struct rt6_mtu_change_arg arg = {
		.dev = dev,
		.mtu = mtu,
	};

	fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
}
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]		= { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
	[RTA_EXPIRES]		= { .type = NLA_U32 },
	[RTA_UID]		= { .type = NLA_U32 },
	[RTA_MARK]		= { .type = NLA_U32 },
};
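/* This policy is handed to nlmsg_parse() below; it validates each
 * attribute's type and minimum length before rtm_to_fib6_config() and
 * inet6_rtm_getroute() read the parsed attributes.
 */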
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg,
			      struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_type = rtm->rtm_type;

	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}

	if (tb[RTA_DST]) {
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);

		err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
						     cfg->fc_mp_len, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_PREF]) {
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE]) {
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

		err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
		if (err < 0)
			goto errout;
	}

	if (tb[RTA_EXPIRES]) {
		unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);

		if (addrconf_finite_timeout(timeout)) {
			cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
			cfg->fc_flags |= RTF_EXPIRES;
		}
	}

	err = 0;
errout:
	return err;
}
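/* Per-nexthop bookkeeping for RTM_NEWROUTE requests carrying
 * RTA_MULTIPATH: each rtnexthop in the message is expanded into its own
 * rt6_info plus the parsed config needed to install it, or to roll it
 * back if a later nexthop fails.
 */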
struct rt6_nh {
	struct rt6_info *rt6_info;
	struct fib6_config r_cfg;
	struct mx6_config mxc;
	struct list_head next;
};

static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
{
	struct rt6_nh *nh;

	list_for_each_entry(nh, rt6_nh_list, next) {
		pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
			&nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
			nh->r_cfg.fc_ifindex);
	}
}

static int ip6_route_info_append(struct list_head *rt6_nh_list,
				 struct rt6_info *rt, struct fib6_config *r_cfg)
{
	struct rt6_nh *nh;
	int err;

	list_for_each_entry(nh, rt6_nh_list, next) {
		/* check if rt6_info already exists */
		if (rt6_duplicate_nexthop(nh->rt6_info, rt))
			return -EEXIST;
	}

	nh = kzalloc(sizeof(*nh), GFP_KERNEL);
	if (!nh)
		return -ENOMEM;
	nh->rt6_info = rt;
	err = ip6_convert_metrics(&nh->mxc, r_cfg);
	if (err) {
		kfree(nh);
		return err;
	}
	memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
	list_add_tail(&nh->next, rt6_nh_list);

	return 0;
}
static void ip6_route_mpath_notify(struct rt6_info *rt,
				   struct rt6_info *rt_last,
				   struct nl_info *info,
				   __u16 nlflags)
{
	/* if this is an APPEND route, then rt points to the first route
	 * inserted and rt_last points to the last route inserted. Userspace
	 * wants a consistent dump of the route which starts at the first
	 * nexthop. Since sibling routes are always added at the end of
	 * the list, find the first sibling of the last route appended.
	 */
	if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
		rt = list_first_entry(&rt_last->rt6i_siblings,
				      struct rt6_info,
				      rt6i_siblings);
	}

	if (rt)
		inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
}
static int ip6_route_multipath_add(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct rt6_info *rt_notif = NULL, *rt_last = NULL;
	struct nl_info *info = &cfg->fc_nlinfo;
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	struct rt6_info *rt;
	struct rt6_nh *err_nh;
	struct rt6_nh *nh, *nh_safe;
	__u16 nlflags;
	int remaining;
	int attrlen;
	int err = 1;
	int nhn = 0;
	int replace = (cfg->fc_nlinfo.nlh &&
		       (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
	LIST_HEAD(rt6_nh_list);

	nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
	if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
		nlflags |= NLM_F_APPEND;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * rt6_info structs per nexthop
	 */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}

		rt = ip6_route_info_create(&r_cfg, extack);
		if (IS_ERR(rt)) {
			err = PTR_ERR(rt);
			rt = NULL;
			goto cleanup;
		}

		err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
		if (err) {
			dst_release_immediate(&rt->dst);
			goto cleanup;
		}

		rtnh = rtnh_next(rtnh, &remaining);
	}

	/* for add and replace, send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
	info->skip_notify = 1;

	err_nh = NULL;
	list_for_each_entry(nh, &rt6_nh_list, next) {
		rt_last = nh->rt6_info;
		err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
		/* save reference to first route for notification */
		if (!rt_notif && !err)
			rt_notif = nh->rt6_info;

		/* nh->rt6_info is used or freed at this point, reset to NULL */
		nh->rt6_info = NULL;
		if (err) {
			if (replace && nhn)
				ip6_print_replace_route_err(&rt6_nh_list);
			err_nh = nh;
			goto add_errout;
		}

		/* Because each route is added like a single route we remove
		 * these flags after the first nexthop: if there is a collision,
		 * we have already failed to add the first nexthop:
		 * fib6_add_rt2node() has rejected it; when replacing, old
		 * nexthops have been replaced by the first new one, and the
		 * rest should be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		nhn++;
	}

	/* success ... tell user about new route */
	ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	goto cleanup;

add_errout:
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);

	/* Delete routes that were already added */
	list_for_each_entry(nh, &rt6_nh_list, next) {
		if (err_nh == nh)
			break;
		ip6_route_del(&nh->r_cfg, extack);
	}

cleanup:
	list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
		if (nh->rt6_info)
			dst_release_immediate(&nh->rt6_info->dst);
		kfree(nh->mxc.mx);
		list_del(&nh->next);
		kfree(nh);
	}

	return err;
}
static int ip6_route_multipath_del(struct fib6_config *cfg,
				   struct netlink_ext_ack *extack)
{
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	int remaining;
	int attrlen;
	int err = 1, last_err = 0;

	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				nla_memcpy(&r_cfg.fc_gateway, nla, 16);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
		}
		err = ip6_route_del(&r_cfg, extack);
		if (err)
			last_err = err;

		rtnh = rtnh_next(rtnh, &remaining);
	}

	return last_err;
}
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath_del(&cfg, extack);

	cfg.fc_delete_all_nh = 1;
	return ip6_route_del(&cfg, extack);
}
static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath_add(&cfg, extack);

	return ip6_route_add(&cfg, extack);
}
static size_t rt6_nlmsg_size(struct rt6_info *rt)
{
	int nexthop_len = 0;

	if (rt->rt6i_nsiblings) {
		nexthop_len = nla_total_size(0)	 /* RTA_MULTIPATH */
			    + NLA_ALIGN(sizeof(struct rtnexthop))
			    + nla_total_size(16) /* RTA_GATEWAY */
			    + lwtunnel_get_encap_size(rt->dst.lwtstate);

		nexthop_len *= rt->rt6i_nsiblings;
	}

	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1) /* RTA_PREF */
	       + lwtunnel_get_encap_size(rt->dst.lwtstate)
	       + nexthop_len;
}
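/* This is a worst-case upper bound: inet6_rt_notify() sizes its skb from
 * it, so rt6_fill_node() returning -EMSGSIZE afterwards is treated as a
 * bug in this estimate (see the WARN_ON in inet6_rt_notify() below).
 */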
static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
			    unsigned int *flags, bool skip_oif)
{
	if (!netif_running(rt->dst.dev) || !netif_carrier_ok(rt->dst.dev)) {
		*flags |= RTNH_F_LINKDOWN;
		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
			*flags |= RTNH_F_DEAD;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
			goto nla_put_failure;
	}

	/* not needed for multipath encoding b/c it has a rtnexthop struct */
	if (!skip_oif && rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;

	if (rt->dst.lwtstate &&
	    lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
/* add multipath next hop */
static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
{
	struct rtnexthop *rtnh;
	unsigned int flags = 0;

	rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
	if (!rtnh)
		goto nla_put_failure;

	rtnh->rtnh_hops = 0;
	rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;

	if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
		goto nla_put_failure;

	rtnh->rtnh_flags = flags;

	/* length of rtnetlink header + attributes */
	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags)
{
	u32 metrics[RTAX_MAX];
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	long expires;
	u32 table;

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt->rt6i_dst.plen;
	rtm->rtm_src_len = rt->rt6i_src.plen;
	rtm->rtm_tos = 0;
	if (rt->rt6i_table)
		table = rt->rt6i_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	rtm->rtm_table = table;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;
	if (rt->rt6i_flags & RTF_REJECT) {
		switch (rt->dst.error) {
		case -EINVAL:
			rtm->rtm_type = RTN_BLACKHOLE;
			break;
		case -EACCES:
			rtm->rtm_type = RTN_PROHIBIT;
			break;
		case -EAGAIN:
			rtm->rtm_type = RTN_THROW;
			break;
		default:
			rtm->rtm_type = RTN_UNREACHABLE;
			break;
		}
	}
	else if (rt->rt6i_flags & RTF_LOCAL)
		rtm->rtm_type = RTN_LOCAL;
	else if (rt->rt6i_flags & RTF_ANYCAST)
		rtm->rtm_type = RTN_ANYCAST;
	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
		rtm->rtm_type = RTN_LOCAL;
	else
		rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_flags = 0;
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->rt6i_protocol;
	if (rt->rt6i_flags & RTF_DYNAMIC)
		rtm->rtm_protocol = RTPROT_REDIRECT;
	else if (rt->rt6i_flags & RTF_ADDRCONF) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
			rtm->rtm_protocol = RTPROT_RA;
		else
			rtm->rtm_protocol = RTPROT_KERNEL;
	}

	if (rt->rt6i_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	if (dst) {
		if (nla_put_in6_addr(skb, RTA_DST, dst))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
			int err = ip6mr_get_route(net, skb, rtm, portid);

			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dst) {
		struct in6_addr saddr_buf;
		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->rt6i_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->rt6i_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt6i_pmtu)
		metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
		goto nla_put_failure;

	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
	if (rt->rt6i_nsiblings) {
		struct rt6_info *sibling, *next_sibling;
		struct nlattr *mp;

		mp = nla_nest_start(skb, RTA_MULTIPATH);
		if (!mp)
			goto nla_put_failure;

		if (rt6_add_nexthop(skb, rt) < 0)
			goto nla_put_failure;

		list_for_each_entry_safe(sibling, next_sibling,
					 &rt->rt6i_siblings, rt6i_siblings) {
			if (rt6_add_nexthop(skb, sibling) < 0)
				goto nla_put_failure;
		}

		nla_nest_end(skb, mp);
	} else {
		if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
			goto nla_put_failure;
	}

	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
int rt6_dump_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
	struct net *net = arg->net;

	if (rt == net->ipv6.ip6_null_entry)
		return 0;

	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);

		/* user wants prefix routes only */
		if (rtm->rtm_flags & RTM_F_PREFIX &&
		    !(rt->rt6i_flags & RTF_PREFIX_RT)) {
			/* success since this is not a prefix route */
			return 1;
		}
	}

	return rt6_fill_node(net,
		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
		     NLM_F_MULTI);
}
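/* RTM_GETROUTE handler. When the request sets RTM_F_FIB_MATCH, the exact
 * FIB entry that matched the flow is returned (via ip6_route_lookup())
 * rather than the fully resolved dst a normal input/output lookup would
 * produce.
 */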
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			      struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	int err, iif = 0, oif = 0;
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6;
	bool fibmatch;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
			  extack);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	memset(&fl6, 0, sizeof(fl6));
	rtm = nlmsg_data(nlh);
	fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
	fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;

		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;

		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();

	if (iif) {
		struct net_device *dev;
		int flags = 0;

		dev = __dev_get_by_index(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout;
		}

		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		if (!fibmatch)
			dst = ip6_route_input_lookup(net, dev, &fl6, flags);
	} else {
		fl6.flowi6_oif = oif;

		if (!fibmatch)
			dst = ip6_route_output(net, NULL, &fl6);
	}

	if (fibmatch)
		dst = ip6_route_lookup(net, &fl6, 0);

	rt = container_of(dst, struct rt6_info, dst);
	if (rt->dst.error) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		err = rt->dst.error;
		ip6_rt_put(rt);
		goto errout;
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	skb_dst_set(skb, &rt->dst);
	if (fibmatch)
		err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	else
		err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
				    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, 0);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
		     unsigned int nlm_flags)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err;

	err = -ENOBUFS;
	seq = info->nlh ? info->nlh->nlmsg_seq : 0;

	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
			    event, info->portid, seq, nlm_flags);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	if (err < 0)
		rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (!(dev->flags & IFF_LOOPBACK))
		return NOTIFY_OK;

	if (event == NETDEV_REGISTER) {
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	} else if (event == NETDEV_UNREGISTER &&
		   dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER can be fired multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
		in6_dev_put(net->ipv6.ip6_null_entry->rt6i_idev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		in6_dev_put(net->ipv6.ip6_prohibit_entry->rt6i_idev);
		in6_dev_put(net->ipv6.ip6_blk_hole_entry->rt6i_idev);
#endif
	}

	return NOTIFY_OK;
}
#ifdef CONFIG_PROC_FS

static const struct file_operations ipv6_route_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= ipv6_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;

	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   net->ipv6.rt6_stats->fib_rt_alloc,
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}

static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, rt6_stats_seq_show);
}

static const struct file_operations rt6_stats_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= rt6_stats_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release_net,
};
#endif	/* CONFIG_PROC_FS */
#ifdef CONFIG_SYSCTL

static
int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net;
	int delay;

	if (!write)
		return -EINVAL;

	net = (struct net *)ctl->extra1;
	delay = net->ipv6.sysctl.flush_delay;
	proc_dointvec(ctl, write, buffer, lenp, ppos);
	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}
struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	= "flush",
		.data		= &init_net.ipv6.sysctl.flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv6_sysctl_rtcache_flush
	},
	{
		.procname	= "gc_thresh",
		.data		= &ip6_dst_ops_template.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{ }
};
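/* Note that "gc_min_interval" and "gc_min_interval_ms" share the same
 * backing value (ip6_rt_gc_min_interval); the latter simply exposes it in
 * milliseconds via proc_dointvec_ms_jiffies instead of in seconds.
 */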
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
#endif
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_ip6_dst_entries;
	net->ipv6.ip6_null_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;
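	/* The min_adv_mss default of IPV6_MIN_MTU - 20 - 40 works out to
	 * the IPv6 minimum MTU of 1280 bytes minus a 40-byte IPv6 header
	 * and a 20-byte TCP header, i.e. 1220 bytes of advertised MSS.
	 */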
	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}

static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
#endif
	return 0;
}

static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init	= ipv6_inetpeer_init,
	.exit	= ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10,
};
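/* The priority just below ADDRCONF_NOTIFY_PRIORITY is presumably
 * deliberate: addrconf's own notifier runs first, so the inet6_dev
 * already exists when ip6_route_dev_notify() handles NETDEV_REGISTER
 * for the loopback device.
 */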
void __init ip6_route_init_special_entries(void)
{
	/* Registering of the loopback is done before this portion of code,
	 * so the loopback reference in rt6_info will not be taken; do it
	 * manually for init_net.
	 */
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = -ENOBUFS;
	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}