/*
 *	Linux INET6 implementation
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*	Changes:
 *
 *	YOSHIFUJI Hideaki @USAGI
 *		reworked default router selection.
 *		- respect outgoing interface
 *		- select from (probably) reachable routers (i.e.
 *		  routers in REACHABLE, STALE, DELAY or PROBE states).
 *		- always select the same router if it is (probably)
 *		  reachable.  otherwise, round-robin the list.
 *		Fixed routing subtrees.
 */
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/net_namespace.h>
#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>
#include <linux/rtnetlink.h>
#include <net/dst_metadata.h>
#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>
#include <trace/events/fib6.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1
};
static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry	*ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ip6_default_advmss(const struct dst_entry *dst);
static unsigned int	 ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void		ip6_dst_destroy(struct dst_entry *);
static void		ip6_dst_ifdown(struct dst_entry *,
				       struct net_device *dev, int how);
static int		 ip6_dst_gc(struct dst_ops *ops);

static int		ip6_pkt_discard(struct sk_buff *skb);
static int		ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static int		ip6_pkt_prohibit(struct sk_buff *skb);
static int		ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb);
static void		ip6_link_failure(struct sk_buff *skb);
static void		ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
					struct sk_buff *skb);
static void		rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);
static size_t rt6_nlmsg_size(struct rt6_info *rt);
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 unsigned int flags);
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
					   struct in6_addr *daddr,
					   struct in6_addr *saddr);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev,
					   unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr,
					   struct net_device *dev);
#endif
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
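
/* Routes that live outside the fib6 tree (e.g. RTF_CACHE clones not
 * owned by the tree) are tracked on per-CPU lists so that
 * rt6_uncached_list_flush_dev() can re-point their device references
 * at the loopback device when the original device goes away.
 */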
void rt6_uncached_list_add(struct rt6_info *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt6_uncached_list_del(struct rt6_info *rt)
{
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;
		struct net *net = dev_net(rt->dst.dev);

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		atomic_dec(&net->ipv6.rt6_stats->fib_rt_uncache);
		spin_unlock_bh(&ul->lock);
	}
}
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
{
	struct net_device *loopback_dev = net->loopback_dev;
	int cpu;

	if (dev == loopback_dev)
		return;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);
		struct rt6_info *rt;

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev->dev == dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev == dev) {
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
				dev_put(rt_dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
}
static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
{
	return dst_metrics_write_ptr(&rt->from->dst);
}

static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (rt->rt6i_flags & RTF_PCPU)
		return rt6_pcpu_cow_metrics(rt);
	else if (rt->rt6i_flags & RTF_CACHE)
		return NULL;
	else
		return dst_cow_metrics_generic(dst, old);
}
static inline const void *choose_neigh_daddr(struct rt6_info *rt,
					     struct sk_buff *skb,
					     const void *daddr)
{
	struct in6_addr *p = &rt->rt6i_gateway;

	if (!ipv6_addr_any(p))
		return (const void *) p;
	else if (skb)
		return &ipv6_hdr(skb)->daddr;
	return daddr;
}

static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,
					  struct sk_buff *skb,
					  const void *daddr)
{
	struct rt6_info *rt = (struct rt6_info *) dst;
	struct neighbour *n;

	daddr = choose_neigh_daddr(rt, skb, daddr);
	n = __ipv6_neigh_lookup(dst->dev, daddr);
	if (n)
		return n;
	return neigh_create(&nd_tbl, daddr, dst->dev);
}

static void ip6_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	struct net_device *dev = dst->dev;
	struct rt6_info *rt = (struct rt6_info *)dst;

	daddr = choose_neigh_daddr(rt, NULL, daddr);
	if (!daddr)
		return;
	if (dev->flags & (IFF_NOARP | IFF_LOOPBACK))
		return;
	if (ipv6_addr_is_multicast((const struct in6_addr *)daddr))
		return;
	__ipv6_confirm_neigh(dev, daddr);
}
static struct dst_ops ip6_dst_ops_template = {
	.family			=	AF_INET6,
	.gc			=	ip6_dst_gc,
	.gc_thresh		=	1024,
	.check			=	ip6_dst_check,
	.default_advmss		=	ip6_default_advmss,
	.mtu			=	ip6_mtu,
	.cow_metrics		=	ipv6_cow_metrics,
	.destroy		=	ip6_dst_destroy,
	.ifdown			=	ip6_dst_ifdown,
	.negative_advice	=	ip6_negative_advice,
	.link_failure		=	ip6_link_failure,
	.update_pmtu		=	ip6_rt_update_pmtu,
	.redirect		=	rt6_do_redirect,
	.local_out		=	__ip6_local_out,
	.neigh_lookup		=	ip6_neigh_lookup,
	.confirm_neigh		=	ip6_confirm_neigh,
};
static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)
{
}

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				      struct sk_buff *skb)
{
}

static struct dst_ops ip6_dst_blackhole_ops = {
	.family			=	AF_INET6,
	.destroy		=	ip6_dst_destroy,
	.check			=	ip6_dst_check,
	.mtu			=	ip6_blackhole_mtu,
	.default_advmss		=	ip6_default_advmss,
	.update_pmtu		=	ip6_rt_blackhole_update_pmtu,
	.redirect		=	ip6_rt_blackhole_redirect,
	.cow_metrics		=	dst_cow_metrics_generic,
	.neigh_lookup		=	ip6_neigh_lookup,
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct rt6_info ip6_null_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EACCES,
		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
	.dst = {
		.__refcnt	= ATOMIC_INIT(1),
		.__use		= 1,
		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -EINVAL,
		.input		= dst_discard,
		.output		= dst_discard_out,
	},
	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#endif
static void rt6_info_init(struct rt6_info *rt)
{
	struct dst_entry *dst = &rt->dst;

	memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
	INIT_LIST_HEAD(&rt->rt6i_siblings);
	INIT_LIST_HEAD(&rt->rt6i_uncached);
}

/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
					struct net_device *dev,
					int flags)
{
	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					1, DST_OBSOLETE_FORCE_CHK, flags);

	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);
	}

	return rt;
}
struct rt6_info *ip6_dst_alloc(struct net *net,
			       struct net_device *dev,
			       int flags)
{
	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);

	if (rt) {
		rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);
		if (!rt->rt6i_pcpu) {
			dst_release_immediate(&rt->dst);
			return NULL;
		}
	}

	return rt;
}
EXPORT_SYMBOL(ip6_dst_alloc);
static void ip6_dst_destroy(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct rt6_exception_bucket *bucket;
	struct rt6_info *from = rt->from;
	struct inet6_dev *idev;

	dst_destroy_metrics_generic(dst);
	free_percpu(rt->rt6i_pcpu);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;
	if (idev) {
		rt->rt6i_idev = NULL;
		in6_dev_put(idev);
	}
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket, 1);
	if (bucket) {
		rt->rt6i_exception_bucket = NULL;
		kfree(bucket);
	}

	rt->from = NULL;
	dst_release(&from->dst);
}

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,
			   int how)
{
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (idev && idev->dev != loopback_dev) {
		struct inet6_dev *loopback_idev = in6_dev_get(loopback_dev);
		if (loopback_idev) {
			rt->rt6i_idev = loopback_idev;
			in6_dev_put(idev);
		}
	}
}
static bool __rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES)
		return time_after(jiffies, rt->dst.expires);
	else
		return false;
}

static bool rt6_check_expired(const struct rt6_info *rt)
{
	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))
			return true;
	} else if (rt->from) {
		return rt->dst.obsolete != DST_OBSOLETE_FORCE_CHK ||
			rt6_check_expired(rt->from);
	}
	return false;
}
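
/* Multipath nexthop selection: fl6->mp_hash is compared against each
 * sibling's rt6i_nh_upper_bound, so the probability that a flow maps
 * to a given nexthop is proportional to that nexthop's weight.
 */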
static struct rt6_info *rt6_multipath_select(const struct net *net,
					     struct rt6_info *match,
					     struct flowi6 *fl6, int oif,
					     const struct sk_buff *skb,
					     int strict)
{
	struct rt6_info *sibling, *next_sibling;

	/* We might have already computed the hash for ICMPv6 errors. In such
	 * case it will always be non-zero. Otherwise now is the time to do it.
	 */
	if (!fl6->mp_hash)
		fl6->mp_hash = rt6_multipath_hash(net, fl6, skb, NULL);

	if (fl6->mp_hash <= atomic_read(&match->rt6i_nh_upper_bound))
		return match;

	list_for_each_entry_safe(sibling, next_sibling, &match->rt6i_siblings,
				 rt6i_siblings) {
		if (fl6->mp_hash > atomic_read(&sibling->rt6i_nh_upper_bound))
			continue;
		if (rt6_score_route(sibling, oif, strict) < 0)
			break;
		match = sibling;
		break;
	}

	return match;
}
/*
 *	Route lookup. rcu_read_lock() should be held.
 */

static inline struct rt6_info *rt6_device_match(struct net *net,
						struct rt6_info *rt,
						const struct in6_addr *saddr,
						int oif,
						int flags)
{
	struct rt6_info *local = NULL;
	struct rt6_info *sprt;

	if (!oif && ipv6_addr_any(saddr) && !(rt->rt6i_nh_flags & RTNH_F_DEAD))
		return rt;

	for (sprt = rt; sprt; sprt = rcu_dereference(sprt->rt6_next)) {
		struct net_device *dev = sprt->dst.dev;

		if (sprt->rt6i_nh_flags & RTNH_F_DEAD)
			continue;

		if (oif) {
			if (dev->ifindex == oif)
				return sprt;
			if (dev->flags & IFF_LOOPBACK) {
				if (!sprt->rt6i_idev ||
				    sprt->rt6i_idev->dev->ifindex != oif) {
					if (flags & RT6_LOOKUP_F_IFACE)
						continue;
					if (local &&
					    local->rt6i_idev->dev->ifindex == oif)
						continue;
				}
				local = sprt;
			}
		} else {
			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))
				return sprt;
		}
	}

	if (oif) {
		if (local)
			return local;

		if (flags & RT6_LOOKUP_F_IFACE)
			return net->ipv6.ip6_null_entry;
	}

	return rt->rt6i_nh_flags & RTNH_F_DEAD ? net->ipv6.ip6_null_entry : rt;
}
#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
{
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, &work->target, &mcaddr, NULL, 0);
	dev_put(work->dev);
	kfree(work);
}
static void rt6_probe(struct rt6_info *rt)
{
	struct __rt6_probe_work *work;
	struct neighbour *neigh;
	/*
	 * Okay, this does not seem to be appropriate
	 * for now, however, we need to check if it
	 * is really so; aka Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))
		return;
	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		if (neigh->nud_state & NUD_VALID)
			goto out;

		work = NULL;
		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&
		    time_after(jiffies,
			       neigh->updated +
			       rt->rt6i_idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);
			if (work)
				__neigh_set_probe_once(neigh);
		}
		write_unlock(&neigh->lock);
	} else {
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
	}

	if (work) {
		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = rt->rt6i_gateway;
		dev_hold(rt->dst.dev);
		work->dev = rt->dst.dev;
		schedule_work(&work->work);
	}

out:
	rcu_read_unlock_bh();
}
#else
static inline void rt6_probe(struct rt6_info *rt)
{
}
#endif
/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
{
	struct net_device *dev = rt->dst.dev;
	if (!oif || dev->ifindex == oif)
		return 2;
	if ((dev->flags & IFF_LOOPBACK) &&
	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
		return 1;
	return 0;
}
static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
{
	struct neighbour *neigh;
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;

	if (rt->rt6i_flags & RTF_NONEXTHOP ||
	    !(rt->rt6i_flags & RTF_GATEWAY))
		return RT6_NUD_SUCCEED;

	rcu_read_lock_bh();
	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
	if (neigh) {
		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;
		else
			ret = RT6_NUD_FAIL_PROBE;
#endif
		read_unlock(&neigh->lock);
	} else {
		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;
	}
	rcu_read_unlock_bh();

	return ret;
}
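
/* Score a route for default router selection: rt6_check_dev() scores
 * the outgoing interface match, the router-preference bits are folded
 * in above that, and (when RT6_LOOKUP_F_REACHABLE is set) a negative
 * rt6_nud_state from rt6_check_neigh() is propagated to the caller.
 */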
static int rt6_score_route(struct rt6_info *rt, int oif,
			   int strict)
{
	int m;

	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);
		if (n < 0)
			return n;
	}
	return m;
}
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
				   int *mpri, struct rt6_info *match,
				   bool *do_rr)
{
	int m;
	bool match_do_rr = false;
	struct inet6_dev *idev = rt->rt6i_idev;

	if (rt->rt6i_nh_flags & RTNH_F_DEAD)
		goto out;

	if (idev->cnf.ignore_routes_with_linkdown &&
	    rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
	    !(strict & RT6_LOOKUP_F_IGNORE_LINKSTATE))
		goto out;

	if (rt6_check_expired(rt))
		goto out;

	m = rt6_score_route(rt, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {
		match_do_rr = true;
		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {
		goto out;
	}

	if (strict & RT6_LOOKUP_F_REACHABLE)
		rt6_probe(rt);

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */
	if (m > *mpri) {
		*do_rr = match_do_rr;
		*mpri = m;
		match = rt;
	}
out:
	return match;
}
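
/* Scan the routes of one metric starting at rr_head, wrapping around
 * to the head of the leaf list, so that round-robin selection can
 * resume from the last route that was handed out.
 */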
static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
				     struct rt6_info *leaf,
				     struct rt6_info *rr_head,
				     u32 metric, int oif, int strict,
				     bool *do_rr)
{
	struct rt6_info *rt, *match, *cont;
	int mpri = -1;

	match = NULL;
	cont = NULL;
	for (rt = rr_head; rt; rt = rcu_dereference(rt->rt6_next)) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	for (rt = leaf; rt && rt != rr_head;
	     rt = rcu_dereference(rt->rt6_next)) {
		if (rt->rt6i_metric != metric) {
			cont = rt;
			break;
		}

		match = find_match(rt, oif, strict, &mpri, match, do_rr);
	}

	if (match || !cont)
		return match;

	for (rt = cont; rt; rt = rcu_dereference(rt->rt6_next))
		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	return match;
}
static struct rt6_info *rt6_select(struct net *net, struct fib6_node *fn,
				   int oif, int strict)
{
	struct rt6_info *leaf = rcu_dereference(fn->leaf);
	struct rt6_info *match, *rt0;
	bool do_rr = false;
	int key_plen;

	if (!leaf || leaf == net->ipv6.ip6_null_entry)
		return net->ipv6.ip6_null_entry;

	rt0 = rcu_dereference(fn->rr_ptr);
	if (!rt0)
		rt0 = leaf;

	/* Double check to make sure fn is not an intermediate node
	 * and fn->leaf does not points to its child's leaf
	 * (This might happen if all routes under fn are deleted from
	 * the tree and fib6_repair_tree() is called on the node.)
	 */
	key_plen = rt0->rt6i_dst.plen;
#ifdef CONFIG_IPV6_SUBTREES
	if (rt0->rt6i_src.plen)
		key_plen = rt0->rt6i_src.plen;
#endif
	if (fn->fn_bit != key_plen)
		return net->ipv6.ip6_null_entry;

	match = find_rr_leaf(fn, leaf, rt0, rt0->rt6i_metric, oif, strict,
			     &do_rr);

	if (do_rr) {
		struct rt6_info *next = rcu_dereference(rt0->rt6_next);

		/* no entries matched; do round-robin */
		if (!next || next->rt6i_metric != rt0->rt6i_metric)
			next = leaf;

		if (next != rt0) {
			spin_lock_bh(&leaf->rt6i_table->tb6_lock);
			/* make sure next is not being deleted from the tree */
			if (next->rt6i_node)
				rcu_assign_pointer(fn->rr_ptr, next);
			spin_unlock_bh(&leaf->rt6i_table->tb6_lock);
		}
	}

	return match ? match : net->ipv6.ip6_null_entry;
}
static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
{
	return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
}
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
{
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;
	unsigned int pref;
	unsigned long lifetime;
	struct rt6_info *rt;

	if (len < sizeof(struct route_info)) {
		return -EINVAL;
	}

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 128) {
		return -EINVAL;
	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {
			return -EINVAL;
		}
	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {
			return -EINVAL;
		}
	}

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)
		return -EINVAL;

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;
	else {
		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;
	}

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(gwaddr, dev);
	else
		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev);

	if (rt && !lifetime) {
		ip6_del_rt(rt);
		rt = NULL;
	}

	if (!rt && lifetime)
		rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr,
					dev, pref);
	else if (rt)
		rt->rt6i_flags = RTF_ROUTEINFO |
				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

	if (rt) {
		if (!addrconf_finite_timeout(lifetime))
			rt6_clean_expires(rt);
		else
			rt6_set_expires(rt, jiffies + HZ * lifetime);

		ip6_rt_put(rt);
	}
	return 0;
}
#endif
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
{
	struct fib6_node *pn, *sn;
	while (1) {
		if (fn->fn_flags & RTN_TL_ROOT)
			return NULL;
		pn = rcu_dereference(fn->parent);
		sn = FIB6_SUBTREE(pn);
		if (sn && sn != fn)
			fn = fib6_lookup(sn, NULL, saddr);
		else
			fn = pn;
		if (fn->fn_flags & RTN_RTINFO)
			return fn;
	}
}
static bool ip6_hold_safe(struct net *net, struct rt6_info **prt,
			  bool null_fallback)
{
	struct rt6_info *rt = *prt;

	if (dst_hold_safe(&rt->dst))
		return true;
	if (net && null_fallback) {
		rt = net->ipv6.ip6_null_entry;
		dst_hold(&rt->dst);
	} else {
		rt = NULL;
	}
	*prt = rt;
	return false;
}
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct rt6_info *rt, *rt_cache;
	struct fib6_node *fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		flags &= ~RT6_LOOKUP_F_IFACE;

	rcu_read_lock();
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	rt = rcu_dereference(fn->leaf);
	if (!rt) {
		rt = net->ipv6.ip6_null_entry;
	} else {
		rt = rt6_device_match(net, rt, &fl6->saddr,
				      fl6->flowi6_oif, flags);
		if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
			rt = rt6_multipath_select(net, rt, fl6, fl6->flowi6_oif,
						  skb, flags);
	}
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}
	/* Search through exception table */
	rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
	if (rt_cache)
		rt = rt_cache;

	if (ip6_hold_safe(net, &rt, true))
		dst_use_noref(&rt->dst, jiffies);

	rcu_read_unlock();

	trace_fib6_table_lookup(net, rt, table, fl6);

	return rt;
}

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   const struct sk_buff *skb, int flags)
{
	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_lookup);
}
EXPORT_SYMBOL_GPL(ip6_route_lookup);
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif,
			    const struct sk_buff *skb, int strict)
{
	struct flowi6 fl6 = {
		.flowi6_oif = oif,
		.daddr = *daddr,
	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

	if (saddr) {
		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	}

	dst = fib6_rule_lookup(net, &fl6, skb, flags, ip6_pol_route_lookup);
	if (dst->error == 0)
		return (struct rt6_info *) dst;

	dst_release(dst);

	return NULL;
}
EXPORT_SYMBOL(rt6_lookup);
/* ip6_ins_rt is called with FREE table->tb6_lock.
 * It takes a new route entry; if the addition fails for any reason,
 * the route is released.
 * Caller must hold dst before calling it.
 */

static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
			struct mx6_config *mxc,
			struct netlink_ext_ack *extack)
{
	int err;
	struct fib6_table *table;

	table = rt->rt6i_table;
	spin_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, mxc, extack);
	spin_unlock_bh(&table->tb6_lock);

	return err;
}

int ip6_ins_rt(struct rt6_info *rt)
{
	struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
	struct mx6_config mxc = { .mx = NULL, };

	/* Hold dst to account for the reference from the fib6 tree */
	dst_hold(&rt->dst);
	return __ip6_ins_rt(rt, &info, &mxc, NULL);
}
/* called with rcu_lock held */
static struct net_device *ip6_rt_get_dev_rcu(struct rt6_info *rt)
{
	struct net_device *dev = rt->dst.dev;

	if (rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) {
		/* for copies of local routes, dst->dev needs to be the
		 * device if it is a master device, the master device if
		 * device is enslaved, and the loopback as the default
		 */
		if (netif_is_l3_slave(dev) &&
		    !rt6_need_strict(&rt->rt6i_dst.addr))
			dev = l3mdev_master_dev_rcu(dev);
		else if (!netif_is_l3_master(dev))
			dev = dev_net(dev)->loopback_dev;
		/* last case is netif_is_l3_master(dev) is true in which
		 * case we want dev returned to be dev
		 */
	}

	return dev;
}
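
/* Allocate an RTF_CACHE clone of ort with a /128 destination.  Such
 * clones live in the exception table (or on the uncached list) rather
 * than in the fib6 tree itself.
 */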
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)
{
	struct net_device *dev;
	struct rt6_info *rt;

	/*
	 *	Clone the route.
	 */

	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = ort->from;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(ort);
	rt = __ip6_dst_alloc(dev_net(dev), dev, 0);
	rcu_read_unlock();
	if (!rt)
		return NULL;

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;
	rt->rt6i_metric = 0;
	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(ort)) {
		if (ort->rt6i_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
		}
#endif
	}

	return rt;
}
static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
{
	struct net_device *dev;
	struct rt6_info *pcpu_rt;

	rcu_read_lock();
	dev = ip6_rt_get_dev_rcu(rt);
	pcpu_rt = __ip6_dst_alloc(dev_net(dev), dev, rt->dst.flags);
	rcu_read_unlock();
	if (!pcpu_rt)
		return NULL;
	ip6_rt_copy_init(pcpu_rt, rt);
	pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
	pcpu_rt->rt6i_flags |= RTF_PCPU;
	return pcpu_rt;
}

/* It should be called with rcu_read_lock() acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(rt->rt6i_pcpu);
	pcpu_rt = *p;

	if (pcpu_rt && ip6_hold_safe(NULL, &pcpu_rt, false))
		rt6_dst_from_metrics_check(pcpu_rt);

	return pcpu_rt;
}
static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
{
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);
	if (!pcpu_rt) {
		struct net *net = dev_net(rt->dst.dev);

		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;
	}

	dst_hold(&pcpu_rt->dst);
	p = this_cpu_ptr(rt->rt6i_pcpu);
	prev = cmpxchg(p, NULL, pcpu_rt);
	BUG_ON(prev);

	rt6_dst_from_metrics_check(pcpu_rt);
	return pcpu_rt;
}
/* exception hash table implementation
 */
static DEFINE_SPINLOCK(rt6_exception_lock);

/* Remove rt6_ex from hash table and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
				 struct rt6_exception *rt6_ex)
{
	struct net *net;

	if (!bucket || !rt6_ex)
		return;

	net = dev_net(rt6_ex->rt6i->dst.dev);
	rt6_ex->rt6i->rt6i_node = NULL;
	hlist_del_rcu(&rt6_ex->hlist);
	rt6_release(rt6_ex->rt6i);
	kfree_rcu(rt6_ex, rcu);
	WARN_ON_ONCE(!bucket->depth);
	bucket->depth--;
	net->ipv6.rt6_stats->fib_rt_cache--;
}
/* Remove oldest rt6_ex in bucket and free the memory
 * Caller must hold rt6_exception_lock
 */
static void rt6_exception_remove_oldest(struct rt6_exception_bucket *bucket)
{
	struct rt6_exception *rt6_ex, *oldest = NULL;

	if (!bucket)
		return;

	hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
		if (!oldest || time_before(rt6_ex->stamp, oldest->stamp))
			oldest = rt6_ex;
	}
	rt6_remove_exception(bucket, oldest);
}
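
/* Hash daddr (and, with subtrees, saddr) with a once-initialized
 * random seed into one of FIB6_EXCEPTION_BUCKET_SIZE buckets.
 */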
static u32 rt6_exception_hash(const struct in6_addr *dst,
			      const struct in6_addr *src)
{
	static u32 seed __read_mostly;
	u32 val;

	net_get_random_once(&seed, sizeof(seed));
	val = jhash(dst, sizeof(*dst), seed);

#ifdef CONFIG_IPV6_SUBTREES
	if (src)
		val = jhash(src, sizeof(*src), val);
#endif
	return hash_32(val, FIB6_EXCEPTION_BUCKET_SIZE_SHIFT);
}
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rt6_exception_lock
 */
static struct rt6_exception *
__rt6_find_exception_spinlock(struct rt6_exception_bucket **bucket,
			      const struct in6_addr *daddr,
			      const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}
/* Helper function to find the cached rt in the hash table
 * and update bucket pointer to point to the bucket for this
 * (daddr, saddr) pair
 * Caller must hold rcu_read_lock()
 */
static struct rt6_exception *
__rt6_find_exception_rcu(struct rt6_exception_bucket **bucket,
			 const struct in6_addr *daddr,
			 const struct in6_addr *saddr)
{
	struct rt6_exception *rt6_ex;
	u32 hval;

	WARN_ON_ONCE(!rcu_read_lock_held());

	if (!(*bucket) || !daddr)
		return NULL;

	hval = rt6_exception_hash(daddr, saddr);
	*bucket += hval;

	hlist_for_each_entry_rcu(rt6_ex, &(*bucket)->chain, hlist) {
		struct rt6_info *rt6 = rt6_ex->rt6i;
		bool matched = ipv6_addr_equal(daddr, &rt6->rt6i_dst.addr);

#ifdef CONFIG_IPV6_SUBTREES
		if (matched && saddr)
			matched = ipv6_addr_equal(saddr, &rt6->rt6i_src.addr);
#endif
		if (matched)
			return rt6_ex;
	}
	return NULL;
}
static int rt6_insert_exception(struct rt6_info *nrt,
				struct rt6_info *ort)
{
	struct net *net = dev_net(ort->dst.dev);
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	int err = 0;

	/* ort can't be a cache or pcpu route */
	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = ort->from;
	WARN_ON_ONCE(ort->rt6i_flags & (RTF_CACHE | RTF_PCPU));

	spin_lock_bh(&rt6_exception_lock);

	if (ort->exception_bucket_flushed) {
		err = -EINVAL;
		goto out;
	}

	bucket = rcu_dereference_protected(ort->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));
	if (!bucket) {
		bucket = kcalloc(FIB6_EXCEPTION_BUCKET_SIZE, sizeof(*bucket),
				 GFP_ATOMIC);
		if (!bucket) {
			err = -ENOMEM;
			goto out;
		}
		rcu_assign_pointer(ort->rt6i_exception_bucket, bucket);
	}

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates ort is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (ort->rt6i_src.plen)
		src_key = &nrt->rt6i_src.addr;
#endif

	/* Update rt6i_prefsrc as it could be changed
	 * in rt6_remove_prefsrc()
	 */
	nrt->rt6i_prefsrc = ort->rt6i_prefsrc;
	/* rt6_mtu_change() might lower mtu on ort.
	 * Only insert this exception route if its mtu
	 * is less than ort's mtu value.
	 */
	if (nrt->rt6i_pmtu >= dst_mtu(&ort->dst)) {
		err = -EINVAL;
		goto out;
	}

	rt6_ex = __rt6_find_exception_spinlock(&bucket, &nrt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex)
		rt6_remove_exception(bucket, rt6_ex);

	rt6_ex = kzalloc(sizeof(*rt6_ex), GFP_ATOMIC);
	if (!rt6_ex) {
		err = -ENOMEM;
		goto out;
	}
	rt6_ex->rt6i = nrt;
	rt6_ex->stamp = jiffies;
	atomic_inc(&nrt->rt6i_ref);
	nrt->rt6i_node = ort->rt6i_node;
	hlist_add_head_rcu(&rt6_ex->hlist, &bucket->chain);
	bucket->depth++;
	net->ipv6.rt6_stats->fib_rt_cache++;

	if (bucket->depth > FIB6_MAX_DEPTH)
		rt6_exception_remove_oldest(bucket);

out:
	spin_unlock_bh(&rt6_exception_lock);

	/* Update fn->fn_sernum to invalidate all cached dst */
	if (!err) {
		spin_lock_bh(&ort->rt6i_table->tb6_lock);
		fib6_update_sernum(ort);
		spin_unlock_bh(&ort->rt6i_table->tb6_lock);
		fib6_force_start_gc(net);
	}

	return err;
}
void rt6_flush_exceptions(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	spin_lock_bh(&rt6_exception_lock);
	/* Prevent rt6_insert_exception() from recreating the bucket list */
	rt->exception_bucket_flushed = 1;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));
	if (!bucket)
		goto out;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(rt6_ex, tmp, &bucket->chain, hlist)
			rt6_remove_exception(bucket, rt6_ex);
		WARN_ON_ONCE(bucket->depth);
		bucket++;
	}

out:
	spin_unlock_bh(&rt6_exception_lock);
}
/* Find cached rt in the hash table inside passed in rt
 * Caller has to hold rcu_read_lock()
 */
static struct rt6_info *rt6_find_cached_rt(struct rt6_info *rt,
					   struct in6_addr *daddr,
					   struct in6_addr *saddr)
{
	struct rt6_exception_bucket *bucket;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	struct rt6_info *res = NULL;

	bucket = rcu_dereference(rt->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates rt is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (rt->rt6i_src.plen)
		src_key = saddr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket, daddr, src_key);

	if (rt6_ex && !rt6_check_expired(rt6_ex->rt6i))
		res = rt6_ex->rt6i;

	return res;
}
/* Remove the passed in cached rt from the hash table that contains it */
int rt6_remove_exception_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_info *from = rt->from;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;
	int err;

	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return -EINVAL;

	if (!rcu_access_pointer(from->rt6i_exception_bucket))
		return -ENOENT;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(from->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));
#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->rt6i_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_spinlock(&bucket,
					       &rt->rt6i_dst.addr,
					       src_key);
	if (rt6_ex) {
		rt6_remove_exception(bucket, rt6_ex);
		err = 0;
	} else {
		err = -ENOENT;
	}

	spin_unlock_bh(&rt6_exception_lock);
	return err;
}
/* Find rt6_ex which contains the passed in rt cache and
 * refresh its stamp
 */
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_info *from = rt->from;
	struct in6_addr *src_key = NULL;
	struct rt6_exception *rt6_ex;

	if (!from ||
	    !(rt->rt6i_flags & RTF_CACHE))
		return;

	rcu_read_lock();
	bucket = rcu_dereference(from->rt6i_exception_bucket);

#ifdef CONFIG_IPV6_SUBTREES
	/* rt6i_src.plen != 0 indicates 'from' is in subtree
	 * and exception table is indexed by a hash of
	 * both rt6i_dst and rt6i_src.
	 * Otherwise, the exception table is indexed by
	 * a hash of only rt6i_dst.
	 */
	if (from->rt6i_src.plen)
		src_key = &rt->rt6i_src.addr;
#endif
	rt6_ex = __rt6_find_exception_rcu(&bucket,
					  &rt->rt6i_dst.addr,
					  src_key);
	if (rt6_ex)
		rt6_ex->stamp = jiffies;

	rcu_read_unlock();
}
static void rt6_exceptions_remove_prefsrc(struct rt6_info *rt)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
				rt6_ex->rt6i->rt6i_prefsrc.plen = 0;
			}
			bucket++;
		}
	}
}
static bool rt6_mtu_change_route_allowed(struct inet6_dev *idev,
					 struct rt6_info *rt, int mtu)
{
	/* If the new MTU is lower than the route PMTU, this new MTU will be the
	 * lowest MTU in the path: always allow updating the route PMTU to
	 * reflect PMTU decreases.
	 *
	 * If the new MTU is higher, and the route PMTU is equal to the local
	 * MTU, this means the old MTU is the lowest in the path, so allow
	 * updating it: if other nodes now have lower MTUs, PMTU discovery will
	 * handle this.
	 */

	if (dst_mtu(&rt->dst) >= mtu)
		return true;

	if (dst_mtu(&rt->dst) == idev->cnf.mtu6)
		return true;

	return false;
}
static void rt6_exceptions_update_pmtu(struct inet6_dev *idev,
				       struct rt6_info *rt, int mtu)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	int i;

	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
					lockdep_is_held(&rt6_exception_lock));

	if (!bucket)
		return;

	for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
		hlist_for_each_entry(rt6_ex, &bucket->chain, hlist) {
			struct rt6_info *entry = rt6_ex->rt6i;

			/* For RTF_CACHE with rt6i_pmtu == 0 (i.e. a redirected
			 * route), the metrics of its rt->dst.from have already
			 * been updated.
			 */
			if (entry->rt6i_pmtu &&
			    rt6_mtu_change_route_allowed(idev, entry, mtu))
				entry->rt6i_pmtu = mtu;
		}
		bucket++;
	}
}
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

static void rt6_exceptions_clean_tohost(struct rt6_info *rt,
					struct in6_addr *gateway)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

	spin_lock_bh(&rt6_exception_lock);
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				     lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				struct rt6_info *entry = rt6_ex->rt6i;

				if ((entry->rt6i_flags & RTF_CACHE_GATEWAY) ==
				    RTF_CACHE_GATEWAY &&
				    ipv6_addr_equal(gateway,
						    &entry->rt6i_gateway)) {
					rt6_remove_exception(bucket, rt6_ex);
				}
			}
			bucket++;
		}
	}

	spin_unlock_bh(&rt6_exception_lock);
}
static void rt6_age_examine_exception(struct rt6_exception_bucket *bucket,
				      struct rt6_exception *rt6_ex,
				      struct fib6_gc_args *gc_args,
				      unsigned long now)
{
	struct rt6_info *rt = rt6_ex->rt6i;

	/* we are pruning and obsoleting aged-out and non gateway exceptions
	 * even if others have still references to them, so that on next
	 * dst_check() such references can be dropped.
	 * EXPIRES exceptions - e.g. pmtu-generated ones are pruned when
	 * expired, independently from their aging, as per RFC 8201 section 4
	 */
	if (!(rt->rt6i_flags & RTF_EXPIRES)) {
		if (time_after_eq(now, rt->dst.lastuse + gc_args->timeout)) {
			RT6_TRACE("aging clone %p\n", rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	} else if (time_after(jiffies, rt->dst.expires)) {
		RT6_TRACE("purging expired route %p\n", rt);
		rt6_remove_exception(bucket, rt6_ex);
		return;
	}

	if (rt->rt6i_flags & RTF_GATEWAY) {
		struct neighbour *neigh;
		__u8 neigh_flags = 0;

		neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);
		if (neigh)
			neigh_flags = neigh->flags;

		if (!(neigh_flags & NTF_ROUTER)) {
			RT6_TRACE("purging route %p via non-router but gateway\n",
				  rt);
			rt6_remove_exception(bucket, rt6_ex);
			return;
		}
	}

	gc_args->more++;
}
void rt6_age_exceptions(struct rt6_info *rt,
			struct fib6_gc_args *gc_args,
			unsigned long now)
{
	struct rt6_exception_bucket *bucket;
	struct rt6_exception *rt6_ex;
	struct hlist_node *tmp;
	int i;

	if (!rcu_access_pointer(rt->rt6i_exception_bucket))
		return;

	rcu_read_lock_bh();
	spin_lock(&rt6_exception_lock);
	bucket = rcu_dereference_protected(rt->rt6i_exception_bucket,
				    lockdep_is_held(&rt6_exception_lock));

	if (bucket) {
		for (i = 0; i < FIB6_EXCEPTION_BUCKET_SIZE; i++) {
			hlist_for_each_entry_safe(rt6_ex, tmp,
						  &bucket->chain, hlist) {
				rt6_age_examine_exception(bucket, rt6_ex,
							  gc_args, now);
			}
			bucket++;
		}
	}
	spin_unlock(&rt6_exception_lock);
	rcu_read_unlock_bh();
}
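
/* Core policy routing lookup: walk the tree (backtracking on misses),
 * prefer a matching exception-table entry, and hand out either the
 * RTF_CACHE entry itself, an uncached clone (FLOWI_FLAG_KNOWN_NH), or
 * a per-CPU copy of the selected route.
 */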
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
			       int oif, struct flowi6 *fl6,
			       const struct sk_buff *skb, int flags)
{
	struct fib6_node *fn, *saved_fn;
	struct rt6_info *rt, *rt_cache;
	int strict = 0;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	strict |= flags & RT6_LOOKUP_F_IGNORE_LINKSTATE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	rcu_read_lock();

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
	saved_fn = fn;

	if (fl6->flowi6_flags & FLOWI_FLAG_SKIP_NH_OIF)
		oif = 0;

redo_rt6_select:
	rt = rt6_select(net, fn, oif, strict);
	if (rt->rt6i_nsiblings)
		rt = rt6_multipath_select(net, rt, fl6, oif, skb, strict);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;
			fn = saved_fn;
			goto redo_rt6_select;
		}
	}

	/* Search through exception table */
	rt_cache = rt6_find_cached_rt(rt, &fl6->daddr, &fl6->saddr);
	if (rt_cache)
		rt = rt_cache;

	if (rt == net->ipv6.ip6_null_entry) {
		rcu_read_unlock();
		dst_hold(&rt->dst);
		trace_fib6_table_lookup(net, rt, table, fl6);
		return rt;
	} else if (rt->rt6i_flags & RTF_CACHE) {
		if (ip6_hold_safe(net, &rt, true)) {
			dst_use_noref(&rt->dst, jiffies);
			rt6_dst_from_metrics_check(rt);
		}
		rcu_read_unlock();
		trace_fib6_table_lookup(net, rt, table, fl6);
		return rt;
	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(rt->rt6i_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree. It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */

		struct rt6_info *uncached_rt;

		if (ip6_hold_safe(net, &rt, true)) {
			dst_use_noref(&rt->dst, jiffies);
		} else {
			rcu_read_unlock();
			uncached_rt = rt;
			goto uncached_rt_out;
		}
		rcu_read_unlock();

		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
		dst_release(&rt->dst);

		if (uncached_rt) {
			/* Uncached_rt's refcnt is taken during ip6_rt_cache_alloc()
			 * No need for another dst_hold()
			 */
			rt6_uncached_list_add(uncached_rt);
			atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);
		} else {
			uncached_rt = net->ipv6.ip6_null_entry;
			dst_hold(&uncached_rt->dst);
		}

uncached_rt_out:
		trace_fib6_table_lookup(net, uncached_rt, table, fl6);
		return uncached_rt;

	} else {
		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		dst_use_noref(&rt->dst, jiffies);
		local_bh_disable();
		pcpu_rt = rt6_get_pcpu_route(rt);

		if (!pcpu_rt) {
			/* atomic_inc_not_zero() is needed when using rcu */
			if (atomic_inc_not_zero(&rt->rt6i_ref)) {
				/* No dst_hold() on rt is needed because grabbing
				 * rt->rt6i_ref makes sure rt can't be released.
				 */
				pcpu_rt = rt6_make_pcpu_route(rt);
				rt6_release(rt);
			} else {
				/* rt is already removed from tree */
				pcpu_rt = net->ipv6.ip6_null_entry;
				dst_hold(&pcpu_rt->dst);
			}
		}
		local_bh_enable();
		rcu_read_unlock();
		trace_fib6_table_lookup(net, pcpu_rt, table, fl6);
		return pcpu_rt;
	}
}
EXPORT_SYMBOL_GPL(ip6_pol_route);
static struct rt6_info *ip6_pol_route_input(struct net *net,
					    struct fib6_table *table,
					    struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, skb, flags);
}

struct dst_entry *ip6_route_input_lookup(struct net *net,
					 struct net_device *dev,
					 struct flowi6 *fl6,
					 const struct sk_buff *skb,
					 int flags)
{
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, skb, flags, ip6_pol_route_input);
}
EXPORT_SYMBOL_GPL(ip6_route_input_lookup);
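
/* For ICMPv6 errors, hash the addresses of the offending (inner)
 * packet when possible so the error follows the same multipath route
 * as the flow that triggered it.
 */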
static void ip6_multipath_l3_keys(const struct sk_buff *skb,
				  struct flow_keys *keys,
				  struct flow_keys *flkeys)
{
	const struct ipv6hdr *outer_iph = ipv6_hdr(skb);
	const struct ipv6hdr *key_iph = outer_iph;
	struct flow_keys *_flkeys = flkeys;
	const struct ipv6hdr *inner_iph;
	const struct icmp6hdr *icmph;
	struct ipv6hdr _inner_iph;

	if (likely(outer_iph->nexthdr != IPPROTO_ICMPV6))
		goto out;

	icmph = icmp6_hdr(skb);
	if (icmph->icmp6_type != ICMPV6_DEST_UNREACH &&
	    icmph->icmp6_type != ICMPV6_PKT_TOOBIG &&
	    icmph->icmp6_type != ICMPV6_TIME_EXCEED &&
	    icmph->icmp6_type != ICMPV6_PARAMPROB)
		goto out;

	inner_iph = skb_header_pointer(skb,
				       skb_transport_offset(skb) + sizeof(*icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
	_flkeys = NULL;
out:
	if (_flkeys) {
		keys->addrs.v6addrs.src = _flkeys->addrs.v6addrs.src;
		keys->addrs.v6addrs.dst = _flkeys->addrs.v6addrs.dst;
		keys->tags.flow_label = _flkeys->tags.flow_label;
		keys->basic.ip_proto = _flkeys->basic.ip_proto;
	} else {
		keys->addrs.v6addrs.src = key_iph->saddr;
		keys->addrs.v6addrs.dst = key_iph->daddr;
		keys->tags.flow_label = ip6_flowinfo(key_iph);
		keys->basic.ip_proto = key_iph->nexthdr;
	}
}
/* if skb is set it will be used and fl6 can be NULL */
u32 rt6_multipath_hash(const struct net *net, const struct flowi6 *fl6,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	struct flow_keys hash_keys;
	u32 mhash;

	switch (ip6_multipath_hash_policy(net)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (skb) {
			ip6_multipath_l3_keys(skb, &hash_keys, flkeys);
		} else {
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.tags.flow_label = (__force u32)fl6->flowlabel;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	case 1:
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = flkeys->addrs.v6addrs.src;
			hash_keys.addrs.v6addrs.dst = flkeys->addrs.v6addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
			hash_keys.addrs.v6addrs.src = fl6->saddr;
			hash_keys.addrs.v6addrs.dst = fl6->daddr;
			hash_keys.ports.src = fl6->fl6_sport;
			hash_keys.ports.dst = fl6->fl6_dport;
			hash_keys.basic.ip_proto = fl6->flowi6_proto;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	return mhash >> 1;
}
void ip6_route_input(struct sk_buff *skb)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};
	struct flow_keys *flkeys = NULL, _flkeys;

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	if (fib6_rules_early_flow_dissect(net, skb, &fl6, &_flkeys))
		flkeys = &_flkeys;

	if (unlikely(fl6.flowi6_proto == IPPROTO_ICMPV6))
		fl6.mp_hash = rt6_multipath_hash(net, &fl6, skb, flkeys);
	skb_dst_drop(skb);
	skb_dst_set(skb,
		    ip6_route_input_lookup(net, skb->dev, &fl6, skb, flags));
}
static struct rt6_info *ip6_pol_route_output(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, skb, flags);
}

struct dst_entry *ip6_route_output_flags(struct net *net, const struct sock *sk,
					 struct flowi6 *fl6, int flags)
{
	bool any_src;

	if (rt6_need_strict(&fl6->daddr)) {
		struct dst_entry *dst;

		dst = l3mdev_link_scope_lookup(net, fl6);
		if (dst)
			return dst;
	}

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	any_src = ipv6_addr_any(&fl6->saddr);
	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr) ||
	    (fl6->flowi6_oif && any_src))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!any_src)
		flags |= RT6_LOOKUP_F_HAS_SADDR;
	else if (sk)
		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, NULL, flags, ip6_pol_route_output);
}
EXPORT_SYMBOL_GPL(ip6_route_output_flags);
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct net_device *loopback_dev = net->loopback_dev;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, loopback_dev, 1,
		       DST_OBSOLETE_DEAD, 0);
	if (rt) {
		rt6_info_init(rt);
		atomic_inc(&net->ipv6.rt6_stats->fib_rt_alloc);

		new = &rt->dst;
		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		dst_copy_metrics(new, &ort->dst);

		rt->rt6i_idev = in6_dev_get(loopback_dev);
		rt->rt6i_gateway = ort->rt6i_gateway;
		rt->rt6i_flags = ort->rt6i_flags & ~RTF_PCPU;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif
	}

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
}
/*
 *	Destination cache support functions
 */

static void rt6_dst_from_metrics_check(struct rt6_info *rt)
{
	if (rt->from &&
	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(&rt->from->dst))
		dst_init_metrics(&rt->dst, dst_metrics_ptr(&rt->from->dst), true);
}

static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
{
	u32 rt_cookie = 0;

	if (!rt6_get_cookie_safe(rt, &rt_cookie) || rt_cookie != cookie)
		return NULL;

	if (rt6_check_expired(rt))
		return NULL;

	return &rt->dst;
}

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
{
	if (!__rt6_check_expired(rt) &&
	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
	    rt6_check(rt->from, cookie))
		return &rt->dst;
	else
		return NULL;
}
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rt6_info *rt;

	rt = (struct rt6_info *) dst;

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	rt6_dst_from_metrics_check(rt);

	if (rt->rt6i_flags & RTF_PCPU ||
	    (unlikely(!list_empty(&rt->rt6i_uncached)) && rt->from))
		return rt6_dst_from_check(rt, cookie);
	else
		return rt6_check(rt, cookie);
}

static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
{
	struct rt6_info *rt = (struct rt6_info *) dst;

	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (rt6_check_expired(rt)) {
				ip6_del_rt(rt);
				dst = NULL;
			}
		} else {
			dst_release(dst);
			dst = NULL;
		}
	}
	return dst;
}
static void ip6_link_failure(struct sk_buff *skb)
{
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);
	if (rt) {
		if (rt->rt6i_flags & RTF_CACHE) {
			if (dst_hold_safe(&rt->dst))
				ip6_del_rt(rt);
		} else {
			struct fib6_node *fn;

			rcu_read_lock();
			fn = rcu_dereference(rt->rt6i_node);
			if (fn && (rt->rt6i_flags & RTF_DEFAULT))
				fn->fn_sernum = -1;
			rcu_read_unlock();
		}
	}
}
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
{
	struct net *net = dev_net(rt->dst.dev);

	rt->rt6i_flags |= RTF_MODIFIED;
	rt->rt6i_pmtu = mtu;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
}

static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
{
	return !(rt->rt6i_flags & RTF_CACHE) &&
		(rt->rt6i_flags & RTF_PCPU ||
		 rcu_access_pointer(rt->rt6i_node));
}
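
/* A PMTU update is applied in place when the dst is already an
 * RTF_CACHE clone (or an uncached route); for pcpu copies and routes
 * still in the fib6 tree, a new RTF_CACHE clone carrying the reduced
 * MTU is inserted into the exception table instead.
 */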
static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
{
	const struct in6_addr *daddr, *saddr;
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (rt6->rt6i_flags & RTF_LOCAL)
		return;

	if (dst_metric_locked(dst, RTAX_MTU))
		return;

	if (iph) {
		daddr = &iph->daddr;
		saddr = &iph->saddr;
	} else if (sk) {
		daddr = &sk->sk_v6_daddr;
		saddr = &inet6_sk(sk)->saddr;
	} else {
		daddr = NULL;
		saddr = NULL;
	}
	dst_confirm_neigh(dst, daddr);
	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))
		return;

	if (!rt6_cache_allowed_for_pmtu(rt6)) {
		rt6_do_update_pmtu(rt6, mtu);
		/* update rt6_ex->stamp for cache */
		if (rt6->rt6i_flags & RTF_CACHE)
			rt6_update_exception_stamp_rt(rt6);
	} else if (daddr) {
		struct rt6_info *nrt6;

		nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);
		if (nrt6) {
			rt6_do_update_pmtu(nrt6, mtu);
			if (rt6_insert_exception(nrt6, rt6))
				dst_release_immediate(&nrt6->dst);
		}
	}
}

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
{
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
}
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark, kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_output(net, NULL, &fl6);
	if (!dst->error)
		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
{
	struct dst_entry *dst;

	ip6_update_pmtu(skb, sock_net(sk), mtu,
			sk->sk_bound_dev_if, sk->sk_mark, sk->sk_uid);

	dst = __sk_dst_get(sk);
	if (!dst || !dst->obsolete ||
	    dst->ops->check(dst, inet6_sk(sk)->dst_cookie))
		return;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk) && !ipv6_addr_v4mapped(&sk->sk_v6_daddr))
		ip6_datagram_dst_update(sk, false);
	bh_unlock_sock(sk);
}
EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
void ip6_sk_dst_store_flow(struct sock *sk, struct dst_entry *dst,
			   const struct flowi6 *fl6)
{
#ifdef CONFIG_IPV6_SUBTREES
	struct ipv6_pinfo *np = inet6_sk(sk);
#endif

	ip6_dst_store(sk, dst,
		      ipv6_addr_equal(&fl6->daddr, &sk->sk_v6_daddr) ?
		      &sk->sk_v6_daddr : NULL,
#ifdef CONFIG_IPV6_SUBTREES
		      ipv6_addr_equal(&fl6->saddr, &np->saddr) ?
		      &np->saddr :
#endif
		      NULL);
}
/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};

static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     const struct sk_buff *skb,
					     int flags)
{
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt, *rt_cache;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */

	rcu_read_lock();
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);
restart:
	for_each_fib6_node_rt_rcu(fn) {
		if (rt->rt6i_nh_flags & RTNH_F_DEAD)
			continue;
		if (rt6_check_expired(rt))
			continue;
		if (rt->dst.error)
			break;
		if (!(rt->rt6i_flags & RTF_GATEWAY))
			continue;
		if (fl6->flowi6_oif != rt->dst.dev->ifindex)
			continue;
		/* rt_cache's gateway might be different from its 'parent'
		 * in the case of an ip redirect.
		 * So we keep searching in the exception table if the gateway
		 * is different.
		 */
		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway)) {
			rt_cache = rt6_find_cached_rt(rt,
						      &fl6->daddr,
						      &fl6->saddr);
			if (rt_cache &&
			    ipv6_addr_equal(&rdfl->gateway,
					    &rt_cache->rt6i_gateway)) {
				rt = rt_cache;
				break;
			}
			continue;
		}
		break;
	}

	if (!rt)
		rt = net->ipv6.ip6_null_entry;
	else if (rt->dst.error) {
		rt = net->ipv6.ip6_null_entry;
		goto out;
	}

	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);
		if (fn)
			goto restart;
	}

out:
	ip6_hold_safe(net, &rt, true);

	rcu_read_unlock();

	trace_fib6_table_lookup(net, rt, table, fl6);
	return rt;
}
static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct sk_buff *skb,
					    const struct in6_addr *gateway)
{
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.fl6 = *fl6;
	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6, skb,
				flags, __ip6_route_redirect);
}

void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark,
		  kuid_t uid)
{
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);
	fl6.flowi6_uid = uid;

	dst = ip6_route_redirect(net, &fl6, skb, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}
EXPORT_SYMBOL_GPL(ip6_redirect);

void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
{
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;
	struct flowi6 fl6;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = msg->dest;
	fl6.saddr = iph->daddr;
	fl6.flowi6_uid = sock_net_uid(net, NULL);

	dst = ip6_route_redirect(net, &fl6, skb, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);
	dst_release(dst);
}

void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark,
		     sk->sk_uid);
}
EXPORT_SYMBOL_GPL(ip6_sk_redirect);
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
{
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))
		mtu = IPV6_MAXPLEN;
	return mtu;
}
static unsigned int ip6_mtu(const struct dst_entry *dst)
{
	const struct rt6_info *rt = (const struct rt6_info *)dst;
	unsigned int mtu = rt->rt6i_pmtu;
	struct inet6_dev *idev;

	if (mtu)
		goto out;

	mtu = dst_metric_raw(dst, RTAX_MTU);
	if (mtu)
		goto out;

	mtu = IPV6_MIN_MTU;

	rcu_read_lock();
	idev = __in6_dev_get(dst->dev);
	if (idev)
		mtu = idev->cnf.mtu6;
	rcu_read_unlock();

out:
	mtu = min_t(unsigned int, mtu, IP6_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
{
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {
		in6_dev_put(idev);
		dst = ERR_PTR(-ENOMEM);
		goto out;
	}

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	/* Add this dst into uncached_list so that rt6_disable_ip() can
	 * do proper release of the net_device
	 */
	rt6_uncached_list_add(rt);
	atomic_inc(&net->ipv6.rt6_stats->fib_rt_uncache);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);

out:
	return dst;
}
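
/* dst garbage collection: skip the scan entirely if the last run was
 * recent and the entry count is below ip6_rt_max_size; otherwise let
 * fib6_run_gc() age out entries with an adaptively decaying timeout.
 */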
static int ip6_dst_gc(struct dst_ops *ops)
{
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;
	int entries;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)
		goto out;

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout >> 1;
out:
	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire >> rt_elasticity;
	return entries > rt_max_size;
}
2509 static int ip6_convert_metrics(struct mx6_config *mxc,
2510 const struct fib6_config *cfg)
2512 struct net *net = cfg->fc_nlinfo.nl_net;
2513 bool ecn_ca = false;
2521 mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);
2525 nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
2526 int type = nla_type(nla);
2531 if (unlikely(type > RTAX_MAX))
2534 if (type == RTAX_CC_ALGO) {
2535 char tmp[TCP_CA_NAME_MAX];
2537 nla_strlcpy(tmp, nla, sizeof(tmp));
2538 val = tcp_ca_get_key_by_name(net, tmp, &ecn_ca);
2539 if (val == TCP_CA_UNSPEC)
2542 val = nla_get_u32(nla);
2544 if (type == RTAX_HOPLIMIT && val > 255)
2546 if (type == RTAX_FEATURES && (val & ~RTAX_FEATURE_MASK))
2550 __set_bit(type - 1, mxc->mx_valid);
2554 __set_bit(RTAX_FEATURES - 1, mxc->mx_valid);
2555 mp[RTAX_FEATURES - 1] |= DST_FEATURE_ECN_CA;
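/* Resolve a to-be-installed route's gateway in one specific FIB table.
 * Link state is deliberately ignored (RT6_LOOKUP_F_IGNORE_LINKSTATE) so
 * that a nexthop behind a temporarily down link still validates.
 */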
2565 static struct rt6_info *ip6_nh_lookup_table(struct net *net,
2566 struct fib6_config *cfg,
2567 const struct in6_addr *gw_addr,
2568 u32 tbid, int flags)
	struct flowi6 fl6 = {
		.flowi6_oif = cfg->fc_ifindex,
		.daddr = *gw_addr,
		.saddr = cfg->fc_prefsrc,
	};
2575 struct fib6_table *table;
2576 struct rt6_info *rt;
	table = fib6_get_table(net, tbid);
	if (!table)
		return NULL;
2582 if (!ipv6_addr_any(&cfg->fc_prefsrc))
2583 flags |= RT6_LOOKUP_F_HAS_SADDR;
2585 flags |= RT6_LOOKUP_F_IGNORE_LINKSTATE;
2586 rt = ip6_pol_route(net, table, cfg->fc_ifindex, &fl6, NULL, flags);
2588 /* if table lookup failed, fall back to full lookup */
	if (rt == net->ipv6.ip6_null_entry) {
		ip6_rt_put(rt);
		rt = NULL;
	}

	return rt;
}
2597 static int ip6_route_check_nh_onlink(struct net *net,
2598 struct fib6_config *cfg,
2599 const struct net_device *dev,
2600 struct netlink_ext_ack *extack)
2602 u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
2603 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2604 u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
	struct rt6_info *grt;
	int err = 0;

	grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
	if (grt) {
		if (!grt->dst.error &&
		    (grt->rt6i_flags & flags || dev != grt->dst.dev)) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop has invalid gateway or device mismatch");
			err = -EINVAL;
		}

		ip6_rt_put(grt);
	}

	return err;
}
2624 static int ip6_route_check_nh(struct net *net,
2625 struct fib6_config *cfg,
2626 struct net_device **_dev,
2627 struct inet6_dev **idev)
2629 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2630 struct net_device *dev = _dev ? *_dev : NULL;
2631 struct rt6_info *grt = NULL;
2632 int err = -EHOSTUNREACH;
2634 if (cfg->fc_table) {
2635 int flags = RT6_LOOKUP_F_IFACE;
2637 grt = ip6_nh_lookup_table(net, cfg, gw_addr,
2638 cfg->fc_table, flags);
2640 if (grt->rt6i_flags & RTF_GATEWAY ||
2641 (dev && dev != grt->dst.dev)) {
2649 grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, NULL, 1);
2655 if (dev != grt->dst.dev) {
2660 *_dev = dev = grt->dst.dev;
2661 *idev = grt->rt6i_idev;
2663 in6_dev_hold(grt->rt6i_idev);
2666 if (!(grt->rt6i_flags & RTF_GATEWAY))
2675 static int ip6_validate_gw(struct net *net, struct fib6_config *cfg,
2676 struct net_device **_dev, struct inet6_dev **idev,
2677 struct netlink_ext_ack *extack)
2679 const struct in6_addr *gw_addr = &cfg->fc_gateway;
2680 int gwa_type = ipv6_addr_type(gw_addr);
2681 bool skip_dev = gwa_type & IPV6_ADDR_LINKLOCAL ? false : true;
2682 const struct net_device *dev = *_dev;
2683 bool need_addr_check = !dev;
	/* if gw_addr is local we will fail to detect this in case
	 * address is still TENTATIVE (DAD in progress). rt6_lookup()
	 * will return already-added prefix route via interface that
	 * prefix route was assigned to, which might be non-loopback.
	 */
	if (dev &&
	    ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
2693 NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
2697 if (gwa_type != (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_UNICAST)) {
		/* IPv6 strictly prohibits using non-link-local
		 * addresses as a nexthop address.
		 * Otherwise the router will not be able to send redirects.
		 * That is good, but in some (rare!) circumstances
		 * (SIT, PtP, NBMA NOARP links) it is handy to allow
		 * some exceptions. --ANK
		 * We allow IPv4-mapped nexthops to support RFC 4798-type
		 * addressing.
		 */
2707 if (!(gwa_type & (IPV6_ADDR_UNICAST | IPV6_ADDR_MAPPED))) {
2708 NL_SET_ERR_MSG(extack, "Invalid gateway address");
2712 if (cfg->fc_flags & RTNH_F_ONLINK)
2713 err = ip6_route_check_nh_onlink(net, cfg, dev, extack);
	else
		err = ip6_route_check_nh(net, cfg, _dev, idev);

	if (err)
		goto out;
	/* reload in case device was changed */
	dev = *_dev;

	err = -EINVAL;
	if (!dev) {
		NL_SET_ERR_MSG(extack, "Egress device not specified");
		goto out;
2728 } else if (dev->flags & IFF_LOOPBACK) {
2729 NL_SET_ERR_MSG(extack,
2730 "Egress device can not be loopback device for this route");
	/* if we did not check gw_addr above, do so now that the
	 * egress device has been resolved.
	 */
2737 if (need_addr_check &&
2738 ipv6_chk_addr_and_flags(net, gw_addr, dev, skip_dev, 0, 0)) {
		NL_SET_ERR_MSG(extack, "Gateway can not be a local address");
		goto out;
	}

	err = 0;
out:
	return err;
}
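/* Build, but do not insert, a rt6_info from a fib6_config.  All policy
 * checking on the config happens here: prefix lengths, rejection of
 * internal-only flags (RTF_PCPU, RTF_CACHE), device lookup and gateway
 * validation.  The caller owns the returned dst and either inserts it
 * into a fib6 table or releases it.
 */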
2748 static struct rt6_info *ip6_route_info_create(struct fib6_config *cfg,
2749 struct netlink_ext_ack *extack)
2751 struct net *net = cfg->fc_nlinfo.nl_net;
2752 struct rt6_info *rt = NULL;
2753 struct net_device *dev = NULL;
2754 struct inet6_dev *idev = NULL;
2755 struct fib6_table *table;
2759 /* RTF_PCPU is an internal flag; can not be set by userspace */
2760 if (cfg->fc_flags & RTF_PCPU) {
2761 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_PCPU");
2765 /* RTF_CACHE is an internal flag; can not be set by userspace */
2766 if (cfg->fc_flags & RTF_CACHE) {
2767 NL_SET_ERR_MSG(extack, "Userspace can not set RTF_CACHE");
2771 if (cfg->fc_dst_len > 128) {
2772 NL_SET_ERR_MSG(extack, "Invalid prefix length");
2775 if (cfg->fc_src_len > 128) {
2776 NL_SET_ERR_MSG(extack, "Invalid source address length");
2779 #ifndef CONFIG_IPV6_SUBTREES
2780 if (cfg->fc_src_len) {
2781 NL_SET_ERR_MSG(extack,
2782 "Specifying source address requires IPV6_SUBTREES to be enabled");
2786 if (cfg->fc_ifindex) {
2788 dev = dev_get_by_index(net, cfg->fc_ifindex);
2791 idev = in6_dev_get(dev);
2796 if (cfg->fc_metric == 0)
2797 cfg->fc_metric = IP6_RT_PRIO_USER;
	if (cfg->fc_flags & RTNH_F_ONLINK) {
		if (!dev) {
			NL_SET_ERR_MSG(extack,
				       "Nexthop device required for onlink");
			err = -ENODEV;
			goto out;
		}
2807 if (!(dev->flags & IFF_UP)) {
2808 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
2815 if (cfg->fc_nlinfo.nlh &&
2816 !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);
		if (!table) {
			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);
		}
	} else {
		table = fib6_new_table(net, cfg->fc_table);
	}
2829 rt = ip6_dst_alloc(net, NULL,
2830 (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);
2837 if (cfg->fc_flags & RTF_EXPIRES)
2838 rt6_set_expires(rt, jiffies +
2839 clock_t_to_jiffies(cfg->fc_expires));
	else
		rt6_clean_expires(rt);
2843 if (cfg->fc_protocol == RTPROT_UNSPEC)
2844 cfg->fc_protocol = RTPROT_BOOT;
2845 rt->rt6i_protocol = cfg->fc_protocol;
2847 addr_type = ipv6_addr_type(&cfg->fc_dst);
2849 if (addr_type & IPV6_ADDR_MULTICAST)
2850 rt->dst.input = ip6_mc_input;
2851 else if (cfg->fc_flags & RTF_LOCAL)
2852 rt->dst.input = ip6_input;
	else
		rt->dst.input = ip6_forward;
2856 rt->dst.output = ip6_output;
2858 if (cfg->fc_encap) {
2859 struct lwtunnel_state *lwtstate;
2861 err = lwtunnel_build_state(cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate, extack);
		if (err)
			goto out;
2866 rt->dst.lwtstate = lwtstate_get(lwtstate);
2867 lwtunnel_set_redirect(&rt->dst);
2870 ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
2871 rt->rt6i_dst.plen = cfg->fc_dst_len;
2872 if (rt->rt6i_dst.plen == 128)
2873 rt->dst.flags |= DST_HOST;
2875 #ifdef CONFIG_IPV6_SUBTREES
2876 ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif
2880 rt->rt6i_metric = cfg->fc_metric;
2881 rt->rt6i_nh_weight = 1;
	/* We cannot add true routes via loopback here,
	 * they would result in kernel looping; promote them to reject routes
	 */
2886 if ((cfg->fc_flags & RTF_REJECT) ||
2887 (dev && (dev->flags & IFF_LOOPBACK) &&
2888 !(addr_type & IPV6_ADDR_LOOPBACK) &&
2889 !(cfg->fc_flags & RTF_LOCAL))) {
2890 /* hold loopback dev/idev if we haven't done so. */
2891 if (dev != net->loopback_dev) {
2896 dev = net->loopback_dev;
2898 idev = in6_dev_get(dev);
2904 rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		switch (cfg->fc_type) {
		case RTN_BLACKHOLE:
			rt->dst.error = -EINVAL;
			rt->dst.output = dst_discard_out;
			rt->dst.input = dst_discard;
			break;
		case RTN_PROHIBIT:
			rt->dst.error = -EACCES;
			rt->dst.output = ip6_pkt_prohibit_out;
			rt->dst.input = ip6_pkt_prohibit;
			break;
		case RTN_THROW:
		case RTN_UNREACHABLE:
		default:
			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
					: (cfg->fc_type == RTN_UNREACHABLE)
					? -EHOSTUNREACH : -ENETUNREACH;
			rt->dst.output = ip6_pkt_discard_out;
			rt->dst.input = ip6_pkt_discard;
			break;
		}
2929 if (cfg->fc_flags & RTF_GATEWAY) {
2930 err = ip6_validate_gw(net, cfg, &dev, &idev, extack);
2934 rt->rt6i_gateway = cfg->fc_gateway;
2941 if (idev->cnf.disable_ipv6) {
2942 NL_SET_ERR_MSG(extack, "IPv6 is disabled on nexthop device");
2947 if (!(dev->flags & IFF_UP)) {
2948 NL_SET_ERR_MSG(extack, "Nexthop device is not up");
2953 if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
2954 if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {
2955 NL_SET_ERR_MSG(extack, "Invalid source address");
		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
		rt->rt6i_prefsrc.plen = 128;
	} else
		rt->rt6i_prefsrc.plen = 0;
2964 rt->rt6i_flags = cfg->fc_flags;
2967 if (!(rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST)) &&
2968 !netif_carrier_ok(dev))
2969 rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
2970 rt->rt6i_nh_flags |= (cfg->fc_flags & RTNH_F_ONLINK);
2972 rt->rt6i_idev = idev;
2973 rt->rt6i_table = table;
	cfg->fc_nlinfo.nl_net = dev_net(dev);

	return rt;
out:
	if (dev)
		dev_put(dev);
	if (idev)
		in6_dev_put(idev);
	if (rt)
		dst_release_immediate(&rt->dst);

	return ERR_PTR(err);
}
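/* ip6_route_add() glues the pieces together: create the rt6_info,
 * convert any user-supplied metrics, insert into the table, and clean
 * up both on failure.  A minimal in-kernel usage sketch (hypothetical
 * prefix/gateway values, error handling elided), roughly what
 * "2001:db8::/64 via fe80::1 dev <ifindex 2>" would decompose into:
 *
 *	struct fib6_config cfg = {
 *		.fc_table	  = RT6_TABLE_MAIN,
 *		.fc_metric	  = IP6_RT_PRIO_USER,
 *		.fc_ifindex	  = 2,
 *		.fc_dst_len	  = 64,
 *		.fc_flags	  = RTF_UP | RTF_GATEWAY,
 *		.fc_nlinfo.nl_net = &init_net,
 *	};
 *
 *	cfg.fc_dst = prefix;		(in6_addr for 2001:db8::)
 *	cfg.fc_gateway = gw;		(in6_addr for fe80::1)
 *	err = ip6_route_add(&cfg, NULL);
 */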
2989 int ip6_route_add(struct fib6_config *cfg,
2990 struct netlink_ext_ack *extack)
2992 struct mx6_config mxc = { .mx = NULL, };
2993 struct rt6_info *rt;
2996 rt = ip6_route_info_create(cfg, extack);
3003 err = ip6_convert_metrics(&mxc, cfg);
3007 err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc, extack);
3014 dst_release_immediate(&rt->dst);
3019 static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)
3022 struct fib6_table *table;
3023 struct net *net = dev_net(rt->dst.dev);
3025 if (rt == net->ipv6.ip6_null_entry) {
3030 table = rt->rt6i_table;
3031 spin_lock_bh(&table->tb6_lock);
3032 err = fib6_del(rt, info);
3033 spin_unlock_bh(&table->tb6_lock);
3040 int ip6_del_rt(struct rt6_info *rt)
3042 struct nl_info info = {
3043 .nl_net = dev_net(rt->dst.dev),
3045 return __ip6_del_rt(rt, &info);
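/* Delete a multipath route together with all of its sibling nexthops.
 * Like insertion, deletion prefers a single RTM_DELROUTE notification
 * that covers every hop over one message per sibling, so the skb is
 * filled before the siblings are removed and the per-route fib6
 * notifications are suppressed via info->skip_notify.
 */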
3048 static int __ip6_del_rt_siblings(struct rt6_info *rt, struct fib6_config *cfg)
3050 struct nl_info *info = &cfg->fc_nlinfo;
3051 struct net *net = info->nl_net;
3052 struct sk_buff *skb = NULL;
3053 struct fib6_table *table;
3056 if (rt == net->ipv6.ip6_null_entry)
3058 table = rt->rt6i_table;
3059 spin_lock_bh(&table->tb6_lock);
3061 if (rt->rt6i_nsiblings && cfg->fc_delete_all_nh) {
3062 struct rt6_info *sibling, *next_sibling;
3064 /* prefer to send a single notification with all hops */
3065 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
3067 u32 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
3069 if (rt6_fill_node(net, skb, rt,
3070 NULL, NULL, 0, RTM_DELROUTE,
3071 info->portid, seq, 0) < 0) {
3075 info->skip_notify = 1;
3078 list_for_each_entry_safe(sibling, next_sibling,
3081 err = fib6_del(sibling, info);
3087 err = fib6_del(rt, info);
3089 spin_unlock_bh(&table->tb6_lock);
3094 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
3095 info->nlh, gfp_any());
3100 static int ip6_route_del(struct fib6_config *cfg,
3101 struct netlink_ext_ack *extack)
3103 struct rt6_info *rt, *rt_cache;
3104 struct fib6_table *table;
3105 struct fib6_node *fn;
	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);
	if (!table) {
		NL_SET_ERR_MSG(extack, "FIB table does not exist");
		return err;
	}
3116 fn = fib6_locate(&table->tb6_root,
3117 &cfg->fc_dst, cfg->fc_dst_len,
3118 &cfg->fc_src, cfg->fc_src_len,
3119 !(cfg->fc_flags & RTF_CACHE));
3122 for_each_fib6_node_rt_rcu(fn) {
3123 if (cfg->fc_flags & RTF_CACHE) {
3124 rt_cache = rt6_find_cached_rt(rt, &cfg->fc_dst,
3130 if (cfg->fc_ifindex &&
3132 rt->dst.dev->ifindex != cfg->fc_ifindex))
3134 if (cfg->fc_flags & RTF_GATEWAY &&
3135 !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))
3137 if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)
3139 if (cfg->fc_protocol && cfg->fc_protocol != rt->rt6i_protocol)
3141 if (!dst_hold_safe(&rt->dst))
3145 /* if gateway was specified only delete the one hop */
3146 if (cfg->fc_flags & RTF_GATEWAY)
3147 return __ip6_del_rt(rt, &cfg->fc_nlinfo);
3149 return __ip6_del_rt_siblings(rt, cfg);
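/* Handle an incoming ICMPv6 redirect: validate the message and its ND
 * options, confirm the old path, update the neighbour cache for the new
 * first hop, install a RTF_CACHE clone of the route via the exception
 * table, and announce the change through a NETEVENT_REDIRECT
 * notification.
 */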
3157 static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
3159 struct netevent_redirect netevent;
3160 struct rt6_info *rt, *nrt = NULL;
3161 struct ndisc_options ndopts;
3162 struct inet6_dev *in6_dev;
3163 struct neighbour *neigh;
3165 int optlen, on_link;
	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

	if (optlen < 0) {
		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");
		return;
	}
3176 msg = (struct rd_msg *)icmp6_hdr(skb);
3178 if (ipv6_addr_is_multicast(&msg->dest)) {
3179 net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");
	on_link = 0;
	if (ipv6_addr_equal(&msg->dest, &msg->target)) {
		on_link = 1;
	} else if (ipv6_addr_type(&msg->target) !=
3187 (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
3188 net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");
	in6_dev = __in6_dev_get(skb->dev);
	if (!in6_dev)
		return;
	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)
		return;

	/* RFC2461 8.1:
	 *	The IP source address of the Redirect MUST be the same as the current
	 *	first-hop router for the specified ICMP Destination Address.
	 */
3203 if (!ndisc_parse_options(skb->dev, msg->opt, optlen, &ndopts)) {
3204 net_dbg_ratelimited("rt6_redirect: invalid ND options\n");
	lladdr = NULL;
	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);
		if (!lladdr) {
			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");
			return;
		}
	}
3218 rt = (struct rt6_info *) dst;
3219 if (rt->rt6i_flags & RTF_REJECT) {
3220 net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so that this nexthop apparently is reachable. --ANK
	 */
3228 dst_confirm_neigh(&rt->dst, &ipv6_hdr(skb)->saddr);
3230 neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);
	if (!neigh)
		return;

	/*
	 *	We have finally decided to accept it.
	 */
3238 ndisc_update(skb->dev, neigh, lladdr, NUD_STALE,
3239 NEIGH_UPDATE_F_WEAK_OVERRIDE|
3240 NEIGH_UPDATE_F_OVERRIDE|
3241 (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
3242 NEIGH_UPDATE_F_ISROUTER)),
3243 NDISC_REDIRECT, &ndopts);
	nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);
	if (!nrt)
		goto out;

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;
	if (on_link)
		nrt->rt6i_flags &= ~RTF_GATEWAY;
3253 nrt->rt6i_protocol = RTPROT_REDIRECT;
3254 nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;
	/* No need to remove rt from the exception table if rt is
	 * a cached route because rt6_insert_exception() will
	 * take care of it
	 */
3260 if (rt6_insert_exception(nrt, rt)) {
3261 dst_release_immediate(&nrt->dst);
3265 netevent.old = &rt->dst;
3266 netevent.new = &nrt->dst;
3267 netevent.daddr = &msg->dest;
3268 netevent.neigh = neigh;
3269 call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);
out:
	neigh_release(neigh);
}
/*
 *	Misc support functions
 */
3279 static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
3283 rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(&from->dst);
	rt->from = from;
3286 dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);
3289 static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
3291 rt->dst.input = ort->dst.input;
3292 rt->dst.output = ort->dst.output;
3293 rt->rt6i_dst = ort->rt6i_dst;
3294 rt->dst.error = ort->dst.error;
3295 rt->rt6i_idev = ort->rt6i_idev;
	if (rt->rt6i_idev)
		in6_dev_hold(rt->rt6i_idev);
3298 rt->dst.lastuse = jiffies;
3299 rt->rt6i_gateway = ort->rt6i_gateway;
3300 rt->rt6i_flags = ort->rt6i_flags;
3301 rt6_set_from(rt, ort);
3302 rt->rt6i_metric = ort->rt6i_metric;
3303 #ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = ort->rt6i_src;
#endif
3306 rt->rt6i_prefsrc = ort->rt6i_prefsrc;
3307 rt->rt6i_table = ort->rt6i_table;
3308 rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
3311 #ifdef CONFIG_IPV6_ROUTE_INFO
3312 static struct rt6_info *rt6_get_route_info(struct net *net,
3313 const struct in6_addr *prefix, int prefixlen,
3314 const struct in6_addr *gwaddr,
3315 struct net_device *dev)
3317 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO;
3318 int ifindex = dev->ifindex;
3319 struct fib6_node *fn;
3320 struct rt6_info *rt = NULL;
3321 struct fib6_table *table;
3323 table = fib6_get_table(net, tb_id);
3328 fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0, true);
3332 for_each_fib6_node_rt_rcu(fn) {
3333 if (rt->dst.dev->ifindex != ifindex)
3335 if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))
3337 if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))
3339 ip6_hold_safe(NULL, &rt, false);
3347 static struct rt6_info *rt6_add_route_info(struct net *net,
3348 const struct in6_addr *prefix, int prefixlen,
3349 const struct in6_addr *gwaddr,
3350 struct net_device *dev,
3353 struct fib6_config cfg = {
3354 .fc_metric = IP6_RT_PRIO_USER,
3355 .fc_ifindex = dev->ifindex,
3356 .fc_dst_len = prefixlen,
3357 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
3358 RTF_UP | RTF_PREF(pref),
3359 .fc_protocol = RTPROT_RA,
3360 .fc_nlinfo.portid = 0,
3361 .fc_nlinfo.nlh = NULL,
3362 .fc_nlinfo.nl_net = net,
3365 cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO,
3366 cfg.fc_dst = *prefix;
3367 cfg.fc_gateway = *gwaddr;
3369 /* We should treat it as a default route if prefix length is 0. */
3371 cfg.fc_flags |= RTF_DEFAULT;
3373 ip6_route_add(&cfg, NULL);
3375 return rt6_get_route_info(net, prefix, prefixlen, gwaddr, dev);
3379 struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
3381 u32 tb_id = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT;
3382 struct rt6_info *rt;
3383 struct fib6_table *table;
3385 table = fib6_get_table(dev_net(dev), tb_id);
3390 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3391 if (dev == rt->dst.dev &&
3392 ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
3393 ipv6_addr_equal(&rt->rt6i_gateway, addr))
3397 ip6_hold_safe(NULL, &rt, false);
3402 struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
3403 struct net_device *dev,
3406 struct fib6_config cfg = {
3407 .fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_DFLT,
3408 .fc_metric = IP6_RT_PRIO_USER,
3409 .fc_ifindex = dev->ifindex,
3410 .fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
3411 RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
3412 .fc_protocol = RTPROT_RA,
3413 .fc_nlinfo.portid = 0,
3414 .fc_nlinfo.nlh = NULL,
3415 .fc_nlinfo.nl_net = dev_net(dev),
3418 cfg.fc_gateway = *gwaddr;
3420 if (!ip6_route_add(&cfg, NULL)) {
3421 struct fib6_table *table;
3423 table = fib6_get_table(dev_net(dev), cfg.fc_table);
3425 table->flags |= RT6_TABLE_HAS_DFLT_ROUTER;
3428 return rt6_get_dflt_router(gwaddr, dev);
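/* Flush RA-learned default routers from a table, skipping entries whose
 * interface has accept_ra == 2, i.e. is configured to honour router
 * advertisements even while forwarding.
 */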
3431 static void __rt6_purge_dflt_routers(struct fib6_table *table)
3433 struct rt6_info *rt;
3437 for_each_fib6_node_rt_rcu(&table->tb6_root) {
3438 if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
3439 (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {
3440 if (dst_hold_safe(&rt->dst)) {
3451 table->flags &= ~RT6_TABLE_HAS_DFLT_ROUTER;
3454 void rt6_purge_dflt_routers(struct net *net)
3456 struct fib6_table *table;
3457 struct hlist_head *head;
3462 for (h = 0; h < FIB6_TABLE_HASHSZ; h++) {
3463 head = &net->ipv6.fib_table_hash[h];
3464 hlist_for_each_entry_rcu(table, head, tb6_hlist) {
3465 if (table->flags & RT6_TABLE_HAS_DFLT_ROUTER)
3466 __rt6_purge_dflt_routers(table);
3473 static void rtmsg_to_fib6_config(struct net *net,
3474 struct in6_rtmsg *rtmsg,
3475 struct fib6_config *cfg)
3477 memset(cfg, 0, sizeof(*cfg));
3479 cfg->fc_table = l3mdev_fib_table_by_index(net, rtmsg->rtmsg_ifindex) ?
3481 cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
3482 cfg->fc_metric = rtmsg->rtmsg_metric;
3483 cfg->fc_expires = rtmsg->rtmsg_info;
3484 cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
3485 cfg->fc_src_len = rtmsg->rtmsg_src_len;
3486 cfg->fc_flags = rtmsg->rtmsg_flags;
3488 cfg->fc_nlinfo.nl_net = net;
3490 cfg->fc_dst = rtmsg->rtmsg_dst;
3491 cfg->fc_src = rtmsg->rtmsg_src;
3492 cfg->fc_gateway = rtmsg->rtmsg_gateway;
3495 int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
3497 struct fib6_config cfg;
3498 struct in6_rtmsg rtmsg;
3502 case SIOCADDRT: /* Add a route */
3503 case SIOCDELRT: /* Delete a route */
3504 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));
		if (err)
			return -EFAULT;

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

		rtnl_lock();
		switch (cmd) {
		case SIOCADDRT:
			err = ip6_route_add(&cfg, NULL);
			break;
		case SIOCDELRT:
			err = ip6_route_del(&cfg, NULL);
			break;
		}
		rtnl_unlock();
		return err;
/*
 *	Drop the packet on the floor
 */
3536 static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)
3539 struct dst_entry *dst = skb_dst(skb);
3540 switch (ipstats_mib_noroutes) {
3541 case IPSTATS_MIB_INNOROUTES:
3542 type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
3543 if (type == IPV6_ADDR_ANY) {
3544 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3545 IPSTATS_MIB_INADDRERRORS);
3549 case IPSTATS_MIB_OUTNOROUTES:
3550 IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
3551 ipstats_mib_noroutes);
3554 icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);
3559 static int ip6_pkt_discard(struct sk_buff *skb)
3561 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);
3564 static int ip6_pkt_discard_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3566 skb->dev = skb_dst(skb)->dev;
3567 return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);
3570 static int ip6_pkt_prohibit(struct sk_buff *skb)
3572 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);
3575 static int ip6_pkt_prohibit_out(struct net *net, struct sock *sk, struct sk_buff *skb)
3577 skb->dev = skb_dst(skb)->dev;
3578 return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */
3585 struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
				    const struct in6_addr *addr,
				    bool anycast)
{
	u32 tb_id;
3590 struct net *net = dev_net(idev->dev);
3591 struct net_device *dev = idev->dev;
3592 struct rt6_info *rt;
3594 rt = ip6_dst_alloc(net, dev, DST_NOCOUNT);
3596 return ERR_PTR(-ENOMEM);
3600 rt->dst.flags |= DST_HOST;
3601 rt->dst.input = ip6_input;
3602 rt->dst.output = ip6_output;
3603 rt->rt6i_idev = idev;
3605 rt->rt6i_protocol = RTPROT_KERNEL;
3606 rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;
	if (anycast)
		rt->rt6i_flags |= RTF_ANYCAST;
	else
		rt->rt6i_flags |= RTF_LOCAL;
3612 rt->rt6i_gateway = *addr;
3613 rt->rt6i_dst.addr = *addr;
3614 rt->rt6i_dst.plen = 128;
3615 tb_id = l3mdev_fib_table(idev->dev) ? : RT6_TABLE_LOCAL;
3616 rt->rt6i_table = fib6_get_table(net, tb_id);
3621 /* remove deleted ip from prefsrc entries */
3622 struct arg_dev_net_ip {
	struct net_device *dev;
	struct net *net;
	struct in6_addr *addr;
};
3628 static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
3630 struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
3631 struct net *net = ((struct arg_dev_net_ip *)arg)->net;
3632 struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;
3634 if (((void *)rt->dst.dev == dev || !dev) &&
3635 rt != net->ipv6.ip6_null_entry &&
3636 ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
3637 spin_lock_bh(&rt6_exception_lock);
3638 /* remove prefsrc entry */
3639 rt->rt6i_prefsrc.plen = 0;
3640 /* need to update cache as well */
3641 rt6_exceptions_remove_prefsrc(rt);
3642 spin_unlock_bh(&rt6_exception_lock);
3647 void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
3649 struct net *net = dev_net(ifp->idev->dev);
3650 struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,
		.net = net,
		.addr = &ifp->addr,
	};
3655 fib6_clean_all(net, fib6_remove_prefsrc, &adni);
3658 #define RTF_RA_ROUTER (RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
/* Remove routers and update dst entries when a gateway turns into a host. */
3661 static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
3663 struct in6_addr *gateway = (struct in6_addr *)arg;
3665 if (((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) &&
3666 ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {
		/* Further clean up cached routes in exception table.
		 * This is needed because a cached route may have a different
		 * gateway than its 'parent' in the case of an ip redirect.
		 */
3674 rt6_exceptions_clean_tohost(rt, gateway);
3679 void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
3681 fib6_clean_all(net, fib6_clean_tohost, gateway);
3684 struct arg_netdev_event {
3685 const struct net_device *dev;
3687 unsigned int nh_flags;
3688 unsigned long event;
3692 static struct rt6_info *rt6_multipath_first_sibling(const struct rt6_info *rt)
3694 struct rt6_info *iter;
3695 struct fib6_node *fn;
3697 fn = rcu_dereference_protected(rt->rt6i_node,
3698 lockdep_is_held(&rt->rt6i_table->tb6_lock));
3699 iter = rcu_dereference_protected(fn->leaf,
3700 lockdep_is_held(&rt->rt6i_table->tb6_lock));
	while (iter) {
		if (iter->rt6i_metric == rt->rt6i_metric &&
		    rt6_qualify_for_ecmp(iter))
			return iter;
		iter = rcu_dereference_protected(iter->rt6_next,
				lockdep_is_held(&rt->rt6i_table->tb6_lock));
	}

	return NULL;
}
3712 static bool rt6_is_dead(const struct rt6_info *rt)
3714 if (rt->rt6i_nh_flags & RTNH_F_DEAD ||
3715 (rt->rt6i_nh_flags & RTNH_F_LINKDOWN &&
	     rt->rt6i_idev->cnf.ignore_routes_with_linkdown))
		return true;

	return false;
}
3722 static int rt6_multipath_total_weight(const struct rt6_info *rt)
3724 struct rt6_info *iter;
3727 if (!rt6_is_dead(rt))
3728 total += rt->rt6i_nh_weight;
3730 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings) {
3731 if (!rt6_is_dead(iter))
3732 total += iter->rt6i_nh_weight;
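/* Each live nexthop is assigned an upper bound in a 31-bit hash space
 * proportional to its cumulative weight; a flow's hash is then compared
 * against the bounds in order to pick a hop.  A worked example for two
 * siblings with weights 1 and 2 (total = 3):
 *
 *	hop 1 (cumulative weight 1): round(2^31 * 1/3) - 1 =  715827882
 *	hop 2 (cumulative weight 3): round(2^31 * 3/3) - 1 = 2147483647
 *
 * Dead nexthops keep an upper bound of -1 and are never selected.
 */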
3738 static void rt6_upper_bound_set(struct rt6_info *rt, int *weight, int total)
3740 int upper_bound = -1;
3742 if (!rt6_is_dead(rt)) {
3743 *weight += rt->rt6i_nh_weight;
3744 upper_bound = DIV_ROUND_CLOSEST_ULL((u64) (*weight) << 31,
3747 atomic_set(&rt->rt6i_nh_upper_bound, upper_bound);
3750 static void rt6_multipath_upper_bound_set(struct rt6_info *rt, int total)
3752 struct rt6_info *iter;
3755 rt6_upper_bound_set(rt, &weight, total);
3757 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
3758 rt6_upper_bound_set(iter, &weight, total);
3761 void rt6_multipath_rebalance(struct rt6_info *rt)
3763 struct rt6_info *first;
	/* In case the entire multipath route was marked for flushing,
	 * there is no need to rebalance upon the removal of every
	 * sibling route.
	 */
3770 if (!rt->rt6i_nsiblings || rt->should_flush)
	/* During lookup routes are evaluated in order, so we need to
	 * make sure upper bounds are assigned from the first sibling
	 * onwards.
	 */
3777 first = rt6_multipath_first_sibling(rt);
3778 if (WARN_ON_ONCE(!first))
3781 total = rt6_multipath_total_weight(first);
3782 rt6_multipath_upper_bound_set(first, total);
3785 static int fib6_ifup(struct rt6_info *rt, void *p_arg)
3787 const struct arg_netdev_event *arg = p_arg;
3788 const struct net *net = dev_net(arg->dev);
3790 if (rt != net->ipv6.ip6_null_entry && rt->dst.dev == arg->dev) {
3791 rt->rt6i_nh_flags &= ~arg->nh_flags;
3792 fib6_update_sernum_upto_root(dev_net(rt->dst.dev), rt);
3793 rt6_multipath_rebalance(rt);
3799 void rt6_sync_up(struct net_device *dev, unsigned int nh_flags)
3801 struct arg_netdev_event arg = {
3804 .nh_flags = nh_flags,
3808 if (nh_flags & RTNH_F_DEAD && netif_carrier_ok(dev))
3809 arg.nh_flags |= RTNH_F_LINKDOWN;
3811 fib6_clean_all(dev_net(dev), fib6_ifup, &arg);
3814 static bool rt6_multipath_uses_dev(const struct rt6_info *rt,
3815 const struct net_device *dev)
3817 struct rt6_info *iter;
3819 if (rt->dst.dev == dev)
3821 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
3822 if (iter->dst.dev == dev)
3828 static void rt6_multipath_flush(struct rt6_info *rt)
3830 struct rt6_info *iter;
3832 rt->should_flush = 1;
3833 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
3834 iter->should_flush = 1;
3837 static unsigned int rt6_multipath_dead_count(const struct rt6_info *rt,
3838 const struct net_device *down_dev)
3840 struct rt6_info *iter;
3841 unsigned int dead = 0;
3843 if (rt->dst.dev == down_dev || rt->rt6i_nh_flags & RTNH_F_DEAD)
3845 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
3846 if (iter->dst.dev == down_dev ||
3847 iter->rt6i_nh_flags & RTNH_F_DEAD)
3853 static void rt6_multipath_nh_flags_set(struct rt6_info *rt,
3854 const struct net_device *dev,
3855 unsigned int nh_flags)
3857 struct rt6_info *iter;
3859 if (rt->dst.dev == dev)
3860 rt->rt6i_nh_flags |= nh_flags;
3861 list_for_each_entry(iter, &rt->rt6i_siblings, rt6i_siblings)
3862 if (iter->dst.dev == dev)
3863 iter->rt6i_nh_flags |= nh_flags;
3866 /* called with write lock held for table with rt */
3867 static int fib6_ifdown(struct rt6_info *rt, void *p_arg)
3869 const struct arg_netdev_event *arg = p_arg;
3870 const struct net_device *dev = arg->dev;
3871 const struct net *net = dev_net(dev);
3873 if (rt == net->ipv6.ip6_null_entry)
3876 switch (arg->event) {
3877 case NETDEV_UNREGISTER:
3878 return rt->dst.dev == dev ? -1 : 0;
3880 if (rt->should_flush)
3882 if (!rt->rt6i_nsiblings)
3883 return rt->dst.dev == dev ? -1 : 0;
3884 if (rt6_multipath_uses_dev(rt, dev)) {
3887 count = rt6_multipath_dead_count(rt, dev);
3888 if (rt->rt6i_nsiblings + 1 == count) {
3889 rt6_multipath_flush(rt);
3892 rt6_multipath_nh_flags_set(rt, dev, RTNH_F_DEAD |
3894 fib6_update_sernum(rt);
3895 rt6_multipath_rebalance(rt);
3899 if (rt->dst.dev != dev ||
3900 rt->rt6i_flags & (RTF_LOCAL | RTF_ANYCAST))
3902 rt->rt6i_nh_flags |= RTNH_F_LINKDOWN;
3903 rt6_multipath_rebalance(rt);
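/* Walk all routes through @dev according to @event: an unregistered
 * device takes its routes with it, while down events mark nexthops
 * RTNH_F_DEAD and/or RTNH_F_LINKDOWN, flushing a multipath route only
 * once every one of its siblings has become unusable (see fib6_ifdown()
 * above).
 */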
3910 void rt6_sync_down_dev(struct net_device *dev, unsigned long event)
3912 struct arg_netdev_event arg = {
3919 fib6_clean_all(dev_net(dev), fib6_ifdown, &arg);
3922 void rt6_disable_ip(struct net_device *dev, unsigned long event)
3924 rt6_sync_down_dev(dev, event);
3925 rt6_uncached_list_flush_dev(dev_net(dev), dev);
3926 neigh_ifdown(&nd_tbl, dev);
3929 struct rt6_mtu_change_arg {
3930 struct net_device *dev;
3934 static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
3936 struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
3937 struct inet6_dev *idev;
	/* In IPv6, pmtu discovery is not optional,
	 * so the RTAX_MTU lock cannot disable it.
	 * We still use this lock to block changes
	 * caused by addrconf/ndisc.
	 */
3945 idev = __in6_dev_get(arg->dev);
	/* For an administrative MTU increase, there is no way to discover
	 * an IPv6 PMTU increase, so the PMTU should be updated here.
	 * Since RFC 1981 doesn't include administrative MTU increase,
	 * updating the PMTU on increase is a MUST. (i.e. jumbo frame)
	 */
3954 if (rt->dst.dev == arg->dev &&
3955 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
3956 spin_lock_bh(&rt6_exception_lock);
3957 if (dst_metric_raw(&rt->dst, RTAX_MTU) &&
3958 rt6_mtu_change_route_allowed(idev, rt, arg->mtu))
3959 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
3960 rt6_exceptions_update_pmtu(idev, rt, arg->mtu);
3961 spin_unlock_bh(&rt6_exception_lock);
3966 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
3968 struct rt6_mtu_change_arg arg = {
3973 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
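/* Netlink attribute policy for the RTM_NEWROUTE/RTM_DELROUTE/
 * RTM_GETROUTE handlers below; nlmsg_parse() validates incoming
 * attributes against these types and lengths before
 * rtm_to_fib6_config() consumes them.
 */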
3976 static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
3977 [RTA_GATEWAY] = { .len = sizeof(struct in6_addr) },
3978 [RTA_OIF] = { .type = NLA_U32 },
3979 [RTA_IIF] = { .type = NLA_U32 },
3980 [RTA_PRIORITY] = { .type = NLA_U32 },
3981 [RTA_METRICS] = { .type = NLA_NESTED },
3982 [RTA_MULTIPATH] = { .len = sizeof(struct rtnexthop) },
3983 [RTA_PREF] = { .type = NLA_U8 },
3984 [RTA_ENCAP_TYPE] = { .type = NLA_U16 },
3985 [RTA_ENCAP] = { .type = NLA_NESTED },
3986 [RTA_EXPIRES] = { .type = NLA_U32 },
3987 [RTA_UID] = { .type = NLA_U32 },
3988 [RTA_MARK] = { .type = NLA_U32 },
3991 static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
3992 struct fib6_config *cfg,
3993 struct netlink_ext_ack *extack)
3996 struct nlattr *tb[RTA_MAX+1];
4000 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
4006 rtm = nlmsg_data(nlh);
4007 memset(cfg, 0, sizeof(*cfg));
4009 cfg->fc_table = rtm->rtm_table;
4010 cfg->fc_dst_len = rtm->rtm_dst_len;
4011 cfg->fc_src_len = rtm->rtm_src_len;
4012 cfg->fc_flags = RTF_UP;
4013 cfg->fc_protocol = rtm->rtm_protocol;
4014 cfg->fc_type = rtm->rtm_type;
4016 if (rtm->rtm_type == RTN_UNREACHABLE ||
4017 rtm->rtm_type == RTN_BLACKHOLE ||
4018 rtm->rtm_type == RTN_PROHIBIT ||
4019 rtm->rtm_type == RTN_THROW)
4020 cfg->fc_flags |= RTF_REJECT;
4022 if (rtm->rtm_type == RTN_LOCAL)
4023 cfg->fc_flags |= RTF_LOCAL;
4025 if (rtm->rtm_flags & RTM_F_CLONED)
4026 cfg->fc_flags |= RTF_CACHE;
4028 cfg->fc_flags |= (rtm->rtm_flags & RTNH_F_ONLINK);
4030 cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
4031 cfg->fc_nlinfo.nlh = nlh;
4032 cfg->fc_nlinfo.nl_net = sock_net(skb->sk);
4034 if (tb[RTA_GATEWAY]) {
4035 cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
4036 cfg->fc_flags |= RTF_GATEWAY;
	if (tb[RTA_DST]) {
		int plen = (rtm->rtm_dst_len + 7) >> 3;
4042 if (nla_len(tb[RTA_DST]) < plen)
4045 nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;
4051 if (nla_len(tb[RTA_SRC]) < plen)
4054 nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
4057 if (tb[RTA_PREFSRC])
4058 cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);
	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);
4063 if (tb[RTA_PRIORITY])
4064 cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);
4066 if (tb[RTA_METRICS]) {
4067 cfg->fc_mx = nla_data(tb[RTA_METRICS]);
4068 cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);
4074 if (tb[RTA_MULTIPATH]) {
4075 cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
4076 cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
4078 err = lwtunnel_valid_encap_type_attr(cfg->fc_mp,
4079 cfg->fc_mp_len, extack);
	if (tb[RTA_PREF]) {
		pref = nla_get_u8(tb[RTA_PREF]);
4086 if (pref != ICMPV6_ROUTER_PREF_LOW &&
4087 pref != ICMPV6_ROUTER_PREF_HIGH)
4088 pref = ICMPV6_ROUTER_PREF_MEDIUM;
4089 cfg->fc_flags |= RTF_PREF(pref);
	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];
4095 if (tb[RTA_ENCAP_TYPE]) {
4096 cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);
4098 err = lwtunnel_valid_encap_type(cfg->fc_encap_type, extack);
4103 if (tb[RTA_EXPIRES]) {
4104 unsigned long timeout = addrconf_timeout_fixup(nla_get_u32(tb[RTA_EXPIRES]), HZ);
4106 if (addrconf_finite_timeout(timeout)) {
4107 cfg->fc_expires = jiffies_to_clock_t(timeout * HZ);
4108 cfg->fc_flags |= RTF_EXPIRES;
struct rt6_nh {
	struct rt6_info *rt6_info;
	struct fib6_config r_cfg;
	struct mx6_config mxc;
	struct list_head next;
};
4124 static void ip6_print_replace_route_err(struct list_head *rt6_nh_list)
4128 list_for_each_entry(nh, rt6_nh_list, next) {
4129 pr_warn("IPV6: multipath route replace failed (check consistency of installed routes): %pI6c nexthop %pI6c ifi %d\n",
4130 &nh->r_cfg.fc_dst, &nh->r_cfg.fc_gateway,
4131 nh->r_cfg.fc_ifindex);
4135 static int ip6_route_info_append(struct list_head *rt6_nh_list,
4136 struct rt6_info *rt, struct fib6_config *r_cfg)
4141 list_for_each_entry(nh, rt6_nh_list, next) {
4142 /* check if rt6_info already exists */
4143 if (rt6_duplicate_nexthop(nh->rt6_info, rt))
4147 nh = kzalloc(sizeof(*nh), GFP_KERNEL);
4151 err = ip6_convert_metrics(&nh->mxc, r_cfg);
4156 memcpy(&nh->r_cfg, r_cfg, sizeof(*r_cfg));
4157 list_add_tail(&nh->next, rt6_nh_list);
4162 static void ip6_route_mpath_notify(struct rt6_info *rt,
4163 struct rt6_info *rt_last,
4164 struct nl_info *info,
	/* if this is an APPEND route, then rt points to the first route
	 * inserted and rt_last points to the last route inserted. Userspace
	 * wants a consistent dump of the route which starts at the first
	 * nexthop. Since sibling routes are always added at the end of
	 * the list, find the first sibling of the last route appended.
	 */
4173 if ((nlflags & NLM_F_APPEND) && rt_last && rt_last->rt6i_nsiblings) {
4174 rt = list_first_entry(&rt_last->rt6i_siblings,
4180 inet6_rt_notify(RTM_NEWROUTE, rt, info, nlflags);
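/* Install one route with several nexthops from a single RTM_NEWROUTE
 * message.  Every RTA_MULTIPATH entry becomes its own rt6_info, queued
 * on a local list and only then inserted, so that a failure can unwind
 * whatever was already added.  An illustrative iproute2 command that
 * ends up here (hypothetical addresses):
 *
 *	ip -6 route add 2001:db8::/64 \
 *		nexthop via fe80::1 dev eth0 \
 *		nexthop via fe80::2 dev eth1
 */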
4183 static int ip6_route_multipath_add(struct fib6_config *cfg,
4184 struct netlink_ext_ack *extack)
4186 struct rt6_info *rt_notif = NULL, *rt_last = NULL;
4187 struct nl_info *info = &cfg->fc_nlinfo;
4188 struct fib6_config r_cfg;
4189 struct rtnexthop *rtnh;
4190 struct rt6_info *rt;
4191 struct rt6_nh *err_nh;
4192 struct rt6_nh *nh, *nh_safe;
4198 int replace = (cfg->fc_nlinfo.nlh &&
4199 (cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_REPLACE));
4200 LIST_HEAD(rt6_nh_list);
4202 nlflags = replace ? NLM_F_REPLACE : NLM_F_CREATE;
4203 if (info->nlh && info->nlh->nlmsg_flags & NLM_F_APPEND)
4204 nlflags |= NLM_F_APPEND;
4206 remaining = cfg->fc_mp_len;
4207 rtnh = (struct rtnexthop *)cfg->fc_mp;
	/* Parse a Multipath Entry and build a list (rt6_nh_list) of
	 * rt6_info structs per nexthop
	 */
4212 while (rtnh_ok(rtnh, remaining)) {
4213 memcpy(&r_cfg, cfg, sizeof(*cfg));
4214 if (rtnh->rtnh_ifindex)
4215 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4217 attrlen = rtnh_attrlen(rtnh);
4219 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4221 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4223 r_cfg.fc_gateway = nla_get_in6_addr(nla);
4224 r_cfg.fc_flags |= RTF_GATEWAY;
4226 r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
4227 nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
4229 r_cfg.fc_encap_type = nla_get_u16(nla);
4232 r_cfg.fc_flags |= (rtnh->rtnh_flags & RTNH_F_ONLINK);
4233 rt = ip6_route_info_create(&r_cfg, extack);
4240 rt->rt6i_nh_weight = rtnh->rtnh_hops + 1;
4242 err = ip6_route_info_append(&rt6_nh_list, rt, &r_cfg);
4244 dst_release_immediate(&rt->dst);
4248 rtnh = rtnh_next(rtnh, &remaining);
	/* for add and replace, send one notification with all nexthops.
	 * Skip the notification in fib6_add_rt2node and send one with
	 * the full route when done
	 */
4255 info->skip_notify = 1;
4258 list_for_each_entry(nh, &rt6_nh_list, next) {
4259 rt_last = nh->rt6_info;
4260 err = __ip6_ins_rt(nh->rt6_info, info, &nh->mxc, extack);
4261 /* save reference to first route for notification */
4262 if (!rt_notif && !err)
4263 rt_notif = nh->rt6_info;
		/* nh->rt6_info is used or freed at this point, reset to NULL */
4266 nh->rt6_info = NULL;
4269 ip6_print_replace_route_err(&rt6_nh_list);
	/* Because each route is added like a single route we remove
	 * these flags after the first nexthop: if there is a collision,
	 * we have already failed to add the first nexthop:
	 * fib6_add_rt2node() has rejected it; when replacing, old
	 * nexthops have been replaced by the first new one, the rest
	 * should be added to it.
	 */
	cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
					     NLM_F_REPLACE);
4286 /* success ... tell user about new route */
4287 ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
	/* send notification for routes that were added so that
	 * the delete notifications sent by ip6_route_del are
	 * coherent
	 */
	if (rt_notif)
		ip6_route_mpath_notify(rt_notif, rt_last, info, nlflags);
4298 /* Delete routes that were already added */
4299 list_for_each_entry(nh, &rt6_nh_list, next) {
4302 ip6_route_del(&nh->r_cfg, extack);
4306 list_for_each_entry_safe(nh, nh_safe, &rt6_nh_list, next) {
4308 dst_release_immediate(&nh->rt6_info->dst);
4310 list_del(&nh->next);
4317 static int ip6_route_multipath_del(struct fib6_config *cfg,
4318 struct netlink_ext_ack *extack)
4320 struct fib6_config r_cfg;
4321 struct rtnexthop *rtnh;
4324 int err = 1, last_err = 0;
4326 remaining = cfg->fc_mp_len;
4327 rtnh = (struct rtnexthop *)cfg->fc_mp;
4329 /* Parse a Multipath Entry */
4330 while (rtnh_ok(rtnh, remaining)) {
4331 memcpy(&r_cfg, cfg, sizeof(*cfg));
4332 if (rtnh->rtnh_ifindex)
4333 r_cfg.fc_ifindex = rtnh->rtnh_ifindex;
4335 attrlen = rtnh_attrlen(rtnh);
4337 struct nlattr *nla, *attrs = rtnh_attrs(rtnh);
4339 nla = nla_find(attrs, attrlen, RTA_GATEWAY);
4341 nla_memcpy(&r_cfg.fc_gateway, nla, 16);
4342 r_cfg.fc_flags |= RTF_GATEWAY;
4345 err = ip6_route_del(&r_cfg, extack);
4349 rtnh = rtnh_next(rtnh, &remaining);
4355 static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4356 struct netlink_ext_ack *extack)
4358 struct fib6_config cfg;
	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath_del(&cfg, extack);

	cfg.fc_delete_all_nh = 1;
	return ip6_route_del(&cfg, extack);
}
4373 static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh,
4374 struct netlink_ext_ack *extack)
4376 struct fib6_config cfg;
	err = rtm_to_fib6_config(skb, nlh, &cfg, extack);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath_add(&cfg, extack);

	return ip6_route_add(&cfg, extack);
}
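/* Upper bound on the notification size for a route, including one
 * rtnexthop block per sibling for multipath routes.  This must remain
 * an upper bound on what rt6_fill_node() emits: inet6_rt_notify()
 * treats -EMSGSIZE from filling as a bug in this estimate.
 */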
4389 static size_t rt6_nlmsg_size(struct rt6_info *rt)
4391 int nexthop_len = 0;
4393 if (rt->rt6i_nsiblings) {
4394 nexthop_len = nla_total_size(0) /* RTA_MULTIPATH */
4395 + NLA_ALIGN(sizeof(struct rtnexthop))
4396 + nla_total_size(16) /* RTA_GATEWAY */
4397 + lwtunnel_get_encap_size(rt->dst.lwtstate);
4399 nexthop_len *= rt->rt6i_nsiblings;
4402 return NLMSG_ALIGN(sizeof(struct rtmsg))
4403 + nla_total_size(16) /* RTA_SRC */
4404 + nla_total_size(16) /* RTA_DST */
4405 + nla_total_size(16) /* RTA_GATEWAY */
4406 + nla_total_size(16) /* RTA_PREFSRC */
4407 + nla_total_size(4) /* RTA_TABLE */
4408 + nla_total_size(4) /* RTA_IIF */
4409 + nla_total_size(4) /* RTA_OIF */
4410 + nla_total_size(4) /* RTA_PRIORITY */
4411 + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
4412 + nla_total_size(sizeof(struct rta_cacheinfo))
4413 + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
4414 + nla_total_size(1) /* RTA_PREF */
	       + lwtunnel_get_encap_size(rt->dst.lwtstate)
	       + nexthop_len;
}
4419 static int rt6_nexthop_info(struct sk_buff *skb, struct rt6_info *rt,
4420 unsigned int *flags, bool skip_oif)
4422 if (rt->rt6i_nh_flags & RTNH_F_DEAD)
4423 *flags |= RTNH_F_DEAD;
4425 if (rt->rt6i_nh_flags & RTNH_F_LINKDOWN) {
4426 *flags |= RTNH_F_LINKDOWN;
4427 if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
4428 *flags |= RTNH_F_DEAD;
4431 if (rt->rt6i_flags & RTF_GATEWAY) {
4432 if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
4433 goto nla_put_failure;
4436 *flags |= (rt->rt6i_nh_flags & RTNH_F_ONLINK);
4437 if (rt->rt6i_nh_flags & RTNH_F_OFFLOAD)
4438 *flags |= RTNH_F_OFFLOAD;
4440 /* not needed for multipath encoding b/c it has a rtnexthop struct */
4441 if (!skip_oif && rt->dst.dev &&
4442 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
4443 goto nla_put_failure;
4445 if (rt->dst.lwtstate &&
4446 lwtunnel_fill_encap(skb, rt->dst.lwtstate) < 0)
		goto nla_put_failure;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
4455 /* add multipath next hop */
4456 static int rt6_add_nexthop(struct sk_buff *skb, struct rt6_info *rt)
4458 struct rtnexthop *rtnh;
4459 unsigned int flags = 0;
4461 rtnh = nla_reserve_nohdr(skb, sizeof(*rtnh));
4463 goto nla_put_failure;
4465 rtnh->rtnh_hops = rt->rt6i_nh_weight - 1;
4466 rtnh->rtnh_ifindex = rt->dst.dev ? rt->dst.dev->ifindex : 0;
4468 if (rt6_nexthop_info(skb, rt, &flags, true) < 0)
4469 goto nla_put_failure;
4471 rtnh->rtnh_flags = flags;
4473 /* length of rtnetlink header + attributes */
	rtnh->rtnh_len = nlmsg_get_pos(skb) - (void *)rtnh;

	return 0;

nla_put_failure:
	return -EMSGSIZE;
}
4482 static int rt6_fill_node(struct net *net,
4483 struct sk_buff *skb, struct rt6_info *rt,
4484 struct in6_addr *dst, struct in6_addr *src,
4485 int iif, int type, u32 portid, u32 seq,
4488 u32 metrics[RTAX_MAX];
4490 struct nlmsghdr *nlh;
4494 nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
4498 rtm = nlmsg_data(nlh);
4499 rtm->rtm_family = AF_INET6;
4500 rtm->rtm_dst_len = rt->rt6i_dst.plen;
4501 rtm->rtm_src_len = rt->rt6i_src.plen;
4504 table = rt->rt6i_table->tb6_id;
4506 table = RT6_TABLE_UNSPEC;
4507 rtm->rtm_table = table;
4508 if (nla_put_u32(skb, RTA_TABLE, table))
4509 goto nla_put_failure;
4510 if (rt->rt6i_flags & RTF_REJECT) {
4511 switch (rt->dst.error) {
4513 rtm->rtm_type = RTN_BLACKHOLE;
4516 rtm->rtm_type = RTN_PROHIBIT;
4519 rtm->rtm_type = RTN_THROW;
4522 rtm->rtm_type = RTN_UNREACHABLE;
4526 else if (rt->rt6i_flags & RTF_LOCAL)
4527 rtm->rtm_type = RTN_LOCAL;
4528 else if (rt->rt6i_flags & RTF_ANYCAST)
4529 rtm->rtm_type = RTN_ANYCAST;
4530 else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
4531 rtm->rtm_type = RTN_LOCAL;
4533 rtm->rtm_type = RTN_UNICAST;
4535 rtm->rtm_scope = RT_SCOPE_UNIVERSE;
4536 rtm->rtm_protocol = rt->rt6i_protocol;
4538 if (rt->rt6i_flags & RTF_CACHE)
4539 rtm->rtm_flags |= RTM_F_CLONED;
	if (dst) {
		if (nla_put_in6_addr(skb, RTA_DST, dst))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
			int err = ip6mr_get_route(net, skb, rtm, portid);

			if (err == 0)
				return 0;
			if (err < 0)
				goto nla_put_failure;
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dst) {
		struct in6_addr saddr_buf;

		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}
4577 if (rt->rt6i_prefsrc.plen) {
4578 struct in6_addr saddr_buf;
4579 saddr_buf = rt->rt6i_prefsrc.addr;
4580 if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
4581 goto nla_put_failure;
4584 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
4586 metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
4587 if (rtnetlink_put_metrics(skb, metrics) < 0)
4588 goto nla_put_failure;
4590 if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
4591 goto nla_put_failure;
	/* For multipath routes, walk the siblings list and add
	 * each as a nexthop within RTA_MULTIPATH.
	 */
4596 if (rt->rt6i_nsiblings) {
4597 struct rt6_info *sibling, *next_sibling;
4600 mp = nla_nest_start(skb, RTA_MULTIPATH);
4602 goto nla_put_failure;
4604 if (rt6_add_nexthop(skb, rt) < 0)
4605 goto nla_put_failure;
4607 list_for_each_entry_safe(sibling, next_sibling,
4608 &rt->rt6i_siblings, rt6i_siblings) {
4609 if (rt6_add_nexthop(skb, sibling) < 0)
4610 goto nla_put_failure;
4613 nla_nest_end(skb, mp);
4615 if (rt6_nexthop_info(skb, rt, &rtm->rtm_flags, false) < 0)
4616 goto nla_put_failure;
4619 expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;
4621 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
4622 goto nla_put_failure;
4624 if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
4625 goto nla_put_failure;
	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
4636 int rt6_dump_route(struct rt6_info *rt, void *p_arg)
4638 struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
4639 struct net *net = arg->net;
4641 if (rt == net->ipv6.ip6_null_entry)
4644 if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
4645 struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
4647 /* user wants prefix routes only */
4648 if (rtm->rtm_flags & RTM_F_PREFIX &&
4649 !(rt->rt6i_flags & RTF_PREFIX_RT)) {
4650 /* success since this is not a prefix route */
4655 return rt6_fill_node(net,
4656 arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
		     NLM_F_MULTI);
}
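/* RTM_GETROUTE handler: build a flow from the request attributes, do an
 * input-side lookup when RTA_IIF is given or an output lookup otherwise,
 * and answer with a single RTM_NEWROUTE message.  When RTM_F_FIB_MATCH
 * is set, the matched FIB entry is returned instead of the resolved
 * (possibly cloned) dst.
 */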
4661 static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
4662 struct netlink_ext_ack *extack)
4664 struct net *net = sock_net(in_skb->sk);
4665 struct nlattr *tb[RTA_MAX+1];
4666 int err, iif = 0, oif = 0;
4667 struct dst_entry *dst;
4668 struct rt6_info *rt;
4669 struct sk_buff *skb;
4674 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy,
4680 memset(&fl6, 0, sizeof(fl6));
4681 rtm = nlmsg_data(nlh);
4682 fl6.flowlabel = ip6_make_flowinfo(rtm->rtm_tos, 0);
4683 fibmatch = !!(rtm->rtm_flags & RTM_F_FIB_MATCH);
4686 if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
4689 fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
4693 if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
4696 fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);

	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (tb[RTA_UID])
		fl6.flowi6_uid = make_kuid(current_user_ns(),
					   nla_get_u32(tb[RTA_UID]));
	else
		fl6.flowi6_uid = iif ? INVALID_UID : current_uid();
4715 struct net_device *dev;
4720 dev = dev_get_by_index_rcu(net, iif);
4727 fl6.flowi6_iif = iif;
4729 if (!ipv6_addr_any(&fl6.saddr))
4730 flags |= RT6_LOOKUP_F_HAS_SADDR;
4732 dst = ip6_route_input_lookup(net, dev, &fl6, NULL, flags);
4736 fl6.flowi6_oif = oif;
4738 dst = ip6_route_output(net, NULL, &fl6);
4742 rt = container_of(dst, struct rt6_info, dst);
4743 if (rt->dst.error) {
4744 err = rt->dst.error;
4749 if (rt == net->ipv6.ip6_null_entry) {
4750 err = rt->dst.error;
4755 if (fibmatch && rt->from) {
4756 struct rt6_info *ort = rt->from;
4758 dst_hold(&ort->dst);
4763 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
4770 skb_dst_set(skb, &rt->dst);
4772 err = rt6_fill_node(net, skb, rt, NULL, NULL, iif,
4773 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4776 err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
4777 RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
4784 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
4789 void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info,
4790 unsigned int nlm_flags)
4792 struct sk_buff *skb;
4793 struct net *net = info->nl_net;
4798 seq = info->nlh ? info->nlh->nlmsg_seq : 0;
4800 skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
4804 err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
4805 event, info->portid, seq, nlm_flags);
4807 /* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
4808 WARN_ON(err == -EMSGSIZE);
4812 rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
4813 info->nlh, gfp_any());
4817 rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
4820 static int ip6_route_dev_notify(struct notifier_block *this,
4821 unsigned long event, void *ptr)
4823 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
4824 struct net *net = dev_net(dev);
4826 if (!(dev->flags & IFF_LOOPBACK))
4829 if (event == NETDEV_REGISTER) {
4830 net->ipv6.ip6_null_entry->dst.dev = dev;
4831 net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
4832 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4833 net->ipv6.ip6_prohibit_entry->dst.dev = dev;
4834 net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
4835 net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
4836 net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
4838 } else if (event == NETDEV_UNREGISTER &&
4839 dev->reg_state != NETREG_UNREGISTERED) {
		/* NETDEV_UNREGISTER could be fired multiple times by
		 * netdev_wait_allrefs(). Make sure we only call this once.
		 */
4843 in6_dev_put_clear(&net->ipv6.ip6_null_entry->rt6i_idev);
4844 #ifdef CONFIG_IPV6_MULTIPLE_TABLES
4845 in6_dev_put_clear(&net->ipv6.ip6_prohibit_entry->rt6i_idev);
4846 in6_dev_put_clear(&net->ipv6.ip6_blk_hole_entry->rt6i_idev);
4857 #ifdef CONFIG_PROC_FS
4859 static const struct file_operations ipv6_route_proc_fops = {
4860 .open = ipv6_route_open,
4862 .llseek = seq_lseek,
4863 .release = seq_release_net,
4866 static int rt6_stats_seq_show(struct seq_file *seq, void *v)
4868 struct net *net = (struct net *)seq->private;
4869 seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
4870 net->ipv6.rt6_stats->fib_nodes,
4871 net->ipv6.rt6_stats->fib_route_nodes,
4872 atomic_read(&net->ipv6.rt6_stats->fib_rt_alloc),
4873 net->ipv6.rt6_stats->fib_rt_entries,
4874 net->ipv6.rt6_stats->fib_rt_cache,
4875 dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
4876 net->ipv6.rt6_stats->fib_discarded_routes);
4881 static int rt6_stats_seq_open(struct inode *inode, struct file *file)
4883 return single_open_net(inode, file, rt6_stats_seq_show);
4886 static const struct file_operations rt6_stats_seq_fops = {
4887 .open = rt6_stats_seq_open,
4889 .llseek = seq_lseek,
4890 .release = single_release_net,
4892 #endif /* CONFIG_PROC_FS */
4894 #ifdef CONFIG_SYSCTL
4897 int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
4898 void __user *buffer, size_t *lenp, loff_t *ppos)
4905 net = (struct net *)ctl->extra1;
4906 delay = net->ipv6.sysctl.flush_delay;
4907 proc_dointvec(ctl, write, buffer, lenp, ppos);
4908 fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
4912 struct ctl_table ipv6_route_table_template[] = {
4914 .procname = "flush",
4915 .data = &init_net.ipv6.sysctl.flush_delay,
4916 .maxlen = sizeof(int),
4918 .proc_handler = ipv6_sysctl_rtcache_flush
4921 .procname = "gc_thresh",
4922 .data = &ip6_dst_ops_template.gc_thresh,
4923 .maxlen = sizeof(int),
4925 .proc_handler = proc_dointvec,
4928 .procname = "max_size",
4929 .data = &init_net.ipv6.sysctl.ip6_rt_max_size,
4930 .maxlen = sizeof(int),
4932 .proc_handler = proc_dointvec,
4935 .procname = "gc_min_interval",
4936 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
4937 .maxlen = sizeof(int),
4939 .proc_handler = proc_dointvec_jiffies,
4942 .procname = "gc_timeout",
4943 .data = &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
4944 .maxlen = sizeof(int),
4946 .proc_handler = proc_dointvec_jiffies,
4949 .procname = "gc_interval",
4950 .data = &init_net.ipv6.sysctl.ip6_rt_gc_interval,
4951 .maxlen = sizeof(int),
4953 .proc_handler = proc_dointvec_jiffies,
4956 .procname = "gc_elasticity",
4957 .data = &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
4958 .maxlen = sizeof(int),
4960 .proc_handler = proc_dointvec,
4963 .procname = "mtu_expires",
4964 .data = &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
4965 .maxlen = sizeof(int),
4967 .proc_handler = proc_dointvec_jiffies,
4970 .procname = "min_adv_mss",
4971 .data = &init_net.ipv6.sysctl.ip6_rt_min_advmss,
4972 .maxlen = sizeof(int),
4974 .proc_handler = proc_dointvec,
4977 .procname = "gc_min_interval_ms",
4978 .data = &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
4979 .maxlen = sizeof(int),
4981 .proc_handler = proc_dointvec_ms_jiffies,
4986 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
4988 struct ctl_table *table;
4990 table = kmemdup(ipv6_route_table_template,
4991 sizeof(ipv6_route_table_template),
4995 table[0].data = &net->ipv6.sysctl.flush_delay;
4996 table[0].extra1 = net;
4997 table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
4998 table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
4999 table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
5000 table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
5001 table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
5002 table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
5003 table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
5004 table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
5005 table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
5007 /* Don't export sysctls to unprivileged users */
5008 if (net->user_ns != &init_user_ns)
5009 table[0].procname = NULL;
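/* Per-namespace initialisation: clone the dst_ops template, set up the
 * null (and, with multiple tables, prohibit/blackhole) template routes,
 * and seed the route GC and advmss sysctls with their defaults.
 */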
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_ip6_dst_entries;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.fib6_has_custom_rules = false;
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif
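
	/* Per-netns defaults, runtime-tunable through the sysctl table
	 * above (e.g. net.ipv6.route.gc_timeout).  ip6_rt_min_advmss
	 * leaves room for the IPv6 header (40 bytes) and a TCP header
	 * (20 bytes) below the minimum IPv6 MTU of 1280.
	 */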
	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}
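
/* The proc files are created by a separate "late" pernet op, registered
 * last in ip6_route_init(), so for a new namespace they only appear
 * once the routing state set up above already exists.
 */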
static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
	proc_create("rt6_stats", 0444, net->proc_net, &rt6_stats_seq_fops);
#endif
	return 0;
}
static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};
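
/* Each namespace gets its own inet_peer base; for IPv6 this per-peer
 * state backs things such as ICMPv6 rate limiting and fragment ID
 * generation.
 */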
static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init = ipv6_inetpeer_init,
	.exit = ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};
static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = ADDRCONF_NOTIFY_PRIORITY - 10, /* must be before addrconf notifier */
};
void __init ip6_route_init_special_entries(void)
{
	/* The loopback device is registered before this code runs, so
	 * the loopback reference in rt6_info is never taken automatically;
	 * take it manually for init_net.
	 */
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
}
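
/* Module init.  Registration order matters: the dst slab cache and the
 * pernet subsystems must exist before the FIB, policy rules and netlink
 * handlers that rely on them.  On failure, the labels at the bottom
 * unwind only what has been set up so far.
 */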
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;
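
	/* Blackhole dsts are allocated from the same slab cache as
	 * regular rt6_info entries; only the cache pointer is shared,
	 * the entry counters were initialized separately above.
	 */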
	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;
	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_NEWROUTE,
				   inet6_rtm_newroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;
	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_DELROUTE,
				   inet6_rtm_delroute, NULL, 0);
	if (ret < 0)
		goto out_register_late_subsys;
	ret = rtnl_register_module(THIS_MODULE, PF_INET6, RTM_GETROUTE,
				   inet6_rtm_getroute, NULL,
				   RTNL_FLAG_DOIT_UNLOCKED);
	if (ret < 0)
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;
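
	/* Per-CPU lists of rt6_info entries that live outside the fib
	 * tree; they are walked on device unregister so their device
	 * references can be dropped.  One list and lock per CPU keeps
	 * insertion and removal free of cross-CPU contention.
	 */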
	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
out:
	return ret;
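
	/* Error unwind: each label undoes the registrations that
	 * succeeded before the failing step, in reverse order.
	 */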
out_register_late_subsys:
	rtnl_unregister_all(PF_INET6);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
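
/* Module teardown: mirrors ip6_route_init(), undoing every
 * registration in reverse order.
 */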
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}