 * Linux INET6 implementation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * YOSHIFUJI Hideaki @USAGI
 * reworked default router selection.
 * - respect outgoing interface
 * - select from (probably) reachable routers (i.e.
 *   routers in REACHABLE, STALE, DELAY or PROBE states).
 * - always select the same router if it is (probably)
 *   reachable.  otherwise, round-robin the list.
 * Fixed routing subtrees.
#define pr_fmt(fmt) "IPv6: " fmt

#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/times.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/route.h>
#include <linux/netdevice.h>
#include <linux/in6.h>
#include <linux/mroute6.h>
#include <linux/init.h>
#include <linux/if_arp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/nsproxy.h>
#include <linux/slab.h>
#include <net/net_namespace.h>

#include <net/ip6_fib.h>
#include <net/ip6_route.h>
#include <net/ndisc.h>
#include <net/addrconf.h>

#include <linux/rtnetlink.h>

#include <net/dst_metadata.h>

#include <net/netevent.h>
#include <net/netlink.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/ip_tunnels.h>

#include <asm/uaccess.h>

#include <linux/sysctl.h>
enum rt6_nud_state {
	RT6_NUD_FAIL_HARD = -3,
	RT6_NUD_FAIL_PROBE = -2,
	RT6_NUD_FAIL_DO_RR = -1,
	RT6_NUD_SUCCEED = 1,
};
static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort);
static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ip6_default_advmss(const struct dst_entry *dst);
static unsigned int ip6_mtu(const struct dst_entry *dst);
static struct dst_entry *ip6_negative_advice(struct dst_entry *);
static void ip6_dst_destroy(struct dst_entry *);
static void ip6_dst_ifdown(struct dst_entry *,
			   struct net_device *dev, int how);
static int ip6_dst_gc(struct dst_ops *ops);

static int ip6_pkt_discard(struct sk_buff *skb);
static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb);
static int ip6_pkt_prohibit(struct sk_buff *skb);
static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb);
static void ip6_link_failure(struct sk_buff *skb);
static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk,
			    struct sk_buff *skb);
static void rt6_dst_from_metrics_check(struct rt6_info *rt);
static int rt6_score_route(struct rt6_info *rt, int oif, int strict);

#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr, int ifindex,
					   unsigned int pref);
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr, int ifindex);
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt6_uncached_list);
static void rt6_uncached_list_add(struct rt6_info *rt)
	struct uncached_list *ul = raw_cpu_ptr(&rt6_uncached_list);

	rt->dst.flags |= DST_NOCACHE;
	rt->rt6i_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt6i_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
static void rt6_uncached_list_del(struct rt6_info *rt)
	if (!list_empty(&rt->rt6i_uncached)) {
		struct uncached_list *ul = rt->rt6i_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt6i_uncached);
		spin_unlock_bh(&ul->lock);
	}
static void rt6_uncached_list_flush_dev(struct net *net, struct net_device *dev)
	struct net_device *loopback_dev = net->loopback_dev;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt6i_uncached) {
			struct inet6_dev *rt_idev = rt->rt6i_idev;
			struct net_device *rt_dev = rt->dst.dev;

			if (rt_idev && (rt_idev->dev == dev || !dev) &&
			    rt_idev->dev != loopback_dev) {
				rt->rt6i_idev = in6_dev_get(loopback_dev);
				in6_dev_put(rt_idev);
			}

			if (rt_dev && (rt_dev == dev || !dev) &&
			    rt_dev != loopback_dev) {
				rt->dst.dev = loopback_dev;
				dev_hold(rt->dst.dev);
			}
		}
		spin_unlock_bh(&ul->lock);
	}
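/* Hedged sketch (annotation, not part of the original file): the intended
 * lifecycle of the uncached list above.  example_make_uncached() is a
 * hypothetical caller; only ip6_dst_alloc(), rt6_uncached_list_add() and
 * rt6_uncached_list_flush_dev() are real symbols in this file.
 */
#if 0
static struct rt6_info *example_make_uncached(struct net *net,
					      struct net_device *dev)
{
	struct rt6_info *rt = ip6_dst_alloc(net, dev, 0);

	if (rt)
		rt6_uncached_list_add(rt);	/* track it for ifdown */
	return rt;
}
/* On NETDEV_UNREGISTER, rt6_ifdown() ends with
 * rt6_uncached_list_flush_dev(net, dev), which re-points every tracked
 * entry at the loopback device so the dst may safely outlive "dev".
 */
#endif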
static u32 *rt6_pcpu_cow_metrics(struct rt6_info *rt)
	return dst_metrics_write_ptr(rt->dst.from);

static u32 *ipv6_cow_metrics(struct dst_entry *dst, unsigned long old)
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (rt->rt6i_flags & RTF_PCPU)
		return rt6_pcpu_cow_metrics(rt);
	else if (rt->rt6i_flags & RTF_CACHE)

	return dst_cow_metrics_generic(dst, old);

static inline const void *choose_neigh_daddr(struct rt6_info *rt,

	struct in6_addr *p = &rt->rt6i_gateway;

	if (!ipv6_addr_any(p))
		return (const void *) p;

	return &ipv6_hdr(skb)->daddr;

static struct neighbour *ip6_neigh_lookup(const struct dst_entry *dst,

	struct rt6_info *rt = (struct rt6_info *) dst;

	daddr = choose_neigh_daddr(rt, skb, daddr);
	n = __ipv6_neigh_lookup(dst->dev, daddr);

	return neigh_create(&nd_tbl, daddr, dst->dev);
static struct dst_ops ip6_dst_ops_template = {

	.check			= ip6_dst_check,
	.default_advmss		= ip6_default_advmss,

	.cow_metrics		= ipv6_cow_metrics,
	.destroy		= ip6_dst_destroy,
	.ifdown			= ip6_dst_ifdown,
	.negative_advice	= ip6_negative_advice,
	.link_failure		= ip6_link_failure,
	.update_pmtu		= ip6_rt_update_pmtu,
	.redirect		= rt6_do_redirect,
	.local_out		= __ip6_local_out,
	.neigh_lookup		= ip6_neigh_lookup,
};

static unsigned int ip6_blackhole_mtu(const struct dst_entry *dst)
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;

static void ip6_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					 struct sk_buff *skb, u32 mtu)

static void ip6_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,

static u32 *ip6_rt_blackhole_cow_metrics(struct dst_entry *dst,

static struct dst_ops ip6_dst_blackhole_ops = {

	.destroy		= ip6_dst_destroy,
	.check			= ip6_dst_check,
	.mtu			= ip6_blackhole_mtu,
	.default_advmss		= ip6_default_advmss,
	.update_pmtu		= ip6_rt_blackhole_update_pmtu,
	.redirect		= ip6_rt_blackhole_redirect,
	.cow_metrics		= ip6_rt_blackhole_cow_metrics,
	.neigh_lookup		= ip6_neigh_lookup,
};
static const u32 ip6_template_metrics[RTAX_MAX] = {
	[RTAX_HOPLIMIT - 1] = 0,
};

static const struct rt6_info ip6_null_entry_template = {
		.__refcnt	= ATOMIC_INIT(1),

		.obsolete	= DST_OBSOLETE_FORCE_CHK,
		.error		= -ENETUNREACH,
		.input		= ip6_pkt_discard,
		.output		= ip6_pkt_discard_out,

	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

#ifdef CONFIG_IPV6_MULTIPLE_TABLES

static const struct rt6_info ip6_prohibit_entry_template = {
		.__refcnt	= ATOMIC_INIT(1),

		.obsolete	= DST_OBSOLETE_FORCE_CHK,

		.input		= ip6_pkt_prohibit,
		.output		= ip6_pkt_prohibit_out,

	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};

static const struct rt6_info ip6_blk_hole_entry_template = {
		.__refcnt	= ATOMIC_INIT(1),

		.obsolete	= DST_OBSOLETE_FORCE_CHK,

		.input		= dst_discard,
		.output		= dst_discard_sk,

	.rt6i_flags	= (RTF_REJECT | RTF_NONEXTHOP),
	.rt6i_protocol	= RTPROT_KERNEL,
	.rt6i_metric	= ~(u32) 0,
	.rt6i_ref	= ATOMIC_INIT(1),
};
/* allocate dst with ip6_dst_ops */
static struct rt6_info *__ip6_dst_alloc(struct net *net,
					struct net_device *dev,

	struct rt6_info *rt = dst_alloc(&net->ipv6.ip6_dst_ops, dev,
					0, DST_OBSOLETE_FORCE_CHK, flags);

		struct dst_entry *dst = &rt->dst;

		memset(dst + 1, 0, sizeof(*rt) - sizeof(*dst));
		INIT_LIST_HEAD(&rt->rt6i_siblings);
		INIT_LIST_HEAD(&rt->rt6i_uncached);

static struct rt6_info *ip6_dst_alloc(struct net *net,
				      struct net_device *dev,

	struct rt6_info *rt = __ip6_dst_alloc(net, dev, flags);

	rt->rt6i_pcpu = alloc_percpu_gfp(struct rt6_info *, GFP_ATOMIC);

	for_each_possible_cpu(cpu) {

		p = per_cpu_ptr(rt->rt6i_pcpu, cpu);
		/* no one shares rt */

	dst_destroy((struct dst_entry *)rt);
static void ip6_dst_destroy(struct dst_entry *dst)
	struct rt6_info *rt = (struct rt6_info *)dst;
	struct dst_entry *from = dst->from;
	struct inet6_dev *idev;

	dst_destroy_metrics_generic(dst);
	free_percpu(rt->rt6i_pcpu);
	rt6_uncached_list_del(rt);

	idev = rt->rt6i_idev;

		rt->rt6i_idev = NULL;

static void ip6_dst_ifdown(struct dst_entry *dst, struct net_device *dev,

	struct rt6_info *rt = (struct rt6_info *)dst;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *loopback_dev =
		dev_net(dev)->loopback_dev;

	if (dev != loopback_dev) {
		if (idev && idev->dev == dev) {
			struct inet6_dev *loopback_idev =
				in6_dev_get(loopback_dev);

			rt->rt6i_idev = loopback_idev;
static bool rt6_check_expired(const struct rt6_info *rt)
	if (rt->rt6i_flags & RTF_EXPIRES) {
		if (time_after(jiffies, rt->dst.expires))

	} else if (rt->dst.from) {
		return rt6_check_expired((struct rt6_info *) rt->dst.from);
/* Multipath route selection:
 * Hash based function using packet header and flowlabel.
 * Adapted from fib_info_hashfn()
 */
static int rt6_info_hash_nhsfn(unsigned int candidate_count,
			       const struct flowi6 *fl6)
	unsigned int val = fl6->flowi6_proto;

	val ^= ipv6_addr_hash(&fl6->daddr);
	val ^= ipv6_addr_hash(&fl6->saddr);

	/* This works only if it is not encapsulated */
	switch (fl6->flowi6_proto) {

		val ^= (__force u16)fl6->fl6_sport;
		val ^= (__force u16)fl6->fl6_dport;

		val ^= (__force u16)fl6->fl6_icmp_type;
		val ^= (__force u16)fl6->fl6_icmp_code;

	/* RFC 6438 recommends using the flow label */
	val ^= (__force u32)fl6->flowlabel;

	/* Perhaps we need to tune this function? */
	val = val ^ (val >> 7) ^ (val >> 12);
	return val % candidate_count;
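/* Hedged sketch: how the hash above spreads flows over N equal-cost
 * nexthops.  With rt6i_nsiblings == 2 the candidate count is 3, and a
 * given flow always maps to the same index, keeping one connection on
 * one path.  example_pick_nexthop() is hypothetical.
 */
#if 0
static int example_pick_nexthop(const struct flowi6 *fl6)
{
	unsigned int candidates = 3;	/* matched route + 2 siblings */

	/* 0 keeps the matched route; 1 or 2 walks that many entries
	 * down match->rt6i_siblings (see rt6_multipath_select()).
	 */
	return rt6_info_hash_nhsfn(candidates, fl6);
}
#endif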
static struct rt6_info *rt6_multipath_select(struct rt6_info *match,
					     struct flowi6 *fl6, int oif,

	struct rt6_info *sibling, *next_sibling;

	route_chosen = rt6_info_hash_nhsfn(match->rt6i_nsiblings + 1, fl6);
	/* Don't change the route if route_chosen == 0
	 * (the siblings list does not include ourselves)
	 */

	list_for_each_entry_safe(sibling, next_sibling,
				 &match->rt6i_siblings, rt6i_siblings) {

		if (route_chosen == 0) {
			if (rt6_score_route(sibling, oif, strict) < 0)
 *	Route lookup. The table->tb6_lock is assumed to be held by the caller.
static inline struct rt6_info *rt6_device_match(struct net *net,

						const struct in6_addr *saddr,

	struct rt6_info *local = NULL;
	struct rt6_info *sprt;

	if (!oif && ipv6_addr_any(saddr))

	for (sprt = rt; sprt; sprt = sprt->dst.rt6_next) {
		struct net_device *dev = sprt->dst.dev;

			if (dev->ifindex == oif)

			if (dev->flags & IFF_LOOPBACK) {
				if (!sprt->rt6i_idev ||
				    sprt->rt6i_idev->dev->ifindex != oif) {
					if (flags & RT6_LOOKUP_F_IFACE && oif)

					if (local && (!oif ||
						      local->rt6i_idev->dev->ifindex == oif))

			if (ipv6_chk_addr(net, saddr, dev,
					  flags & RT6_LOOKUP_F_IFACE))

	if (flags & RT6_LOOKUP_F_IFACE)
		return net->ipv6.ip6_null_entry;
#ifdef CONFIG_IPV6_ROUTER_PREF
struct __rt6_probe_work {
	struct work_struct work;
	struct in6_addr target;
	struct net_device *dev;
};

static void rt6_probe_deferred(struct work_struct *w)
	struct in6_addr mcaddr;
	struct __rt6_probe_work *work =
		container_of(w, struct __rt6_probe_work, work);

	addrconf_addr_solict_mult(&work->target, &mcaddr);
	ndisc_send_ns(work->dev, NULL, &work->target, &mcaddr, NULL, NULL);
static void rt6_probe(struct rt6_info *rt)
	struct __rt6_probe_work *work;
	struct neighbour *neigh;
	/*
	 * Okay, this does not seem to be appropriate for now;
	 * however, we need to check if it really is so,
	 * a.k.a. Router Reachability Probing.
	 *
	 * Router Reachability Probe MUST be rate-limited
	 * to no more than one per minute.
	 */
	if (!rt || !(rt->rt6i_flags & RTF_GATEWAY))

	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);

		if (neigh->nud_state & NUD_VALID)

		write_lock(&neigh->lock);
		if (!(neigh->nud_state & NUD_VALID) &&

		    rt->rt6i_idev->cnf.rtr_probe_interval)) {
			work = kmalloc(sizeof(*work), GFP_ATOMIC);

				__neigh_set_probe_once(neigh);

		write_unlock(&neigh->lock);

		work = kmalloc(sizeof(*work), GFP_ATOMIC);

		INIT_WORK(&work->work, rt6_probe_deferred);
		work->target = rt->rt6i_gateway;
		dev_hold(rt->dst.dev);
		work->dev = rt->dst.dev;
		schedule_work(&work->work);

	rcu_read_unlock_bh();
static inline void rt6_probe(struct rt6_info *rt)

/*
 * Default Router Selection (RFC 2461 6.3.6)
 */
static inline int rt6_check_dev(struct rt6_info *rt, int oif)
	struct net_device *dev = rt->dst.dev;
	if (!oif || dev->ifindex == oif)

	if ((dev->flags & IFF_LOOPBACK) &&
	    rt->rt6i_idev && rt->rt6i_idev->dev->ifindex == oif)
static inline enum rt6_nud_state rt6_check_neigh(struct rt6_info *rt)
	struct neighbour *neigh;
	enum rt6_nud_state ret = RT6_NUD_FAIL_HARD;

	if (rt->rt6i_flags & RTF_NONEXTHOP ||
	    !(rt->rt6i_flags & RTF_GATEWAY))
		return RT6_NUD_SUCCEED;

	neigh = __ipv6_neigh_lookup_noref(rt->dst.dev, &rt->rt6i_gateway);

		read_lock(&neigh->lock);
		if (neigh->nud_state & NUD_VALID)
			ret = RT6_NUD_SUCCEED;
#ifdef CONFIG_IPV6_ROUTER_PREF
		else if (!(neigh->nud_state & NUD_FAILED))
			ret = RT6_NUD_SUCCEED;

			ret = RT6_NUD_FAIL_PROBE;

		read_unlock(&neigh->lock);

		ret = IS_ENABLED(CONFIG_IPV6_ROUTER_PREF) ?
		      RT6_NUD_SUCCEED : RT6_NUD_FAIL_DO_RR;

	rcu_read_unlock_bh();
static int rt6_score_route(struct rt6_info *rt, int oif,
			   int strict)

	m = rt6_check_dev(rt, oif);
	if (!m && (strict & RT6_LOOKUP_F_IFACE))
		return RT6_NUD_FAIL_HARD;
#ifdef CONFIG_IPV6_ROUTER_PREF
	m |= IPV6_DECODE_PREF(IPV6_EXTRACT_PREF(rt->rt6i_flags)) << 2;
#endif
	if (strict & RT6_LOOKUP_F_REACHABLE) {
		int n = rt6_check_neigh(rt);
static struct rt6_info *find_match(struct rt6_info *rt, int oif, int strict,
				   int *mpri, struct rt6_info *match,
				   bool *do_rr)

	bool match_do_rr = false;
	struct inet6_dev *idev = rt->rt6i_idev;
	struct net_device *dev = rt->dst.dev;

	if (dev && !netif_carrier_ok(dev) &&
	    idev->cnf.ignore_routes_with_linkdown)

	if (rt6_check_expired(rt))

	m = rt6_score_route(rt, oif, strict);
	if (m == RT6_NUD_FAIL_DO_RR) {

		m = 0; /* lowest valid score */
	} else if (m == RT6_NUD_FAIL_HARD) {

	if (strict & RT6_LOOKUP_F_REACHABLE)

	/* note that m can be RT6_NUD_FAIL_PROBE at this point */

		*do_rr = match_do_rr;
static struct rt6_info *find_rr_leaf(struct fib6_node *fn,
				     struct rt6_info *rr_head,
				     u32 metric, int oif, int strict,
				     bool *do_rr)

	struct rt6_info *rt, *match, *cont;

	for (rt = rr_head; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {

		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	for (rt = fn->leaf; rt && rt != rr_head; rt = rt->dst.rt6_next) {
		if (rt->rt6i_metric != metric) {

		match = find_match(rt, oif, strict, &mpri, match, do_rr);

	for (rt = cont; rt; rt = rt->dst.rt6_next)
		match = find_match(rt, oif, strict, &mpri, match, do_rr);
static struct rt6_info *rt6_select(struct fib6_node *fn, int oif, int strict)
	struct rt6_info *match, *rt0;

		fn->rr_ptr = rt0 = fn->leaf;

	match = find_rr_leaf(fn, rt0, rt0->rt6i_metric, oif, strict,
			     &do_rr);

		struct rt6_info *next = rt0->dst.rt6_next;

		/* no entries matched; do round-robin */
		if (!next || next->rt6i_metric != rt0->rt6i_metric)

	net = dev_net(rt0->dst.dev);
	return match ? match : net->ipv6.ip6_null_entry;
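/* Hedged sketch of the round-robin step above: when find_rr_leaf()
 * reports do_rr, rt6_select() advances fn->rr_ptr to the next route of
 * the same metric (wrapping to fn->leaf), so consecutive lookups rotate
 * through equally preferred routers.  example_advance_rr() restates
 * that logic and is not a symbol in this file.
 */
#if 0
static void example_advance_rr(struct fib6_node *fn, struct rt6_info *rt0)
{
	struct rt6_info *next = rt0->dst.rt6_next;

	if (!next || next->rt6i_metric != rt0->rt6i_metric)
		next = fn->leaf;		/* wrap around */
	if (next != rt0)
		fn->rr_ptr = next;
}
#endif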
static bool rt6_is_gw_or_nonexthop(const struct rt6_info *rt)
	return (rt->rt6i_flags & (RTF_NONEXTHOP | RTF_GATEWAY));
#ifdef CONFIG_IPV6_ROUTE_INFO
int rt6_route_rcv(struct net_device *dev, u8 *opt, int len,
		  const struct in6_addr *gwaddr)
	struct net *net = dev_net(dev);
	struct route_info *rinfo = (struct route_info *) opt;
	struct in6_addr prefix_buf, *prefix;

	unsigned long lifetime;

	if (len < sizeof(struct route_info)) {

	/* Sanity check for prefix_len and length */
	if (rinfo->length > 3) {

	} else if (rinfo->prefix_len > 128) {

	} else if (rinfo->prefix_len > 64) {
		if (rinfo->length < 2) {

	} else if (rinfo->prefix_len > 0) {
		if (rinfo->length < 1) {

	pref = rinfo->route_pref;
	if (pref == ICMPV6_ROUTER_PREF_INVALID)

	lifetime = addrconf_timeout_fixup(ntohl(rinfo->lifetime), HZ);

	if (rinfo->length == 3)
		prefix = (struct in6_addr *)rinfo->prefix;

		/* this function is safe */
		ipv6_addr_prefix(&prefix_buf,
				 (struct in6_addr *)rinfo->prefix,
				 rinfo->prefix_len);
		prefix = &prefix_buf;

	if (rinfo->prefix_len == 0)
		rt = rt6_get_dflt_router(gwaddr, dev);

		rt = rt6_get_route_info(net, prefix, rinfo->prefix_len,
					gwaddr, dev->ifindex);

	if (rt && !lifetime) {

	rt = rt6_add_route_info(net, prefix, rinfo->prefix_len, gwaddr, dev->ifindex,
				pref);

		rt->rt6i_flags = RTF_ROUTEINFO |
				 (rt->rt6i_flags & ~RTF_PREF_MASK) | RTF_PREF(pref);

		if (!addrconf_finite_timeout(lifetime))
			rt6_clean_expires(rt);

			rt6_set_expires(rt, jiffies + HZ * lifetime);
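/* Hedged sketch: RFC 4191 encodes the route information option length
 * in units of 8 octets, and the option's fixed header is itself 8
 * octets, so the prefix field carries 8 * (length - 1) bytes:
 * length 1 -> 0 bytes, length 2 -> 8 bytes (up to /64), length 3 ->
 * 16 bytes (a full /128).  The sanity checks above bound prefix_len
 * against the advertised length accordingly.
 */
#if 0
static int example_prefix_bytes(const struct route_info *rinfo)
{
	return 8 * (rinfo->length - 1);	/* bytes available for the prefix */
}
#endif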
static struct fib6_node* fib6_backtrack(struct fib6_node *fn,
					struct in6_addr *saddr)
	struct fib6_node *pn;

		if (fn->fn_flags & RTN_TL_ROOT)

		if (FIB6_SUBTREE(pn) && FIB6_SUBTREE(pn) != fn)
			fn = fib6_lookup(FIB6_SUBTREE(pn), NULL, saddr);

		if (fn->fn_flags & RTN_RTINFO)
static struct rt6_info *ip6_pol_route_lookup(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
	struct fib6_node *fn;

	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);

	rt = rt6_device_match(net, rt, &fl6->saddr, fl6->flowi6_oif, flags);
	if (rt->rt6i_nsiblings && fl6->flowi6_oif == 0)
		rt = rt6_multipath_select(rt, fl6, fl6->flowi6_oif, flags);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);

	dst_use(&rt->dst, jiffies);
	read_unlock_bh(&table->tb6_lock);

struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
				   int flags)
	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_lookup);

EXPORT_SYMBOL_GPL(ip6_route_lookup);
struct rt6_info *rt6_lookup(struct net *net, const struct in6_addr *daddr,
			    const struct in6_addr *saddr, int oif, int strict)
	struct flowi6 fl6 = {

	};
	struct dst_entry *dst;
	int flags = strict ? RT6_LOOKUP_F_IFACE : 0;

		memcpy(&fl6.saddr, saddr, sizeof(*saddr));
		flags |= RT6_LOOKUP_F_HAS_SADDR;

	dst = fib6_rule_lookup(net, &fl6, flags, ip6_pol_route_lookup);

		return (struct rt6_info *) dst;

EXPORT_SYMBOL(rt6_lookup);
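/* Hedged sketch: a minimal rt6_lookup() call.  No source address and no
 * device restriction; strict == 0 leaves RT6_LOOKUP_F_IFACE unset.  The
 * entry is returned with a reference held, so drop it with ip6_rt_put().
 * example_lookup() is hypothetical.
 */
#if 0
static void example_lookup(struct net *net, const struct in6_addr *daddr)
{
	struct rt6_info *rt = rt6_lookup(net, daddr, NULL, 0, 0);

	if (rt) {
		/* ... inspect rt->dst.dev, rt->rt6i_gateway ... */
		ip6_rt_put(rt);
	}
}
#endif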
/* ip6_ins_rt is called with table->tb6_lock NOT held.
 * It takes a new route entry; if the addition fails for any reason, the
 * route is freed. In any case, if the caller does not hold a reference
 * to it, it may be destroyed.
 */
static int __ip6_ins_rt(struct rt6_info *rt, struct nl_info *info,
			struct mx6_config *mxc)

	struct fib6_table *table;

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_add(&table->tb6_root, rt, info, mxc);
	write_unlock_bh(&table->tb6_lock);

int ip6_ins_rt(struct rt6_info *rt)
	struct nl_info info = { .nl_net = dev_net(rt->dst.dev), };
	struct mx6_config mxc = { .mx = NULL, };

	return __ip6_ins_rt(rt, &info, &mxc);
static struct rt6_info *ip6_rt_cache_alloc(struct rt6_info *ort,
					   const struct in6_addr *daddr,
					   const struct in6_addr *saddr)

	if (ort->rt6i_flags & (RTF_CACHE | RTF_PCPU))
		ort = (struct rt6_info *)ort->dst.from;

	rt = __ip6_dst_alloc(dev_net(ort->dst.dev), ort->dst.dev, 0);

	ip6_rt_copy_init(rt, ort);
	rt->rt6i_flags |= RTF_CACHE;

	rt->dst.flags |= DST_HOST;
	rt->rt6i_dst.addr = *daddr;
	rt->rt6i_dst.plen = 128;

	if (!rt6_is_gw_or_nonexthop(ort)) {
		if (ort->rt6i_dst.plen != 128 &&
		    ipv6_addr_equal(&ort->rt6i_dst.addr, daddr))
			rt->rt6i_flags |= RTF_ANYCAST;
#ifdef CONFIG_IPV6_SUBTREES
		if (rt->rt6i_src.plen && saddr) {
			rt->rt6i_src.addr = *saddr;
			rt->rt6i_src.plen = 128;
static struct rt6_info *ip6_rt_pcpu_alloc(struct rt6_info *rt)
	struct rt6_info *pcpu_rt;

	pcpu_rt = __ip6_dst_alloc(dev_net(rt->dst.dev),
				  rt->dst.dev, rt->dst.flags);

	ip6_rt_copy_init(pcpu_rt, rt);
	pcpu_rt->rt6i_protocol = rt->rt6i_protocol;
	pcpu_rt->rt6i_flags |= RTF_PCPU;
/* It should be called with read_lock_bh(&tb6_lock) acquired */
static struct rt6_info *rt6_get_pcpu_route(struct rt6_info *rt)
	struct rt6_info *pcpu_rt, **p;

	p = this_cpu_ptr(rt->rt6i_pcpu);

		dst_hold(&pcpu_rt->dst);
		rt6_dst_from_metrics_check(pcpu_rt);
static struct rt6_info *rt6_make_pcpu_route(struct rt6_info *rt)
	struct fib6_table *table = rt->rt6i_table;
	struct rt6_info *pcpu_rt, *prev, **p;

	pcpu_rt = ip6_rt_pcpu_alloc(rt);

		struct net *net = dev_net(rt->dst.dev);

		dst_hold(&net->ipv6.ip6_null_entry->dst);
		return net->ipv6.ip6_null_entry;

	read_lock_bh(&table->tb6_lock);
	if (rt->rt6i_pcpu) {
		p = this_cpu_ptr(rt->rt6i_pcpu);
		prev = cmpxchg(p, NULL, pcpu_rt);

			/* If someone did it before us, return prev instead */
			dst_destroy(&pcpu_rt->dst);

		/* rt has been removed from the fib6 tree
		 * before we have a chance to acquire the read_lock.
		 * In this case, don't bother to create a pcpu rt
		 * since rt is going away anyway.  The next
		 * dst_check() will trigger a re-lookup.
		 */
		dst_destroy(&pcpu_rt->dst);

	dst_hold(&pcpu_rt->dst);
	rt6_dst_from_metrics_check(pcpu_rt);
	read_unlock_bh(&table->tb6_lock);
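/* Hedged sketch of the publish pattern used above: cmpxchg() on the
 * per-cpu slot lets the first writer win; a losing writer destroys its
 * own copy and adopts the winner.  example_publish() only restates the
 * idiom and is not part of this file.
 */
#if 0
static struct rt6_info *example_publish(struct rt6_info **slot,
					struct rt6_info *mine)
{
	struct rt6_info *prev = cmpxchg(slot, NULL, mine);

	if (prev) {			/* somebody beat us to it */
		dst_destroy(&mine->dst);
		return prev;
	}
	return mine;
}
#endif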
static struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table, int oif,
				      struct flowi6 *fl6, int flags)
	struct fib6_node *fn, *saved_fn;
	struct rt6_info *rt;

	strict |= flags & RT6_LOOKUP_F_IFACE;
	if (net->ipv6.devconf_all->forwarding == 0)
		strict |= RT6_LOOKUP_F_REACHABLE;

	read_lock_bh(&table->tb6_lock);

	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);

	rt = rt6_select(fn, oif, strict);
	if (rt->rt6i_nsiblings)
		rt = rt6_multipath_select(rt, fl6, oif, strict);
	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);

			goto redo_rt6_select;
		else if (strict & RT6_LOOKUP_F_REACHABLE) {
			/* also consider unreachable route */
			strict &= ~RT6_LOOKUP_F_REACHABLE;

			goto redo_rt6_select;

	if (rt == net->ipv6.ip6_null_entry || (rt->rt6i_flags & RTF_CACHE)) {
		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		rt6_dst_from_metrics_check(rt);

	} else if (unlikely((fl6->flowi6_flags & FLOWI_FLAG_KNOWN_NH) &&
			    !(rt->rt6i_flags & RTF_GATEWAY))) {
		/* Create a RTF_CACHE clone which will not be
		 * owned by the fib6 tree.  It is for the special case where
		 * the daddr in the skb during the neighbor look-up is different
		 * from the fl6->daddr used to look-up route here.
		 */

		struct rt6_info *uncached_rt;

		dst_use(&rt->dst, jiffies);
		read_unlock_bh(&table->tb6_lock);

		uncached_rt = ip6_rt_cache_alloc(rt, &fl6->daddr, NULL);
		dst_release(&rt->dst);

			rt6_uncached_list_add(uncached_rt);

			uncached_rt = net->ipv6.ip6_null_entry;

		dst_hold(&uncached_rt->dst);

		/* Get a percpu copy */

		struct rt6_info *pcpu_rt;

		rt->dst.lastuse = jiffies;

		pcpu_rt = rt6_get_pcpu_route(rt);

			read_unlock_bh(&table->tb6_lock);

			/* We have to do the read_unlock first
			 * because rt6_make_pcpu_route() may trigger
			 * ip6_dst_gc() which will take the write_lock.
			 */

			read_unlock_bh(&table->tb6_lock);
			pcpu_rt = rt6_make_pcpu_route(rt);
			dst_release(&rt->dst);
static struct rt6_info *ip6_pol_route_input(struct net *net, struct fib6_table *table,
					    struct flowi6 *fl6, int flags)
	return ip6_pol_route(net, table, fl6->flowi6_iif, fl6, flags);

static struct dst_entry *ip6_route_input_lookup(struct net *net,
						struct net_device *dev,
						struct flowi6 *fl6, int flags)
	if (rt6_need_strict(&fl6->daddr) && dev->type != ARPHRD_PIMREG)
		flags |= RT6_LOOKUP_F_IFACE;

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_input);
void ip6_route_input(struct sk_buff *skb)
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	struct net *net = dev_net(skb->dev);
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip_tunnel_info *tun_info;
	struct flowi6 fl6 = {
		.flowi6_iif = skb->dev->ifindex,
		.daddr = iph->daddr,
		.saddr = iph->saddr,
		.flowlabel = ip6_flowinfo(iph),
		.flowi6_mark = skb->mark,
		.flowi6_proto = iph->nexthdr,
	};

	tun_info = skb_tunnel_info(skb);
	if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
		fl6.flowi6_tun_key.tun_id = tun_info->key.tun_id;

	skb_dst_set(skb, ip6_route_input_lookup(net, skb->dev, &fl6, flags));
static struct rt6_info *ip6_pol_route_output(struct net *net, struct fib6_table *table,
					     struct flowi6 *fl6, int flags)
	return ip6_pol_route(net, table, fl6->flowi6_oif, fl6, flags);

struct dst_entry *ip6_route_output(struct net *net, const struct sock *sk,
				   struct flowi6 *fl6)

	fl6->flowi6_iif = LOOPBACK_IFINDEX;

	if ((sk && sk->sk_bound_dev_if) || rt6_need_strict(&fl6->daddr))
		flags |= RT6_LOOKUP_F_IFACE;

	if (!ipv6_addr_any(&fl6->saddr))
		flags |= RT6_LOOKUP_F_HAS_SADDR;

		flags |= rt6_srcprefs2flags(inet6_sk(sk)->srcprefs);

	return fib6_rule_lookup(net, fl6, flags, ip6_pol_route_output);

EXPORT_SYMBOL(ip6_route_output);
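/* Hedged sketch: typical use of ip6_route_output().  It never returns
 * NULL; on failure it hands back the null-entry dst with dst->error
 * set, so the caller must check the error before using the route.
 * example_output_route() is hypothetical.
 */
#if 0
static struct dst_entry *example_output_route(struct net *net,
					      const struct in6_addr *daddr)
{
	struct flowi6 fl6 = { .daddr = *daddr, };
	struct dst_entry *dst = ip6_route_output(net, NULL, &fl6);

	if (dst->error) {
		dst_release(dst);
		return NULL;
	}
	return dst;
}
#endif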
struct dst_entry *ip6_blackhole_route(struct net *net, struct dst_entry *dst_orig)
	struct rt6_info *rt, *ort = (struct rt6_info *) dst_orig;
	struct dst_entry *new = NULL;

	rt = dst_alloc(&ip6_dst_blackhole_ops, ort->dst.dev, 1, DST_OBSOLETE_NONE, 0);

		memset(new + 1, 0, sizeof(*rt) - sizeof(*new));

		new->input = dst_discard;
		new->output = dst_discard_sk;

		if (dst_metrics_read_only(&ort->dst))
			new->_metrics = ort->dst._metrics;

			dst_copy_metrics(new, &ort->dst);
		rt->rt6i_idev = ort->rt6i_idev;

			in6_dev_hold(rt->rt6i_idev);

		rt->rt6i_gateway = ort->rt6i_gateway;
		rt->rt6i_flags = ort->rt6i_flags;
		rt->rt6i_metric = 0;

		memcpy(&rt->rt6i_dst, &ort->rt6i_dst, sizeof(struct rt6key));
#ifdef CONFIG_IPV6_SUBTREES
		memcpy(&rt->rt6i_src, &ort->rt6i_src, sizeof(struct rt6key));
#endif

	dst_release(dst_orig);
	return new ? new : ERR_PTR(-ENOMEM);
/*
 *	Destination cache support functions
 */

static void rt6_dst_from_metrics_check(struct rt6_info *rt)

	    dst_metrics_ptr(&rt->dst) != dst_metrics_ptr(rt->dst.from))
		dst_init_metrics(&rt->dst, dst_metrics_ptr(rt->dst.from), true);

static struct dst_entry *rt6_check(struct rt6_info *rt, u32 cookie)
	if (!rt->rt6i_node || (rt->rt6i_node->fn_sernum != cookie))

	if (rt6_check_expired(rt))

static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
	if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))

static struct dst_entry *ip6_dst_check(struct dst_entry *dst, u32 cookie)
	struct rt6_info *rt;

	rt = (struct rt6_info *) dst;

	/* All IPV6 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 */

	rt6_dst_from_metrics_check(rt);

	if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
		return rt6_dst_from_check(rt, cookie);

	return rt6_check(rt, cookie);
static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
	struct rt6_info *rt = (struct rt6_info *) dst;

		if (rt->rt6i_flags & RTF_CACHE) {
			if (rt6_check_expired(rt)) {

static void ip6_link_failure(struct sk_buff *skb)
	struct rt6_info *rt;

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);

	rt = (struct rt6_info *) skb_dst(skb);

		if (rt->rt6i_flags & RTF_CACHE) {

		} else if (rt->rt6i_node && (rt->rt6i_flags & RTF_DEFAULT)) {
			rt->rt6i_node->fn_sernum = -1;
static void rt6_do_update_pmtu(struct rt6_info *rt, u32 mtu)
	struct net *net = dev_net(rt->dst.dev);

	rt->rt6i_flags |= RTF_MODIFIED;
	rt->rt6i_pmtu = mtu;
	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);

static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
				 const struct ipv6hdr *iph, u32 mtu)
	struct rt6_info *rt6 = (struct rt6_info *)dst;

	if (rt6->rt6i_flags & RTF_LOCAL)

	mtu = max_t(u32, mtu, IPV6_MIN_MTU);
	if (mtu >= dst_mtu(dst))

	if (rt6->rt6i_flags & RTF_CACHE) {
		rt6_do_update_pmtu(rt6, mtu);

		const struct in6_addr *daddr, *saddr;
		struct rt6_info *nrt6;

			daddr = &iph->daddr;
			saddr = &iph->saddr;

			daddr = &sk->sk_v6_daddr;
			saddr = &inet6_sk(sk)->saddr;

		nrt6 = ip6_rt_cache_alloc(rt6, daddr, saddr);

			rt6_do_update_pmtu(nrt6, mtu);

			/* ip6_ins_rt(nrt6) will bump the
			 * rt6->rt6i_node->fn_sernum
			 * which will fail the next rt6_check() and
			 * invalidate the sk->sk_dst_cache.
			 */

static void ip6_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			       struct sk_buff *skb, u32 mtu)
	__ip6_rt_update_pmtu(dst, sk, skb ? ipv6_hdr(skb) : NULL, mtu);
void ip6_update_pmtu(struct sk_buff *skb, struct net *net, __be32 mtu,
		     int oif, u32 mark)
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark ? mark : IP6_REPLY_MARK(net, skb->mark);
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);

	dst = ip6_route_output(net, NULL, &fl6);

		__ip6_rt_update_pmtu(dst, NULL, iph, ntohl(mtu));

EXPORT_SYMBOL_GPL(ip6_update_pmtu);

void ip6_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, __be32 mtu)
	ip6_update_pmtu(skb, sock_net(sk), mtu,
			sk->sk_bound_dev_if, sk->sk_mark);

EXPORT_SYMBOL_GPL(ip6_sk_update_pmtu);
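/* Hedged sketch: how a caller might report a learned path MTU back into
 * the routing cache.  skb->data must point at the inner IPv6 header, as
 * the cast in ip6_update_pmtu() expects; the mtu argument is passed in
 * network byte order and ntohl()'d above.  example_report_pmtu() is
 * hypothetical.
 */
#if 0
static void example_report_pmtu(struct sk_buff *skb, struct net *net,
				u32 mtu)
{
	ip6_update_pmtu(skb, net, htonl(mtu), 0 /* any oif */, 0 /* no mark */);
}
#endif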
/* Handle redirects */
struct ip6rd_flowi {
	struct flowi6 fl6;
	struct in6_addr gateway;
};

static struct rt6_info *__ip6_route_redirect(struct net *net,
					     struct fib6_table *table,
					     struct flowi6 *fl6,
					     int flags)
	struct ip6rd_flowi *rdfl = (struct ip6rd_flowi *)fl6;
	struct rt6_info *rt;
	struct fib6_node *fn;

	/* Get the "current" route for this destination and
	 * check if the redirect has come from the appropriate router.
	 *
	 * RFC 4861 specifies that redirects should only be
	 * accepted if they come from the nexthop to the target.
	 * Due to the way the routes are chosen, this notion
	 * is a bit fuzzy and one might need to check all possible
	 * routes.
	 */
	read_lock_bh(&table->tb6_lock);
	fn = fib6_lookup(&table->tb6_root, &fl6->daddr, &fl6->saddr);

	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt6_check_expired(rt))

		if (!(rt->rt6i_flags & RTF_GATEWAY))

		if (fl6->flowi6_oif != rt->dst.dev->ifindex)

		if (!ipv6_addr_equal(&rdfl->gateway, &rt->rt6i_gateway))

		rt = net->ipv6.ip6_null_entry;
	else if (rt->dst.error) {
		rt = net->ipv6.ip6_null_entry;

	if (rt == net->ipv6.ip6_null_entry) {
		fn = fib6_backtrack(fn, &fl6->saddr);

	read_unlock_bh(&table->tb6_lock);

static struct dst_entry *ip6_route_redirect(struct net *net,
					    const struct flowi6 *fl6,
					    const struct in6_addr *gateway)
	int flags = RT6_LOOKUP_F_HAS_SADDR;
	struct ip6rd_flowi rdfl;

	rdfl.gateway = *gateway;

	return fib6_rule_lookup(net, &rdfl.fl6,
				flags, __ip6_route_redirect);
void ip6_redirect(struct sk_buff *skb, struct net *net, int oif, u32 mark)
	const struct ipv6hdr *iph = (struct ipv6hdr *) skb->data;
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = iph->daddr;
	fl6.saddr = iph->saddr;
	fl6.flowlabel = ip6_flowinfo(iph);

	dst = ip6_route_redirect(net, &fl6, &ipv6_hdr(skb)->saddr);
	rt6_do_redirect(dst, NULL, skb);

EXPORT_SYMBOL_GPL(ip6_redirect);

void ip6_redirect_no_header(struct sk_buff *skb, struct net *net, int oif,
			    u32 mark)
	const struct ipv6hdr *iph = ipv6_hdr(skb);
	const struct rd_msg *msg = (struct rd_msg *)icmp6_hdr(skb);
	struct dst_entry *dst;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_iif = LOOPBACK_IFINDEX;
	fl6.flowi6_oif = oif;
	fl6.flowi6_mark = mark;
	fl6.daddr = msg->dest;
	fl6.saddr = iph->daddr;

	dst = ip6_route_redirect(net, &fl6, &iph->saddr);
	rt6_do_redirect(dst, NULL, skb);

void ip6_sk_redirect(struct sk_buff *skb, struct sock *sk)
	ip6_redirect(skb, sock_net(sk), sk->sk_bound_dev_if, sk->sk_mark);

EXPORT_SYMBOL_GPL(ip6_sk_redirect);
static unsigned int ip6_default_advmss(const struct dst_entry *dst)
	struct net_device *dev = dst->dev;
	unsigned int mtu = dst_mtu(dst);
	struct net *net = dev_net(dev);

	mtu -= sizeof(struct ipv6hdr) + sizeof(struct tcphdr);

	if (mtu < net->ipv6.sysctl.ip6_rt_min_advmss)
		mtu = net->ipv6.sysctl.ip6_rt_min_advmss;

	/*
	 * Maximal non-jumbo IPv6 payload is IPV6_MAXPLEN and
	 * corresponding MSS is IPV6_MAXPLEN - tcp_header_size.
	 * IPV6_MAXPLEN is also valid and means: "any MSS,
	 * rely only on pmtu discovery"
	 */
	if (mtu > IPV6_MAXPLEN - sizeof(struct tcphdr))

static unsigned int ip6_mtu(const struct dst_entry *dst)
	const struct rt6_info *rt = (const struct rt6_info *)dst;
	unsigned int mtu = rt->rt6i_pmtu;
	struct inet6_dev *idev;

	mtu = dst_metric_raw(dst, RTAX_MTU);

	idev = __in6_dev_get(dst->dev);

		mtu = idev->cnf.mtu6;

	return min_t(unsigned int, mtu, IP6_MAX_MTU);
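/* Hedged sketch of the precedence implemented above: a learned path MTU
 * (rt6i_pmtu) wins, then an explicit RTAX_MTU metric, then the device's
 * IPv6 MTU, clamped to IP6_MAX_MTU.  example_effective_mtu() restates
 * the arithmetic only.
 */
#if 0
static unsigned int example_effective_mtu(unsigned int pmtu,
					  unsigned int metric_mtu,
					  unsigned int dev_mtu6)
{
	unsigned int mtu = pmtu ?: (metric_mtu ?: dev_mtu6);

	return min_t(unsigned int, mtu, IP6_MAX_MTU);
}
#endif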
static struct dst_entry *icmp6_dst_gc_list;
static DEFINE_SPINLOCK(icmp6_dst_lock);

struct dst_entry *icmp6_dst_alloc(struct net_device *dev,
				  struct flowi6 *fl6)
	struct dst_entry *dst;
	struct rt6_info *rt;
	struct inet6_dev *idev = in6_dev_get(dev);
	struct net *net = dev_net(dev);

	if (unlikely(!idev))
		return ERR_PTR(-ENODEV);

	rt = ip6_dst_alloc(net, dev, 0);
	if (unlikely(!rt)) {

		dst = ERR_PTR(-ENOMEM);

	rt->dst.flags |= DST_HOST;
	rt->dst.output = ip6_output;
	atomic_set(&rt->dst.__refcnt, 1);
	rt->rt6i_gateway = fl6->daddr;
	rt->rt6i_dst.addr = fl6->daddr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_idev = idev;
	dst_metric_set(&rt->dst, RTAX_HOPLIMIT, 0);

	spin_lock_bh(&icmp6_dst_lock);
	rt->dst.next = icmp6_dst_gc_list;
	icmp6_dst_gc_list = &rt->dst;
	spin_unlock_bh(&icmp6_dst_lock);

	fib6_force_start_gc(net);

	dst = xfrm_lookup(net, &rt->dst, flowi6_to_flowi(fl6), NULL, 0);
int icmp6_dst_gc(void)
	struct dst_entry *dst, **pprev;

	spin_lock_bh(&icmp6_dst_lock);
	pprev = &icmp6_dst_gc_list;

	while ((dst = *pprev) != NULL) {
		if (!atomic_read(&dst->__refcnt)) {

	spin_unlock_bh(&icmp6_dst_lock);

static void icmp6_clean_all(int (*func)(struct rt6_info *rt, void *arg),
			    void *arg)
	struct dst_entry *dst, **pprev;

	spin_lock_bh(&icmp6_dst_lock);
	pprev = &icmp6_dst_gc_list;
	while ((dst = *pprev) != NULL) {
		struct rt6_info *rt = (struct rt6_info *) dst;
		if (func(rt, arg)) {

	spin_unlock_bh(&icmp6_dst_lock);
static int ip6_dst_gc(struct dst_ops *ops)
	struct net *net = container_of(ops, struct net, ipv6.ip6_dst_ops);
	int rt_min_interval = net->ipv6.sysctl.ip6_rt_gc_min_interval;
	int rt_max_size = net->ipv6.sysctl.ip6_rt_max_size;
	int rt_elasticity = net->ipv6.sysctl.ip6_rt_gc_elasticity;
	int rt_gc_timeout = net->ipv6.sysctl.ip6_rt_gc_timeout;
	unsigned long rt_last_gc = net->ipv6.ip6_rt_last_gc;

	entries = dst_entries_get_fast(ops);
	if (time_after(rt_last_gc + rt_min_interval, jiffies) &&
	    entries <= rt_max_size)

	net->ipv6.ip6_rt_gc_expire++;
	fib6_run_gc(net->ipv6.ip6_rt_gc_expire, net, true);
	entries = dst_entries_get_slow(ops);
	if (entries < ops->gc_thresh)
		net->ipv6.ip6_rt_gc_expire = rt_gc_timeout >> 1;

	net->ipv6.ip6_rt_gc_expire -= net->ipv6.ip6_rt_gc_expire >> rt_elasticity;
	return entries > rt_max_size;
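/* Hedged worked example of the backoff above, assuming the defaults
 * gc_elasticity == 9 and gc_timeout == 60*HZ: each call bumps
 * ip6_rt_gc_expire by one before fib6_run_gc(); if the table dropped
 * below gc_thresh the value is reset to gc_timeout >> 1 (30*HZ), and it
 * then decays unconditionally by 1/512th (expire -= expire >> 9), so
 * sustained pressure slowly shortens the effective GC timeout.
 * example_gc_backoff() restates that arithmetic only.
 */
#if 0
static unsigned long example_gc_backoff(unsigned long expire, bool shrank)
{
	if (shrank)
		expire = (60 * HZ) >> 1;	/* reset to gc_timeout / 2 */
	return expire - (expire >> 9);		/* unconditional decay */
}
#endif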
static int ip6_convert_metrics(struct mx6_config *mxc,
			       const struct fib6_config *cfg)

	mp = kzalloc(sizeof(u32) * RTAX_MAX, GFP_KERNEL);

	nla_for_each_attr(nla, cfg->fc_mx, cfg->fc_mx_len, remaining) {
		int type = nla_type(nla);

		if (unlikely(type > RTAX_MAX))

		if (type == RTAX_CC_ALGO) {
			char tmp[TCP_CA_NAME_MAX];

			nla_strlcpy(tmp, nla, sizeof(tmp));
			val = tcp_ca_get_key_by_name(tmp);
			if (val == TCP_CA_UNSPEC)

			val = nla_get_u32(nla);

		__set_bit(type - 1, mxc->mx_valid);
int ip6_route_add(struct fib6_config *cfg)

	struct net *net = cfg->fc_nlinfo.nl_net;
	struct rt6_info *rt = NULL;
	struct net_device *dev = NULL;
	struct inet6_dev *idev = NULL;
	struct fib6_table *table;
	struct mx6_config mxc = { .mx = NULL, };

	if (cfg->fc_dst_len > 128 || cfg->fc_src_len > 128)

#ifndef CONFIG_IPV6_SUBTREES
	if (cfg->fc_src_len)

	if (cfg->fc_ifindex) {

		dev = dev_get_by_index(net, cfg->fc_ifindex);

		idev = in6_dev_get(dev);

	if (cfg->fc_metric == 0)
		cfg->fc_metric = IP6_RT_PRIO_USER;

	if (cfg->fc_nlinfo.nlh &&
	    !(cfg->fc_nlinfo.nlh->nlmsg_flags & NLM_F_CREATE)) {
		table = fib6_get_table(net, cfg->fc_table);

			pr_warn("NLM_F_CREATE should be specified when creating new route\n");
			table = fib6_new_table(net, cfg->fc_table);

		table = fib6_new_table(net, cfg->fc_table);

	rt = ip6_dst_alloc(net, NULL,
			   (cfg->fc_flags & RTF_ADDRCONF) ? 0 : DST_NOCOUNT);

	if (cfg->fc_flags & RTF_EXPIRES)
		rt6_set_expires(rt, jiffies +
				clock_t_to_jiffies(cfg->fc_expires));

		rt6_clean_expires(rt);

	if (cfg->fc_protocol == RTPROT_UNSPEC)
		cfg->fc_protocol = RTPROT_BOOT;
	rt->rt6i_protocol = cfg->fc_protocol;

	addr_type = ipv6_addr_type(&cfg->fc_dst);

	if (addr_type & IPV6_ADDR_MULTICAST)
		rt->dst.input = ip6_mc_input;
	else if (cfg->fc_flags & RTF_LOCAL)
		rt->dst.input = ip6_input;

		rt->dst.input = ip6_forward;

	rt->dst.output = ip6_output;

	if (cfg->fc_encap) {
		struct lwtunnel_state *lwtstate;

		err = lwtunnel_build_state(dev, cfg->fc_encap_type,
					   cfg->fc_encap, AF_INET6, cfg,
					   &lwtstate);

		rt->dst.lwtstate = lwtstate_get(lwtstate);
		if (lwtunnel_output_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_output = rt->dst.output;
			rt->dst.output = lwtunnel_output;
		}
		if (lwtunnel_input_redirect(rt->dst.lwtstate)) {
			rt->dst.lwtstate->orig_input = rt->dst.input;
			rt->dst.input = lwtunnel_input;
		}

	ipv6_addr_prefix(&rt->rt6i_dst.addr, &cfg->fc_dst, cfg->fc_dst_len);
	rt->rt6i_dst.plen = cfg->fc_dst_len;
	if (rt->rt6i_dst.plen == 128)
		rt->dst.flags |= DST_HOST;

#ifdef CONFIG_IPV6_SUBTREES
	ipv6_addr_prefix(&rt->rt6i_src.addr, &cfg->fc_src, cfg->fc_src_len);
	rt->rt6i_src.plen = cfg->fc_src_len;
#endif

	rt->rt6i_metric = cfg->fc_metric;
	/* We cannot add true routes via loopback here;
	 * they would result in kernel looping. Promote them to reject routes.
	 */
	if ((cfg->fc_flags & RTF_REJECT) ||
	    (dev && (dev->flags & IFF_LOOPBACK) &&
	     !(addr_type & IPV6_ADDR_LOOPBACK) &&
	     !(cfg->fc_flags & RTF_LOCAL))) {
		/* hold loopback dev/idev if we haven't done so. */
		if (dev != net->loopback_dev) {

			dev = net->loopback_dev;

			idev = in6_dev_get(dev);

		rt->rt6i_flags = RTF_REJECT|RTF_NONEXTHOP;
		switch (cfg->fc_type) {

			rt->dst.error = -EINVAL;
			rt->dst.output = dst_discard_sk;
			rt->dst.input = dst_discard;

			rt->dst.error = -EACCES;
			rt->dst.output = ip6_pkt_prohibit_out;
			rt->dst.input = ip6_pkt_prohibit;

			rt->dst.error = (cfg->fc_type == RTN_THROW) ? -EAGAIN
					: -ENETUNREACH;
			rt->dst.output = ip6_pkt_discard_out;
			rt->dst.input = ip6_pkt_discard;

	if (cfg->fc_flags & RTF_GATEWAY) {
		const struct in6_addr *gw_addr;

		gw_addr = &cfg->fc_gateway;
		gwa_type = ipv6_addr_type(gw_addr);

		/* if gw_addr is local we will fail to detect this in case
		 * address is still TENTATIVE (DAD in progress). rt6_lookup()
		 * will return already-added prefix route via interface that
		 * prefix route was assigned to, which might be non-loopback.
		 */

		if (ipv6_chk_addr_and_flags(net, gw_addr,
					    gwa_type & IPV6_ADDR_LINKLOCAL ?
					    dev : NULL, 0, 0))

		rt->rt6i_gateway = *gw_addr;

		if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
			struct rt6_info *grt;
			/* IPv6 strictly forbids using non-link-local
			 * addresses as nexthop addresses.
			 * Otherwise, the router will not be able to send redirects.
			 * That is usually good, but in some (rare!) circumstances
			 * (SIT, PtP, NBMA NOARP links) it is handy to allow
			 * some exceptions. --ANK
			 */
			if (!(gwa_type & IPV6_ADDR_UNICAST))

			grt = rt6_lookup(net, gw_addr, NULL, cfg->fc_ifindex, 1);

			err = -EHOSTUNREACH;

			if (dev != grt->dst.dev) {

			idev = grt->rt6i_idev;

				in6_dev_hold(grt->rt6i_idev);

			if (!(grt->rt6i_flags & RTF_GATEWAY))

		if (!dev || (dev->flags & IFF_LOOPBACK))

	if (!ipv6_addr_any(&cfg->fc_prefsrc)) {
		if (!ipv6_chk_addr(net, &cfg->fc_prefsrc, dev, 0)) {

		rt->rt6i_prefsrc.addr = cfg->fc_prefsrc;
		rt->rt6i_prefsrc.plen = 128;

		rt->rt6i_prefsrc.plen = 0;

	rt->rt6i_flags = cfg->fc_flags;

	rt->rt6i_idev = idev;
	rt->rt6i_table = table;

	cfg->fc_nlinfo.nl_net = dev_net(dev);

	err = ip6_convert_metrics(&mxc, cfg);

	err = __ip6_ins_rt(rt, &cfg->fc_nlinfo, &mxc);
static int __ip6_del_rt(struct rt6_info *rt, struct nl_info *info)

	struct fib6_table *table;
	struct net *net = dev_net(rt->dst.dev);

	if (rt == net->ipv6.ip6_null_entry) {

	table = rt->rt6i_table;
	write_lock_bh(&table->tb6_lock);
	err = fib6_del(rt, info);
	write_unlock_bh(&table->tb6_lock);

int ip6_del_rt(struct rt6_info *rt)
	struct nl_info info = {
		.nl_net = dev_net(rt->dst.dev),
	};
	return __ip6_del_rt(rt, &info);
static int ip6_route_del(struct fib6_config *cfg)
	struct fib6_table *table;
	struct fib6_node *fn;
	struct rt6_info *rt;

	table = fib6_get_table(cfg->fc_nlinfo.nl_net, cfg->fc_table);

	read_lock_bh(&table->tb6_lock);

	fn = fib6_locate(&table->tb6_root,
			 &cfg->fc_dst, cfg->fc_dst_len,
			 &cfg->fc_src, cfg->fc_src_len);

		for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
			if ((rt->rt6i_flags & RTF_CACHE) &&
			    !(cfg->fc_flags & RTF_CACHE))

			if (cfg->fc_ifindex &&

			    rt->dst.dev->ifindex != cfg->fc_ifindex))

			if (cfg->fc_flags & RTF_GATEWAY &&
			    !ipv6_addr_equal(&cfg->fc_gateway, &rt->rt6i_gateway))

			if (cfg->fc_metric && cfg->fc_metric != rt->rt6i_metric)

			read_unlock_bh(&table->tb6_lock);

			return __ip6_del_rt(rt, &cfg->fc_nlinfo);

	read_unlock_bh(&table->tb6_lock);
static void rt6_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
	struct net *net = dev_net(skb->dev);
	struct netevent_redirect netevent;
	struct rt6_info *rt, *nrt = NULL;
	struct ndisc_options ndopts;
	struct inet6_dev *in6_dev;
	struct neighbour *neigh;

	int optlen, on_link;

	optlen = skb_tail_pointer(skb) - skb_transport_header(skb);
	optlen -= sizeof(*msg);

		net_dbg_ratelimited("rt6_do_redirect: packet too short\n");

	msg = (struct rd_msg *)icmp6_hdr(skb);

	if (ipv6_addr_is_multicast(&msg->dest)) {
		net_dbg_ratelimited("rt6_do_redirect: destination address is multicast\n");

	if (ipv6_addr_equal(&msg->dest, &msg->target)) {

	} else if (ipv6_addr_type(&msg->target) !=
		   (IPV6_ADDR_UNICAST|IPV6_ADDR_LINKLOCAL)) {
		net_dbg_ratelimited("rt6_do_redirect: target address is not link-local unicast\n");

	in6_dev = __in6_dev_get(skb->dev);

	if (in6_dev->cnf.forwarding || !in6_dev->cnf.accept_redirects)

	/*
	 * The IP source address of the Redirect MUST be the same as the current
	 * first-hop router for the specified ICMP Destination Address.
	 */

	if (!ndisc_parse_options(msg->opt, optlen, &ndopts)) {
		net_dbg_ratelimited("rt6_redirect: invalid ND options\n");

	if (ndopts.nd_opts_tgt_lladdr) {
		lladdr = ndisc_opt_addr_data(ndopts.nd_opts_tgt_lladdr,
					     skb->dev);

			net_dbg_ratelimited("rt6_redirect: invalid link-layer address length\n");

	rt = (struct rt6_info *) dst;
	if (rt == net->ipv6.ip6_null_entry) {
		net_dbg_ratelimited("rt6_redirect: source isn't a valid nexthop for redirect target\n");
	/* Redirect received -> path was valid.
	 * Look, redirects are sent only in response to data packets,
	 * so this nexthop apparently is reachable. --ANK
	 */
	dst_confirm(&rt->dst);

	neigh = __neigh_lookup(&nd_tbl, &msg->target, skb->dev, 1);

	/*
	 *	We have finally decided to accept it.
	 */

	neigh_update(neigh, lladdr, NUD_STALE,
		     NEIGH_UPDATE_F_WEAK_OVERRIDE|
		     NEIGH_UPDATE_F_OVERRIDE|
		     (on_link ? 0 : (NEIGH_UPDATE_F_OVERRIDE_ISROUTER|
				     NEIGH_UPDATE_F_ISROUTER))

	nrt = ip6_rt_cache_alloc(rt, &msg->dest, NULL);

	nrt->rt6i_flags = RTF_GATEWAY|RTF_UP|RTF_DYNAMIC|RTF_CACHE;

		nrt->rt6i_flags &= ~RTF_GATEWAY;

	nrt->rt6i_gateway = *(struct in6_addr *)neigh->primary_key;

	if (ip6_ins_rt(nrt))

	netevent.old = &rt->dst;
	netevent.new = &nrt->dst;
	netevent.daddr = &msg->dest;
	netevent.neigh = neigh;
	call_netevent_notifiers(NETEVENT_REDIRECT, &netevent);

	if (rt->rt6i_flags & RTF_CACHE) {
		rt = (struct rt6_info *) dst_clone(&rt->dst);

	neigh_release(neigh);
/*
 *	Misc support functions
 */

static void rt6_set_from(struct rt6_info *rt, struct rt6_info *from)
	BUG_ON(from->dst.from);

	rt->rt6i_flags &= ~RTF_EXPIRES;
	dst_hold(&from->dst);
	rt->dst.from = &from->dst;
	dst_init_metrics(&rt->dst, dst_metrics_ptr(&from->dst), true);

static void ip6_rt_copy_init(struct rt6_info *rt, struct rt6_info *ort)
	rt->dst.input = ort->dst.input;
	rt->dst.output = ort->dst.output;
	rt->rt6i_dst = ort->rt6i_dst;
	rt->dst.error = ort->dst.error;
	rt->rt6i_idev = ort->rt6i_idev;

		in6_dev_hold(rt->rt6i_idev);
	rt->dst.lastuse = jiffies;
	rt->rt6i_gateway = ort->rt6i_gateway;
	rt->rt6i_flags = ort->rt6i_flags;
	rt6_set_from(rt, ort);
	rt->rt6i_metric = ort->rt6i_metric;
#ifdef CONFIG_IPV6_SUBTREES
	rt->rt6i_src = ort->rt6i_src;
#endif
	rt->rt6i_prefsrc = ort->rt6i_prefsrc;
	rt->rt6i_table = ort->rt6i_table;
	rt->dst.lwtstate = lwtstate_get(ort->dst.lwtstate);
#ifdef CONFIG_IPV6_ROUTE_INFO
static struct rt6_info *rt6_get_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr, int ifindex)
	struct fib6_node *fn;
	struct rt6_info *rt = NULL;
	struct fib6_table *table;

	table = fib6_get_table(net, RT6_TABLE_INFO);

	read_lock_bh(&table->tb6_lock);
	fn = fib6_locate(&table->tb6_root, prefix, prefixlen, NULL, 0);

	for (rt = fn->leaf; rt; rt = rt->dst.rt6_next) {
		if (rt->dst.dev->ifindex != ifindex)

		if ((rt->rt6i_flags & (RTF_ROUTEINFO|RTF_GATEWAY)) != (RTF_ROUTEINFO|RTF_GATEWAY))

		if (!ipv6_addr_equal(&rt->rt6i_gateway, gwaddr))

	read_unlock_bh(&table->tb6_lock);

static struct rt6_info *rt6_add_route_info(struct net *net,
					   const struct in6_addr *prefix, int prefixlen,
					   const struct in6_addr *gwaddr, int ifindex,
					   unsigned int pref)
	struct fib6_config cfg = {
		.fc_table = RT6_TABLE_INFO,
		.fc_metric = IP6_RT_PRIO_USER,
		.fc_ifindex = ifindex,
		.fc_dst_len = prefixlen,
		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_ROUTEINFO |
			    RTF_UP | RTF_PREF(pref),
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = net,
	};

	cfg.fc_dst = *prefix;
	cfg.fc_gateway = *gwaddr;

	/* We should treat it as a default route if prefix length is 0. */

		cfg.fc_flags |= RTF_DEFAULT;

	ip6_route_add(&cfg);

	return rt6_get_route_info(net, prefix, prefixlen, gwaddr, ifindex);
struct rt6_info *rt6_get_dflt_router(const struct in6_addr *addr, struct net_device *dev)
	struct rt6_info *rt;
	struct fib6_table *table;

	table = fib6_get_table(dev_net(dev), RT6_TABLE_DFLT);

	read_lock_bh(&table->tb6_lock);
	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
		if (dev == rt->dst.dev &&
		    ((rt->rt6i_flags & (RTF_ADDRCONF | RTF_DEFAULT)) == (RTF_ADDRCONF | RTF_DEFAULT)) &&
		    ipv6_addr_equal(&rt->rt6i_gateway, addr))

	read_unlock_bh(&table->tb6_lock);

struct rt6_info *rt6_add_dflt_router(const struct in6_addr *gwaddr,
				     struct net_device *dev,
				     unsigned int pref)
	struct fib6_config cfg = {
		.fc_table = RT6_TABLE_DFLT,
		.fc_metric = IP6_RT_PRIO_USER,
		.fc_ifindex = dev->ifindex,
		.fc_flags = RTF_GATEWAY | RTF_ADDRCONF | RTF_DEFAULT |
			    RTF_UP | RTF_EXPIRES | RTF_PREF(pref),
		.fc_nlinfo.portid = 0,
		.fc_nlinfo.nlh = NULL,
		.fc_nlinfo.nl_net = dev_net(dev),
	};

	cfg.fc_gateway = *gwaddr;

	ip6_route_add(&cfg);

	return rt6_get_dflt_router(gwaddr, dev);
void rt6_purge_dflt_routers(struct net *net)
	struct rt6_info *rt;
	struct fib6_table *table;

	/* NOTE: Keep consistent with rt6_get_dflt_router */
	table = fib6_get_table(net, RT6_TABLE_DFLT);

	read_lock_bh(&table->tb6_lock);
	for (rt = table->tb6_root.leaf; rt; rt = rt->dst.rt6_next) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ADDRCONF) &&
		    (!rt->rt6i_idev || rt->rt6i_idev->cnf.accept_ra != 2)) {

			read_unlock_bh(&table->tb6_lock);

	read_unlock_bh(&table->tb6_lock);
static void rtmsg_to_fib6_config(struct net *net,
				 struct in6_rtmsg *rtmsg,
				 struct fib6_config *cfg)
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = RT6_TABLE_MAIN;
	cfg->fc_ifindex = rtmsg->rtmsg_ifindex;
	cfg->fc_metric = rtmsg->rtmsg_metric;
	cfg->fc_expires = rtmsg->rtmsg_info;
	cfg->fc_dst_len = rtmsg->rtmsg_dst_len;
	cfg->fc_src_len = rtmsg->rtmsg_src_len;
	cfg->fc_flags = rtmsg->rtmsg_flags;

	cfg->fc_nlinfo.nl_net = net;

	cfg->fc_dst = rtmsg->rtmsg_dst;
	cfg->fc_src = rtmsg->rtmsg_src;
	cfg->fc_gateway = rtmsg->rtmsg_gateway;
int ipv6_route_ioctl(struct net *net, unsigned int cmd, void __user *arg)
	struct fib6_config cfg;
	struct in6_rtmsg rtmsg;

	case SIOCADDRT:		/* Add a route */
	case SIOCDELRT:		/* Delete a route */
		if (!ns_capable(net->user_ns, CAP_NET_ADMIN))

		err = copy_from_user(&rtmsg, arg,
				     sizeof(struct in6_rtmsg));

		rtmsg_to_fib6_config(net, &rtmsg, &cfg);

			err = ip6_route_add(&cfg);

			err = ip6_route_del(&cfg);
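/* Hedged sketch (userspace, not kernel code): exercising the ioctl
 * handled above.  Adds a default route via fe80::1 on eth0 with
 * SIOCADDRT and struct in6_rtmsg; needs CAP_NET_ADMIN and an AF_INET6
 * socket.  Exact headers may vary by libc; error handling elided.
 */
#if 0
#include <arpa/inet.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <linux/route.h>	/* RTF_UP, RTF_GATEWAY */
#include <linux/ipv6_route.h>	/* struct in6_rtmsg */

static int example_add_default_route(int fd)
{
	struct in6_rtmsg rtmsg = {
		.rtmsg_flags = RTF_UP | RTF_GATEWAY,
		.rtmsg_metric = 1,
		.rtmsg_ifindex = if_nametoindex("eth0"),
	};

	inet_pton(AF_INET6, "fe80::1", &rtmsg.rtmsg_gateway);
	return ioctl(fd, SIOCADDRT, &rtmsg);
}
#endif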
/*
 *	Drop the packet on the floor
 */

static int ip6_pkt_drop(struct sk_buff *skb, u8 code, int ipstats_mib_noroutes)

	struct dst_entry *dst = skb_dst(skb);
	switch (ipstats_mib_noroutes) {
	case IPSTATS_MIB_INNOROUTES:
		type = ipv6_addr_type(&ipv6_hdr(skb)->daddr);
		if (type == IPV6_ADDR_ANY) {
			IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
				      IPSTATS_MIB_INADDRERRORS);

	case IPSTATS_MIB_OUTNOROUTES:
		IP6_INC_STATS(dev_net(dst->dev), ip6_dst_idev(dst),
			      ipstats_mib_noroutes);

	icmpv6_send(skb, ICMPV6_DEST_UNREACH, code, 0);

static int ip6_pkt_discard(struct sk_buff *skb)
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_INNOROUTES);

static int ip6_pkt_discard_out(struct sock *sk, struct sk_buff *skb)
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_NOROUTE, IPSTATS_MIB_OUTNOROUTES);

static int ip6_pkt_prohibit(struct sk_buff *skb)
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_INNOROUTES);

static int ip6_pkt_prohibit_out(struct sock *sk, struct sk_buff *skb)
	skb->dev = skb_dst(skb)->dev;
	return ip6_pkt_drop(skb, ICMPV6_ADM_PROHIBITED, IPSTATS_MIB_OUTNOROUTES);
/*
 *	Allocate a dst for local (unicast / anycast) address.
 */

struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev,
				    const struct in6_addr *addr,
				    bool anycast)
	struct net *net = dev_net(idev->dev);
	struct rt6_info *rt = ip6_dst_alloc(net, net->loopback_dev,
					    DST_NOCOUNT);

		return ERR_PTR(-ENOMEM);

	rt->dst.flags |= DST_HOST;
	rt->dst.input = ip6_input;
	rt->dst.output = ip6_output;
	rt->rt6i_idev = idev;

	rt->rt6i_flags = RTF_UP | RTF_NONEXTHOP;

		rt->rt6i_flags |= RTF_ANYCAST;

		rt->rt6i_flags |= RTF_LOCAL;

	rt->rt6i_gateway = *addr;
	rt->rt6i_dst.addr = *addr;
	rt->rt6i_dst.plen = 128;
	rt->rt6i_table = fib6_get_table(net, RT6_TABLE_LOCAL);

	atomic_set(&rt->dst.__refcnt, 1);
int ip6_route_get_saddr(struct net *net,
			struct rt6_info *rt,
			const struct in6_addr *daddr,
			unsigned int prefs,
			struct in6_addr *saddr)
	struct inet6_dev *idev =
		rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;

	if (rt && rt->rt6i_prefsrc.plen)
		*saddr = rt->rt6i_prefsrc.addr;

		err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
					 daddr, prefs, saddr);
/* remove deleted ip from prefsrc entries */
struct arg_dev_net_ip {
	struct net_device *dev;
	struct net *net;
	struct in6_addr *addr;
};

static int fib6_remove_prefsrc(struct rt6_info *rt, void *arg)
	struct net_device *dev = ((struct arg_dev_net_ip *)arg)->dev;
	struct net *net = ((struct arg_dev_net_ip *)arg)->net;
	struct in6_addr *addr = ((struct arg_dev_net_ip *)arg)->addr;

	if (((void *)rt->dst.dev == dev || !dev) &&
	    rt != net->ipv6.ip6_null_entry &&
	    ipv6_addr_equal(addr, &rt->rt6i_prefsrc.addr)) {
		/* remove prefsrc entry */
		rt->rt6i_prefsrc.plen = 0;
	}

void rt6_remove_prefsrc(struct inet6_ifaddr *ifp)
	struct net *net = dev_net(ifp->idev->dev);
	struct arg_dev_net_ip adni = {
		.dev = ifp->idev->dev,

	};
	fib6_clean_all(net, fib6_remove_prefsrc, &adni);
#define RTF_RA_ROUTER		(RTF_ADDRCONF | RTF_DEFAULT | RTF_GATEWAY)
#define RTF_CACHE_GATEWAY	(RTF_GATEWAY | RTF_CACHE)

/* Remove routers and update dst entries when a gateway turns into a host. */
static int fib6_clean_tohost(struct rt6_info *rt, void *arg)
	struct in6_addr *gateway = (struct in6_addr *)arg;

	if ((((rt->rt6i_flags & RTF_RA_ROUTER) == RTF_RA_ROUTER) ||
	     ((rt->rt6i_flags & RTF_CACHE_GATEWAY) == RTF_CACHE_GATEWAY)) &&
	    ipv6_addr_equal(gateway, &rt->rt6i_gateway)) {

void rt6_clean_tohost(struct net *net, struct in6_addr *gateway)
	fib6_clean_all(net, fib6_clean_tohost, gateway);
struct arg_dev_net {
	struct net_device *dev;
	struct net *net;
};

static int fib6_ifdown(struct rt6_info *rt, void *arg)
	const struct arg_dev_net *adn = arg;
	const struct net_device *dev = adn->dev;

	if ((rt->dst.dev == dev || !dev) &&
	    rt != adn->net->ipv6.ip6_null_entry)

void rt6_ifdown(struct net *net, struct net_device *dev)
	struct arg_dev_net adn = {

	};

	fib6_clean_all(net, fib6_ifdown, &adn);
	icmp6_clean_all(fib6_ifdown, &adn);
	rt6_uncached_list_flush_dev(net, dev);
struct rt6_mtu_change_arg {
	struct net_device *dev;
	unsigned int mtu;
};

static int rt6_mtu_change_route(struct rt6_info *rt, void *p_arg)
	struct rt6_mtu_change_arg *arg = (struct rt6_mtu_change_arg *) p_arg;
	struct inet6_dev *idev;
	/* In IPv6, pmtu discovery is not optional,
	 * so the RTAX_MTU lock cannot disable it.
	 * We still use this lock to block changes
	 * caused by addrconf/ndisc.
	 */

	idev = __in6_dev_get(arg->dev);

	/* For an administrative MTU increase, there is no way to discover
	 * an IPv6 PMTU increase, so the PMTU increase should be updated here.
	 * Since RFC 1981 doesn't include administrative MTU increase,
	 * updating the PMTU on increase is a MUST (i.e. jumbo frames).
	 *
	 * If the new MTU is less than the route PMTU, this new MTU will be the
	 * lowest MTU in the path; update the route PMTU to reflect the PMTU
	 * decrease. If the new MTU is greater than the route PMTU, and the
	 * old MTU is the lowest MTU in the path, update the route PMTU
	 * to reflect the increase. In this case, if the other nodes' MTU
	 * is also the lowest in the path, a TOO BIG message will lead to
	 * PMTU discovery.
	 */
2628 if (rt->dst.dev == arg->dev &&
2629 !dst_metric_locked(&rt->dst, RTAX_MTU)) {
2630 if (rt->rt6i_flags & RTF_CACHE) {
2631 /* For RTF_CACHE with rt6i_pmtu == 0
2632 * (i.e. a redirected route),
2633 * the metrics of its rt->dst.from has already
2636 if (rt->rt6i_pmtu && rt->rt6i_pmtu > arg->mtu)
2637 rt->rt6i_pmtu = arg->mtu;
2638 } else if (dst_mtu(&rt->dst) >= arg->mtu ||
2639 (dst_mtu(&rt->dst) < arg->mtu &&
2640 dst_mtu(&rt->dst) == idev->cnf.mtu6)) {
2641 dst_metric_set(&rt->dst, RTAX_MTU, arg->mtu);
2647 void rt6_mtu_change(struct net_device *dev, unsigned int mtu)
2649 struct rt6_mtu_change_arg arg = {
2654 fib6_clean_all(dev_net(dev), rt6_mtu_change_route, &arg);
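
/*
 * Illustrative sketch, not part of the original file: in the kernel
 * proper these teardown/update paths are driven from addrconf; a
 * hypothetical netdevice notifier wiring them up directly could look
 * like this.
 */
static int example_route_event(struct notifier_block *nb,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_UNREGISTER)
		rt6_ifdown(dev_net(dev), dev);	/* purge routes via dev */
	else if (event == NETDEV_CHANGEMTU)
		rt6_mtu_change(dev, dev->mtu);	/* re-clamp route MTUs */

	return NOTIFY_DONE;
}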
static const struct nla_policy rtm_ipv6_policy[RTA_MAX+1] = {
	[RTA_GATEWAY]		= { .len = sizeof(struct in6_addr) },
	[RTA_OIF]		= { .type = NLA_U32 },
	[RTA_IIF]		= { .type = NLA_U32 },
	[RTA_PRIORITY]		= { .type = NLA_U32 },
	[RTA_METRICS]		= { .type = NLA_NESTED },
	[RTA_MULTIPATH]		= { .len = sizeof(struct rtnexthop) },
	[RTA_PREF]		= { .type = NLA_U8 },
	[RTA_ENCAP_TYPE]	= { .type = NLA_U16 },
	[RTA_ENCAP]		= { .type = NLA_NESTED },
};
static int rtm_to_fib6_config(struct sk_buff *skb, struct nlmsghdr *nlh,
			      struct fib6_config *cfg)
{
	struct rtmsg *rtm;
	struct nlattr *tb[RTA_MAX+1];
	unsigned int pref;
	int err;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	rtm = nlmsg_data(nlh);
	memset(cfg, 0, sizeof(*cfg));

	cfg->fc_table = rtm->rtm_table;
	cfg->fc_dst_len = rtm->rtm_dst_len;
	cfg->fc_src_len = rtm->rtm_src_len;
	cfg->fc_flags = RTF_UP;
	cfg->fc_protocol = rtm->rtm_protocol;
	cfg->fc_type = rtm->rtm_type;

	if (rtm->rtm_type == RTN_UNREACHABLE ||
	    rtm->rtm_type == RTN_BLACKHOLE ||
	    rtm->rtm_type == RTN_PROHIBIT ||
	    rtm->rtm_type == RTN_THROW)
		cfg->fc_flags |= RTF_REJECT;

	if (rtm->rtm_type == RTN_LOCAL)
		cfg->fc_flags |= RTF_LOCAL;

	if (rtm->rtm_flags & RTM_F_CLONED)
		cfg->fc_flags |= RTF_CACHE;

	cfg->fc_nlinfo.portid = NETLINK_CB(skb).portid;
	cfg->fc_nlinfo.nlh = nlh;
	cfg->fc_nlinfo.nl_net = sock_net(skb->sk);

	if (tb[RTA_GATEWAY]) {
		cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
		cfg->fc_flags |= RTF_GATEWAY;
	}

	if (tb[RTA_DST]) {
		int plen = (rtm->rtm_dst_len + 7) >> 3;

		if (nla_len(tb[RTA_DST]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_dst, tb[RTA_DST], plen);
	}

	if (tb[RTA_SRC]) {
		int plen = (rtm->rtm_src_len + 7) >> 3;

		if (nla_len(tb[RTA_SRC]) < plen)
			goto errout;

		nla_memcpy(&cfg->fc_src, tb[RTA_SRC], plen);
	}

	if (tb[RTA_PREFSRC])
		cfg->fc_prefsrc = nla_get_in6_addr(tb[RTA_PREFSRC]);

	if (tb[RTA_OIF])
		cfg->fc_ifindex = nla_get_u32(tb[RTA_OIF]);

	if (tb[RTA_PRIORITY])
		cfg->fc_metric = nla_get_u32(tb[RTA_PRIORITY]);

	if (tb[RTA_METRICS]) {
		cfg->fc_mx = nla_data(tb[RTA_METRICS]);
		cfg->fc_mx_len = nla_len(tb[RTA_METRICS]);
	}

	if (tb[RTA_TABLE])
		cfg->fc_table = nla_get_u32(tb[RTA_TABLE]);

	if (tb[RTA_MULTIPATH]) {
		cfg->fc_mp = nla_data(tb[RTA_MULTIPATH]);
		cfg->fc_mp_len = nla_len(tb[RTA_MULTIPATH]);
	}

	if (tb[RTA_PREF]) {
		pref = nla_get_u8(tb[RTA_PREF]);
		if (pref != ICMPV6_ROUTER_PREF_LOW &&
		    pref != ICMPV6_ROUTER_PREF_HIGH)
			pref = ICMPV6_ROUTER_PREF_MEDIUM;
		cfg->fc_flags |= RTF_PREF(pref);
	}

	if (tb[RTA_ENCAP])
		cfg->fc_encap = tb[RTA_ENCAP];

	if (tb[RTA_ENCAP_TYPE])
		cfg->fc_encap_type = nla_get_u16(tb[RTA_ENCAP_TYPE]);

	err = 0;
errout:
	return err;
}
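
/*
 * Illustrative sketch, not part of the original file: a kernel-internal
 * caller can skip netlink entirely and hand ip6_route_add() a
 * fib6_config directly. The prefix 2001:db8::/32, the metric and the
 * blackhole type below are made-up demonstration values.
 */
static int example_add_blackhole(struct net *net)
{
	struct fib6_config cfg = {
		.fc_table = RT6_TABLE_MAIN,
		.fc_metric = 1024,
		.fc_dst_len = 32,
		.fc_flags = RTF_UP | RTF_REJECT,
		.fc_type = RTN_BLACKHOLE,
		.fc_nlinfo.nl_net = net,
	};

	/* 2001:db8::/32, the documentation prefix */
	cfg.fc_dst.s6_addr[0] = 0x20;
	cfg.fc_dst.s6_addr[1] = 0x01;
	cfg.fc_dst.s6_addr[2] = 0x0d;
	cfg.fc_dst.s6_addr[3] = 0xb8;

	return ip6_route_add(&cfg);
}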
static int ip6_route_multipath(struct fib6_config *cfg, int add)
{
	struct fib6_config r_cfg;
	struct rtnexthop *rtnh;
	int remaining;
	int attrlen;
	int err = 0, last_err = 0;

beginning:
	remaining = cfg->fc_mp_len;
	rtnh = (struct rtnexthop *)cfg->fc_mp;

	/* Parse a Multipath Entry */
	while (rtnh_ok(rtnh, remaining)) {
		memcpy(&r_cfg, cfg, sizeof(*cfg));
		if (rtnh->rtnh_ifindex)
			r_cfg.fc_ifindex = rtnh->rtnh_ifindex;

		attrlen = rtnh_attrlen(rtnh);
		if (attrlen > 0) {
			struct nlattr *nla, *attrs = rtnh_attrs(rtnh);

			nla = nla_find(attrs, attrlen, RTA_GATEWAY);
			if (nla) {
				r_cfg.fc_gateway = nla_get_in6_addr(nla);
				r_cfg.fc_flags |= RTF_GATEWAY;
			}
			r_cfg.fc_encap = nla_find(attrs, attrlen, RTA_ENCAP);
			nla = nla_find(attrs, attrlen, RTA_ENCAP_TYPE);
			if (nla)
				r_cfg.fc_encap_type = nla_get_u16(nla);
		}
		err = add ? ip6_route_add(&r_cfg) : ip6_route_del(&r_cfg);
		if (err) {
			last_err = err;
			/* If we are trying to remove a route, do not stop
			 * the loop when ip6_route_del() fails (because the
			 * nexthop is already gone); try to remove all
			 * nexthops anyway.
			 */
			if (add) {
				/* If an add fails, delete all nexthops that
				 * have already been added.
				 */
				add = 0;
				remaining = cfg->fc_mp_len - remaining;
				goto beginning;
			}
		}
		/* Because each route is added like a single route, we remove
		 * these flags after the first nexthop: if there is a
		 * collision, we have already failed to add the first
		 * nexthop: fib6_add_rt2node() has rejected it; when
		 * replacing, old nexthops have been replaced by the first
		 * new one, and the rest should be added to it.
		 */
		cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
						     NLM_F_REPLACE);
		rtnh = rtnh_next(rtnh, &remaining);
	}

	return last_err;
}
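
/*
 * Illustrative sketch, not part of the original file: RTA_MULTIPATH
 * carries a packed array of struct rtnexthop records, each optionally
 * followed by per-hop attributes; walking it uses the same rtnh_*
 * helpers as the loop above.
 */
static int example_count_nexthops(const struct fib6_config *cfg)
{
	struct rtnexthop *rtnh = (struct rtnexthop *)cfg->fc_mp;
	int remaining = cfg->fc_mp_len;
	int n = 0;

	while (rtnh_ok(rtnh, remaining)) {
		n++;
		rtnh = rtnh_next(rtnh, &remaining);
	}
	return n;
}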
static int inet6_rtm_delroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath(&cfg, 0);

	return ip6_route_del(&cfg);
}

static int inet6_rtm_newroute(struct sk_buff *skb, struct nlmsghdr *nlh)
{
	struct fib6_config cfg;
	int err;

	err = rtm_to_fib6_config(skb, nlh, &cfg);
	if (err < 0)
		return err;

	if (cfg.fc_mp)
		return ip6_route_multipath(&cfg, 1);

	return ip6_route_add(&cfg);
}
static inline size_t rt6_nlmsg_size(struct rt6_info *rt)
{
	return NLMSG_ALIGN(sizeof(struct rtmsg))
	       + nla_total_size(16) /* RTA_SRC */
	       + nla_total_size(16) /* RTA_DST */
	       + nla_total_size(16) /* RTA_GATEWAY */
	       + nla_total_size(16) /* RTA_PREFSRC */
	       + nla_total_size(4) /* RTA_TABLE */
	       + nla_total_size(4) /* RTA_IIF */
	       + nla_total_size(4) /* RTA_OIF */
	       + nla_total_size(4) /* RTA_PRIORITY */
	       + RTAX_MAX * nla_total_size(4) /* RTA_METRICS */
	       + nla_total_size(sizeof(struct rta_cacheinfo))
	       + nla_total_size(TCP_CA_NAME_MAX) /* RTAX_CC_ALGO */
	       + nla_total_size(1) /* RTA_PREF */
	       + lwtunnel_get_encap_size(rt->dst.lwtstate);
}
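
/*
 * Illustrative sketch, not part of the original file: nla_total_size(n)
 * is NLA_ALIGN(NLA_HDRLEN + n), so each 16-byte address attribute above
 * accounts for 20 bytes and each u32 for 8. The estimate is therefore a
 * safe upper bound for nlmsg_new() in inet6_rt_notify() below.
 */
static void __maybe_unused example_nlmsg_size_sanity(void)
{
	WARN_ON(nla_total_size(16) != 20);	/* 4-byte nla header + 16 */
	WARN_ON(nla_total_size(4) != 8);	/* 4-byte nla header + 4 */
}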
static int rt6_fill_node(struct net *net,
			 struct sk_buff *skb, struct rt6_info *rt,
			 struct in6_addr *dst, struct in6_addr *src,
			 int iif, int type, u32 portid, u32 seq,
			 int prefix, int nowait, unsigned int flags)
{
	u32 metrics[RTAX_MAX];
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	long expires;
	u32 table;

	if (prefix) {	/* user wants prefix routes only */
		if (!(rt->rt6i_flags & RTF_PREFIX_RT)) {
			/* success since this is not a prefix route */
			return 1;
		}
	}

	nlh = nlmsg_put(skb, portid, seq, type, sizeof(*rtm), flags);
	if (!nlh)
		return -EMSGSIZE;

	rtm = nlmsg_data(nlh);
	rtm->rtm_family = AF_INET6;
	rtm->rtm_dst_len = rt->rt6i_dst.plen;
	rtm->rtm_src_len = rt->rt6i_src.plen;
	rtm->rtm_tos = 0;
	if (rt->rt6i_table)
		table = rt->rt6i_table->tb6_id;
	else
		table = RT6_TABLE_UNSPEC;
	rtm->rtm_table = table;
	if (nla_put_u32(skb, RTA_TABLE, table))
		goto nla_put_failure;
	if (rt->rt6i_flags & RTF_REJECT) {
		switch (rt->dst.error) {
		case -EINVAL:
			rtm->rtm_type = RTN_BLACKHOLE;
			break;
		case -EACCES:
			rtm->rtm_type = RTN_PROHIBIT;
			break;
		case -EAGAIN:
			rtm->rtm_type = RTN_THROW;
			break;
		default:
			rtm->rtm_type = RTN_UNREACHABLE;
			break;
		}
	}
	else if (rt->rt6i_flags & RTF_LOCAL)
		rtm->rtm_type = RTN_LOCAL;
	else if (rt->dst.dev && (rt->dst.dev->flags & IFF_LOOPBACK))
		rtm->rtm_type = RTN_LOCAL;
	else
		rtm->rtm_type = RTN_UNICAST;
	rtm->rtm_flags = 0;
	if (!netif_carrier_ok(rt->dst.dev)) {
		rtm->rtm_flags |= RTNH_F_LINKDOWN;
		if (rt->rt6i_idev->cnf.ignore_routes_with_linkdown)
			rtm->rtm_flags |= RTNH_F_DEAD;
	}
	rtm->rtm_scope = RT_SCOPE_UNIVERSE;
	rtm->rtm_protocol = rt->rt6i_protocol;
	if (rt->rt6i_flags & RTF_DYNAMIC)
		rtm->rtm_protocol = RTPROT_REDIRECT;
	else if (rt->rt6i_flags & RTF_ADDRCONF) {
		if (rt->rt6i_flags & (RTF_DEFAULT | RTF_ROUTEINFO))
			rtm->rtm_protocol = RTPROT_RA;
		else
			rtm->rtm_protocol = RTPROT_KERNEL;
	}

	if (rt->rt6i_flags & RTF_CACHE)
		rtm->rtm_flags |= RTM_F_CLONED;

	if (dst) {
		if (nla_put_in6_addr(skb, RTA_DST, dst))
			goto nla_put_failure;
		rtm->rtm_dst_len = 128;
	} else if (rtm->rtm_dst_len)
		if (nla_put_in6_addr(skb, RTA_DST, &rt->rt6i_dst.addr))
			goto nla_put_failure;
#ifdef CONFIG_IPV6_SUBTREES
	if (src) {
		if (nla_put_in6_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
		rtm->rtm_src_len = 128;
	} else if (rtm->rtm_src_len &&
		   nla_put_in6_addr(skb, RTA_SRC, &rt->rt6i_src.addr))
		goto nla_put_failure;
#endif
	if (iif) {
#ifdef CONFIG_IPV6_MROUTE
		if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
			int err = ip6mr_get_route(net, skb, rtm, nowait);
			if (err <= 0) {
				if (!nowait) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				} else {
					if (err == -EMSGSIZE)
						goto nla_put_failure;
				}
			}
		} else
#endif
			if (nla_put_u32(skb, RTA_IIF, iif))
				goto nla_put_failure;
	} else if (dst) {
		struct in6_addr saddr_buf;
		if (ip6_route_get_saddr(net, rt, dst, 0, &saddr_buf) == 0 &&
		    nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	if (rt->rt6i_prefsrc.plen) {
		struct in6_addr saddr_buf;
		saddr_buf = rt->rt6i_prefsrc.addr;
		if (nla_put_in6_addr(skb, RTA_PREFSRC, &saddr_buf))
			goto nla_put_failure;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt6i_pmtu)
		metrics[RTAX_MTU - 1] = rt->rt6i_pmtu;
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (rt->rt6i_flags & RTF_GATEWAY) {
		if (nla_put_in6_addr(skb, RTA_GATEWAY, &rt->rt6i_gateway) < 0)
			goto nla_put_failure;
	}

	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
	if (nla_put_u32(skb, RTA_PRIORITY, rt->rt6i_metric))
		goto nla_put_failure;

	expires = (rt->rt6i_flags & RTF_EXPIRES) ? rt->dst.expires - jiffies : 0;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, rt->dst.error) < 0)
		goto nla_put_failure;

	if (nla_put_u8(skb, RTA_PREF, IPV6_EXTRACT_PREF(rt->rt6i_flags)))
		goto nla_put_failure;

	lwtunnel_fill_encap(skb, rt->dst.lwtstate);

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
int rt6_dump_route(struct rt6_info *rt, void *p_arg)
{
	struct rt6_rtnl_dump_arg *arg = (struct rt6_rtnl_dump_arg *) p_arg;
	int prefix;

	if (nlmsg_len(arg->cb->nlh) >= sizeof(struct rtmsg)) {
		struct rtmsg *rtm = nlmsg_data(arg->cb->nlh);
		prefix = (rtm->rtm_flags & RTM_F_PREFIX) != 0;
	} else
		prefix = 0;

	return rt6_fill_node(arg->net,
		     arg->skb, rt, NULL, NULL, 0, RTM_NEWROUTE,
		     NETLINK_CB(arg->cb->skb).portid, arg->cb->nlh->nlmsg_seq,
		     prefix, 0, NLM_F_MULTI);
}
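
/*
 * Illustrative sketch, not part of the original file: rt6_dump_route()
 * is the per-route callback of the RTM_GETROUTE dump path, so a
 * hypothetical direct caller only has to package the netlink callback
 * state in a rt6_rtnl_dump_arg first.
 */
static int example_dump_one(struct net *net, struct sk_buff *skb,
			    struct netlink_callback *cb, struct rt6_info *rt)
{
	struct rt6_rtnl_dump_arg arg = {
		.skb = skb,
		.cb = cb,
		.net = net,
	};

	return rt6_dump_route(rt, &arg);
}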
static int inet6_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	struct rt6_info *rt;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi6 fl6;
	int err, iif = 0, oif = 0;

	err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv6_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	memset(&fl6, 0, sizeof(fl6));

	if (tb[RTA_SRC]) {
		if (nla_len(tb[RTA_SRC]) < sizeof(struct in6_addr))
			goto errout;
		fl6.saddr = *(struct in6_addr *)nla_data(tb[RTA_SRC]);
	}

	if (tb[RTA_DST]) {
		if (nla_len(tb[RTA_DST]) < sizeof(struct in6_addr))
			goto errout;
		fl6.daddr = *(struct in6_addr *)nla_data(tb[RTA_DST]);
	}

	if (tb[RTA_IIF])
		iif = nla_get_u32(tb[RTA_IIF]);
	if (tb[RTA_OIF])
		oif = nla_get_u32(tb[RTA_OIF]);
	if (tb[RTA_MARK])
		fl6.flowi6_mark = nla_get_u32(tb[RTA_MARK]);

	if (iif) {
		struct net_device *dev;
		int flags = 0;

		dev = __dev_get_by_index(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout;
		}
		fl6.flowi6_iif = iif;

		if (!ipv6_addr_any(&fl6.saddr))
			flags |= RT6_LOOKUP_F_HAS_SADDR;

		rt = (struct rt6_info *)ip6_route_input_lookup(net, dev, &fl6,
							       flags);
	} else {
		fl6.flowi6_oif = oif;
		rt = (struct rt6_info *)ip6_route_output(net, NULL, &fl6);
	}

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb) {
		ip6_rt_put(rt);
		err = -ENOBUFS;
		goto errout;
	}

	/* Reserve room for dummy headers; this skb can pass
	   through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reserve(skb, MAX_HEADER + sizeof(struct ipv6hdr));

	skb_dst_set(skb, &rt->dst);

	err = rt6_fill_node(net, skb, rt, &fl6.daddr, &fl6.saddr, iif,
			    RTM_NEWROUTE, NETLINK_CB(in_skb).portid,
			    nlh->nlmsg_seq, 0, 0, 0);
	if (err < 0) {
		kfree_skb(skb);
		goto errout;
	}

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
errout:
	return err;
}
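
/*
 * Illustrative userspace sketch, not part of the original file and kept
 * out of the build: the handler above answers exactly this kind of
 * RTM_GETROUTE query (error handling trimmed for brevity).
 */
#if 0
#include <linux/rtnetlink.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

int query_route(const struct in6_addr *dst)
{
	struct {
		struct nlmsghdr nlh;
		struct rtmsg rtm;
		struct rtattr rta;
		struct in6_addr addr;
	} req = {
		.nlh.nlmsg_len = sizeof(req),
		.nlh.nlmsg_type = RTM_GETROUTE,
		.nlh.nlmsg_flags = NLM_F_REQUEST,
		.rtm.rtm_family = AF_INET6,
		.rta.rta_type = RTA_DST,
		.rta.rta_len = RTA_LENGTH(sizeof(struct in6_addr)),
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	req.addr = *dst;
	send(fd, &req, sizeof(req), 0);
	/* ... recv() the RTM_NEWROUTE reply and walk its attributes ... */
	close(fd);
	return 0;
}
#endif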
void inet6_rt_notify(int event, struct rt6_info *rt, struct nl_info *info)
{
	struct sk_buff *skb;
	struct net *net = info->nl_net;
	u32 seq;
	int err = -ENOBUFS;

	seq = info->nlh ? info->nlh->nlmsg_seq : 0;
	skb = nlmsg_new(rt6_nlmsg_size(rt), gfp_any());
	if (!skb)
		goto errout;

	err = rt6_fill_node(net, skb, rt, NULL, NULL, 0,
			    event, info->portid, seq, 0, 0, 0);
	if (err < 0) {
		/* -EMSGSIZE implies BUG in rt6_nlmsg_size() */
		WARN_ON(err == -EMSGSIZE);
		kfree_skb(skb);
		goto errout;
	}
	rtnl_notify(skb, net, info->portid, RTNLGRP_IPV6_ROUTE,
		    info->nlh, gfp_any());
	return;
errout:
	rtnl_set_sk_err(net, RTNLGRP_IPV6_ROUTE, err);
}
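
/*
 * Illustrative userspace sketch, not part of the original file and kept
 * out of the build: the notifications emitted above can be observed by
 * binding a NETLINK_ROUTE socket to the IPv6 route multicast group.
 */
#if 0
#include <linux/rtnetlink.h>
#include <sys/socket.h>

static void watch_ipv6_routes(void)
{
	struct sockaddr_nl snl = {
		.nl_family = AF_NETLINK,
		.nl_groups = RTMGRP_IPV6_ROUTE,	/* legacy bitmask form */
	};
	int fd = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);

	bind(fd, (struct sockaddr *)&snl, sizeof(snl));
	/* each add/delete now arrives as RTM_NEWROUTE/RTM_DELROUTE */
}
#endif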
static int ip6_route_dev_notify(struct notifier_block *this,
				unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct net *net = dev_net(dev);

	if (event == NETDEV_REGISTER && (dev->flags & IFF_LOOPBACK)) {
		net->ipv6.ip6_null_entry->dst.dev = dev;
		net->ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
		net->ipv6.ip6_prohibit_entry->dst.dev = dev;
		net->ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(dev);
		net->ipv6.ip6_blk_hole_entry->dst.dev = dev;
		net->ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(dev);
#endif
	}

	return NOTIFY_OK;
}
#ifdef CONFIG_PROC_FS

static const struct file_operations ipv6_route_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= ipv6_route_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

static int rt6_stats_seq_show(struct seq_file *seq, void *v)
{
	struct net *net = (struct net *)seq->private;
	seq_printf(seq, "%04x %04x %04x %04x %04x %04x %04x\n",
		   net->ipv6.rt6_stats->fib_nodes,
		   net->ipv6.rt6_stats->fib_route_nodes,
		   net->ipv6.rt6_stats->fib_rt_alloc,
		   net->ipv6.rt6_stats->fib_rt_entries,
		   net->ipv6.rt6_stats->fib_rt_cache,
		   dst_entries_get_slow(&net->ipv6.ip6_dst_ops),
		   net->ipv6.rt6_stats->fib_discarded_routes);

	return 0;
}

static int rt6_stats_seq_open(struct inode *inode, struct file *file)
{
	return single_open_net(inode, file, rt6_stats_seq_show);
}

static const struct file_operations rt6_stats_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= rt6_stats_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release_net,
};
#endif	/* CONFIG_PROC_FS */
#ifdef CONFIG_SYSCTL

int ipv6_sysctl_rtcache_flush(struct ctl_table *ctl, int write,
			      void __user *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)ctl->extra1;
	int delay = net->ipv6.sysctl.flush_delay;

	proc_dointvec(ctl, write, buffer, lenp, ppos);
	fib6_run_gc(delay <= 0 ? 0 : (unsigned long)delay, net, delay > 0);
	return 0;
}
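
/*
 * Illustrative userspace sketch, not part of the original file and kept
 * out of the build: writing to the "flush" sysctl lands in the handler
 * above; the written value becomes the new flush_delay, while the gc
 * run uses the previous one.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static void flush_ipv6_routes(void)
{
	int fd = open("/proc/sys/net/ipv6/route/flush", O_WRONLY);

	write(fd, "0", 1);	/* triggers ipv6_sysctl_rtcache_flush() */
	close(fd);
}
#endif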
struct ctl_table ipv6_route_table_template[] = {
	{
		.procname	= "flush",
		.data		= &init_net.ipv6.sysctl.flush_delay,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv6_sysctl_rtcache_flush
	},
	{
		.procname	= "gc_thresh",
		.data		= &ip6_dst_ops_template.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &init_net.ipv6.sysctl.ip6_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv6.sysctl.ip6_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv6.sysctl.ip6_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &init_net.ipv6.sysctl.ip6_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{ }
};
struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
{
	struct ctl_table *table;

	table = kmemdup(ipv6_route_table_template,
			sizeof(ipv6_route_table_template),
			GFP_KERNEL);

	if (table) {
		table[0].data = &net->ipv6.sysctl.flush_delay;
		table[0].extra1 = net;
		table[1].data = &net->ipv6.ip6_dst_ops.gc_thresh;
		table[2].data = &net->ipv6.sysctl.ip6_rt_max_size;
		table[3].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;
		table[4].data = &net->ipv6.sysctl.ip6_rt_gc_timeout;
		table[5].data = &net->ipv6.sysctl.ip6_rt_gc_interval;
		table[6].data = &net->ipv6.sysctl.ip6_rt_gc_elasticity;
		table[7].data = &net->ipv6.sysctl.ip6_rt_mtu_expires;
		table[8].data = &net->ipv6.sysctl.ip6_rt_min_advmss;
		table[9].data = &net->ipv6.sysctl.ip6_rt_gc_min_interval;

		/* Don't export sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns)
			table[0].procname = NULL;
	}

	return table;
}
static int __net_init ip6_route_net_init(struct net *net)
{
	int ret = -ENOMEM;

	memcpy(&net->ipv6.ip6_dst_ops, &ip6_dst_ops_template,
	       sizeof(net->ipv6.ip6_dst_ops));

	if (dst_entries_init(&net->ipv6.ip6_dst_ops) < 0)
		goto out_ip6_dst_ops;

	net->ipv6.ip6_null_entry = kmemdup(&ip6_null_entry_template,
					   sizeof(*net->ipv6.ip6_null_entry),
					   GFP_KERNEL);
	if (!net->ipv6.ip6_null_entry)
		goto out_ip6_dst_entries;
	net->ipv6.ip6_null_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_null_entry;
	net->ipv6.ip6_null_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_null_entry->dst,
			 ip6_template_metrics, true);

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	net->ipv6.ip6_prohibit_entry = kmemdup(&ip6_prohibit_entry_template,
					       sizeof(*net->ipv6.ip6_prohibit_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_prohibit_entry)
		goto out_ip6_null_entry;
	net->ipv6.ip6_prohibit_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_prohibit_entry;
	net->ipv6.ip6_prohibit_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_prohibit_entry->dst,
			 ip6_template_metrics, true);

	net->ipv6.ip6_blk_hole_entry = kmemdup(&ip6_blk_hole_entry_template,
					       sizeof(*net->ipv6.ip6_blk_hole_entry),
					       GFP_KERNEL);
	if (!net->ipv6.ip6_blk_hole_entry)
		goto out_ip6_prohibit_entry;
	net->ipv6.ip6_blk_hole_entry->dst.path =
		(struct dst_entry *)net->ipv6.ip6_blk_hole_entry;
	net->ipv6.ip6_blk_hole_entry->dst.ops = &net->ipv6.ip6_dst_ops;
	dst_init_metrics(&net->ipv6.ip6_blk_hole_entry->dst,
			 ip6_template_metrics, true);
#endif

	net->ipv6.sysctl.flush_delay = 0;
	net->ipv6.sysctl.ip6_rt_max_size = 4096;
	net->ipv6.sysctl.ip6_rt_gc_min_interval = HZ / 2;
	net->ipv6.sysctl.ip6_rt_gc_timeout = 60*HZ;
	net->ipv6.sysctl.ip6_rt_gc_interval = 30*HZ;
	net->ipv6.sysctl.ip6_rt_gc_elasticity = 9;
	net->ipv6.sysctl.ip6_rt_mtu_expires = 10*60*HZ;
	net->ipv6.sysctl.ip6_rt_min_advmss = IPV6_MIN_MTU - 20 - 40;

	net->ipv6.ip6_rt_gc_expire = 30*HZ;

	ret = 0;
out:
	return ret;

#ifdef CONFIG_IPV6_MULTIPLE_TABLES
out_ip6_prohibit_entry:
	kfree(net->ipv6.ip6_prohibit_entry);
out_ip6_null_entry:
	kfree(net->ipv6.ip6_null_entry);
#endif
out_ip6_dst_entries:
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
out_ip6_dst_ops:
	goto out;
}
static void __net_exit ip6_route_net_exit(struct net *net)
{
	kfree(net->ipv6.ip6_null_entry);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	kfree(net->ipv6.ip6_prohibit_entry);
	kfree(net->ipv6.ip6_blk_hole_entry);
#endif
	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
}

static int __net_init ip6_route_net_init_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	proc_create("ipv6_route", 0, net->proc_net, &ipv6_route_proc_fops);
	proc_create("rt6_stats", S_IRUGO, net->proc_net, &rt6_stats_seq_fops);
#endif
	return 0;
}

static void __net_exit ip6_route_net_exit_late(struct net *net)
{
#ifdef CONFIG_PROC_FS
	remove_proc_entry("ipv6_route", net->proc_net);
	remove_proc_entry("rt6_stats", net->proc_net);
#endif
}
static struct pernet_operations ip6_route_net_ops = {
	.init = ip6_route_net_init,
	.exit = ip6_route_net_exit,
};

static int __net_init ipv6_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv6.peers = bp;
	return 0;
}

static void __net_exit ipv6_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv6.peers;

	net->ipv6.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static struct pernet_operations ipv6_inetpeer_ops = {
	.init	= ipv6_inetpeer_init,
	.exit	= ipv6_inetpeer_exit,
};

static struct pernet_operations ip6_route_net_late_ops = {
	.init = ip6_route_net_init_late,
	.exit = ip6_route_net_exit_late,
};

static struct notifier_block ip6_route_dev_notifier = {
	.notifier_call = ip6_route_dev_notify,
	.priority = 0,
};
int __init ip6_route_init(void)
{
	int ret;
	int cpu;

	ret = -ENOMEM;
	ip6_dst_ops_template.kmem_cachep =
		kmem_cache_create("ip6_dst_cache", sizeof(struct rt6_info), 0,
				  SLAB_HWCACHE_ALIGN, NULL);
	if (!ip6_dst_ops_template.kmem_cachep)
		goto out;

	ret = dst_entries_init(&ip6_dst_blackhole_ops);
	if (ret)
		goto out_kmem_cache;

	ret = register_pernet_subsys(&ipv6_inetpeer_ops);
	if (ret)
		goto out_dst_entries;

	ret = register_pernet_subsys(&ip6_route_net_ops);
	if (ret)
		goto out_register_inetpeer;

	ip6_dst_blackhole_ops.kmem_cachep = ip6_dst_ops_template.kmem_cachep;

	/* The loopback device is registered before this portion of code
	 * runs, so the loopback reference in rt6_info is not taken
	 * automatically; take it manually for init_net.
	 */
	init_net.ipv6.ip6_null_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_null_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
	init_net.ipv6.ip6_prohibit_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_prohibit_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
	init_net.ipv6.ip6_blk_hole_entry->dst.dev = init_net.loopback_dev;
	init_net.ipv6.ip6_blk_hole_entry->rt6i_idev = in6_dev_get(init_net.loopback_dev);
#endif
	ret = fib6_init();
	if (ret)
		goto out_register_subsys;

	ret = xfrm6_init();
	if (ret)
		goto out_fib6_init;

	ret = fib6_rules_init();
	if (ret)
		goto xfrm6_init;

	ret = register_pernet_subsys(&ip6_route_net_late_ops);
	if (ret)
		goto fib6_rules_init;

	ret = -ENOBUFS;
	if (__rtnl_register(PF_INET6, RTM_NEWROUTE, inet6_rtm_newroute, NULL, NULL) ||
	    __rtnl_register(PF_INET6, RTM_DELROUTE, inet6_rtm_delroute, NULL, NULL) ||
	    __rtnl_register(PF_INET6, RTM_GETROUTE, inet6_rtm_getroute, NULL, NULL))
		goto out_register_late_subsys;

	ret = register_netdevice_notifier(&ip6_route_dev_notifier);
	if (ret)
		goto out_register_late_subsys;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = per_cpu_ptr(&rt6_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}

out:
	return ret;

out_register_late_subsys:
	unregister_pernet_subsys(&ip6_route_net_late_ops);
fib6_rules_init:
	fib6_rules_cleanup();
xfrm6_init:
	xfrm6_fini();
out_fib6_init:
	fib6_gc_cleanup();
out_register_subsys:
	unregister_pernet_subsys(&ip6_route_net_ops);
out_register_inetpeer:
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
out_dst_entries:
	dst_entries_destroy(&ip6_dst_blackhole_ops);
out_kmem_cache:
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
	goto out;
}
void ip6_route_cleanup(void)
{
	unregister_netdevice_notifier(&ip6_route_dev_notifier);
	unregister_pernet_subsys(&ip6_route_net_late_ops);
	fib6_rules_cleanup();
	xfrm6_fini();
	fib6_gc_cleanup();
	unregister_pernet_subsys(&ipv6_inetpeer_ops);
	unregister_pernet_subsys(&ip6_route_net_ops);
	dst_entries_destroy(&ip6_dst_blackhole_ops);
	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
}