// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window clamping.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD;
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#ifdef CONFIG_SYSCTL
#include <linux/sysctl.h>
#endif
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>

#include "fib_lookup.h"
#define RT_GC_TIMEOUT (300*HZ)

#define DEFAULT_MIN_PMTU (512 + 20 + 20)
#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
#define DEFAULT_MIN_ADVMSS 256
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
/*
 *	Interface to generic destination cache.
 */

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
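
/* Usage sketch (illustrative, not part of this file's build): the table is
 * indexed by the four TOS bits shifted right by one, which is what the
 * rt_tos2priority() helper in <net/route.h> does:
 *
 *	static u8 example_prio_from_tos(u8 tos)
 *	{
 *		return ip_tos2prio[IPTOS_TOS(tos) >> 1];
 *	}
 */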
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start	= rt_cache_seq_start,
	.next	= rt_cache_seq_next,
	.stop	= rt_cache_seq_stop,
	.show	= rt_cache_seq_show,
};

static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	(*pos)++;
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x\n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start	= rt_cpu_seq_start,
	.next	= rt_cpu_seq_next,
	.stop	= rt_cpu_seq_stop,
	.show	= rt_cpu_seq_show,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
			      &rt_cache_seq_ops);
	if (!pde)
		goto err1;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
			      &rt_cpu_seq_ops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
				 rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */

static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
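
/* Usage sketch (illustrative): a flush does not walk or free anything, it
 * only bumps the per-namespace generation counter; every rtable created
 * before the bump now fails rt_is_expired() and is dropped lazily at its
 * next validation:
 *
 *	rt_cache_flush(net);	// old routes now fail ipv4_dst_check()
 */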
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock();

	return n;
}

static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}

/* Hash tables of size 2048..262144 depending on RAM size.
 * Each bucket uses 8 bytes.
 */
static u32 ip_idents_mask __read_mostly;
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
static u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 bucket, old, now = (u32)jiffies;
	atomic_t *p_id;
	u32 *p_tstamp;
	u32 delta = 0;

	bucket = hash & ip_idents_mask;
	p_tstamp = ip_tstamps + bucket;
	p_id = ip_idents + bucket;
	old = READ_ONCE(*p_tstamp);

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = get_random_u32_below(now - old);

	/* If UBSAN reports an error there, please make sure your compiler
	 * supports -fno-strict-overflow before reporting it: that was a bug
	 * in UBSAN, and it has been fixed in GCC-8.
	 */
	return atomic_add_return(segs + delta, p_id) - segs;
}

void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
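
/* Caller sketch (illustrative; the real wrappers live in <net/ip.h>): a
 * sender that may emit several segments reserves one ID per segment, so
 * consecutive fragment trains stay distinct:
 *
 *	__ip_select_ident(net, ip_hdr(skb), skb_shinfo(skb)->gso_segs ?: 1);
 */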
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk, const struct iphdr *iph,
			     int oif, __u8 tos, u8 prot, u32 mark,
			     int flow_flags)
{
	__u8 scope = RT_SCOPE_UNIVERSE;

	if (sk) {
		oif = sk->sk_bound_dev_if;
		mark = READ_ONCE(sk->sk_mark);
		tos = ip_sock_rt_tos(sk);
		scope = ip_sock_rt_scope(sk);
		prot = inet_test_bit(HDRINCL, sk) ? IPPROTO_RAW :
						    sk->sk_protocol;
	}

	flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
			   prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
			   ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
			   ip_sock_rt_scope(sk),
			   inet_test_bit(HDRINCL, sk) ?
				IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}
static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}

static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
	struct fib_nh_exception *fnhe, *oldest = NULL;

	for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
		fnhe = rcu_dereference_protected(*fnhe_p,
						 lockdep_is_held(&fnhe_lock));
		if (!fnhe)
			break;
		if (!oldest ||
		    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
			oldest = fnhe;
			oldest_p = fnhe_p;
		}
	}
	fnhe_flush_routes(oldest);
	*oldest_p = oldest->fnhe_next;
	kfree_rcu(oldest, rcu);
}

static u32 fnhe_hashfun(__be32 daddr)
{
	static siphash_aligned_key_t fnhe_hash_key;
	u64 hval;

	net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
	hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
	return hash_64(hval, FNHE_HASH_SHIFT);
}

static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_uses_gateway = 1;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}

static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		/* Randomize max depth to avoid some side-channel attacks. */
		int max_depth = FNHE_RECLAIM_DEPTH +
				get_random_u32_below(FNHE_RECLAIM_DEPTH);

		while (depth > max_depth) {
			fnhe_remove_oldest(hash);
			depth--;
		}

		fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
		if (!fnhe)
			goto out_unlock;

		fnhe->fnhe_next = hash->chain;

		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		rcu_assign_pointer(hash->chain, fnhe);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc;

				fib_select_path(net, &res, fl4, skb);
				nhc = FIB_RES_NHC(res);
				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						0, false,
						jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}

static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	rt = dst_rtable(dst);

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = dst_rtable(dst);
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}

/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start sending redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything,
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->n_redirects == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->n_redirects)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->n_redirects;
		if (IS_ENABLED(CONFIG_IP_ROUTE_VERBOSE) && log_martians &&
		    peer->n_redirects == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
	}
out_put_peer:
	inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	SKB_DR(reason);
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			SKB_DR_SET(reason, IP_INADDRERRORS);
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			SKB_DR_SET(reason, IP_INNOROUTES);
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		SKB_DR_SET(reason, IP_INNOROUTES);
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb_reason(skb, reason);
	return 0;
}
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct net *net = dev_net(dst->dev);
	struct fib_result res;
	bool lock = false;
	u32 old_mtu;

	if (ip_mtu_locked(dst))
		return;

	old_mtu = ipv4_mtu(dst);
	if (old_mtu < mtu)
		return;

	if (mtu < net->ipv4.ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(net, fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc;

		fib_select_path(net, &res, fl4, NULL);
		nhc = FIB_RES_NHC(res);
		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + net->ipv4.ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}

static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh)
{
	struct rtable *rt = dst_rtable(dst);
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);

	/* Don't make lookup fail for bridged encapsulations */
	if (skb && netif_is_any_bridge_port(skb->dev))
		fl4.flowi4_oif = 0;

	__ip_rt_update_pmtu(rt, &fl4, mtu);
}

void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
			 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
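
/* Caller sketch (illustrative): a tunnel error handler that received an
 * ICMP "fragmentation needed" for a packet it sent can feed the advertised
 * MTU back through this helper (the protocol value is an example):
 *
 *	ipv4_update_pmtu(skb, dev_net(skb->dev), info, 0, IPPROTO_GRE);
 */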
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}

void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = dst_rtable(odst);
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu(dst_rtable(xfrm_dst_path(&rt->dst)), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
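
/* Caller sketch (illustrative): a transport error handler reacting to
 * ICMP_FRAG_NEEDED on a connected socket can simply do:
 *
 *	ipv4_sk_update_pmtu(skb, sk, ntohs(icmp_hdr(skb)->un.frag.mtu));
 */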
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);

INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
							  u32 cookie)
{
	struct rtable *rt = dst_rtable(dst);

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
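
/* Caller sketch (illustrative): users of cached routes revalidate through
 * dst_check(), which lands here for IPv4 and returns NULL once the route
 * was killed or the genid bumped, forcing a fresh lookup:
 *
 *	if (!dst_check(&rt->dst, 0))
 *		rt = ip_route_output_flow(net, &fl4, sk);
 */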
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct net_device *dev;
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
		res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}

static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be not aligned
 * in IP options!
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}

#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	struct net *net = dev_net(dst->dev);
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    net->ipv4.ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}

INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	return ip_dst_mtu_maybe_forward(dst, false);
}
EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}

static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}
static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			rt_add_uncached_list(orig);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}

struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
	struct list_head	quarantine;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->dst.rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->dst.rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->dst.rt_uncached)) {
		struct uncached_list *ul = rt->dst.rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del_init(&rt->dst.rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	ip_dst_metrics_put(dst);
	rt_del_uncached_list(dst_rtable(dst));
}

void rt_flush_dev(struct net_device *dev)
{
	struct rtable *rt, *safe;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		if (list_empty(&ul->head))
			continue;

		spin_lock_bh(&ul->lock);
		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = blackhole_netdev;
			netdev_ref_replace(dev, blackhole_netdev,
					   &rt->dst.dev_tracker, GFP_ATOMIC);
			list_move(&rt->dst.rt_uncached, &ul->quarantine);
		}
		spin_unlock_bh(&ul->lock);
	}
}

static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_uses_gateway = 1;
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool noxfrm)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
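
/* Usage sketch (illustrative): callers pick the device, route flags and
 * type, then finish the dst by hand, e.g. for a local multicast route:
 *
 *	rt = rt_dst_alloc(net->loopback_dev, RTCF_MULTICAST | RTCF_LOCAL,
 *			  RTN_MULTICAST, false);
 *	if (rt)
 *		rt->dst.output = ip_rt_bug;	// input routes never xmit
 */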
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;

		new_rt->dst.input = rt->dst.input;
		new_rt->dst.output = rt->dst.output;
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}
	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}

/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rth->dst);
	return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 * Per the RFC1812 recommendation: if the source is martian,
		 * the only hint is the MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy ARP feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_ORCONF(out_dev, NOXFRM));
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (!icmp_is_err(icmph->type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
static u32 fib_multipath_custom_hash_outer(const struct net *net,
					   const struct sk_buff *skb,
					   bool *p_has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);

	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
	return flow_hash_from_keys(&hash_keys);
}

static u32 fib_multipath_custom_hash_inner(const struct net *net,
					   const struct sk_buff *skb,
					   bool has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	/* We assume the packet carries an encapsulation, but if none was
	 * encountered during dissection of the outer flow, then there is no
	 * point in calling the flow dissector again.
	 */
	if (!has_inner)
		return 0;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, 0);

	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
		return 0;

	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
			hash_keys.tags.flow_label = keys.tags.flow_label;
	}

	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	return flow_hash_from_keys(&hash_keys);
}

static u32 fib_multipath_custom_hash_skb(const struct net *net,
					 const struct sk_buff *skb)
{
	u32 mhash, mhash_inner;
	bool has_inner = true;

	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);

	return jhash_2words(mhash, mhash_inner, 0);
}
static u32 fib_multipath_custom_hash_fl4(const struct net *net,
					 const struct flowi4 *fl4)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v4addrs.src = fl4->saddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v4addrs.dst = fl4->daddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = fl4->flowi4_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = fl4->fl4_sport;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = fl4->fl4_dport;

	return flow_hash_from_keys(&hash_keys);
}
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash = 0;

	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		/* skb is currently provided only when forwarding */
		if (skb) {
			struct flow_keys keys;

			skb_flow_dissect_flow_keys(skb, &keys, 0);
			/* Inner can be v4 or v6 */
			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
				hash_keys.tags.flow_label = keys.tags.flow_label;
				hash_keys.basic.ip_proto = keys.basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				ip_multipath_l3_keys(skb, &hash_keys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 3:
		if (skb)
			mhash = fib_multipath_custom_hash_skb(net, skb);
		else
			mhash = fib_multipath_custom_hash_fl4(net, fl4);
		break;
	}

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
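
/* Policy summary (illustrative, following the fib_multipath_hash_policy
 * sysctl semantics; configured e.g. via
 * "sysctl net.ipv4.fib_multipath_hash_policy=1"):
 *   0 - L3 addresses (ICMP errors hash on the inner packet)
 *   1 - L4 five-tuple
 *   2 - inner L3 for encapsulated packets, outer L3 otherwise
 *   3 - custom field set taken from fib_multipath_hash_fields
 */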
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
		IPCB(skb)->flags |= IPSKB_MULTIPATH;
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/* Implements all the saddr-related checks as ip_route_input_slow(),
 * assuming daddr is valid and the destination is not a local broadcast one.
 * Uses the provided hint instead of performing a route lookup.
 */
int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		      u8 tos, struct net_device *dev,
		      const struct sk_buff *hint)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct rtable *rt = skb_rtable(hint);
	struct net *net = dev_net(dev);
	int err = -EINVAL;
	u32 tag = 0;

	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
		goto martian_source;

	if (rt->rt_type != RTN_LOCAL)
		goto skip_validate_source;

	tos &= IPTOS_RT_MASK;
	err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
	if (err < 0)
		goto martian_source;

skip_validate_source:
	skb_dst_copy(skb, hint);
	return 0;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	return err;
}
/* get device for dst_alloc with local routes */
static struct net_device *ip_rt_get_dev(struct net *net,
					const struct fib_result *res)
{
	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
	struct net_device *dev = NULL;

	if (nhc)
		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);

	/* when no device is found, fall back to the loopback device */
	return dev ? : net->loopback_dev;
}
/*
 * NOTE. We drop all the packets that have local source
 * addresses, because every properly looped back packet
 * must have the correct destination already attached by the output routine.
 * Changes in the enforced policies must be applied also to
 * ip_route_use_hint().
 *
 * Such an approach solves two big problems:
 * 1. Non-simplex devices are handled properly.
 * 2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 * called with rcu_read_lock()
 */
static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev,
			       struct fib_result *res)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flow_keys *flkeys = NULL, _flkeys;
	struct net *net = dev_net(dev);
	struct ip_tunnel_info *tun_info;
	int err = -EINVAL;
	unsigned int flags = 0;
	u32 itag = 0;
	struct rtable *rth;
	struct flowi4 fl4;
	bool do_cache = true;

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which cannot be detected
	 * by the fib lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only to limited broadcast;
	 * I do not even know whether to fix it or not. Waiting for complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
	 * and calls it once if daddr and/or saddr are loopback addresses.
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route packet.
	 */
	fl4.flowi4_l3mdev = 0;
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);
	fl4.flowi4_multipath_hash = 0;

	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
		flkeys = &_flkeys;
	} else {
		fl4.flowi4_proto = 0;
		fl4.fl4_sport = 0;
		fl4.fl4_dport = 0;
	}

	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST) {
		if (IN_DEV_BFORWARD(in_dev))
			goto make_route;
		/* not do cache if bc_forwarding is enabled */
		if (IPV4_DEVCONF_ALL_RO(net, BC_FORWARDING))
			do_cache = false;
		goto brd_input;
	}

	if (res->type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST)
		goto martian_destination;

make_route:
	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	do_cache &= res->fi && !itag;
	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			err = 0;
			goto out;
		}
	}

	rth = rt_dst_alloc(ip_rt_get_dev(net, res),
			   flags | RTCF_LOCAL, res->type, false);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nhc, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
/* called with rcu_read_lock held */
static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			      u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic was moved from the route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result, a host on a multicast
	 * network acquires a lot of useless route cache entries, a sort of
	 * SDR message from all over the world. Now we try to get rid of them.
	 * Really, provided the software IP multicast filter is organized
	 * reasonably (at least, hashed), it does not result in a slowdown
	 * compared with route cache reject entries.
	 * Note that multicast routers are not affected, because a
	 * route cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		our = ip_check_mc_rcu(in_dev, daddr, saddr,
				      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if (!our && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

#ifdef CONFIG_IP_MROUTE
		    (!ipv4_is_local_multicast(daddr) &&
		     IN_DEV_MFORWARD(in_dev))
#endif
			err = ip_route_input_mc(skb, daddr, saddr,

	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	struct fib_result res;
	int err;

	tos &= IPTOS_RT_MASK;
	rcu_read_lock();
	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
	rcu_read_unlock();
	return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
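
/* Usage sketch (an illustration, not copied verbatim from a real caller):
 * a receive-path caller such as the ip_rcv_finish() path resolves the
 * route for an incoming packet roughly like this, with "iph" being the
 * packet's IPv4 header:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	int err;
 *
 *	err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				   iph->tos, skb->dev);
 *	if (unlikely(err))
 *		goto drop;
 *
 * On success a dst has been attached to the skb without holding a
 * reference ("noref"), which is only safe while the caller remains
 * inside an RCU read-side section for the lifetime of that use.
 */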
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;

	in_dev = __in_dev_get_rcu(dev_out);
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
			flags &= ~RTCF_LOCAL;
		/* If a multicast route does not exist, use
		 * the default one, but do not gateway in this case.
		 */
		if (fi && res->prefixlen < 4)
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface: the intended recipient waiting
		 * on that interface will not receive the packet, because it
		 * will be delivered on the loopback interface instead, and
		 * the IP_PKTINFO ipi_ifindex will be set to the loopback
		 * interface as well.
		 */
	do_cache &= fi != NULL;
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
		struct rtable __rcu **prth;

		fnhe = find_exception(nhc, fl4->daddr);
			prth = &fnhe->fnhe_rth_output;
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nhc->nhc_gw_family &&
				       nhc->nhc_scope == RT_SCOPE_LINK))) {
			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))

	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_ORCONF(in_dev, NOXFRM));
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif = orig_oif;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
	lwtunnel_set_redirect(&rth->dst);
/*
 * Major route resolver routine.
 */
struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	struct fib_result res = {

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos &= IPTOS_RT_MASK;
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
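
/* Note: wrappers that have no packet at hand, e.g. __ip_route_output_key(),
 * are expected to reach this resolver with skb == NULL; the skb is only
 * consulted for things like multipath hashing. A minimal lookup might look
 * like this (a sketch, error handling elided):
 *
 *	struct flowi4 fl4 = { .daddr = daddr };
 *	struct rtable *rt = ip_route_output_key_hash(net, &fl4, NULL);
 */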
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
					    struct fib_result *res,
					    const struct sk_buff *skb)
{
	struct net_device *dev_out = NULL;
	int orig_oif = fl4->flowi4_oif;
	unsigned int flags = 0;

		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr)) {
			rth = ERR_PTR(-EINVAL);

		rth = ERR_PTR(-ENETUNREACH);
		/* I removed the check for oif == dev_out->oif here.
		 * It was wrong for two reasons:
		 * 1. ip_dev_find(net, saddr) can return the wrong iface, if
		 *    saddr is assigned to multiple interfaces.
		 * 2. Moreover, we are allowed to send packets with the saddr
		 *    of another iface. --ANK
		 */
		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			/* Special hack: the user can direct multicasts
			 * and limited broadcast via the necessary interface
			 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			 * This hack is not just for fun, it allows
			 * vic, vat and friends to work.
			 * They bind a socket to loopback, set the ttl to zero
			 * and expect that it will work.
			 * From the viewpoint of the routing cache they are broken,
			 * because we are not allowed to build a multicast path
			 * with a loopback source addr (look, the routing cache
			 * cannot know that the ttl is zero, so that the packet
			 * will not leave this host and the route is valid).
			 * Luckily, this hack is a good workaround.
			 */
			fl4->flowi4_oif = dev_out->ifindex;

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))

	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);

		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
				fl4->saddr = inet_select_addr(dev_out, 0,

		if (ipv4_is_multicast(fl4->daddr))
			fl4->saddr = inet_select_addr(dev_out, 0,
		else if (!fl4->daddr)
			fl4->saddr = inet_select_addr(dev_out, 0,

			fl4->daddr = fl4->saddr;
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res->type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
			/* Apparently, the routing tables are wrong.
			 * Assume that the destination is on-link.
			 *
			 * Because we are allowed to send to an iface
			 * even if it has NO routes and NO assigned
			 * addresses. When oif is specified, the routing
			 * tables are looked up with only one purpose:
			 * to catch if the destination is gatewayed, rather
			 * than direct. Moreover, if MSG_DONTROUTE is set,
			 * we send the packet, ignoring both the routing
			 * tables and the ifaddr state. --ANK
			 *
			 * We could do this even if oif is unknown,
			 * likely IPv6, but we do not.
			 */
			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
			res->type = RTN_UNICAST;

	if (res->type == RTN_LOCAL) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
				fl4->saddr = fl4->daddr;
		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;
		/* make sure orig_oif points to fib result device even
		 * though packet rx/tx happens over loopback or l3mdev
		 */
		orig_oif = FIB_RES_OIF(*res);

		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
	}

	fib_select_path(net, res, fl4, skb);

	dev_out = FIB_RES_DEV(*res);

	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
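
/* The blackhole dst_ops below back ipv4_blackhole_route(), which clones an
 * existing route into one whose handlers silently discard traffic (see
 * dst_discard/dst_discard_out in the function body). It is used, for
 * example, by the xfrm layer when packets must be dropped while policy
 * resolution cannot complete.
 */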
static struct dst_ops ipv4_dst_blackhole_ops = {
	.default_advmss	= ipv4_default_advmss,
	.neigh_lookup	= ipv4_neigh_lookup,
	.check		= dst_blackhole_check,
	.cow_metrics	= dst_blackhole_cow_metrics,
	.update_pmtu	= dst_blackhole_update_pmtu,
	.redirect	= dst_blackhole_redirect,
	.mtu		= dst_blackhole_mtu,
};

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = dst_rtable(dst_orig);
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->input = dst_discard;
		new->output = dst_discard_out;

		new->dev = net->loopback_dev;
		netdev_hold(new->dev, &new->dev_tracker, GFP_ATOMIC);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;
		rt->rt_mtu_locked = ort->rt_mtu_locked;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_uses_gateway = ort->rt_uses_gateway;
		rt->rt_gw_family = ort->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			rt->rt_gw4 = ort->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			rt->rt_gw6 = ort->rt_gw6;
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;
	if (flp4->flowi4_proto) {
		flp4->flowi4_oif = rt->dst.dev->ifindex;
		rt = dst_rtable(xfrm_lookup_route(net, &rt->dst,
						  flowi4_to_flowi(flp4),
						  sk, 0));
	}
	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
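
/* Usage sketch (illustrative only; names such as "oif", "tos" and the
 * addresses/ports are assumptions, not values from this file): a datagram
 * sender might resolve its route like this:
 *
 *	struct flowi4 fl4;
 *	struct rtable *rt;
 *
 *	flowi4_init_output(&fl4, oif, sk->sk_mark, tos, RT_SCOPE_UNIVERSE,
 *			   IPPROTO_UDP, 0, daddr, saddr, dport, sport,
 *			   sk->sk_uid);
 *	rt = ip_route_output_flow(net, &fl4, sk);
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 *
 * Passing a non-zero flowi4_proto is what opts the lookup into the
 * xfrm_lookup_route() pass above.
 */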
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
			struct sk_buff *skb, u32 portid, u32 seq,
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);

	r = nlmsg_data(nlh);
	r->rtm_family = AF_INET;
	r->rtm_dst_len = 32;
	r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
	r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type = rt->rt_type;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
	if (rt->dst.lwtstate &&
	    lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (fl4 && !rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway) {
		if (rt->rt_gw_family == AF_INET &&
		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
			goto nla_put_failure;
		} else if (rt->rt_gw_family == AF_INET6) {
			int alen = sizeof(struct in6_addr);

			nla = nla_reserve(skb, RTA_VIA, alen + 2);
				goto nla_put_failure;

			via = nla_data(nla);
			via->rtvia_family = AF_INET6;
			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
	expires = rt->dst.expires;
		unsigned long now = jiffies;

		if (time_before(now, expires))

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rt->rt_mtu_locked && expires)
		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

		if (fl4->flowi4_mark &&
		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
			goto nla_put_failure;

		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
		    nla_put_u32(skb, RTA_UID,
				from_kuid_munged(current_user_ns(),
			goto nla_put_failure;

		if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
			if (ipv4_is_multicast(dst) &&
			    !ipv4_is_local_multicast(dst) &&
			    IPV4_DEVCONF_ALL_RO(net, MC_FORWARDING)) {
				int err = ipmr_get_route(net, skb,
							 fl4->saddr, fl4->daddr,
					goto nla_put_failure;

				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
					goto nla_put_failure;
		}

	error = rt->dst.error;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);

	nlmsg_cancel(skb, nlh);
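
/* fnhe_dump_bucket() below walks one hash bucket of per-nexthop exception
 * routes (entries learned from PMTU updates and redirects) and emits each
 * live entry via rt_fill_info(), skipping entries that belong to an older
 * genid or that have already expired.
 */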
static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
			    struct netlink_callback *cb, u32 table_id,
			    struct fnhe_hash_bucket *bucket, int genid,
			    int *fa_index, int fa_start, unsigned int flags)
{
	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
		     fnhe = rcu_dereference(fnhe->fnhe_next)) {

			if (*fa_index < fa_start)
			if (fnhe->fnhe_genid != genid)
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires))
			rt = rcu_dereference(fnhe->fnhe_rth_input);
				rt = rcu_dereference(fnhe->fnhe_rth_output);

			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
					   table_id, NULL, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, flags);
int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
		       u32 table_id, struct fib_info *fi,
		       int *fa_index, int fa_start, unsigned int flags)
{
	struct net *net = sock_net(cb->skb->sk);
	int nhsel, genid = fnhe_genid(net);

	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
		struct fnhe_hash_bucket *bucket;

		if (nhc->nhc_flags & RTNH_F_DEAD)

		bucket = rcu_dereference(nhc->nhc_exceptions);

		err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
				       genid, fa_index, fa_start,
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
	struct sk_buff *skb;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	/* Reserve room for dummy headers; this skb can pass
	 * through a good chunk of the routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);

	skb->protocol = htons(ETH_P_IP);
	iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;

	skb_set_transport_header(skb, skb->len);

	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;

		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->len = htons(sizeof(struct udphdr));
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;

		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source = sport;
		tcph->doff = sizeof(struct tcphdr) / 4;
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
	}
	case IPPROTO_ICMP: {
		struct icmphdr *icmph;

		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG(extack,
			       "ipv4: Invalid header for route get request");

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
					      rtm_ipv4_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
	    rtm->rtm_table || rtm->rtm_protocol ||
	    rtm->rtm_scope || rtm->rtm_type) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");

	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
			       RTM_F_LOOKUP_TABLE |
		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv4_policy, extack);

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");

	for (i = 0; i <= RTA_MAX; i++) {

			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX + 1];
	u32 table_id = RT_TABLE_MAIN;
	__be16 sport = 0, dport = 0;
	struct fib_result res = {};
	u8 ip_proto = IPPROTO_UDP;
	struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct flowi4 fl4 = {};

	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);

	rtm = nlmsg_data(nlh);
	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
		uid = (iif ? INVALID_UID : current_uid());

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);

		sport = nla_get_be16(tb[RTA_SPORT]);

		dport = nla_get_be16(tb[RTA_DPORT]);

	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);

	fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
		fl4.fl4_sport = sport;
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);

		fl4.flowi4_iif = iif; /* for rt_fill_info */

		err = ip_route_input_rcu(skb, dst, src,
					 rtm->rtm_tos & IPTOS_RT_MASK, dev,

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;

		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
			skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = res.table ? res.table->tb_id : 0;

	/* reset skb for netlink reply msg */
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_header(skb);

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		struct fib_rt_info fri;

			err = fib_props[res.type].error;
				err = -EHOSTUNREACH;

		fri.tb_id = table_id;
		fri.dst = res.prefix;
		fri.dst_len = res.prefixlen;
		fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
		fri.type = rt->rt_type;
		fri.offload_failed = 0;
			struct fib_alias *fa;

			hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
				u8 slen = 32 - fri.dst_len;

				if (fa->fa_slen == slen &&
				    fa->tb_id == fri.tb_id &&
				    fa->fa_dscp == fri.dscp &&
				    fa->fa_info == res.fi &&
				    fa->fa_type == fri.type) {
					fri.offload = READ_ONCE(fa->offload);
					fri.trap = READ_ONCE(fa->trap);
					fri.offload_failed =
						READ_ONCE(fa->offload_failed);
				}
			}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid,

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
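
	/* This doit handler is what serves, e.g., "ip route get 8.8.8.8"
	 * from iproute2: the kernel routes a dummy packet built by
	 * inet_rtm_getroute_build_skb() and reports the resulting route
	 * back in the netlink reply composed above.
	 */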
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
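
/* Writing any value to /proc/sys/net/ipv4/route/flush (e.g.
 * "sysctl -w net.ipv4.route.flush=1") lands in the handler below, which
 * invalidates cached routes and bumps the fnhe genid for the namespace.
 */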
static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
				     void *buffer, size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */
		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec,
	},
};
static const char ipv4_route_flush_procname[] = "flush";

static struct ctl_table ipv4_route_netns_table[] = {
	{
		.procname	= ipv4_route_flush_procname,
		.maxlen		= sizeof(int),
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{
		.procname	= "min_pmtu",
		.data		= &init_net.ipv4.ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &ip_min_valid_pmtu,
	},
	{
		.procname	= "mtu_expires",
		.data		= &init_net.ipv4.ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &init_net.ipv4.ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.proc_handler	= proc_dointvec,
	},
};
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;
	size_t table_size = ARRAY_SIZE(ipv4_route_netns_table);

	tbl = ipv4_route_netns_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);

		/* Don't export non-whitelisted sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns) {
			if (tbl[0].procname != ipv4_route_flush_procname)
		/* Update the variables to point into the current struct net,
		 * except for the first element (flush).
		 */
		for (i = 1; i < table_size; i++)
			tbl[i].data += (void *)net - (void *)&init_net;
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl_sz(net, "net/ipv4/route",
						     tbl, table_size);
	if (!net->ipv4.route_hdr)

	if (tbl != ipv4_route_netns_table)
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	const struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_netns_table);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
static __net_init int netns_ip_rt_init(struct net *net)
{
	/* Set default values for the namespaced sysctls */
	net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
	net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
	net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS;
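
	/* These defaults are per namespace and can later be overridden
	 * through the min_pmtu, mtu_expires and min_adv_mss entries of
	 * ipv4_route_netns_table above.
	 */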
static struct pernet_operations __net_initdata ip_rt_ops = {
	.init = netns_ip_rt_init,
};

static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_u32());

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	inet_peer_base_init(bp);
	net->ipv4.peers = bp;

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};

#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */
int __init ip_rt_init(void)
{
	/* For modern hosts, this will use 2 MB of memory */
	idents_hash = alloc_large_system_hash("IP idents",
					      sizeof(*ip_idents) + sizeof(*ip_tstamps),
					      16, /* one bucket per 64 KB */

	ip_idents = idents_hash;

	get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));

	ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		INIT_LIST_HEAD(&ul->quarantine);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct),
				    __alignof__(struct ip_rt_acct));
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep = KMEM_CACHE(rtable,
					      SLAB_HWCACHE_ALIGN | SLAB_PANIC);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");

	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&ip_rt_ops);
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}