// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/socket.h>
#include <linux/errno.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst_metadata.h>
#include <net/inet_dscp.h>
#include <net/net_namespace.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>

#include "fib_lookup.h"

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))

#define RT_GC_TIMEOUT (300*HZ)

#define DEFAULT_MIN_PMTU (512 + 20 + 20)
#define DEFAULT_MTU_EXPIRES (10 * 60 * HZ)
#define DEFAULT_MIN_ADVMSS 256
static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
/*
 *	Interface to generic destination cache.
 */

INDIRECT_CALLABLE_SCOPE
struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
INDIRECT_CALLABLE_SCOPE
unsigned int ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void ipv4_link_failure(struct sk_buff *skb);
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh);
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
			   struct sk_buff *skb);
static void ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);
static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
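
/* Illustrative sketch (not part of the original file): consumers of
 * ip_tos2prio index it with the four TOS bits shifted down by one,
 * along the lines of rt_tos2priority() in <net/route.h>:
 *
 *	static inline char rt_tos2priority(u8 tos)
 *	{
 *		return ip_tos2prio[IPTOS_TOS(tos) >> 1];
 *	}
 */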
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)

#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start	= rt_cache_seq_start,
	.next	= rt_cache_seq_next,
	.stop	= rt_cache_seq_stop,
	.show	= rt_cache_seq_show,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	(*pos)++;
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x %08x %08x "
			"%08x %08x %08x %08x\n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot,
		   st->in_slow_mc,
		   st->in_no_route,
		   st->in_brd,
		   st->in_martian_dst,
		   st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot,
		   st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start	= rt_cpu_seq_start,
	.next	= rt_cpu_seq_next,
	.stop	= rt_cpu_seq_stop,
	.show	= rt_cpu_seq_show,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif

static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net,
			      &rt_cache_seq_ops);
	if (!pde)
		goto err1;

	pde = proc_create_seq("rt_cache", 0444, net->proc_net_stat,
			      &rt_cpu_seq_ops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
				 rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock();

	return n;
}
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
/* Hash tables of size 2048..262144 depending on RAM size.
 * Each bucket uses 8 bytes.
 */
static u32 ip_idents_mask __read_mostly;
static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;
/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
static u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 bucket, old, now = (u32)jiffies;
	atomic_t *p_id;
	u32 *p_tstamp;
	u32 delta = 0;

	bucket = hash & ip_idents_mask;
	p_tstamp = ip_tstamps + bucket;
	p_id = ip_idents + bucket;
	old = READ_ONCE(*p_tstamp);

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = get_random_u32_below(now - old);

	/* If UBSAN reports an error here, please make sure your compiler
	 * supports -fno-strict-overflow before reporting it; that was a bug
	 * in UBSAN, and it has been fixed in GCC-8.
	 */
	return atomic_add_return(segs + delta, p_id) - segs;
}
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
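
/* Usage sketch (hedged; modeled on the ip_select_ident*() helpers in
 * <net/ip.h>): callers pass the GSO segment count so that the single
 * atomic_add_return() in ip_idents_reserve() hands out a contiguous
 * block of IDs covering every segment of the packet, e.g.:
 *
 *	ip_select_ident_segs(net, skb, sk, skb_shinfo(skb)->gso_segs ?: 1);
 */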
static void ip_rt_fix_tos(struct flowi4 *fl4)
{
	__u8 tos = RT_FL_TOS(fl4);

	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	if (tos & RTO_ONLINK)
		fl4->flowi4_scope = RT_SCOPE_LINK;
}
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk, const struct iphdr *iph,
			     int oif, __u8 tos, u8 prot, u32 mark,
			     int flow_flags)
{
	__u8 scope = RT_SCOPE_UNIVERSE;

	if (sk) {
		oif = sk->sk_bound_dev_if;
		mark = READ_ONCE(sk->sk_mark);
		tos = ip_sock_rt_tos(sk);
		scope = ip_sock_rt_scope(sk);
		prot = inet_test_bit(HDRINCL, sk) ? IPPROTO_RAW :
						    sk->sk_protocol;
	}

	flowi4_init_output(fl4, oif, mark, tos & IPTOS_RT_MASK, scope,
			   prot, flow_flags, iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}
static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}
static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, READ_ONCE(sk->sk_mark),
			   ip_sock_rt_tos(sk) & IPTOS_RT_MASK,
			   ip_sock_rt_scope(sk),
			   inet_test_bit(HDRINCL, sk) ?
				IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}
static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}

static DEFINE_SPINLOCK(fnhe_lock);
static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}
static void fnhe_remove_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception __rcu **fnhe_p, **oldest_p;
	struct fib_nh_exception *fnhe, *oldest = NULL;

	for (fnhe_p = &hash->chain; ; fnhe_p = &fnhe->fnhe_next) {
		fnhe = rcu_dereference_protected(*fnhe_p,
						 lockdep_is_held(&fnhe_lock));
		if (!fnhe)
			break;
		if (!oldest ||
		    time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp)) {
			oldest = fnhe;
			oldest_p = fnhe_p;
		}
	}
	fnhe_flush_routes(oldest);
	*oldest_p = oldest->fnhe_next;
	kfree_rcu(oldest, rcu);
}
static u32 fnhe_hashfun(__be32 daddr)
{
	static siphash_aligned_key_t fnhe_hash_key;
	u64 hval;

	net_get_random_once(&fnhe_hash_key, sizeof(fnhe_hash_key));
	hval = siphash_1u32((__force u32)daddr, &fnhe_hash_key);
	return hash_64(hval, FNHE_HASH_SHIFT);
}
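
/* Reader's note (assumption, based on the definitions in <net/ip_fib.h>):
 * FNHE_HASH_SHIFT is 11, so hash_64() above folds the 64-bit siphash into
 * one of FNHE_HASH_SIZE == (1 << FNHE_HASH_SHIFT) == 2048 exception
 * buckets kept per nexthop in nhc_exceptions.
 */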
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_uses_gateway = 1;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}
static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		/* Randomize max depth to avoid some side channel attacks. */
		int max_depth = FNHE_RECLAIM_DEPTH +
				get_random_u32_below(FNHE_RECLAIM_DEPTH);

		while (depth > max_depth) {
			fnhe_remove_oldest(hash);
			depth--;
		}

		fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
		if (!fnhe)
			goto out_unlock;

		fnhe->fnhe_next = hash->chain;

		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		rcu_assign_pointer(hash->chain, fnhe);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;

			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, (__force u32)new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(READ_ONCE(n->nud_state) & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc;

				fib_select_path(net, &res, fl4, skb);
				nhc = FIB_RES_NHC(res);
				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						      0, false,
						      jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 prot = iph->protocol;
	u32 mark = skb->mark;
	__u8 tos = iph->tos;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   forgot the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
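
/* Worked example with the defaults above (hedged arithmetic, not part of
 * the original file): the k-th consecutive redirect is only sent once
 * jiffies passes rate_last + (ip_rt_redirect_load << k), i.e. the gap
 * doubles from HZ/50 each time; after ip_rt_redirect_number (9) ignored
 * redirects we go silent until ip_rt_redirect_silence ((HZ/50) << 10,
 * roughly 20 seconds) elapses without triggering packets, at which point
 * the counters below are reset.
 */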
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything,
	 * set dst.rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->n_redirects == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->n_redirects)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->n_redirects == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	SKB_DR(reason);
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			SKB_DR_SET(reason, IP_INADDRERRORS);
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;

		case ENETUNREACH:
			SKB_DR_SET(reason, IP_INNOROUTES);
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		SKB_DR_SET(reason, IP_INNOROUTES);
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb_reason(skb, reason);
	return 0;
}
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	struct net *net = dev_net(dst->dev);
	struct fib_result res;
	bool lock = false;
	u32 old_mtu;

	if (ip_mtu_locked(dst))
		return;

	old_mtu = ipv4_mtu(dst);
	if (old_mtu < mtu)
		return;

	if (mtu < net->ipv4.ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, net->ipv4.ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - net->ipv4.ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(net, fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc;

		fib_select_path(net, &res, fl4, NULL);
		nhc = FIB_RES_NHC(res);
		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + net->ipv4.ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu,
			      bool confirm_neigh)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);

	/* Don't make lookup fail for bridged encapsulations */
	if (skb && netif_is_any_bridge_port(skb->dev))
		fl4.flowi4_oif = 0;

	__ip_rt_update_pmtu(rt, &fl4, mtu);
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, mark,
			 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
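
/* Usage sketch (hedged): ICMP and tunnel error handlers feed
 * "fragmentation needed" feedback through this export. A hypothetical
 * caller reacting to an ICMP_FRAG_NEEDED message for a tunneled packet
 * might do:
 *
 *	ipv4_update_pmtu(skb, dev_net(skb->dev), info, t->parms.link,
 *			 iph->protocol);
 *
 * where "info" is the MTU advertised in the ICMP payload and "t" is the
 * tunnel owning the inner header (both names are illustrative).
 */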
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *)xfrm_dst_path(&rt->dst), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif, iph->tos, protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);
void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
INDIRECT_CALLABLE_SCOPE struct dst_entry *ipv4_dst_check(struct dst_entry *dst,
							 u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
EXPORT_INDIRECT_CALLABLE(ipv4_dst_check);
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct net_device *dev;
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		dev = skb->dev ? skb->dev : skb_rtable(skb)->dst.dev;
		res = __ip_options_compile(dev_net(dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}
static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be not aligned
 * in IP options!
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	struct net *net = dev_net(dst->dev);
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    net->ipv4.ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}

INDIRECT_CALLABLE_SCOPE unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	return ip_dst_mtu_maybe_forward(dst, false);
}
EXPORT_INDIRECT_CALLABLE(ipv4_mtu);
static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}
static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (READ_ONCE(dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu) ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}
static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			rt_add_uncached_list(orig);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
	struct list_head	quarantine;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->dst.rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->dst.rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->dst.rt_uncached)) {
		struct uncached_list *ul = rt->dst.rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del_init(&rt->dst.rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}

static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	ip_dst_metrics_put(dst);
	rt_del_uncached_list(rt);
}
void rt_flush_dev(struct net_device *dev)
{
	struct rtable *rt, *safe;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		if (list_empty(&ul->head))
			continue;

		spin_lock_bh(&ul->lock);
		list_for_each_entry_safe(rt, safe, &ul->head, dst.rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = blackhole_netdev;
			netdev_ref_replace(dev, blackhole_netdev,
					   &rt->dst.dev_tracker, GFP_ATOMIC);
			list_move(&rt->dst.rt_uncached, &ul->quarantine);
		}
		spin_unlock_bh(&ul->lock);
	}
}
static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_uses_gateway = 1;
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool noxfrm)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;

		new_rt->dst.input = rt->dst.input;
		new_rt->dst.output = rt->dst.output;
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}
	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_drop(skb);
	skb_dst_set(skb, &rth->dst);
	return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation: if the source is martian,
		 *	the only hint is the MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);

		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route, if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy arp feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	if (IN_DEV_ORCONF(in_dev, NOPOLICY))
		IPCB(skb)->flags |= IPSKB_NOPOLICY;

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_ORCONF(out_dev, NOXFRM));
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* To make ICMP packets follow the right flow, the multipath hash is
 * calculated from the inner IP addresses.
 */
static void ip_multipath_l3_keys(const struct sk_buff *skb,
				 struct flow_keys *hash_keys)
{
	const struct iphdr *outer_iph = ip_hdr(skb);
	const struct iphdr *key_iph = outer_iph;
	const struct iphdr *inner_iph;
	const struct icmphdr *icmph;
	struct iphdr _inner_iph;
	struct icmphdr _icmph;

	if (likely(outer_iph->protocol != IPPROTO_ICMP))
		goto out;

	if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
		goto out;

	icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
				   &_icmph);
	if (!icmph)
		goto out;

	if (!icmp_is_err(icmph->type))
		goto out;

	inner_iph = skb_header_pointer(skb,
				       outer_iph->ihl * 4 + sizeof(_icmph),
				       sizeof(_inner_iph), &_inner_iph);
	if (!inner_iph)
		goto out;

	key_iph = inner_iph;
out:
	hash_keys->addrs.v4addrs.src = key_iph->saddr;
	hash_keys->addrs.v4addrs.dst = key_iph->daddr;
}
static u32 fib_multipath_custom_hash_outer(const struct net *net,
					   const struct sk_buff *skb,
					   bool *p_has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, FLOW_DISSECTOR_F_STOP_AT_ENCAP);

	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	*p_has_inner = !!(keys.control.flags & FLOW_DIS_ENCAPSULATION);
	return flow_hash_from_keys(&hash_keys);
}
static u32 fib_multipath_custom_hash_inner(const struct net *net,
					   const struct sk_buff *skb,
					   bool has_inner)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys keys, hash_keys;

	/* We assume the packet carries an encapsulation, but if none was
	 * encountered during dissection of the outer flow, then there is no
	 * point in calling the flow dissector again.
	 */
	if (!has_inner)
		return 0;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	skb_flow_dissect_flow_keys(skb, &keys, 0);

	if (!(keys.control.flags & FLOW_DIS_ENCAPSULATION))
		return 0;

	if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
	} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_IP)
			hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_IP)
			hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
		if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_FLOWLABEL)
			hash_keys.tags.flow_label = keys.tags.flow_label;
	}

	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_IP_PROTO)
		hash_keys.basic.ip_proto = keys.basic.ip_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_SRC_PORT)
		hash_keys.ports.src = keys.ports.src;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_INNER_DST_PORT)
		hash_keys.ports.dst = keys.ports.dst;

	return flow_hash_from_keys(&hash_keys);
}
static u32 fib_multipath_custom_hash_skb(const struct net *net,
					 const struct sk_buff *skb)
{
	u32 mhash, mhash_inner;
	bool has_inner = true;

	mhash = fib_multipath_custom_hash_outer(net, skb, &has_inner);
	mhash_inner = fib_multipath_custom_hash_inner(net, skb, has_inner);

	return jhash_2words(mhash, mhash_inner, 0);
}
static u32 fib_multipath_custom_hash_fl4(const struct net *net,
					 const struct flowi4 *fl4)
{
	u32 hash_fields = READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_fields);
	struct flow_keys hash_keys;

	if (!(hash_fields & FIB_MULTIPATH_HASH_FIELD_OUTER_MASK))
		return 0;

	memset(&hash_keys, 0, sizeof(hash_keys));
	hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_IP)
		hash_keys.addrs.v4addrs.src = fl4->saddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_IP)
		hash_keys.addrs.v4addrs.dst = fl4->daddr;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_IP_PROTO)
		hash_keys.basic.ip_proto = fl4->flowi4_proto;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_SRC_PORT)
		hash_keys.ports.src = fl4->fl4_sport;
	if (hash_fields & FIB_MULTIPATH_HASH_FIELD_DST_PORT)
		hash_keys.ports.dst = fl4->fl4_dport;

	return flow_hash_from_keys(&hash_keys);
}
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash = 0;

	switch (READ_ONCE(net->ipv4.sysctl_fib_multipath_hash_policy)) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		/* skb is currently provided only when forwarding */
		if (skb) {
			struct flow_keys keys;

			skb_flow_dissect_flow_keys(skb, &keys, 0);
			/* Inner can be v4 or v6 */
			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
				hash_keys.tags.flow_label = keys.tags.flow_label;
				hash_keys.basic.ip_proto = keys.basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				ip_multipath_l3_keys(skb, &hash_keys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		mhash = flow_hash_from_keys(&hash_keys);
		break;
	case 3:
		if (skb)
			mhash = fib_multipath_custom_hash_skb(net, skb);
		else
			mhash = fib_multipath_custom_hash_fl4(net, fl4);
		break;
	}

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash >> 1;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
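
/* Reader's note (hedged summary of the switch above): the sysctl
 * net.ipv4.fib_multipath_hash_policy selects the hash inputs --
 * 0 hashes the L3 addresses (mapping ICMP errors to the inner flow),
 * 1 adds the L4 ports, 2 prefers the inner headers of encapsulated
 * packets, and 3 uses the custom field mask configured through
 * net.ipv4.fib_multipath_hash_fields.
 */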
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
		IPCB(skb)->flags |= IPSKB_MULTIPATH;
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/* Implements all the saddr-related checks as ip_route_input_slow(),
 * assuming daddr is valid and the destination is not a local broadcast one.
 * Uses the provided hint instead of performing a route lookup.
 */
int ip_route_use_hint(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		      u8 tos, struct net_device *dev,
		      const struct sk_buff *hint)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct rtable *rt = skb_rtable(hint);
	struct net *net = dev_net(dev);
	int err = -EINVAL;
	u32 tag = 0;

	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_loopback(saddr) && !IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
		goto martian_source;

	if (rt->rt_type != RTN_LOCAL)
		goto skip_validate_source;

	tos &= IPTOS_RT_MASK;
	err = fib_validate_source(skb, saddr, daddr, tos, 0, dev, in_dev, &tag);
	if (err < 0)
		goto martian_source;

skip_validate_source:
	skb_dst_copy(skb, hint);
	return 0;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	return err;
}
/* get device for dst_alloc with local routes */
static struct net_device *ip_rt_get_dev(struct net *net,
					const struct fib_result *res)
{
	struct fib_nh_common *nhc = res->fi ? res->nhc : NULL;
	struct net_device *dev = NULL;

	if (nhc)
		dev = l3mdev_master_dev_rcu(nhc->nhc_dev);

	return dev ? : net->loopback_dev;
}
/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must already have the correct destination attached by the output
 *	routine. Changes in the enforced policies must also be applied to
 *	ip_route_use_hint().
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with a 100% guarantee.
 *
 *	called with rcu_read_lock()
 */
2223 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2224 u8 tos, struct net_device *dev,
2225 struct fib_result *res)
2227 struct in_device *in_dev = __in_dev_get_rcu(dev);
2228 struct flow_keys *flkeys = NULL, _flkeys;
2229 struct net *net = dev_net(dev);
2230 struct ip_tunnel_info *tun_info;
2232 unsigned int flags = 0;
2236 bool do_cache = true;
2238 /* IP on this device is disabled. */
2243 /* Check for the most weird martians, which can be not detected
2247 tun_info = skb_tunnel_info(skb);
2248 if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
2249 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
2251 fl4.flowi4_tun_key.tun_id = 0;
2254 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
2255 goto martian_source;
2259 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
2262 /* Accept zero addresses only to limited broadcast;
2263 * I even do not know to fix it or not. Waiting for complains :-)
2265 if (ipv4_is_zeronet(saddr))
2266 goto martian_source;
2268 if (ipv4_is_zeronet(daddr))
2269 goto martian_destination;
2271 /* Following code try to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
2272 * and call it once if daddr or/and saddr are loopback addresses
2274 if (ipv4_is_loopback(daddr)) {
2275 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2276 goto martian_destination;
2277 } else if (ipv4_is_loopback(saddr)) {
2278 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
2279 goto martian_source;
2283 * Now we are ready to route packet.
2285 fl4.flowi4_l3mdev = 0;
2287 fl4.flowi4_iif = dev->ifindex;
2288 fl4.flowi4_mark = skb->mark;
2289 fl4.flowi4_tos = tos;
2290 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
2291 fl4.flowi4_flags = 0;
2294 fl4.flowi4_uid = sock_net_uid(net, NULL);
2295 fl4.flowi4_multipath_hash = 0;
2297 if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
2300 fl4.flowi4_proto = 0;
2305 err = fib_lookup(net, &fl4, res, 0);
2307 if (!IN_DEV_FORWARD(in_dev))
2308 err = -EHOSTUNREACH;
2312 if (res->type == RTN_BROADCAST) {
2313 if (IN_DEV_BFORWARD(in_dev))
2315 /* not do cache if bc_forwarding is enabled */
2316 if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
2321 if (res->type == RTN_LOCAL) {
2322 err = fib_validate_source(skb, saddr, daddr, tos,
2323 0, dev, in_dev, &itag);
2325 goto martian_source;
2329 if (!IN_DEV_FORWARD(in_dev)) {
2330 err = -EHOSTUNREACH;
2333 if (res->type != RTN_UNICAST)
2334 goto martian_destination;
2337 err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
2341 if (skb->protocol != htons(ETH_P_IP))
2344 if (!ipv4_is_zeronet(saddr)) {
2345 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
2348 goto martian_source;
2350 flags |= RTCF_BROADCAST;
2351 res->type = RTN_BROADCAST;
2352 RT_CACHE_STAT_INC(in_brd);
2355 if (IN_DEV_ORCONF(in_dev, NOPOLICY))
2356 IPCB(skb)->flags |= IPSKB_NOPOLICY;
2358 do_cache &= res->fi && !itag;
2360 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2362 rth = rcu_dereference(nhc->nhc_rth_input);
2363 if (rt_cache_valid(rth)) {
2364 skb_dst_set_noref(skb, &rth->dst);
2370 rth = rt_dst_alloc(ip_rt_get_dev(net, res),
2371 flags | RTCF_LOCAL, res->type, false);
2375 rth->dst.output= ip_rt_bug;
2376 #ifdef CONFIG_IP_ROUTE_CLASSID
2377 rth->dst.tclassid = itag;
2379 rth->rt_is_input = 1;
2381 RT_CACHE_STAT_INC(in_slow_tot);
2382 if (res->type == RTN_UNREACHABLE) {
2383 rth->dst.input= ip_error;
2384 rth->dst.error= -err;
2385 rth->rt_flags &= ~RTCF_LOCAL;
2389 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2391 rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
2392 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
2393 WARN_ON(rth->dst.input == lwtunnel_input);
2394 rth->dst.lwtstate->orig_input = rth->dst.input;
2395 rth->dst.input = lwtunnel_input;
2398 if (unlikely(!rt_cache_route(nhc, rth)))
2399 rt_add_uncached_list(rth);
2401 skb_dst_set(skb, &rth->dst);
2406 RT_CACHE_STAT_INC(in_no_route);
2407 res->type = RTN_UNREACHABLE;
2413 * Do not cache martian addresses: they should be logged (RFC1812)
2415 martian_destination:
2416 RT_CACHE_STAT_INC(in_martian_dst);
2417 #ifdef CONFIG_IP_ROUTE_VERBOSE
2418 if (IN_DEV_LOG_MARTIANS(in_dev))
2419 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
2420 &daddr, &saddr, dev->name);
2421 #endif
2423 e_inval:
2424 err = -EINVAL;
2425 goto out;
2427 e_nobufs:
2428 err = -ENOBUFS;
2429 goto out;
2431 martian_source:
2432 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
2433 goto out;
2434 }
2436 /* called with rcu_read_lock held */
2437 static int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2438 u8 tos, struct net_device *dev, struct fib_result *res)
2439 {
2440 /* Multicast recognition logic is moved from route cache to here.
2441 * The problem was that too many Ethernet cards have broken/missing
2442 * hardware multicast filters :-( As a result, a host on a multicast
2443 * network acquires a lot of useless route cache entries, e.g. for
2444 * SDR messages from all over the world. Now we try to get rid of them.
2445 * Really, provided the software IP multicast filter is organized
2446 * reasonably (at least, hashed), it does not result in a slowdown
2447 * compared with route cache reject entries.
2448 * Note that multicast routers are not affected, because a
2449 * route cache entry is created eventually.
2450 */
2451 if (ipv4_is_multicast(daddr)) {
2452 struct in_device *in_dev = __in_dev_get_rcu(dev);
2453 int our = 0;
2454 int err = -EINVAL;
2456 if (!in_dev)
2457 return err;
2458 our = ip_check_mc_rcu(in_dev, daddr, saddr,
2459 ip_hdr(skb)->protocol);
2461 /* check l3 master if no match yet */
2462 if (!our && netif_is_l3_slave(dev)) {
2463 struct in_device *l3_in_dev;
2465 l3_in_dev = __in_dev_get_rcu(skb->dev);
2466 if (l3_in_dev)
2467 our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
2468 ip_hdr(skb)->protocol);
2469 }
2471 if (our
2472 #ifdef CONFIG_IP_MROUTE
2473 ||
2474 (!ipv4_is_local_multicast(daddr) &&
2475 IN_DEV_MFORWARD(in_dev))
2476 #endif
2477 ) {
2478 err = ip_route_input_mc(skb, daddr, saddr,
2479 tos, dev, our);
2480 }
2481 return err;
2482 }
2484 return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
2485 }
2487 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
2488 u8 tos, struct net_device *dev)
2489 {
2490 struct fib_result res;
2491 int err;
2493 tos &= IPTOS_RT_MASK;
2494 rcu_read_lock();
2495 err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
2496 rcu_read_unlock();
2498 return err;
2499 }
2500 EXPORT_SYMBOL(ip_route_input_noref);
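/* Typical caller sketch (this mirrors what ip_rcv_finish_core() does;
 * shown only as an illustration): feed addresses and TOS straight from
 * the received packet's IP header:
 *
 *	const struct iphdr *iph = ip_hdr(skb);
 *	int err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
 *				       iph->tos, skb->dev);
 *
 * On success skb_dst(skb) holds the input route; the "noref" variant
 * does not take a dst refcount for cached routes.
 */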
2502 /* called with rcu_read_lock() */
2503 static struct rtable *__mkroute_output(const struct fib_result *res,
2504 const struct flowi4 *fl4, int orig_oif,
2505 struct net_device *dev_out,
2506 unsigned int flags)
2507 {
2508 struct fib_info *fi = res->fi;
2509 struct fib_nh_exception *fnhe;
2510 struct in_device *in_dev;
2511 u16 type = res->type;
2512 struct rtable *rth;
2513 bool do_cache;
2515 in_dev = __in_dev_get_rcu(dev_out);
2516 if (!in_dev)
2517 return ERR_PTR(-EINVAL);
2519 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
2520 if (ipv4_is_loopback(fl4->saddr) &&
2521 !(dev_out->flags & IFF_LOOPBACK) &&
2522 !netif_is_l3_master(dev_out))
2523 return ERR_PTR(-EINVAL);
2525 if (ipv4_is_lbcast(fl4->daddr))
2526 type = RTN_BROADCAST;
2527 else if (ipv4_is_multicast(fl4->daddr))
2528 type = RTN_MULTICAST;
2529 else if (ipv4_is_zeronet(fl4->daddr))
2530 return ERR_PTR(-EINVAL);
2532 if (dev_out->flags & IFF_LOOPBACK)
2533 flags |= RTCF_LOCAL;
2536 if (type == RTN_BROADCAST) {
2537 flags |= RTCF_BROADCAST | RTCF_LOCAL;
2538 fi = NULL;
2539 } else if (type == RTN_MULTICAST) {
2540 flags |= RTCF_MULTICAST | RTCF_LOCAL;
2541 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
2542 fl4->flowi4_proto))
2543 flags &= ~RTCF_LOCAL;
2544 else
2545 do_cache = false;
2546 /* If the multicast route does not exist, use the
2547 * default one, but do not gateway in this case.
2548 * Yes, it is a hack.
2549 */
2550 if (fi && res->prefixlen < 4)
2551 fi = NULL;
2552 } else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
2553 (orig_oif != dev_out->ifindex)) {
2554 /* For local routes that require a particular output interface
2555 * we do not want to cache the result. Caching the result
2556 * causes incorrect behaviour when there are multiple source
2557 * addresses on the interface, the end result being that if the
2558 * intended recipient is waiting on that interface for the
2559 * packet he won't receive it because it will be delivered on
2560 * the loopback interface and the IP_PKTINFO ipi_ifindex will
2561 * be set to the loopback interface as well.
2562 */
2563 do_cache = false;
2564 }
2566 fnhe = NULL;
2567 do_cache &= fi != NULL;
2568 if (fi) {
2569 struct fib_nh_common *nhc = FIB_RES_NHC(*res);
2570 struct rtable __rcu **prth;
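/* Choose where the cached output dst lives: a matching PMTU/redirect
 * exception (fnhe) takes precedence over the nexthop's per-CPU output
 * cache consulted below.
 */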
2572 fnhe = find_exception(nhc, fl4->daddr);
2573 if (!do_cache)
2574 goto add;
2575 if (fnhe) {
2576 prth = &fnhe->fnhe_rth_output;
2577 } else {
2578 if (unlikely(fl4->flowi4_flags &
2579 FLOWI_FLAG_KNOWN_NH &&
2580 !(nhc->nhc_gw_family &&
2581 nhc->nhc_scope == RT_SCOPE_LINK))) {
2582 do_cache = false;
2583 goto add;
2584 }
2585 prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
2586 }
2587 rth = rcu_dereference(*prth);
2588 if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
2589 return rth;
2590 }
2592 add:
2593 rth = rt_dst_alloc(dev_out, flags, type,
2594 IN_DEV_ORCONF(in_dev, NOXFRM));
2595 if (!rth)
2596 return ERR_PTR(-ENOBUFS);
2598 rth->rt_iif = orig_oif;
2600 RT_CACHE_STAT_INC(out_slow_tot);
2602 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2603 if (flags & RTCF_LOCAL &&
2604 !(dev_out->flags & IFF_LOOPBACK)) {
2605 rth->dst.output = ip_mc_output;
2606 RT_CACHE_STAT_INC(out_slow_mc);
2607 }
2608 #ifdef CONFIG_IP_MROUTE
2609 if (type == RTN_MULTICAST) {
2610 if (IN_DEV_MFORWARD(in_dev) &&
2611 !ipv4_is_local_multicast(fl4->daddr)) {
2612 rth->dst.input = ip_mr_input;
2613 rth->dst.output = ip_mc_output;
2614 }
2615 }
2616 #endif
2617 }
2619 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
2620 lwtunnel_set_redirect(&rth->dst);
2622 return rth;
2623 }
2625 /*
2626 * Major route resolver routine.
2627 */
2629 struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
2630 const struct sk_buff *skb)
2631 {
2632 struct fib_result res = {
2633 .type = RTN_UNSPEC,
2634 .fi = NULL,
2635 .table = NULL,
2636 .tclassid = 0,
2637 };
2638 struct rtable *rth;
2640 fl4->flowi4_iif = LOOPBACK_IFINDEX;
2641 ip_rt_fix_tos(fl4);
2643 rcu_read_lock();
2644 rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
2645 rcu_read_unlock();
2647 return rth;
2648 }
2649 EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
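/* Illustrative only: most callers reach this resolver through the
 * ip_route_output_key() wrapper with a hand-built flow, e.g.:
 *
 *	struct flowi4 fl4 = {
 *		.daddr = daddr,
 *		.saddr = saddr,
 *		.flowi4_tos = RT_TOS(tos),
 *		.flowi4_oif = oif,
 *	};
 *	struct rtable *rt = ip_route_output_key(net, &fl4);
 *
 *	if (IS_ERR(rt))
 *		return PTR_ERR(rt);
 */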
2651 struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
2652 struct fib_result *res,
2653 const struct sk_buff *skb)
2654 {
2655 struct net_device *dev_out = NULL;
2656 int orig_oif = fl4->flowi4_oif;
2657 unsigned int flags = 0;
2658 struct rtable *rth;
2659 int err;
2661 if (fl4->saddr) {
2662 if (ipv4_is_multicast(fl4->saddr) ||
2663 ipv4_is_lbcast(fl4->saddr) ||
2664 ipv4_is_zeronet(fl4->saddr)) {
2665 rth = ERR_PTR(-EINVAL);
2666 goto out;
2667 }
2669 rth = ERR_PTR(-ENETUNREACH);
2671 /* I removed check for oif == dev_out->oif here.
2672 * It was wrong for two reasons:
2673 * 1. ip_dev_find(net, saddr) can return the wrong iface if saddr
2674 * is assigned to multiple interfaces.
2675 * 2. Moreover, we are allowed to send packets with saddr
2676 * of another iface. --ANK
2677 */
2679 if (fl4->flowi4_oif == 0 &&
2680 (ipv4_is_multicast(fl4->daddr) ||
2681 ipv4_is_lbcast(fl4->daddr))) {
2682 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2683 dev_out = __ip_dev_find(net, fl4->saddr, false);
2684 if (!dev_out)
2685 goto out;
2687 /* Special hack: user can direct multicasts
2688 * and limited broadcast via necessary interface
2689 * without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2690 * This hack is not just for fun, it allows
2691 * vic,vat and friends to work.
2692 * They bind socket to loopback, set ttl to zero
2693 * and expect that it will work.
2694 * From the viewpoint of routing cache they are broken,
2695 * because we are not allowed to build multicast path
2696 * with loopback source addr (look, routing cache
2697 * cannot know, that ttl is zero, so that packet
2698 * will not leave this host and route is valid).
2699 * Luckily, this hack is a good workaround.
2700 */
2702 fl4->flowi4_oif = dev_out->ifindex;
2703 goto make_route;
2704 }
2706 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2707 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2708 if (!__ip_dev_find(net, fl4->saddr, false))
2709 goto out;
2710 }
2711 }
2714 if (fl4->flowi4_oif) {
2715 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2716 rth = ERR_PTR(-ENODEV);
2717 if (!dev_out)
2718 goto out;
2720 /* RACE: Check return value of inet_select_addr instead. */
2721 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2722 rth = ERR_PTR(-ENETUNREACH);
2723 goto out;
2724 }
2725 if (ipv4_is_local_multicast(fl4->daddr) ||
2726 ipv4_is_lbcast(fl4->daddr) ||
2727 fl4->flowi4_proto == IPPROTO_IGMP) {
2728 if (!fl4->saddr)
2729 fl4->saddr = inet_select_addr(dev_out, 0,
2730 RT_SCOPE_LINK);
2731 goto make_route;
2732 }
2733 if (!fl4->saddr) {
2734 if (ipv4_is_multicast(fl4->daddr))
2735 fl4->saddr = inet_select_addr(dev_out, 0,
2736 fl4->flowi4_scope);
2737 else if (!fl4->daddr)
2738 fl4->saddr = inet_select_addr(dev_out, 0,
2739 RT_SCOPE_HOST);
2740 }
2741 }
2743 if (!fl4->daddr) {
2744 fl4->daddr = fl4->saddr;
2745 if (!fl4->daddr)
2746 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2747 dev_out = net->loopback_dev;
2748 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2749 res->type = RTN_LOCAL;
2750 flags |= RTCF_LOCAL;
2751 goto make_route;
2752 }
2754 err = fib_lookup(net, fl4, res, 0);
2755 if (err) {
2756 res->fi = NULL;
2757 res->table = NULL;
2758 if (fl4->flowi4_oif &&
2759 (ipv4_is_multicast(fl4->daddr) || !fl4->flowi4_l3mdev)) {
2760 /* Apparently, routing tables are wrong. Assume,
2761 * that the destination is on link.
2762 *
2763 * WHY? DW.
2764 * Because we are allowed to send to iface
2765 * even if it has NO routes and NO assigned
2766 * addresses. When oif is specified, routing
2767 * tables are looked up with only one purpose:
2768 * to catch if destination is gatewayed, rather than
2769 * direct. Moreover, if MSG_DONTROUTE is set,
2770 * we send packet, ignoring both routing tables
2771 * and ifaddr state. --ANK
2772 *
2773 *
2774 * We could make it even if oif is unknown,
2775 * likely IPv6, but we do not.
2776 */
2778 if (fl4->saddr == 0)
2779 fl4->saddr = inet_select_addr(dev_out, 0,
2780 RT_SCOPE_LINK);
2781 res->type = RTN_UNICAST;
2782 goto make_route;
2783 }
2784 rth = ERR_PTR(err);
2785 goto out;
2786 }
2788 if (res->type == RTN_LOCAL) {
2789 if (!fl4->saddr) {
2790 if (res->fi->fib_prefsrc)
2791 fl4->saddr = res->fi->fib_prefsrc;
2792 else
2793 fl4->saddr = fl4->daddr;
2794 }
2796 /* L3 master device is the loopback for that domain */
2797 dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
2798 net->loopback_dev;
2800 /* make sure orig_oif points to fib result device even
2801 * though packet rx/tx happens over loopback or l3mdev
2802 */
2803 orig_oif = FIB_RES_OIF(*res);
2805 fl4->flowi4_oif = dev_out->ifindex;
2806 flags |= RTCF_LOCAL;
2807 goto make_route;
2808 }
2810 fib_select_path(net, res, fl4, skb);
2812 dev_out = FIB_RES_DEV(*res);
2814 make_route:
2815 rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);
2817 out:
2818 return rth;
2819 }
2821 static struct dst_ops ipv4_dst_blackhole_ops = {
2822 .family = AF_INET,
2823 .default_advmss = ipv4_default_advmss,
2824 .neigh_lookup = ipv4_neigh_lookup,
2825 .check = dst_blackhole_check,
2826 .cow_metrics = dst_blackhole_cow_metrics,
2827 .update_pmtu = dst_blackhole_update_pmtu,
2828 .redirect = dst_blackhole_redirect,
2829 .mtu = dst_blackhole_mtu,
2830 };
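/* These blackhole dst_ops back the route returned by
 * ipv4_blackhole_route() below: every method is a no-op and both
 * input and output paths simply discard packets, which lets the xfrm
 * code hand out a safe placeholder while callers still hold a dst.
 */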
2832 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2833 {
2834 struct rtable *ort = (struct rtable *) dst_orig;
2835 struct rtable *rt;
2837 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, DST_OBSOLETE_DEAD, 0);
2838 if (rt) {
2839 struct dst_entry *new = &rt->dst;
2841 new->__use = 1;
2842 new->input = dst_discard;
2843 new->output = dst_discard_out;
2845 new->dev = net->loopback_dev;
2846 netdev_hold(new->dev, &new->dev_tracker, GFP_ATOMIC);
2848 rt->rt_is_input = ort->rt_is_input;
2849 rt->rt_iif = ort->rt_iif;
2850 rt->rt_pmtu = ort->rt_pmtu;
2851 rt->rt_mtu_locked = ort->rt_mtu_locked;
2853 rt->rt_genid = rt_genid_ipv4(net);
2854 rt->rt_flags = ort->rt_flags;
2855 rt->rt_type = ort->rt_type;
2856 rt->rt_uses_gateway = ort->rt_uses_gateway;
2857 rt->rt_gw_family = ort->rt_gw_family;
2858 if (rt->rt_gw_family == AF_INET)
2859 rt->rt_gw4 = ort->rt_gw4;
2860 else if (rt->rt_gw_family == AF_INET6)
2861 rt->rt_gw6 = ort->rt_gw6;
2862 }
2864 dst_release(dst_orig);
2866 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2867 }
2869 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2870 const struct sock *sk)
2871 {
2872 struct rtable *rt = __ip_route_output_key(net, flp4);
2874 if (IS_ERR(rt))
2875 return rt;
2877 if (flp4->flowi4_proto) {
2878 flp4->flowi4_oif = rt->dst.dev->ifindex;
2879 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2880 flowi4_to_flowi(flp4),
2881 sk, 0);
2882 }
2884 return rt;
2885 }
2886 EXPORT_SYMBOL_GPL(ip_route_output_flow);
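/* Illustrative sketch of a socket-driven lookup (simplified from what
 * callers usually do via higher-level helpers):
 *
 *	struct flowi4 fl4;
 *
 *	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark,
 *			   RT_TOS(inet_sk(sk)->tos), RT_SCOPE_UNIVERSE,
 *			   sk->sk_protocol, 0, daddr, saddr,
 *			   dport, sport, sk->sk_uid);
 *	rt = ip_route_output_flow(net, &fl4, sk);
 *
 * The xfrm hop above only runs when flowi4_proto is set, so callers
 * that want policy lookup must fill it in.
 */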
2888 /* called with rcu_read_lock held */
2889 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2890 struct rtable *rt, u32 table_id, struct flowi4 *fl4,
2891 struct sk_buff *skb, u32 portid, u32 seq,
2892 unsigned int flags)
2893 {
2894 struct rtmsg *r;
2895 struct nlmsghdr *nlh;
2896 unsigned long expires = 0;
2897 u32 error;
2898 u32 metrics[RTAX_MAX];
2900 nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
2901 if (!nlh)
2902 return -EMSGSIZE;
2904 r = nlmsg_data(nlh);
2905 r->rtm_family = AF_INET;
2906 r->rtm_dst_len = 32;
2907 r->rtm_src_len = 0;
2908 r->rtm_tos = fl4 ? fl4->flowi4_tos : 0;
2909 r->rtm_table = table_id < 256 ? table_id : RT_TABLE_COMPAT;
2910 if (nla_put_u32(skb, RTA_TABLE, table_id))
2911 goto nla_put_failure;
2912 r->rtm_type = rt->rt_type;
2913 r->rtm_scope = RT_SCOPE_UNIVERSE;
2914 r->rtm_protocol = RTPROT_UNSPEC;
2915 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2916 if (rt->rt_flags & RTCF_NOTIFY)
2917 r->rtm_flags |= RTM_F_NOTIFY;
2918 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2919 r->rtm_flags |= RTCF_DOREDIRECT;
2921 if (nla_put_in_addr(skb, RTA_DST, dst))
2922 goto nla_put_failure;
2923 if (src) {
2924 r->rtm_src_len = 32;
2925 if (nla_put_in_addr(skb, RTA_SRC, src))
2926 goto nla_put_failure;
2927 }
2928 if (rt->dst.dev &&
2929 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2930 goto nla_put_failure;
2931 if (rt->dst.lwtstate &&
2932 lwtunnel_fill_encap(skb, rt->dst.lwtstate, RTA_ENCAP, RTA_ENCAP_TYPE) < 0)
2933 goto nla_put_failure;
2934 #ifdef CONFIG_IP_ROUTE_CLASSID
2935 if (rt->dst.tclassid &&
2936 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2937 goto nla_put_failure;
2938 #endif
2939 if (fl4 && !rt_is_input_route(rt) &&
2940 fl4->saddr != src) {
2941 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2942 goto nla_put_failure;
2943 }
2944 if (rt->rt_uses_gateway) {
2945 if (rt->rt_gw_family == AF_INET &&
2946 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
2947 goto nla_put_failure;
2948 } else if (rt->rt_gw_family == AF_INET6) {
2949 int alen = sizeof(struct in6_addr);
2950 struct nlattr *nla;
2951 struct rtvia *via;
2953 nla = nla_reserve(skb, RTA_VIA, alen + 2);
2954 if (!nla)
2955 goto nla_put_failure;
2957 via = nla_data(nla);
2958 via->rtvia_family = AF_INET6;
2959 memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
2960 }
2961 }
2963 expires = rt->dst.expires;
2964 if (expires) {
2965 unsigned long now = jiffies;
2967 if (time_before(now, expires))
2968 expires -= now;
2969 else
2970 expires = 0;
2971 }
2973 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2974 if (rt->rt_pmtu && expires)
2975 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2976 if (rt->rt_mtu_locked && expires)
2977 metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
2978 if (rtnetlink_put_metrics(skb, metrics) < 0)
2979 goto nla_put_failure;
2981 if (fl4) {
2982 if (fl4->flowi4_mark &&
2983 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2984 goto nla_put_failure;
2986 if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
2987 nla_put_u32(skb, RTA_UID,
2988 from_kuid_munged(current_user_ns(),
2989 fl4->flowi4_uid)))
2990 goto nla_put_failure;
2992 if (rt_is_input_route(rt)) {
2993 #ifdef CONFIG_IP_MROUTE
2994 if (ipv4_is_multicast(dst) &&
2995 !ipv4_is_local_multicast(dst) &&
2996 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2997 int err = ipmr_get_route(net, skb,
2998 fl4->saddr, fl4->daddr,
2999 r, portid);
3001 if (err <= 0) {
3002 if (err == 0)
3003 return 0;
3004 goto nla_put_failure;
3005 }
3006 } else
3007 #endif
3008 if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
3009 goto nla_put_failure;
3010 }
3011 }
3013 error = rt->dst.error;
3015 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
3016 goto nla_put_failure;
3018 nlmsg_end(skb, nlh);
3019 return 0;
3021 nla_put_failure:
3022 nlmsg_cancel(skb, nlh);
3023 return -EMSGSIZE;
3024 }
3026 static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
3027 struct netlink_callback *cb, u32 table_id,
3028 struct fnhe_hash_bucket *bucket, int genid,
3029 int *fa_index, int fa_start, unsigned int flags)
3030 {
3031 int i;
3033 for (i = 0; i < FNHE_HASH_SIZE; i++) {
3034 struct fib_nh_exception *fnhe;
3036 for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
3037 fnhe = rcu_dereference(fnhe->fnhe_next)) {
3038 struct rtable *rt;
3039 int err;
3041 if (*fa_index < fa_start)
3042 goto next;
3044 if (fnhe->fnhe_genid != genid)
3045 goto next;
3047 if (fnhe->fnhe_expires &&
3048 time_after(jiffies, fnhe->fnhe_expires))
3049 goto next;
3051 rt = rcu_dereference(fnhe->fnhe_rth_input);
3052 if (!rt)
3053 rt = rcu_dereference(fnhe->fnhe_rth_output);
3054 if (!rt)
3055 goto next;
3057 err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
3058 table_id, NULL, skb,
3059 NETLINK_CB(cb->skb).portid,
3060 cb->nlh->nlmsg_seq, flags);
3061 if (err)
3062 return err;
3063 next:
3064 (*fa_index)++;
3065 }
3066 }
3068 return 0;
3069 }
3071 int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
3072 u32 table_id, struct fib_info *fi,
3073 int *fa_index, int fa_start, unsigned int flags)
3074 {
3075 struct net *net = sock_net(cb->skb->sk);
3076 int nhsel, genid = fnhe_genid(net);
3078 for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
3079 struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
3080 struct fnhe_hash_bucket *bucket;
3081 int err;
3083 if (nhc->nhc_flags & RTNH_F_DEAD)
3084 continue;
3086 rcu_read_lock();
3087 bucket = rcu_dereference(nhc->nhc_exceptions);
3088 err = 0;
3089 if (bucket)
3090 err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
3091 genid, fa_index, fa_start,
3092 flags);
3093 rcu_read_unlock();
3094 if (err)
3095 return err;
3096 }
3098 return 0;
3099 }
3101 static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
3102 u8 ip_proto, __be16 sport,
3103 __be16 dport)
3104 {
3105 struct sk_buff *skb;
3106 struct iphdr *iph;
3108 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
3109 if (!skb)
3110 return NULL;
3112 /* Reserve room for dummy headers; this skb can pass
3113 * through a good chunk of the routing engine.
3114 */
3115 skb_reset_mac_header(skb);
3116 skb_reset_network_header(skb);
3117 skb->protocol = htons(ETH_P_IP);
3118 iph = skb_put(skb, sizeof(struct iphdr));
3119 iph->protocol = ip_proto;
3120 iph->saddr = src;
3121 iph->daddr = dst;
3122 iph->version = 0x4;
3123 iph->frag_off = 0;
3124 iph->ihl = 0x5;
3125 skb_set_transport_header(skb, skb->len);
3127 switch (iph->protocol) {
3128 case IPPROTO_UDP: {
3129 struct udphdr *udph;
3131 udph = skb_put_zero(skb, sizeof(struct udphdr));
3132 udph->source = sport;
3133 udph->dest = dport;
3134 udph->len = htons(sizeof(struct udphdr));
3135 udph->check = 0;
3136 break;
3137 }
3138 case IPPROTO_TCP: {
3139 struct tcphdr *tcph;
3141 tcph = skb_put_zero(skb, sizeof(struct tcphdr));
3142 tcph->source = sport;
3143 tcph->dest = dport;
3144 tcph->doff = sizeof(struct tcphdr) / 4;
3145 tcph->rst = 1;
3146 tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
3147 src, dst, 0);
3148 break;
3149 }
3150 case IPPROTO_ICMP: {
3151 struct icmphdr *icmph;
3153 icmph = skb_put_zero(skb, sizeof(struct icmphdr));
3154 icmph->type = ICMP_ECHO;
3155 icmph->code = 0;
3156 }
3157 }
3159 return skb;
3160 }
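/* The synthetic L4 header built above is what allows fib rules and
 * multipath hash policies that look at ports to treat this netlink
 * probe like a real packet of the requested protocol.
 */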
3162 static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
3163 const struct nlmsghdr *nlh,
3164 struct nlattr **tb,
3165 struct netlink_ext_ack *extack)
3166 {
3167 struct rtmsg *rtm;
3168 int i, err;
3170 if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
3171 NL_SET_ERR_MSG(extack,
3172 "ipv4: Invalid header for route get request");
3176 if (!netlink_strict_get_check(skb))
3177 return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
3178 rtm_ipv4_policy, extack);
3180 rtm = nlmsg_data(nlh);
3181 if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
3182 (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
3183 rtm->rtm_table || rtm->rtm_protocol ||
3184 rtm->rtm_scope || rtm->rtm_type) {
3185 NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
3189 if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
3190 RTM_F_LOOKUP_TABLE |
3191 RTM_F_FIB_MATCH)) {
3192 NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
3193 return -EINVAL;
3194 }
3196 err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
3197 rtm_ipv4_policy, extack);
3198 if (err)
3199 return err;
3201 if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
3202 (tb[RTA_DST] && !rtm->rtm_dst_len)) {
3203 NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
3207 for (i = 0; i <= RTA_MAX; i++) {
3208 if (!tb[i])
3209 continue;
3211 switch (i) {
3212 case RTA_IIF:
3213 case RTA_OIF:
3214 case RTA_SRC:
3215 case RTA_DST:
3216 case RTA_IP_PROTO:
3217 case RTA_SPORT:
3218 case RTA_DPORT:
3219 case RTA_MARK:
3220 case RTA_UID:
3221 break;
3222 default:
3223 NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
3224 return -EINVAL;
3225 }
3226 }
3228 return 0;
3229 }
3231 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
3232 struct netlink_ext_ack *extack)
3233 {
3234 struct net *net = sock_net(in_skb->sk);
3235 struct nlattr *tb[RTA_MAX+1];
3236 u32 table_id = RT_TABLE_MAIN;
3237 __be16 sport = 0, dport = 0;
3238 struct fib_result res = {};
3239 u8 ip_proto = IPPROTO_UDP;
3240 struct rtable *rt = NULL;
3241 struct sk_buff *skb;
3242 struct rtmsg *rtm;
3243 struct flowi4 fl4 = {};
3244 __be32 dst = 0;
3245 __be32 src = 0;
3246 kuid_t uid;
3247 u32 iif;
3248 int err;
3249 int mark;
3251 err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
3252 if (err < 0)
3253 return err;
3255 rtm = nlmsg_data(nlh);
3256 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
3257 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
3258 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
3259 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
3260 if (tb[RTA_UID])
3261 uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
3262 else
3263 uid = (iif ? INVALID_UID : current_uid());
3265 if (tb[RTA_IP_PROTO]) {
3266 err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
3267 &ip_proto, AF_INET, extack);
3268 if (err)
3269 return err;
3270 }
3272 if (tb[RTA_SPORT])
3273 sport = nla_get_be16(tb[RTA_SPORT]);
3275 if (tb[RTA_DPORT])
3276 dport = nla_get_be16(tb[RTA_DPORT]);
3278 skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
3279 if (!skb)
3280 return -ENOBUFS;
3282 fl4.daddr = dst;
3283 fl4.saddr = src;
3284 fl4.flowi4_tos = rtm->rtm_tos & IPTOS_RT_MASK;
3285 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
3286 fl4.flowi4_mark = mark;
3287 fl4.flowi4_uid = uid;
3288 if (sport)
3289 fl4.fl4_sport = sport;
3290 if (dport)
3291 fl4.fl4_dport = dport;
3292 fl4.flowi4_proto = ip_proto;
3294 rcu_read_lock();
3296 if (iif) {
3297 struct net_device *dev;
3299 dev = dev_get_by_index_rcu(net, iif);
3300 if (!dev) {
3301 err = -ENODEV;
3302 goto errout_rcu;
3303 }
3305 fl4.flowi4_iif = iif; /* for rt_fill_info */
3306 skb->dev = dev;
3307 skb->mark = mark;
3308 err = ip_route_input_rcu(skb, dst, src,
3309 rtm->rtm_tos & IPTOS_RT_MASK, dev,
3310 &res);
3312 rt = skb_rtable(skb);
3313 if (err == 0 && rt->dst.error)
3314 err = -rt->dst.error;
3315 } else {
3316 fl4.flowi4_iif = LOOPBACK_IFINDEX;
3317 skb->dev = net->loopback_dev;
3318 rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
3319 err = 0;
3320 if (IS_ERR(rt))
3321 err = PTR_ERR(rt);
3322 else
3323 skb_dst_set(skb, &rt->dst);
3324 }
3326 if (err)
3327 goto errout_rcu;
3329 if (rtm->rtm_flags & RTM_F_NOTIFY)
3330 rt->rt_flags |= RTCF_NOTIFY;
3332 if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
3333 table_id = res.table ? res.table->tb_id : 0;
3335 /* reset skb for netlink reply msg */
3337 skb_reset_network_header(skb);
3338 skb_reset_transport_header(skb);
3339 skb_reset_mac_header(skb);
3341 if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
3342 struct fib_rt_info fri;
3344 if (!res.fi) {
3345 err = fib_props[res.type].error;
3346 if (!err)
3347 err = -EHOSTUNREACH;
3348 goto errout_rcu;
3349 }
3350 fri.fi = res.fi;
3351 fri.tb_id = table_id;
3352 fri.dst = res.prefix;
3353 fri.dst_len = res.prefixlen;
3354 fri.dscp = inet_dsfield_to_dscp(fl4.flowi4_tos);
3355 fri.type = rt->rt_type;
3356 fri.offload = 0;
3357 fri.trap = 0;
3358 fri.offload_failed = 0;
3359 if (res.fa_head) {
3360 struct fib_alias *fa;
3362 hlist_for_each_entry_rcu(fa, res.fa_head, fa_list) {
3363 u8 slen = 32 - fri.dst_len;
3365 if (fa->fa_slen == slen &&
3366 fa->tb_id == fri.tb_id &&
3367 fa->fa_dscp == fri.dscp &&
3368 fa->fa_info == res.fi &&
3369 fa->fa_type == fri.type) {
3370 fri.offload = READ_ONCE(fa->offload);
3371 fri.trap = READ_ONCE(fa->trap);
3372 fri.offload_failed =
3373 READ_ONCE(fa->offload_failed);
3374 break;
3375 }
3376 }
3377 }
3378 err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
3379 nlh->nlmsg_seq, RTM_NEWROUTE, &fri, 0);
3380 } else {
3381 err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
3382 NETLINK_CB(in_skb).portid,
3383 nlh->nlmsg_seq, 0);
3384 }
3385 if (err < 0)
3386 goto errout_rcu;
3388 rcu_read_unlock();
3390 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
3392 errout_free:
3393 return err;
3394 errout_rcu:
3395 rcu_read_unlock();
3396 kfree_skb(skb);
3397 goto errout_free;
3398 }
3400 void ip_rt_multicast_event(struct in_device *in_dev)
3401 {
3402 rt_cache_flush(dev_net(in_dev->dev));
3403 }
3405 #ifdef CONFIG_SYSCTL
3406 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
3407 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
3408 static int ip_rt_gc_elasticity __read_mostly = 8;
3409 static int ip_min_valid_pmtu __read_mostly = IPV4_MIN_MTU;
3411 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
3412 void *buffer, size_t *lenp, loff_t *ppos)
3413 {
3414 struct net *net = (struct net *)__ctl->extra1;
3416 if (write) {
3417 rt_cache_flush(net);
3418 fnhe_genid_bump(net);
3419 return 0;
3420 }
3422 return -EINVAL;
3423 }
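/* This handler is write-only, e.g.:
 *
 *	echo 1 > /proc/sys/net/ipv4/route/flush
 *
 * Any write flushes the cache and bumps the fnhe genid so stale
 * exceptions are dropped; reads fail with -EINVAL.
 */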
3425 static struct ctl_table ipv4_route_table[] = {
3426 {
3427 .procname = "gc_thresh",
3428 .data = &ipv4_dst_ops.gc_thresh,
3429 .maxlen = sizeof(int),
3430 .mode = 0644,
3431 .proc_handler = proc_dointvec,
3432 },
3433 {
3434 .procname = "max_size",
3435 .data = &ip_rt_max_size,
3436 .maxlen = sizeof(int),
3437 .mode = 0644,
3438 .proc_handler = proc_dointvec,
3439 },
3440 {
3441 /* Deprecated. Use gc_min_interval_ms */
3443 .procname = "gc_min_interval",
3444 .data = &ip_rt_gc_min_interval,
3445 .maxlen = sizeof(int),
3446 .mode = 0644,
3447 .proc_handler = proc_dointvec_jiffies,
3448 },
3449 {
3450 .procname = "gc_min_interval_ms",
3451 .data = &ip_rt_gc_min_interval,
3452 .maxlen = sizeof(int),
3453 .mode = 0644,
3454 .proc_handler = proc_dointvec_ms_jiffies,
3455 },
3456 {
3457 .procname = "gc_timeout",
3458 .data = &ip_rt_gc_timeout,
3459 .maxlen = sizeof(int),
3460 .mode = 0644,
3461 .proc_handler = proc_dointvec_jiffies,
3462 },
3463 {
3464 .procname = "gc_interval",
3465 .data = &ip_rt_gc_interval,
3466 .maxlen = sizeof(int),
3467 .mode = 0644,
3468 .proc_handler = proc_dointvec_jiffies,
3469 },
3470 {
3471 .procname = "redirect_load",
3472 .data = &ip_rt_redirect_load,
3473 .maxlen = sizeof(int),
3474 .mode = 0644,
3475 .proc_handler = proc_dointvec,
3476 },
3477 {
3478 .procname = "redirect_number",
3479 .data = &ip_rt_redirect_number,
3480 .maxlen = sizeof(int),
3481 .mode = 0644,
3482 .proc_handler = proc_dointvec,
3483 },
3484 {
3485 .procname = "redirect_silence",
3486 .data = &ip_rt_redirect_silence,
3487 .maxlen = sizeof(int),
3488 .mode = 0644,
3489 .proc_handler = proc_dointvec,
3490 },
3491 {
3492 .procname = "error_cost",
3493 .data = &ip_rt_error_cost,
3494 .maxlen = sizeof(int),
3495 .mode = 0644,
3496 .proc_handler = proc_dointvec,
3497 },
3498 {
3499 .procname = "error_burst",
3500 .data = &ip_rt_error_burst,
3501 .maxlen = sizeof(int),
3502 .mode = 0644,
3503 .proc_handler = proc_dointvec,
3504 },
3505 {
3506 .procname = "gc_elasticity",
3507 .data = &ip_rt_gc_elasticity,
3508 .maxlen = sizeof(int),
3509 .mode = 0644,
3510 .proc_handler = proc_dointvec,
3511 },
3512 { }
3513 };
3515 static const char ipv4_route_flush_procname[] = "flush";
3517 static struct ctl_table ipv4_route_netns_table[] = {
3518 {
3519 .procname = ipv4_route_flush_procname,
3520 .maxlen = sizeof(int),
3521 .mode = 0200,
3522 .proc_handler = ipv4_sysctl_rtcache_flush,
3523 },
3524 {
3525 .procname = "min_pmtu",
3526 .data = &init_net.ipv4.ip_rt_min_pmtu,
3527 .maxlen = sizeof(int),
3528 .mode = 0644,
3529 .proc_handler = proc_dointvec_minmax,
3530 .extra1 = &ip_min_valid_pmtu,
3531 },
3532 {
3533 .procname = "mtu_expires",
3534 .data = &init_net.ipv4.ip_rt_mtu_expires,
3535 .maxlen = sizeof(int),
3536 .mode = 0644,
3537 .proc_handler = proc_dointvec_jiffies,
3538 },
3539 {
3540 .procname = "min_adv_mss",
3541 .data = &init_net.ipv4.ip_rt_min_advmss,
3542 .maxlen = sizeof(int),
3543 .mode = 0644,
3544 .proc_handler = proc_dointvec,
3545 },
3546 { }
3547 };
3549 static __net_init int sysctl_route_net_init(struct net *net)
3550 {
3551 struct ctl_table *tbl;
3552 size_t table_size = ARRAY_SIZE(ipv4_route_netns_table);
3554 tbl = ipv4_route_netns_table;
3555 if (!net_eq(net, &init_net)) {
3556 int i;
3558 tbl = kmemdup(tbl, sizeof(ipv4_route_netns_table), GFP_KERNEL);
3559 if (!tbl)
3560 goto err_dup;
3562 /* Don't export non-whitelisted sysctls to unprivileged users */
3563 if (net->user_ns != &init_user_ns) {
3564 if (tbl[0].procname != ipv4_route_flush_procname) {
3565 tbl[0].procname = NULL;
3566 table_size = 0;
3567 }
3568 }
3570 /* Update the variables to point into the current struct net
3571 * except for the first element flush
3572 */
3573 for (i = 1; i < ARRAY_SIZE(ipv4_route_netns_table) - 1; i++)
3574 tbl[i].data += (void *)net - (void *)&init_net;
3575 }
3576 tbl[0].extra1 = net;
3578 net->ipv4.route_hdr = register_net_sysctl_sz(net, "net/ipv4/route",
3579 tbl, table_size);
3580 if (!net->ipv4.route_hdr)
3581 goto err_reg;
3582 return 0;
3584 err_reg:
3585 if (tbl != ipv4_route_netns_table)
3586 kfree(tbl);
3587 err_dup:
3588 return -ENOMEM;
3589 }
3591 static __net_exit void sysctl_route_net_exit(struct net *net)
3592 {
3593 struct ctl_table *tbl;
3595 tbl = net->ipv4.route_hdr->ctl_table_arg;
3596 unregister_net_sysctl_table(net->ipv4.route_hdr);
3597 BUG_ON(tbl == ipv4_route_netns_table);
3598 kfree(tbl);
3599 }
3601 static __net_initdata struct pernet_operations sysctl_route_ops = {
3602 .init = sysctl_route_net_init,
3603 .exit = sysctl_route_net_exit,
3604 };
3605 #endif
3607 static __net_init int netns_ip_rt_init(struct net *net)
3608 {
3609 /* Set default values for namespaced sysctls */
3610 net->ipv4.ip_rt_min_pmtu = DEFAULT_MIN_PMTU;
3611 net->ipv4.ip_rt_mtu_expires = DEFAULT_MTU_EXPIRES;
3612 net->ipv4.ip_rt_min_advmss = DEFAULT_MIN_ADVMSS;
3613 return 0;
3614 }
3616 static struct pernet_operations __net_initdata ip_rt_ops = {
3617 .init = netns_ip_rt_init,
3618 };
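/* Cached routes are validated against the generation counters set up
 * below: rt_cache_flush() merely bumps rt_genid, which makes every
 * cached dst in the netns fail rt_is_expired() without walking them.
 */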
3620 static __net_init int rt_genid_init(struct net *net)
3621 {
3622 atomic_set(&net->ipv4.rt_genid, 0);
3623 atomic_set(&net->fnhe_genid, 0);
3624 atomic_set(&net->ipv4.dev_addr_genid, get_random_u32());
3625 return 0;
3626 }
3628 static __net_initdata struct pernet_operations rt_genid_ops = {
3629 .init = rt_genid_init,
3630 };
3632 static int __net_init ipv4_inetpeer_init(struct net *net)
3633 {
3634 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
3636 if (!bp)
3637 return -ENOMEM;
3638 inet_peer_base_init(bp);
3639 net->ipv4.peers = bp;
3640 return 0;
3641 }
3643 static void __net_exit ipv4_inetpeer_exit(struct net *net)
3644 {
3645 struct inet_peer_base *bp = net->ipv4.peers;
3647 net->ipv4.peers = NULL;
3648 inetpeer_invalidate_tree(bp);
3649 kfree(bp);
3650 }
3652 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
3653 .init = ipv4_inetpeer_init,
3654 .exit = ipv4_inetpeer_exit,
3655 };
3657 #ifdef CONFIG_IP_ROUTE_CLASSID
3658 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
3659 #endif /* CONFIG_IP_ROUTE_CLASSID */
3661 int __init ip_rt_init(void)
3662 {
3663 void *idents_hash;
3664 int cpu;
3666 /* For modern hosts, this will use 2 MB of memory */
3667 idents_hash = alloc_large_system_hash("IP idents",
3668 sizeof(*ip_idents) + sizeof(*ip_tstamps),
3669 0,
3670 16, /* one bucket per 64 KB */
3671 HASH_ZERO,
3672 NULL,
3673 &ip_idents_mask,
3674 2048,
3675 256*1024);
3677 ip_idents = idents_hash;
3679 get_random_bytes(ip_idents, (ip_idents_mask + 1) * sizeof(*ip_idents));
3681 ip_tstamps = idents_hash + (ip_idents_mask + 1) * sizeof(*ip_idents);
3683 for_each_possible_cpu(cpu) {
3684 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
3686 INIT_LIST_HEAD(&ul->head);
3687 INIT_LIST_HEAD(&ul->quarantine);
3688 spin_lock_init(&ul->lock);
3689 }
3690 #ifdef CONFIG_IP_ROUTE_CLASSID
3691 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
3692 if (!ip_rt_acct)
3693 panic("IP: failed to allocate ip_rt_acct\n");
3694 #endif
3696 ipv4_dst_ops.kmem_cachep =
3697 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
3698 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3700 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
3702 if (dst_entries_init(&ipv4_dst_ops) < 0)
3703 panic("IP: failed to allocate ipv4_dst_ops counter\n");
3705 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
3706 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
3708 ipv4_dst_ops.gc_thresh = ~0;
3709 ip_rt_max_size = INT_MAX;
3711 devinet_init();
3712 ip_fib_init();
3714 if (ip_rt_proc_init())
3715 pr_err("Unable to create route proc files\n");
3720 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
3721 RTNL_FLAG_DOIT_UNLOCKED);
3723 #ifdef CONFIG_SYSCTL
3724 register_pernet_subsys(&sysctl_route_ops);
3725 #endif
3726 register_pernet_subsys(&ip_rt_ops);
3727 register_pernet_subsys(&rt_genid_ops);
3728 register_pernet_subsys(&ipv4_inetpeer_ops);
3729 return 0;
3730 }
3732 #ifdef CONFIG_SYSCTL
3733 /*
3734 * We really need to sanitize the damn ipv4 init order, then all
3735 * this nonsense will go away.
3736 */
3737 void __init ip_static_sysctl_init(void)
3738 {
3739 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
3740 }
3741 #endif