// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		ROUTE - implementation of the IP router.
 *
 * Fixes:
 *		Alan Cox	:	Verify area fixes.
 *		Alan Cox	:	cli() protects routing changes
 *		Rui Oliveira	:	ICMP routing table updates
 *		Linus Torvalds	:	Rewrote bits to be sensible
 *		Alan Cox	:	Added BSD route gw semantics
 *		Alan Cox	:	Super /proc >4K
 *		Alan Cox	:	MTU in route table
 *		Alan Cox	:	MSS actually. Also added the window
 *					clamper.
 *		Sam Lantinga	:	Fixed route matching in rt_del()
 *		Alan Cox	:	Routing cache support.
 *		Alan Cox	:	Removed compatibility cruft.
 *		Alan Cox	:	RTF_REJECT support.
 *		Alan Cox	:	TCP irtt support.
 *		Jonathan Naylor	:	Added Metric support.
 *	Miquel van Smoorenburg	:	BSD API fixes.
 *	Miquel van Smoorenburg	:	Metrics.
 *		Alan Cox	:	Use __u32 properly
 *		Alan Cox	:	Aligned routing errors more closely with BSD
 *					our system is still very different.
 *		Alan Cox	:	Faster /proc handling
 *	Alexey Kuznetsov	:	Massive rework to support tree based routing,
 *					routing caches and better behaviour.
 *
 *		Olaf Erb	:	irtt wasn't being copied right.
 *		Bjorn Ekwall	:	Kerneld route support.
 *		Alan Cox	:	Multicast fixed (I hope)
 *		Pavel Krauz	:	Limited broadcast fixed
 *		Mike McLagan	:	Routing by source
 *	Alexey Kuznetsov	:	End of old history. Split to fib.c and
 *					route.c and rewritten from scratch.
 *		Andi Kleen	:	Load-limit warning messages.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *	Vitaly E. Lavrov	:	Race condition in ip_route_input_slow.
 *	Tobias Ringstrom	:	Uninitialized res.type in ip_route_output_slow.
 *	Vladimir V. Ivanov	:	IP rule info (flowid) is really useful.
 *		Marc Boucher	:	routing by fwmark
 *	Robert Olsson		:	Added rt_cache statistics
 *	Arnaldo C. Melo		:	Convert proc stuff to seq_file
 *	Eric Dumazet		:	hashed spinlocks and rt_check_expire() fixes.
 *	Ilia Sotnikov		:	Ignore TOS on PMTUD and Redirect
 *	Ilia Sotnikov		:	Removed TOS from hash calculations
 */
#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/errno.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/pkt_sched.h>
#include <linux/mroute.h>
#include <linux/netfilter_ipv4.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/slab.h>
#include <linux/jhash.h>
#include <net/dst.h>
#include <net/dst_metadata.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/inetpeer.h>
#include <net/sock.h>
#include <net/ip_fib.h>
#include <net/nexthop.h>
#include <net/arp.h>
#include <net/tcp.h>
#include <net/icmp.h>
#include <net/xfrm.h>
#include <net/lwtunnel.h>
#include <net/netevent.h>
#include <net/rtnetlink.h>
#include <linux/sysctl.h>
#include <net/secure_seq.h>
#include <net/ip_tunnels.h>
#include <net/l3mdev.h>

#include "fib_lookup.h"

#define RT_FL_TOS(oldflp4) \
	((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
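/* Informational aside: IPTOS_RT_MASK is IPTOS_TOS_MASK & ~3, so the two
 * low-order ECN bits can never influence a route lookup, while RTO_ONLINK
 * (bit 0) rides along as an out-of-band flag that later selects
 * RT_SCOPE_LINK in ip_route_output_key_hash().
 */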
#define RT_GC_TIMEOUT (300*HZ)

static int ip_rt_max_size;
static int ip_rt_redirect_number __read_mostly	= 9;
static int ip_rt_redirect_load __read_mostly	= HZ / 50;
static int ip_rt_redirect_silence __read_mostly	= ((HZ / 50) << (9 + 1));
static int ip_rt_error_cost __read_mostly	= HZ;
static int ip_rt_error_burst __read_mostly	= 5 * HZ;
static int ip_rt_mtu_expires __read_mostly	= 10 * 60 * HZ;
static u32 ip_rt_min_pmtu __read_mostly		= 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly	= 256;

static int ip_rt_gc_timeout __read_mostly	= RT_GC_TIMEOUT;
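/* With HZ=1000 the defaults above work out to: a redirect backoff tick of
 * 20ms (ip_rt_redirect_load), a redirect-silence window of 20ms << 10,
 * i.e. ~20.5s, an ICMP error budget refilled at one token per jiffy with a
 * 5s burst cap, learned PMTUs that expire after 10 minutes, and no PMTU
 * ever trusted below 552 bytes (512 + 20 IP + 20 TCP).
 */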
/*
 *	Interface to generic destination cache.
 */

static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
static unsigned int	 ipv4_default_advmss(const struct dst_entry *dst);
static unsigned int	 ipv4_mtu(const struct dst_entry *dst);
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
static void		 ipv4_link_failure(struct sk_buff *skb);
static void		 ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb, u32 mtu);
static void		 ip_do_redirect(struct dst_entry *dst, struct sock *sk,
					   struct sk_buff *skb);
static void		 ipv4_dst_destroy(struct dst_entry *dst);

static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
{
	WARN_ON(1);
	return NULL;
}

static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr);
static void		 ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr);

static struct dst_ops ipv4_dst_ops = {
	.family =		AF_INET,
	.check =		ipv4_dst_check,
	.default_advmss =	ipv4_default_advmss,
	.mtu =			ipv4_mtu,
	.cow_metrics =		ipv4_cow_metrics,
	.destroy =		ipv4_dst_destroy,
	.negative_advice =	ipv4_negative_advice,
	.link_failure =		ipv4_link_failure,
	.update_pmtu =		ip_rt_update_pmtu,
	.redirect =		ip_do_redirect,
	.local_out =		__ip_local_out,
	.neigh_lookup =		ipv4_neigh_lookup,
	.confirm_neigh =	ipv4_confirm_neigh,
};
#define ECN_OR_COST(class)	TC_PRIO_##class

const __u8 ip_tos2prio[16] = {
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BESTEFFORT,
	ECN_OR_COST(BESTEFFORT),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_BULK,
	ECN_OR_COST(BULK),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE,
	ECN_OR_COST(INTERACTIVE),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK),
	TC_PRIO_INTERACTIVE_BULK,
	ECN_OR_COST(INTERACTIVE_BULK)
};
EXPORT_SYMBOL(ip_tos2prio);
static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
#define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
#ifdef CONFIG_PROC_FS
static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	if (*pos)
		return NULL;
	return SEQ_START_TOKEN;
}

static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void rt_cache_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cache_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_printf(seq, "%-127s\n",
			   "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
			   "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
			   "HHUptod\tSpecDst");
	return 0;
}

static const struct seq_operations rt_cache_seq_ops = {
	.start  = rt_cache_seq_start,
	.next   = rt_cache_seq_next,
	.stop   = rt_cache_seq_stop,
	.show   = rt_cache_seq_show,
};

static int rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cache_seq_ops);
}

static const struct file_operations rt_cache_seq_fops = {
	.open	 = rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
{
	int cpu;

	if (*pos == 0)
		return SEQ_START_TOKEN;

	for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	int cpu;

	for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
		if (!cpu_possible(cpu))
			continue;
		*pos = cpu+1;
		return &per_cpu(rt_cache_stat, cpu);
	}
	return NULL;
}

static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
{
}

static int rt_cpu_seq_show(struct seq_file *seq, void *v)
{
	struct rt_cache_stat *st = v;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
		return 0;
	}

	seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
		   " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
		   dst_entries_get_slow(&ipv4_dst_ops),
		   0, /* st->in_hit */
		   st->in_slow_tot, st->in_slow_mc, st->in_no_route,
		   st->in_brd, st->in_martian_dst, st->in_martian_src,
		   0, /* st->out_hit */
		   st->out_slow_tot, st->out_slow_mc,
		   0, /* st->gc_total */
		   0, /* st->gc_ignored */
		   0, /* st->gc_goal_miss */
		   0, /* st->gc_dst_overflow */
		   0, /* st->in_hlist_search */
		   0  /* st->out_hlist_search */
		);
	return 0;
}

static const struct seq_operations rt_cpu_seq_ops = {
	.start  = rt_cpu_seq_start,
	.next   = rt_cpu_seq_next,
	.stop   = rt_cpu_seq_stop,
	.show   = rt_cpu_seq_show,
};

static int rt_cpu_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &rt_cpu_seq_ops);
}

static const struct file_operations rt_cpu_seq_fops = {
	.open	 = rt_cpu_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
#ifdef CONFIG_IP_ROUTE_CLASSID
static int rt_acct_proc_show(struct seq_file *m, void *v)
{
	struct ip_rt_acct *dst, *src;
	unsigned int i, j;

	dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
	if (!dst)
		return -ENOMEM;

	for_each_possible_cpu(i) {
		src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
		for (j = 0; j < 256; j++) {
			dst[j].o_bytes   += src[j].o_bytes;
			dst[j].o_packets += src[j].o_packets;
			dst[j].i_bytes   += src[j].i_bytes;
			dst[j].i_packets += src[j].i_packets;
		}
	}

	seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
	kfree(dst);
	return 0;
}
#endif
static int __net_init ip_rt_do_proc_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("rt_cache", 0444, net->proc_net,
			  &rt_cache_seq_fops);
	if (!pde)
		goto err1;

	pde = proc_create("rt_cache", 0444,
			  net->proc_net_stat, &rt_cpu_seq_fops);
	if (!pde)
		goto err2;

#ifdef CONFIG_IP_ROUTE_CLASSID
	pde = proc_create_single("rt_acct", 0, net->proc_net,
			rt_acct_proc_show);
	if (!pde)
		goto err3;
#endif
	return 0;

#ifdef CONFIG_IP_ROUTE_CLASSID
err3:
	remove_proc_entry("rt_cache", net->proc_net_stat);
#endif
err2:
	remove_proc_entry("rt_cache", net->proc_net);
err1:
	return -ENOMEM;
}

static void __net_exit ip_rt_do_proc_exit(struct net *net)
{
	remove_proc_entry("rt_cache", net->proc_net_stat);
	remove_proc_entry("rt_cache", net->proc_net);
#ifdef CONFIG_IP_ROUTE_CLASSID
	remove_proc_entry("rt_acct", net->proc_net);
#endif
}

static struct pernet_operations ip_rt_proc_ops __net_initdata = {
	.init = ip_rt_do_proc_init,
	.exit = ip_rt_do_proc_exit,
};

static int __init ip_rt_proc_init(void)
{
	return register_pernet_subsys(&ip_rt_proc_ops);
}

#else
static inline int ip_rt_proc_init(void)
{
	return 0;
}
#endif /* CONFIG_PROC_FS */
static inline bool rt_is_expired(const struct rtable *rth)
{
	return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
}

void rt_cache_flush(struct net *net)
{
	rt_genid_bump_ipv4(net);
}
static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
					   struct sk_buff *skb,
					   const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	struct neighbour *n;

	rcu_read_lock_bh();

	if (likely(rt->rt_gw_family == AF_INET)) {
		n = ip_neigh_gw4(dev, rt->rt_gw4);
	} else if (rt->rt_gw_family == AF_INET6) {
		n = ip_neigh_gw6(dev, &rt->rt_gw6);
	} else {
		__be32 pkey;

		pkey = skb ? ip_hdr(skb)->daddr : *((__be32 *) daddr);
		n = ip_neigh_gw4(dev, pkey);
	}

	if (!IS_ERR(n) && !refcount_inc_not_zero(&n->refcnt))
		n = NULL;

	rcu_read_unlock_bh();

	return n;
}
static void ipv4_confirm_neigh(const struct dst_entry *dst, const void *daddr)
{
	const struct rtable *rt = container_of(dst, struct rtable, dst);
	struct net_device *dev = dst->dev;
	const __be32 *pkey = daddr;

	if (rt->rt_gw_family == AF_INET) {
		pkey = (const __be32 *)&rt->rt_gw4;
	} else if (rt->rt_gw_family == AF_INET6) {
		return __ipv6_confirm_neigh_stub(dev, &rt->rt_gw6);
	} else if (!daddr ||
		   (rt->rt_flags &
		    (RTCF_MULTICAST | RTCF_BROADCAST | RTCF_LOCAL))) {
		return;
	}
	__ipv4_confirm_neigh(dev, *(__force u32 *)pkey);
}
#define IP_IDENTS_SZ 2048u

static atomic_t *ip_idents __read_mostly;
static u32 *ip_tstamps __read_mostly;

/* In order to protect privacy, we add a perturbation to identifiers
 * if one generator is seldom used. This makes it hard for an attacker
 * to infer how many packets were sent between two points in time.
 */
u32 ip_idents_reserve(u32 hash, int segs)
{
	u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
	atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
	u32 old = READ_ONCE(*p_tstamp);
	u32 now = (u32)jiffies;
	u32 new, delta = 0;

	if (old != now && cmpxchg(p_tstamp, old, now) == old)
		delta = prandom_u32_max(now - old);

	/* Do not use atomic_add_return() as it makes UBSAN unhappy */
	do {
		old = (u32)atomic_read(p_id);
		new = old + delta + segs;
	} while (atomic_cmpxchg(p_id, old, new) != old);

	return new - segs;
}
EXPORT_SYMBOL(ip_idents_reserve);
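/* Usage sketch (illustrative, not a call site in this file): a sender
 * labelling a 3-segment GSO burst reserves three consecutive IDs at once:
 *
 *	id = ip_idents_reserve(hash, 3);	// owns id, id+1, id+2
 *
 * The random delta injected when a bucket's timestamp changes is what
 * keeps successive reservations non-contiguous for an outside observer.
 */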
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
{
	u32 hash, id;

	/* Note the following code is not safe, but this is okay. */
	if (unlikely(siphash_key_is_zero(&net->ipv4.ip_id_key)))
		get_random_bytes(&net->ipv4.ip_id_key,
				 sizeof(net->ipv4.ip_id_key));

	hash = siphash_3u32((__force u32)iph->daddr,
			    (__force u32)iph->saddr,
			    iph->protocol,
			    &net->ipv4.ip_id_key);
	id = ip_idents_reserve(hash, segs);
	iph->id = htons(id);
}
EXPORT_SYMBOL(__ip_select_ident);
static void __build_flow_key(const struct net *net, struct flowi4 *fl4,
			     const struct sock *sk,
			     const struct iphdr *iph,
			     int oif, u8 tos,
			     u8 prot, u32 mark, int flow_flags)
{
	if (sk) {
		const struct inet_sock *inet = inet_sk(sk);

		oif = sk->sk_bound_dev_if;
		mark = sk->sk_mark;
		tos = RT_CONN_FLAGS(sk);
		prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
	}
	flowi4_init_output(fl4, oif, mark, tos,
			   RT_SCOPE_UNIVERSE, prot,
			   flow_flags,
			   iph->daddr, iph->saddr, 0, 0,
			   sock_net_uid(net, sk));
}

static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
			       const struct sock *sk)
{
	const struct net *net = dev_net(skb->dev);
	const struct iphdr *iph = ip_hdr(skb);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	__build_flow_key(net, fl4, sk, iph, oif, tos, prot, mark, 0);
}

static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
{
	const struct inet_sock *inet = inet_sk(sk);
	const struct ip_options_rcu *inet_opt;
	__be32 daddr = inet->inet_daddr;

	rcu_read_lock();
	inet_opt = rcu_dereference(inet->inet_opt);
	if (inet_opt && inet_opt->opt.srr)
		daddr = inet_opt->opt.faddr;
	flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
			   inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
			   inet_sk_flowi_flags(sk),
			   daddr, inet->inet_saddr, 0, 0, sk->sk_uid);
	rcu_read_unlock();
}

static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
				 const struct sk_buff *skb)
{
	if (skb)
		build_skb_flow_key(fl4, skb, sk);
	else
		build_sk_flow_key(fl4, sk);
}
static DEFINE_SPINLOCK(fnhe_lock);

static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
{
	struct rtable *rt;

	rt = rcu_dereference(fnhe->fnhe_rth_input);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
	rt = rcu_dereference(fnhe->fnhe_rth_output);
	if (rt) {
		RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
		dst_dev_put(&rt->dst);
		dst_release(&rt->dst);
	}
}

static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
{
	struct fib_nh_exception *fnhe, *oldest;

	oldest = rcu_dereference(hash->chain);
	for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
			oldest = fnhe;
	}
	fnhe_flush_routes(oldest);
	return oldest;
}
static inline u32 fnhe_hashfun(__be32 daddr)
{
	static u32 fnhe_hashrnd __read_mostly;
	u32 hval;

	net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
	hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
	return hash_32(hval, FNHE_HASH_SHIFT);
}
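/* A nexthop's exceptions live in FNHE_HASH_SIZE buckets; hash_32() folds
 * the jhash of the destination into that range, so exceptions for
 * different daddrs usually hash to different chains, and a chain is only
 * recycled once it grows past FNHE_RECLAIM_DEPTH (see
 * update_or_create_fnhe() below, which then reuses the oldest entry).
 */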
static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
{
	rt->rt_pmtu = fnhe->fnhe_pmtu;
	rt->rt_mtu_locked = fnhe->fnhe_mtu_locked;
	rt->dst.expires = fnhe->fnhe_expires;

	if (fnhe->fnhe_gw) {
		rt->rt_flags |= RTCF_REDIRECTED;
		rt->rt_uses_gateway = 1;
		rt->rt_gw_family = AF_INET;
		rt->rt_gw4 = fnhe->fnhe_gw;
	}
}
static void update_or_create_fnhe(struct fib_nh_common *nhc, __be32 daddr,
				  __be32 gw, u32 pmtu, bool lock,
				  unsigned long expires)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe;
	struct rtable *rt;
	u32 genid, hval;
	unsigned int i;
	int depth;

	genid = fnhe_genid(dev_net(nhc->nhc_dev));
	hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference(nhc->nhc_exceptions);
	if (!hash) {
		hash = kcalloc(FNHE_HASH_SIZE, sizeof(*hash), GFP_ATOMIC);
		if (!hash)
			goto out_unlock;
		rcu_assign_pointer(nhc->nhc_exceptions, hash);
	}

	hash += hval;

	depth = 0;
	for (fnhe = rcu_dereference(hash->chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr)
			break;
		depth++;
	}

	if (fnhe) {
		if (fnhe->fnhe_genid != genid)
			fnhe->fnhe_genid = genid;
		if (gw)
			fnhe->fnhe_gw = gw;
		if (pmtu) {
			fnhe->fnhe_pmtu = pmtu;
			fnhe->fnhe_mtu_locked = lock;
		}
		fnhe->fnhe_expires = max(1UL, expires);
		/* Update all cached dsts too */
		rt = rcu_dereference(fnhe->fnhe_rth_input);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
		rt = rcu_dereference(fnhe->fnhe_rth_output);
		if (rt)
			fill_route_from_fnhe(rt, fnhe);
	} else {
		if (depth > FNHE_RECLAIM_DEPTH)
			fnhe = fnhe_oldest(hash);
		else {
			fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
			if (!fnhe)
				goto out_unlock;

			fnhe->fnhe_next = hash->chain;
			rcu_assign_pointer(hash->chain, fnhe);
		}
		fnhe->fnhe_genid = genid;
		fnhe->fnhe_daddr = daddr;
		fnhe->fnhe_gw = gw;
		fnhe->fnhe_pmtu = pmtu;
		fnhe->fnhe_mtu_locked = lock;
		fnhe->fnhe_expires = max(1UL, expires);

		/* Exception created; mark the cached routes for the nexthop
		 * stale, so anyone caching it rechecks if this exception
		 * applies to them.
		 */
		rt = rcu_dereference(nhc->nhc_rth_input);
		if (rt)
			rt->dst.obsolete = DST_OBSOLETE_KILL;

		for_each_possible_cpu(i) {
			struct rtable __rcu **prt;
			prt = per_cpu_ptr(nhc->nhc_pcpu_rth_output, i);
			rt = rcu_dereference(*prt);
			if (rt)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
		}
	}

	fnhe->fnhe_stamp = jiffies;

out_unlock:
	spin_unlock_bh(&fnhe_lock);
}
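/* Lifecycle note: exceptions are created or refreshed from ICMP redirects
 * and PMTU updates, stamped with the current fnhe genid, and every route
 * already cached against the nexthop is demoted to DST_OBSOLETE_KILL, so
 * the next ipv4_dst_check() pushes its users back through the FIB, where
 * rt_bind_exception() re-attaches the exception data.
 */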
static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
			     bool kill_route)
{
	__be32 new_gw = icmp_hdr(skb)->un.gateway;
	__be32 old_gw = ip_hdr(skb)->saddr;
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct fib_result res;
	struct neighbour *n;
	struct net *net;

	switch (icmp_hdr(skb)->code & 7) {
	case ICMP_REDIR_NET:
	case ICMP_REDIR_NETTOS:
	case ICMP_REDIR_HOST:
	case ICMP_REDIR_HOSTTOS:
		break;

	default:
		return;
	}

	if (rt->rt_gw_family != AF_INET || rt->rt_gw4 != old_gw)
		return;

	in_dev = __in_dev_get_rcu(dev);
	if (!in_dev)
		return;

	net = dev_net(dev);
	if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
	    ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
	    ipv4_is_zeronet(new_gw))
		goto reject_redirect;

	if (!IN_DEV_SHARED_MEDIA(in_dev)) {
		if (!inet_addr_onlink(in_dev, new_gw, old_gw))
			goto reject_redirect;
		if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
			goto reject_redirect;
	} else {
		if (inet_addr_type(net, new_gw) != RTN_UNICAST)
			goto reject_redirect;
	}

	n = __ipv4_neigh_lookup(rt->dst.dev, new_gw);
	if (!n)
		n = neigh_create(&arp_tbl, &new_gw, rt->dst.dev);
	if (!IS_ERR(n)) {
		if (!(n->nud_state & NUD_VALID)) {
			neigh_event_send(n, NULL);
		} else {
			if (fib_lookup(net, fl4, &res, 0) == 0) {
				struct fib_nh_common *nhc = FIB_RES_NHC(res);

				update_or_create_fnhe(nhc, fl4->daddr, new_gw,
						0, false,
						jiffies + ip_rt_gc_timeout);
			}
			if (kill_route)
				rt->dst.obsolete = DST_OBSOLETE_KILL;
			call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
		}
		neigh_release(n);
	}
	return;

reject_redirect:
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev)) {
		const struct iphdr *iph = (const struct iphdr *) skb->data;
		__be32 daddr = iph->daddr;
		__be32 saddr = iph->saddr;

		net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
				     "  Advised path = %pI4 -> %pI4\n",
				     &old_gw, dev->name, &new_gw,
				     &saddr, &daddr);
	}
#endif
	;
}
static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
{
	struct rtable *rt;
	struct flowi4 fl4;
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct net *net = dev_net(skb->dev);
	int oif = skb->dev->ifindex;
	u8 tos = RT_TOS(iph->tos);
	u8 prot = iph->protocol;
	u32 mark = skb->mark;

	rt = (struct rtable *) dst;

	__build_flow_key(net, &fl4, sk, iph, oif, tos, prot, mark, 0);
	__ip_do_redirect(rt, skb, &fl4, true);
}
static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;
	struct dst_entry *ret = dst;

	if (rt) {
		if (dst->obsolete > 0) {
			ip_rt_put(rt);
			ret = NULL;
		} else if ((rt->rt_flags & RTCF_REDIRECTED) ||
			   rt->dst.expires) {
			ip_rt_put(rt);
			ret = NULL;
		}
	}
	return ret;
}
/*
 * Algorithm:
 *	1. The first ip_rt_redirect_number redirects are sent
 *	   with exponential backoff, then we stop sending them at all,
 *	   assuming that the host ignores our redirects.
 *	2. If we did not see packets requiring redirects
 *	   during ip_rt_redirect_silence, we assume that the host
 *	   has forgotten the redirected route and start to send redirects again.
 *
 * This algorithm is much cheaper and more intelligent than dumb load limiting
 * in icmp.c.
 *
 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
 * and "frag. need" (breaks PMTU discovery) in icmp.c.
 */
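/* Worked example with the defaults defined above and HZ=1000: redirects
 * 1..9 to a peer are spaced at least 20ms << n apart (20ms, 40ms, ...
 * ~5.1s), the 10th and later are suppressed entirely, and only ~20.5s of
 * redirect-free traffic (ip_rt_redirect_silence) resets the peer state.
 */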
void ip_rt_send_redirect(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct in_device *in_dev;
	struct inet_peer *peer;
	struct net *net;
	int log_martians;
	int vif;

	rcu_read_lock();
	in_dev = __in_dev_get_rcu(rt->dst.dev);
	if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
		rcu_read_unlock();
		return;
	}
	log_martians = IN_DEV_LOG_MARTIANS(in_dev);
	vif = l3mdev_master_ifindex_rcu(rt->dst.dev);
	rcu_read_unlock();

	net = dev_net(rt->dst.dev);
	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, vif, 1);
	if (!peer) {
		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
			  rt_nexthop(rt, ip_hdr(skb)->daddr));
		return;
	}

	/* No redirected packets during ip_rt_redirect_silence;
	 * reset the algorithm.
	 */
	if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence)) {
		peer->rate_tokens = 0;
		peer->n_redirects = 0;
	}

	/* Too many ignored redirects; do not send anything;
	 * set peer->rate_last to the last seen redirected packet.
	 */
	if (peer->n_redirects >= ip_rt_redirect_number) {
		peer->rate_last = jiffies;
		goto out_put_peer;
	}

	/* Check for load limit; set rate_last to the latest sent
	 * redirect.
	 */
	if (peer->rate_tokens == 0 ||
	    time_after(jiffies,
		       (peer->rate_last +
			(ip_rt_redirect_load << peer->n_redirects)))) {
		__be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);

		icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
		peer->rate_last = jiffies;
		++peer->n_redirects;
#ifdef CONFIG_IP_ROUTE_VERBOSE
		if (log_martians &&
		    peer->n_redirects == ip_rt_redirect_number)
			net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
					     &ip_hdr(skb)->saddr, inet_iif(skb),
					     &ip_hdr(skb)->daddr, &gw);
#endif
	}
out_put_peer:
	inet_putpeer(peer);
}
static int ip_error(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = skb->dev;
	struct in_device *in_dev;
	struct inet_peer *peer;
	unsigned long now;
	struct net *net;
	bool send;
	int code;

	if (netif_is_l3_master(skb->dev)) {
		dev = __dev_get_by_index(dev_net(skb->dev), IPCB(skb)->iif);
		if (!dev)
			goto out;
	}

	in_dev = __in_dev_get_rcu(dev);

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	net = dev_net(rt->dst.dev);
	if (!IN_DEV_FORWARD(in_dev)) {
		switch (rt->dst.error) {
		case EHOSTUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INADDRERRORS);
			break;
		case ENETUNREACH:
			__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
			break;
		}
		goto out;
	}

	switch (rt->dst.error) {
	case EINVAL:
	default:
		goto out;
	case EHOSTUNREACH:
		code = ICMP_HOST_UNREACH;
		break;
	case ENETUNREACH:
		code = ICMP_NET_UNREACH;
		__IP_INC_STATS(net, IPSTATS_MIB_INNOROUTES);
		break;
	case EACCES:
		code = ICMP_PKT_FILTERED;
		break;
	}

	peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr,
			       l3mdev_master_ifindex(skb->dev), 1);

	send = true;
	if (peer) {
		now = jiffies;
		peer->rate_tokens += now - peer->rate_last;
		if (peer->rate_tokens > ip_rt_error_burst)
			peer->rate_tokens = ip_rt_error_burst;
		peer->rate_last = now;
		if (peer->rate_tokens >= ip_rt_error_cost)
			peer->rate_tokens -= ip_rt_error_cost;
		else
			send = false;
		inet_putpeer(peer);
	}
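	/* Classic token bucket: tokens accrue one per jiffy up to
	 * ip_rt_error_burst (five seconds' worth) and each ICMP error
	 * costs HZ tokens, i.e. at most one error per second sustained,
	 * in bursts of up to five.
	 */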
	if (send)
		icmp_send(skb, ICMP_DEST_UNREACH, code, 0);

out:	kfree_skb(skb);
	return 0;
}
static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
{
	struct dst_entry *dst = &rt->dst;
	u32 old_mtu = ipv4_mtu(dst);
	struct fib_result res;
	bool lock = false;

	if (ip_mtu_locked(dst))
		return;

	if (old_mtu < mtu)
		return;

	if (mtu < ip_rt_min_pmtu) {
		lock = true;
		mtu = min(old_mtu, ip_rt_min_pmtu);
	}

	if (rt->rt_pmtu == mtu && !lock &&
	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
		return;

	rcu_read_lock();
	if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
		struct fib_nh_common *nhc = FIB_RES_NHC(res);

		update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock,
				      jiffies + ip_rt_mtu_expires);
	}
	rcu_read_unlock();
}
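/* Note the clamp in __ip_rt_update_pmtu() above: a learned PMTU below
 * ip_rt_min_pmtu (552 by default) is not believed outright; the route
 * keeps min(old_mtu, ip_rt_min_pmtu) and is locked, trading an
 * ever-shrinking PMTU for on-path fragmentation.
 */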
static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
			      struct sk_buff *skb, u32 mtu)
{
	struct rtable *rt = (struct rtable *) dst;
	struct flowi4 fl4;

	ip_rt_build_flow_key(&fl4, sk, skb);
	__ip_rt_update_pmtu(rt, &fl4, mtu);
}
void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
		      int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	u32 mark = IP4_REPLY_MARK(net, skb->mark);

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, mark, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(sock_net(sk), &fl4, sk, iph, 0, 0, 0, 0, 0);

	if (!fl4.flowi4_mark)
		fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);

	rt = __ip_route_output_key(sock_net(sk), &fl4);
	if (!IS_ERR(rt)) {
		__ip_rt_update_pmtu(rt, &fl4, mtu);
		ip_rt_put(rt);
	}
}
void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct dst_entry *odst = NULL;
	bool new = false;
	struct net *net = sock_net(sk);

	bh_lock_sock(sk);

	if (!ip_sk_accept_pmtu(sk))
		goto out;

	odst = sk_dst_get(sk);

	if (sock_owned_by_user(sk) || !odst) {
		__ipv4_sk_update_pmtu(skb, sk, mtu);
		goto out;
	}

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);

	rt = (struct rtable *)odst;
	if (odst->obsolete && !odst->ops->check(odst, 0)) {
		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	__ip_rt_update_pmtu((struct rtable *) xfrm_dst_path(&rt->dst), &fl4, mtu);

	if (!dst_check(&rt->dst, 0)) {
		if (new)
			dst_release(&rt->dst);

		rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
		if (IS_ERR(rt))
			goto out;

		new = true;
	}

	if (new)
		sk_dst_set(sk, &rt->dst);

out:
	bh_unlock_sock(sk);
	dst_release(odst);
}
EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
void ipv4_redirect(struct sk_buff *skb, struct net *net,
		   int oif, u8 protocol)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;

	__build_flow_key(net, &fl4, NULL, iph, oif,
			 RT_TOS(iph->tos), protocol, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_redirect);

void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
{
	const struct iphdr *iph = (const struct iphdr *) skb->data;
	struct flowi4 fl4;
	struct rtable *rt;
	struct net *net = sock_net(sk);

	__build_flow_key(net, &fl4, sk, iph, 0, 0, 0, 0, 0);
	rt = __ip_route_output_key(net, &fl4);
	if (!IS_ERR(rt)) {
		__ip_do_redirect(rt, skb, &fl4, false);
		ip_rt_put(rt);
	}
}
EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
{
	struct rtable *rt = (struct rtable *) dst;

	/* All IPV4 dsts are created with ->obsolete set to the value
	 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
	 * into this function always.
	 *
	 * When a PMTU/redirect information update invalidates a route,
	 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
	 * DST_OBSOLETE_DEAD.
	 */
	if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
		return NULL;
	return dst;
}
static void ipv4_send_dest_unreach(struct sk_buff *skb)
{
	struct ip_options opt;
	int res;

	/* Recompile ip options since IPCB may not be valid anymore.
	 * Also check we have a reasonable ipv4 header.
	 */
	if (!pskb_network_may_pull(skb, sizeof(struct iphdr)) ||
	    ip_hdr(skb)->version != 4 || ip_hdr(skb)->ihl < 5)
		return;

	memset(&opt, 0, sizeof(opt));
	if (ip_hdr(skb)->ihl > 5) {
		if (!pskb_network_may_pull(skb, ip_hdr(skb)->ihl * 4))
			return;
		opt.optlen = ip_hdr(skb)->ihl * 4 - sizeof(struct iphdr);

		rcu_read_lock();
		res = __ip_options_compile(dev_net(skb->dev), &opt, skb, NULL);
		rcu_read_unlock();

		if (res)
			return;
	}
	__icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0, &opt);
}
static void ipv4_link_failure(struct sk_buff *skb)
{
	struct rtable *rt;

	ipv4_send_dest_unreach(skb);

	rt = skb_rtable(skb);
	if (rt)
		dst_set_expires(&rt->dst, 0);
}

static int ip_rt_bug(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	pr_debug("%s: %pI4 -> %pI4, %s\n",
		 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
		 skb->dev ? skb->dev->name : "?");
	kfree_skb(skb);
	WARN_ON(1);
	return 0;
}
/*
 * We do not cache the source address of the outgoing interface,
 * because it is used only by IP RR, TS and SRR options,
 * so it is out of the fast path.
 *
 * BTW remember: "addr" is allowed to be unaligned.
 */
void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
{
	__be32 src;

	if (rt_is_output_route(rt))
		src = ip_hdr(skb)->saddr;
	else {
		struct fib_result res;
		struct iphdr *iph = ip_hdr(skb);
		struct flowi4 fl4 = {
			.daddr = iph->daddr,
			.saddr = iph->saddr,
			.flowi4_tos = RT_TOS(iph->tos),
			.flowi4_oif = rt->dst.dev->ifindex,
			.flowi4_iif = skb->dev->ifindex,
			.flowi4_mark = skb->mark,
		};

		rcu_read_lock();
		if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
			src = fib_result_prefsrc(dev_net(rt->dst.dev), &res);
		else
			src = inet_select_addr(rt->dst.dev,
					       rt_nexthop(rt, iph->daddr),
					       RT_SCOPE_UNIVERSE);
		rcu_read_unlock();
	}
	memcpy(addr, &src, 4);
}
#ifdef CONFIG_IP_ROUTE_CLASSID
static void set_class_tag(struct rtable *rt, u32 tag)
{
	if (!(rt->dst.tclassid & 0xFFFF))
		rt->dst.tclassid |= tag & 0xFFFF;
	if (!(rt->dst.tclassid & 0xFFFF0000))
		rt->dst.tclassid |= tag & 0xFFFF0000;
}
#endif
static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
{
	unsigned int header_size = sizeof(struct tcphdr) + sizeof(struct iphdr);
	unsigned int advmss = max_t(unsigned int, ipv4_mtu(dst) - header_size,
				    ip_rt_min_advmss);

	return min(advmss, IPV4_MAX_PMTU - header_size);
}
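/* e.g. a 1500-byte path MTU advertises an MSS of 1460, and with the
 * defaults the result always stays within [256, 65535 - 40] no matter
 * what the route metrics claim.
 */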
static unsigned int ipv4_mtu(const struct dst_entry *dst)
{
	const struct rtable *rt = (const struct rtable *) dst;
	unsigned int mtu = rt->rt_pmtu;

	if (!mtu || time_after_eq(jiffies, rt->dst.expires))
		mtu = dst_metric_raw(dst, RTAX_MTU);

	if (mtu)
		goto out;

	mtu = READ_ONCE(dst->dev->mtu);

	if (unlikely(ip_mtu_locked(dst))) {
		if (rt->rt_uses_gateway && mtu > 576)
			mtu = 576;
	}

out:
	mtu = min_t(unsigned int, mtu, IP_MAX_MTU);

	return mtu - lwtunnel_headroom(dst->lwtstate, mtu);
}
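/* Precedence in ipv4_mtu() above: a live (unexpired) rt_pmtu wins, then
 * an explicit RTAX_MTU metric; only when neither is set does the device
 * MTU apply, in which case a locked route behind a gateway is clamped to
 * the historic 576-byte minimum. Lightweight-tunnel encapsulation
 * headroom is subtracted in every case.
 */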
static void ip_del_fnhe(struct fib_nh_common *nhc, __be32 daddr)
{
	struct fnhe_hash_bucket *hash;
	struct fib_nh_exception *fnhe, __rcu **fnhe_p;
	u32 hval = fnhe_hashfun(daddr);

	spin_lock_bh(&fnhe_lock);

	hash = rcu_dereference_protected(nhc->nhc_exceptions,
					 lockdep_is_held(&fnhe_lock));
	hash += hval;

	fnhe_p = &hash->chain;
	fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
	while (fnhe) {
		if (fnhe->fnhe_daddr == daddr) {
			rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
				fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
			/* set fnhe_daddr to 0 to ensure it won't bind with
			 * new dsts in rt_bind_exception().
			 */
			fnhe->fnhe_daddr = 0;
			fnhe_flush_routes(fnhe);
			kfree_rcu(fnhe, rcu);
			break;
		}
		fnhe_p = &fnhe->fnhe_next;
		fnhe = rcu_dereference_protected(fnhe->fnhe_next,
						 lockdep_is_held(&fnhe_lock));
	}

	spin_unlock_bh(&fnhe_lock);
}
static struct fib_nh_exception *find_exception(struct fib_nh_common *nhc,
					       __be32 daddr)
{
	struct fnhe_hash_bucket *hash = rcu_dereference(nhc->nhc_exceptions);
	struct fib_nh_exception *fnhe;
	u32 hval;

	if (!hash)
		return NULL;

	hval = fnhe_hashfun(daddr);

	for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
	     fnhe = rcu_dereference(fnhe->fnhe_next)) {
		if (fnhe->fnhe_daddr == daddr) {
			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires)) {
				ip_del_fnhe(nhc, daddr);
				break;
			}
			return fnhe;
		}
	}
	return NULL;
}
/* MTU selection:
 * 1. mtu on route is locked - use it
 * 2. mtu from nexthop exception
 * 3. mtu from egress device
 */
u32 ip_mtu_from_fib_result(struct fib_result *res, __be32 daddr)
{
	struct fib_nh_common *nhc = res->nhc;
	struct net_device *dev = nhc->nhc_dev;
	struct fib_info *fi = res->fi;
	u32 mtu = 0;

	if (dev_net(dev)->ipv4.sysctl_ip_fwd_use_pmtu ||
	    fi->fib_metrics->metrics[RTAX_LOCK - 1] & (1 << RTAX_MTU))
		mtu = fi->fib_mtu;

	if (likely(!mtu)) {
		struct fib_nh_exception *fnhe;

		fnhe = find_exception(nhc, daddr);
		if (fnhe && !time_after_eq(jiffies, fnhe->fnhe_expires))
			mtu = fnhe->fnhe_pmtu;
	}

	if (likely(!mtu))
		mtu = min(READ_ONCE(dev->mtu), IP_MAX_MTU);

	return mtu - lwtunnel_headroom(nhc->nhc_lwtstate, mtu);
}
static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
			      __be32 daddr, const bool do_cache)
{
	bool ret = false;

	spin_lock_bh(&fnhe_lock);

	if (daddr == fnhe->fnhe_daddr) {
		struct rtable __rcu **porig;
		struct rtable *orig;
		int genid = fnhe_genid(dev_net(rt->dst.dev));

		if (rt_is_input_route(rt))
			porig = &fnhe->fnhe_rth_input;
		else
			porig = &fnhe->fnhe_rth_output;
		orig = rcu_dereference(*porig);

		if (fnhe->fnhe_genid != genid) {
			fnhe->fnhe_genid = genid;
			fnhe->fnhe_gw = 0;
			fnhe->fnhe_pmtu = 0;
			fnhe->fnhe_expires = 0;
			fnhe->fnhe_mtu_locked = false;
			fnhe_flush_routes(fnhe);
			orig = NULL;
		}
		fill_route_from_fnhe(rt, fnhe);
		if (!rt->rt_gw4) {
			rt->rt_gw4 = daddr;
			rt->rt_gw_family = AF_INET;
		}

		if (do_cache) {
			dst_hold(&rt->dst);
			rcu_assign_pointer(*porig, rt);
			if (orig) {
				dst_dev_put(&orig->dst);
				dst_release(&orig->dst);
			}
			ret = true;
		}

		fnhe->fnhe_stamp = jiffies;
	}
	spin_unlock_bh(&fnhe_lock);

	return ret;
}
static bool rt_cache_route(struct fib_nh_common *nhc, struct rtable *rt)
{
	struct rtable *orig, *prev, **p;
	bool ret = true;

	if (rt_is_input_route(rt)) {
		p = (struct rtable **)&nhc->nhc_rth_input;
	} else {
		p = (struct rtable **)raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
	}
	orig = *p;

	/* hold dst before doing cmpxchg() to avoid race condition
	 * on this dst
	 */
	dst_hold(&rt->dst);
	prev = cmpxchg(p, orig, rt);
	if (prev == orig) {
		if (orig) {
			rt_add_uncached_list(orig);
			dst_release(&orig->dst);
		}
	} else {
		dst_release(&rt->dst);
		ret = false;
	}

	return ret;
}
struct uncached_list {
	spinlock_t		lock;
	struct list_head	head;
};

static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);

void rt_add_uncached_list(struct rtable *rt)
{
	struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);

	rt->rt_uncached_list = ul;

	spin_lock_bh(&ul->lock);
	list_add_tail(&rt->rt_uncached, &ul->head);
	spin_unlock_bh(&ul->lock);
}

void rt_del_uncached_list(struct rtable *rt)
{
	if (!list_empty(&rt->rt_uncached)) {
		struct uncached_list *ul = rt->rt_uncached_list;

		spin_lock_bh(&ul->lock);
		list_del(&rt->rt_uncached);
		spin_unlock_bh(&ul->lock);
	}
}
static void ipv4_dst_destroy(struct dst_entry *dst)
{
	struct rtable *rt = (struct rtable *)dst;

	ip_dst_metrics_put(dst);
	rt_del_uncached_list(rt);
}

void rt_flush_dev(struct net_device *dev)
{
	struct rtable *rt;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		spin_lock_bh(&ul->lock);
		list_for_each_entry(rt, &ul->head, rt_uncached) {
			if (rt->dst.dev != dev)
				continue;
			rt->dst.dev = blackhole_netdev;
			dev_hold(rt->dst.dev);
			dev_put(dev);
		}
		spin_unlock_bh(&ul->lock);
	}
}
static bool rt_cache_valid(const struct rtable *rt)
{
	return	rt &&
		rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
		!rt_is_expired(rt);
}
static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
			   const struct fib_result *res,
			   struct fib_nh_exception *fnhe,
			   struct fib_info *fi, u16 type, u32 itag,
			   const bool do_cache)
{
	bool cached = false;

	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		if (nhc->nhc_gw_family && nhc->nhc_scope == RT_SCOPE_LINK) {
			rt->rt_uses_gateway = 1;
			rt->rt_gw_family = nhc->nhc_gw_family;
			/* only INET and INET6 are supported */
			if (likely(nhc->nhc_gw_family == AF_INET))
				rt->rt_gw4 = nhc->nhc_gw.ipv4;
			else
				rt->rt_gw6 = nhc->nhc_gw.ipv6;
		}

		ip_dst_init_metrics(&rt->dst, fi->fib_metrics);

#ifdef CONFIG_IP_ROUTE_CLASSID
		if (nhc->nhc_family == AF_INET) {
			struct fib_nh *nh;

			nh = container_of(nhc, struct fib_nh, nh_common);
			rt->dst.tclassid = nh->nh_tclassid;
		}
#endif
		rt->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (unlikely(fnhe))
			cached = rt_bind_exception(rt, fnhe, daddr, do_cache);
		else if (do_cache)
			cached = rt_cache_route(nhc, rt);
		if (unlikely(!cached)) {
			/* Routes we intend to cache in nexthop exception or
			 * FIB nexthop have the DST_NOCACHE bit clear.
			 * However, if we are unsuccessful at storing this
			 * route into the cache we really need to set it.
			 */
			if (!rt->rt_gw4) {
				rt->rt_gw_family = AF_INET;
				rt->rt_gw4 = daddr;
			}
			rt_add_uncached_list(rt);
		}
	} else
		rt_add_uncached_list(rt);

#ifdef CONFIG_IP_ROUTE_CLASSID
#ifdef CONFIG_IP_MULTIPLE_TABLES
	set_class_tag(rt, res->tclassid);
#endif
	set_class_tag(rt, itag);
#endif
}
struct rtable *rt_dst_alloc(struct net_device *dev,
			    unsigned int flags, u16 type,
			    bool nopolicy, bool noxfrm, bool will_cache)
{
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
		       (will_cache ? 0 : DST_HOST) |
		       (nopolicy ? DST_NOPOLICY : 0) |
		       (noxfrm ? DST_NOXFRM : 0));

	if (rt) {
		rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		rt->rt_flags = flags;
		rt->rt_type = type;
		rt->rt_is_input = 0;
		rt->rt_iif = 0;
		rt->rt_pmtu = 0;
		rt->rt_mtu_locked = 0;
		rt->rt_uses_gateway = 0;
		rt->rt_gw_family = 0;
		rt->rt_gw4 = 0;
		INIT_LIST_HEAD(&rt->rt_uncached);

		rt->dst.output = ip_output;
		if (flags & RTCF_LOCAL)
			rt->dst.input = ip_local_deliver;
	}

	return rt;
}
EXPORT_SYMBOL(rt_dst_alloc);
struct rtable *rt_dst_clone(struct net_device *dev, struct rtable *rt)
{
	struct rtable *new_rt;

	new_rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
			   rt->dst.flags);

	if (new_rt) {
		new_rt->rt_genid = rt_genid_ipv4(dev_net(dev));
		new_rt->rt_flags = rt->rt_flags;
		new_rt->rt_type = rt->rt_type;
		new_rt->rt_is_input = rt->rt_is_input;
		new_rt->rt_iif = rt->rt_iif;
		new_rt->rt_pmtu = rt->rt_pmtu;
		new_rt->rt_mtu_locked = rt->rt_mtu_locked;
		new_rt->rt_gw_family = rt->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			new_rt->rt_gw4 = rt->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			new_rt->rt_gw6 = rt->rt_gw6;
		INIT_LIST_HEAD(&new_rt->rt_uncached);

		new_rt->dst.flags |= DST_HOST;
		new_rt->dst.input = rt->dst.input;
		new_rt->dst.output = rt->dst.output;
		new_rt->dst.error = rt->dst.error;
		new_rt->dst.lastuse = jiffies;
		new_rt->dst.lwtstate = lwtstate_get(rt->dst.lwtstate);
	}

	return new_rt;
}
EXPORT_SYMBOL(rt_dst_clone);
/* called in rcu_read_lock() section */
int ip_mc_validate_source(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			  u8 tos, struct net_device *dev,
			  struct in_device *in_dev, u32 *itag)
{
	int err;

	/* Primary sanity checks. */
	if (!in_dev)
		return -EINVAL;

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
	    skb->protocol != htons(ETH_P_IP))
		return -EINVAL;

	if (ipv4_is_loopback(saddr) && !IN_DEV_ROUTE_LOCALNET(in_dev))
		return -EINVAL;

	if (ipv4_is_zeronet(saddr)) {
		if (!ipv4_is_local_multicast(daddr) &&
		    ip_hdr(skb)->protocol != IPPROTO_IGMP)
			return -EINVAL;
	} else {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, itag);
		if (err < 0)
			return err;
	}
	return 0;
}
/* called in rcu_read_lock() section */
static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			     u8 tos, struct net_device *dev, int our)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	unsigned int flags = RTCF_MULTICAST;
	struct rtable *rth;
	u32 itag = 0;
	int err;

	err = ip_mc_validate_source(skb, daddr, saddr, tos, dev, in_dev, &itag);
	if (err)
		return err;

	if (our)
		flags |= RTCF_LOCAL;

	rth = rt_dst_alloc(dev_net(dev)->loopback_dev, flags, RTN_MULTICAST,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
	if (!rth)
		return -ENOBUFS;

#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->dst.output = ip_rt_bug;
	rth->rt_is_input = 1;

#ifdef CONFIG_IP_MROUTE
	if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
		rth->dst.input = ip_mr_input;
#endif
	RT_CACHE_STAT_INC(in_slow_mc);

	skb_dst_set(skb, &rth->dst);
	return 0;
}
static void ip_handle_martian_source(struct net_device *dev,
				     struct in_device *in_dev,
				     struct sk_buff *skb,
				     __be32 daddr,
				     __be32 saddr)
{
	RT_CACHE_STAT_INC(in_martian_src);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
		/*
		 *	RFC1812 recommendation: if the source is martian,
		 *	the only hint is the MAC header.
		 */
		pr_warn("martian source %pI4 from %pI4, on dev %s\n",
			&daddr, &saddr, dev->name);
		if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
			print_hex_dump(KERN_WARNING, "ll header: ",
				       DUMP_PREFIX_OFFSET, 16, 1,
				       skb_mac_header(skb),
				       dev->hard_header_len, false);
		}
	}
#endif
}
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
			   const struct fib_result *res,
			   struct in_device *in_dev,
			   __be32 daddr, __be32 saddr, u32 tos)
{
	struct fib_nh_common *nhc = FIB_RES_NHC(*res);
	struct net_device *dev = nhc->nhc_dev;
	struct fib_nh_exception *fnhe;
	struct rtable *rth;
	int err;
	struct in_device *out_dev;
	bool do_cache;
	u32 itag = 0;

	/* get a working reference to the output device */
	out_dev = __in_dev_get_rcu(dev);
	if (!out_dev) {
		net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
		return -EINVAL;
	}

	err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
				  in_dev->dev, in_dev, &itag);
	if (err < 0) {
		ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
					 saddr);
		goto cleanup;
	}

	do_cache = res->fi && !itag;
	if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
	    skb->protocol == htons(ETH_P_IP)) {
		__be32 gw;

		gw = nhc->nhc_gw_family == AF_INET ? nhc->nhc_gw.ipv4 : 0;
		if (IN_DEV_SHARED_MEDIA(out_dev) ||
		    inet_addr_onlink(out_dev, saddr, gw))
			IPCB(skb)->flags |= IPSKB_DOREDIRECT;
	}

	if (skb->protocol != htons(ETH_P_IP)) {
		/* Not IP (i.e. ARP). Do not create a route if it is
		 * invalid for proxy arp. DNAT routes are always valid.
		 *
		 * The proxy ARP feature has been extended to allow ARP
		 * replies back to the same interface, to support
		 * Private VLAN switch technologies. See arp.c.
		 */
		if (out_dev == in_dev &&
		    IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
			err = -EINVAL;
			goto cleanup;
		}
	}

	fnhe = find_exception(nhc, daddr);
	if (do_cache) {
		if (fnhe)
			rth = rcu_dereference(fnhe->fnhe_rth_input);
		else
			rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			goto out;
		}
	}

	rth = rt_dst_alloc(out_dev->dev, 0, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
	if (!rth) {
		err = -ENOBUFS;
		goto cleanup;
	}

	rth->rt_is_input = 1;
	RT_CACHE_STAT_INC(in_slow_tot);

	rth->dst.input = ip_forward;

	rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag,
		       do_cache);
	lwtunnel_set_redirect(&rth->dst);
	skb_dst_set(skb, &rth->dst);
out:
	err = 0;
cleanup:
	return err;
}
1872 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1873 /* To make ICMP packets follow the right flow, the multipath hash is
1874 * calculated from the inner IP addresses.
1876 static void ip_multipath_l3_keys(const struct sk_buff *skb,
1877 struct flow_keys *hash_keys)
1879 const struct iphdr *outer_iph = ip_hdr(skb);
1880 const struct iphdr *key_iph = outer_iph;
1881 const struct iphdr *inner_iph;
1882 const struct icmphdr *icmph;
1883 struct iphdr _inner_iph;
1884 struct icmphdr _icmph;
1886 if (likely(outer_iph->protocol != IPPROTO_ICMP))
1889 if (unlikely((outer_iph->frag_off & htons(IP_OFFSET)) != 0))
1892 icmph = skb_header_pointer(skb, outer_iph->ihl * 4, sizeof(_icmph),
1897 if (icmph->type != ICMP_DEST_UNREACH &&
1898 icmph->type != ICMP_REDIRECT &&
1899 icmph->type != ICMP_TIME_EXCEEDED &&
1900 icmph->type != ICMP_PARAMETERPROB)
1903 inner_iph = skb_header_pointer(skb,
1904 outer_iph->ihl * 4 + sizeof(_icmph),
1905 sizeof(_inner_iph), &_inner_iph);
1909 key_iph = inner_iph;
1911 hash_keys->addrs.v4addrs.src = key_iph->saddr;
1912 hash_keys->addrs.v4addrs.dst = key_iph->daddr;
/* if skb is set it will be used and fl4 can be NULL */
int fib_multipath_hash(const struct net *net, const struct flowi4 *fl4,
		       const struct sk_buff *skb, struct flow_keys *flkeys)
{
	u32 multipath_hash = fl4 ? fl4->flowi4_multipath_hash : 0;
	struct flow_keys hash_keys;
	u32 mhash;

	switch (net->ipv4.sysctl_fib_multipath_hash_policy) {
	case 0:
		memset(&hash_keys, 0, sizeof(hash_keys));
		hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
		if (skb) {
			ip_multipath_l3_keys(skb, &hash_keys);
		} else {
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	case 1:
		/* skb is currently provided only when forwarding */
		if (skb) {
			unsigned int flag = FLOW_DISSECTOR_F_STOP_AT_ENCAP;
			struct flow_keys keys;

			/* short-circuit if we already have L4 hash present */
			if (skb->l4_hash)
				return skb_get_hash_raw(skb) >> 1;

			memset(&hash_keys, 0, sizeof(hash_keys));

			if (!flkeys) {
				skb_flow_dissect_flow_keys(skb, &keys, flag);
				flkeys = &keys;
			}

			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = flkeys->addrs.v4addrs.src;
			hash_keys.addrs.v4addrs.dst = flkeys->addrs.v4addrs.dst;
			hash_keys.ports.src = flkeys->ports.src;
			hash_keys.ports.dst = flkeys->ports.dst;
			hash_keys.basic.ip_proto = flkeys->basic.ip_proto;
		} else {
			memset(&hash_keys, 0, sizeof(hash_keys));
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
			hash_keys.ports.src = fl4->fl4_sport;
			hash_keys.ports.dst = fl4->fl4_dport;
			hash_keys.basic.ip_proto = fl4->flowi4_proto;
		}
		break;
	case 2:
		memset(&hash_keys, 0, sizeof(hash_keys));
		/* skb is currently provided only when forwarding */
		if (skb) {
			struct flow_keys keys;

			skb_flow_dissect_flow_keys(skb, &keys, 0);
			/* Inner can be v4 or v6 */
			if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV4_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				hash_keys.addrs.v4addrs.src = keys.addrs.v4addrs.src;
				hash_keys.addrs.v4addrs.dst = keys.addrs.v4addrs.dst;
			} else if (keys.control.addr_type == FLOW_DISSECTOR_KEY_IPV6_ADDRS) {
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
				hash_keys.addrs.v6addrs.src = keys.addrs.v6addrs.src;
				hash_keys.addrs.v6addrs.dst = keys.addrs.v6addrs.dst;
				hash_keys.tags.flow_label = keys.tags.flow_label;
				hash_keys.basic.ip_proto = keys.basic.ip_proto;
			} else {
				/* Same as case 0 */
				hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
				ip_multipath_l3_keys(skb, &hash_keys);
			}
		} else {
			/* Same as case 0 */
			hash_keys.control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
			hash_keys.addrs.v4addrs.src = fl4->saddr;
			hash_keys.addrs.v4addrs.dst = fl4->daddr;
		}
		break;
	}
	mhash = flow_hash_from_keys(&hash_keys);

	if (multipath_hash)
		mhash = jhash_2words(mhash, multipath_hash, 0);

	return mhash;
}
#endif /* CONFIG_IP_ROUTE_MULTIPATH */
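/* The switch above is keyed by net.ipv4.fib_multipath_hash_policy:
 * policy 0 hashes L3 addresses (folding ICMP errors onto their inner
 * flow), policy 1 hashes the L4 five-tuple, and policy 2 hashes the inner
 * L3 headers of encapsulated traffic. A caller-supplied
 * flowi4_multipath_hash, when non-zero, is mixed in via jhash_2words().
 */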
static int ip_mkroute_input(struct sk_buff *skb,
			    struct fib_result *res,
			    struct in_device *in_dev,
			    __be32 daddr, __be32 saddr, u32 tos,
			    struct flow_keys *hkeys)
{
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	if (res->fi && fib_info_num_path(res->fi) > 1) {
		int h = fib_multipath_hash(res->fi->fib_net, NULL, skb, hkeys);

		fib_select_multipath(res, h);
	}
#endif

	/* create a routing cache entry */
	return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
}
/*
 *	NOTE. We drop all packets that have a local source
 *	address, because every properly looped-back packet
 *	must already have the correct destination attached by the
 *	output routine.
 *
 *	Such an approach solves two big problems:
 *	1. Non-simplex devices are handled properly.
 *	2. IP spoofing attempts are filtered with 100% guarantee.
 *
 *	called with rcu_read_lock()
 */

static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			       u8 tos, struct net_device *dev,
			       struct fib_result *res)
{
	struct in_device *in_dev = __in_dev_get_rcu(dev);
	struct flow_keys *flkeys = NULL, _flkeys;
	struct net    *net = dev_net(dev);
	struct ip_tunnel_info *tun_info;
	int		err = -EINVAL;
	unsigned int	flags = 0;
	u32		itag = 0;
	struct rtable	*rth;
	struct flowi4	fl4;
	bool do_cache = true;

	/* IP on this device is disabled. */
	if (!in_dev)
		goto out;

	/* Check for the most weird martians, which cannot be detected
	 * by fib_lookup.
	 */

	tun_info = skb_tunnel_info(skb);
	if (tun_info && !(tun_info->mode & IP_TUNNEL_INFO_TX))
		fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
	else
		fl4.flowi4_tun_key.tun_id = 0;
	skb_dst_drop(skb);

	if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
		goto martian_source;

	res->fi = NULL;
	res->table = NULL;
	if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
		goto brd_input;

	/* Accept zero addresses only for the limited broadcast case;
	 * I am not even sure whether to fix this or not. Waiting for
	 * complaints :-)
	 */
	if (ipv4_is_zeronet(saddr))
		goto martian_source;

	if (ipv4_is_zeronet(daddr))
		goto martian_destination;

	/* The following code tries to avoid calling
	 * IN_DEV_NET_ROUTE_LOCALNET() more than once, calling it only
	 * if daddr and/or saddr are loopback addresses.
	 */
	if (ipv4_is_loopback(daddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_destination;
	} else if (ipv4_is_loopback(saddr)) {
		if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
			goto martian_source;
	}

	/*
	 *	Now we are ready to route the packet.
	 */
	fl4.flowi4_oif = 0;
	fl4.flowi4_iif = dev->ifindex;
	fl4.flowi4_mark = skb->mark;
	fl4.flowi4_tos = tos;
	fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
	fl4.flowi4_flags = 0;
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_uid = sock_net_uid(net, NULL);

	if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) {
		flkeys = &_flkeys;
	} else {
		fl4.flowi4_proto = 0;
		fl4.fl4_sport = 0;
		fl4.fl4_dport = 0;
	}

	err = fib_lookup(net, &fl4, res, 0);
	if (err != 0) {
		if (!IN_DEV_FORWARD(in_dev))
			err = -EHOSTUNREACH;
		goto no_route;
	}

	if (res->type == RTN_BROADCAST) {
		if (IN_DEV_BFORWARD(in_dev))
			goto make_route;
		/* not do cache if bc_forwarding is enabled */
		if (IPV4_DEVCONF_ALL(net, BC_FORWARDING))
			do_cache = false;
		goto brd_input;
	}

	if (res->type == RTN_LOCAL) {
		err = fib_validate_source(skb, saddr, daddr, tos,
					  0, dev, in_dev, &itag);
		if (err < 0)
			goto martian_source;
		goto local_input;
	}

	if (!IN_DEV_FORWARD(in_dev)) {
		err = -EHOSTUNREACH;
		goto no_route;
	}
	if (res->type != RTN_UNICAST)
		goto martian_destination;

make_route:
	err = ip_mkroute_input(skb, res, in_dev, daddr, saddr, tos, flkeys);
out:	return err;

brd_input:
	if (skb->protocol != htons(ETH_P_IP))
		goto e_inval;

	if (!ipv4_is_zeronet(saddr)) {
		err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
					  in_dev, &itag);
		if (err < 0)
			goto martian_source;
	}
	flags |= RTCF_BROADCAST;
	res->type = RTN_BROADCAST;
	RT_CACHE_STAT_INC(in_brd);

local_input:
	do_cache &= res->fi && !itag;
	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth = rcu_dereference(nhc->nhc_rth_input);
		if (rt_cache_valid(rth)) {
			skb_dst_set_noref(skb, &rth->dst);
			err = 0;
			goto out;
		}
	}

	rth = rt_dst_alloc(l3mdev_master_dev_rcu(dev) ? : net->loopback_dev,
			   flags | RTCF_LOCAL, res->type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
	if (!rth)
		goto e_nobufs;

	rth->dst.output = ip_rt_bug;
#ifdef CONFIG_IP_ROUTE_CLASSID
	rth->dst.tclassid = itag;
#endif
	rth->rt_is_input = 1;

	RT_CACHE_STAT_INC(in_slow_tot);
	if (res->type == RTN_UNREACHABLE) {
		rth->dst.input = ip_error;
		rth->dst.error = -err;
		rth->rt_flags &= ~RTCF_LOCAL;
	}

	if (do_cache) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);

		rth->dst.lwtstate = lwtstate_get(nhc->nhc_lwtstate);
		if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
			WARN_ON(rth->dst.input == lwtunnel_input);
			rth->dst.lwtstate->orig_input = rth->dst.input;
			rth->dst.input = lwtunnel_input;
		}

		if (unlikely(!rt_cache_route(nhc, rth)))
			rt_add_uncached_list(rth);
	}
	skb_dst_set(skb, &rth->dst);
	err = 0;
	goto out;

no_route:
	RT_CACHE_STAT_INC(in_no_route);
	res->type = RTN_UNREACHABLE;
	res->fi = NULL;
	res->table = NULL;
	goto local_input;

	/*
	 *	Do not cache martian addresses: they should be logged (RFC1812)
	 */
martian_destination:
	RT_CACHE_STAT_INC(in_martian_dst);
#ifdef CONFIG_IP_ROUTE_VERBOSE
	if (IN_DEV_LOG_MARTIANS(in_dev))
		net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
				     &daddr, &saddr, dev->name);
#endif

e_inval:
	err = -EINVAL;
	goto out;

e_nobufs:
	err = -ENOBUFS;
	goto out;

martian_source:
	ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
	goto out;
}
int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
			 u8 tos, struct net_device *dev)
{
	struct fib_result res;
	int err;

	tos &= IPTOS_RT_MASK;
	rcu_read_lock();
	err = ip_route_input_rcu(skb, daddr, saddr, tos, dev, &res);
	rcu_read_unlock();

	return err;
}
EXPORT_SYMBOL(ip_route_input_noref);
/* called with rcu_read_lock held */
int ip_route_input_rcu(struct sk_buff *skb, __be32 daddr, __be32 saddr,
		       u8 tos, struct net_device *dev, struct fib_result *res)
{
	/* Multicast recognition logic was moved from the route cache to here.
	 * The problem was that too many Ethernet cards have broken/missing
	 * hardware multicast filters :-( As a result, a host on a multicast
	 * network acquired a lot of useless route cache entries, sort of
	 * SDR messages from all over the world. Now we try to get rid of
	 * them. Really, provided the software IP multicast filter is
	 * organized reasonably (at least, hashed), it does not result in a
	 * slowdown compared with route cache reject entries.
	 * Note that multicast routers are not affected, because a route
	 * cache entry is created eventually.
	 */
	if (ipv4_is_multicast(daddr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);
		int our = 0;
		int err = -EINVAL;

		if (!in_dev)
			return err;
		our = ip_check_mc_rcu(in_dev, daddr, saddr,
				      ip_hdr(skb)->protocol);

		/* check l3 master if no match yet */
		if (!our && netif_is_l3_slave(dev)) {
			struct in_device *l3_in_dev;

			l3_in_dev = __in_dev_get_rcu(skb->dev);
			if (l3_in_dev)
				our = ip_check_mc_rcu(l3_in_dev, daddr, saddr,
						      ip_hdr(skb)->protocol);
		}

		if (our
#ifdef CONFIG_IP_MROUTE
			||
		    (!ipv4_is_local_multicast(daddr) &&
		     IN_DEV_MFORWARD(in_dev))
#endif
		   ) {
			err = ip_route_input_mc(skb, daddr, saddr,
						tos, dev, our);
		}
		return err;
	}

	return ip_route_input_slow(skb, daddr, saddr, tos, dev, res);
}
/* called with rcu_read_lock() */
static struct rtable *__mkroute_output(const struct fib_result *res,
				       const struct flowi4 *fl4, int orig_oif,
				       struct net_device *dev_out,
				       unsigned int flags)
{
	struct fib_info *fi = res->fi;
	struct fib_nh_exception *fnhe;
	struct in_device *in_dev;
	u16 type = res->type;
	struct rtable *rth;
	bool do_cache;

	in_dev = __in_dev_get_rcu(dev_out);
	if (!in_dev)
		return ERR_PTR(-EINVAL);

	if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
		if (ipv4_is_loopback(fl4->saddr) &&
		    !(dev_out->flags & IFF_LOOPBACK) &&
		    !netif_is_l3_master(dev_out))
			return ERR_PTR(-EINVAL);

	if (ipv4_is_lbcast(fl4->daddr))
		type = RTN_BROADCAST;
	else if (ipv4_is_multicast(fl4->daddr))
		type = RTN_MULTICAST;
	else if (ipv4_is_zeronet(fl4->daddr))
		return ERR_PTR(-EINVAL);

	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	do_cache = true;
	if (type == RTN_BROADCAST) {
		flags |= RTCF_BROADCAST | RTCF_LOCAL;
		fi = NULL;
	} else if (type == RTN_MULTICAST) {
		flags |= RTCF_MULTICAST | RTCF_LOCAL;
		if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
				     fl4->flowi4_proto))
			flags &= ~RTCF_LOCAL;
		else
			do_cache = false;
		/* If a multicast route does not exist, use the
		 * default one, but do not use a gateway in this case.
		 * Yes, it is a hack.
		 */
		if (fi && res->prefixlen < 4)
			fi = NULL;
	} else if ((type == RTN_LOCAL) && (orig_oif != 0) &&
		   (orig_oif != dev_out->ifindex)) {
		/* For local routes that require a particular output interface
		 * we do not want to cache the result.  Caching the result
		 * causes incorrect behaviour when there are multiple source
		 * addresses on the interface, the end result being that if the
		 * intended recipient is waiting on that interface for the
		 * packet he won't receive it because it will be delivered on
		 * the loopback interface and the IP_PKTINFO ipi_ifindex will
		 * be set to the loopback interface as well.
		 */
		do_cache = false;
	}

	fnhe = NULL;
	do_cache &= fi != NULL;
	if (fi) {
		struct fib_nh_common *nhc = FIB_RES_NHC(*res);
		struct rtable __rcu **prth;

		fnhe = find_exception(nhc, fl4->daddr);
		if (!do_cache)
			goto add;
		if (fnhe) {
			prth = &fnhe->fnhe_rth_output;
		} else {
			if (unlikely(fl4->flowi4_flags &
				     FLOWI_FLAG_KNOWN_NH &&
				     !(nhc->nhc_gw_family &&
				       nhc->nhc_scope == RT_SCOPE_LINK))) {
				do_cache = false;
				goto add;
			}
			prth = raw_cpu_ptr(nhc->nhc_pcpu_rth_output);
		}
		rth = rcu_dereference(*prth);
		if (rt_cache_valid(rth) && dst_hold_safe(&rth->dst))
			return rth;
	}

add:
	rth = rt_dst_alloc(dev_out, flags, type,
			   IN_DEV_CONF_GET(in_dev, NOPOLICY),
			   IN_DEV_CONF_GET(in_dev, NOXFRM),
			   do_cache);
	if (!rth)
		return ERR_PTR(-ENOBUFS);

	rth->rt_iif = orig_oif;

	RT_CACHE_STAT_INC(out_slow_tot);

	if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
		if (flags & RTCF_LOCAL &&
		    !(dev_out->flags & IFF_LOOPBACK)) {
			rth->dst.output = ip_mc_output;
			RT_CACHE_STAT_INC(out_slow_mc);
		}
#ifdef CONFIG_IP_MROUTE
		if (type == RTN_MULTICAST) {
			if (IN_DEV_MFORWARD(in_dev) &&
			    !ipv4_is_local_multicast(fl4->daddr)) {
				rth->dst.input = ip_mr_input;
				rth->dst.output = ip_mc_output;
			}
		}
#endif
	}

	rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0, do_cache);
	lwtunnel_set_redirect(&rth->dst);

	return rth;
}
/*
 * Major route resolver routine.
 */

struct rtable *ip_route_output_key_hash(struct net *net, struct flowi4 *fl4,
					const struct sk_buff *skb)
{
	__u8 tos = RT_FL_TOS(fl4);
	struct fib_result res = {
		.type		= RTN_UNSPEC,
		.fi		= NULL,
		.table		= NULL,
		.tclassid	= 0,
	};
	struct rtable *rth;

	fl4->flowi4_iif = LOOPBACK_IFINDEX;
	fl4->flowi4_tos = tos & IPTOS_RT_MASK;
	fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
			 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);

	rcu_read_lock();
	rth = ip_route_output_key_hash_rcu(net, fl4, &res, skb);
	rcu_read_unlock();

	return rth;
}
EXPORT_SYMBOL_GPL(ip_route_output_key_hash);
struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4,
					    struct fib_result *res,
					    const struct sk_buff *skb)
{
	struct net_device *dev_out = NULL;
	int orig_oif = fl4->flowi4_oif;
	unsigned int flags = 0;
	struct rtable *rth;
	int err;

	if (fl4->saddr) {
		if (ipv4_is_multicast(fl4->saddr) ||
		    ipv4_is_lbcast(fl4->saddr) ||
		    ipv4_is_zeronet(fl4->saddr)) {
			rth = ERR_PTR(-EINVAL);
			goto out;
		}

		rth = ERR_PTR(-ENETUNREACH);

		/* I removed check for oif == dev_out->oif here.
		   It was wrong for two reasons:
		   1. ip_dev_find(net, saddr) can return wrong iface, if saddr
		      is assigned to multiple interfaces.
		   2. Moreover, we are allowed to send packets with saddr
		      of another iface. --ANK
		 */

		if (fl4->flowi4_oif == 0 &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     ipv4_is_lbcast(fl4->daddr))) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			dev_out = __ip_dev_find(net, fl4->saddr, false);
			if (!dev_out)
				goto out;

			/* Special hack: user can direct multicasts
			   and limited broadcast via necessary interface
			   without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
			   This hack is not just for fun, it allows
			   vic,vat and friends to work.
			   They bind socket to loopback, set ttl to zero
			   and expect that it will work.
			   From the viewpoint of routing cache they are broken,
			   because we are not allowed to build multicast path
			   with loopback source addr (look, routing cache
			   cannot know, that ttl is zero, so that packet
			   will not leave this host and route is valid).
			   Luckily, this hack is a good workaround.
			 */

			fl4->flowi4_oif = dev_out->ifindex;
			goto make_route;
		}

		if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
			/* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
			if (!__ip_dev_find(net, fl4->saddr, false))
				goto out;
		}
	}

	if (fl4->flowi4_oif) {
		dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
		rth = ERR_PTR(-ENODEV);
		if (!dev_out)
			goto out;

		/* RACE: Check return value of inet_select_addr instead. */
		if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
			rth = ERR_PTR(-ENETUNREACH);
			goto out;
		}
		if (ipv4_is_local_multicast(fl4->daddr) ||
		    ipv4_is_lbcast(fl4->daddr) ||
		    fl4->flowi4_proto == IPPROTO_IGMP) {
			if (!fl4->saddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			goto make_route;
		}
		if (!fl4->saddr) {
			if (ipv4_is_multicast(fl4->daddr))
				fl4->saddr = inet_select_addr(dev_out, 0,
							      fl4->flowi4_scope);
			else if (!fl4->daddr)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_HOST);
		}
	}

	if (!fl4->daddr) {
		fl4->daddr = fl4->saddr;
		if (!fl4->daddr)
			fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
		dev_out = net->loopback_dev;
		fl4->flowi4_oif = LOOPBACK_IFINDEX;
		res->type = RTN_LOCAL;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	err = fib_lookup(net, fl4, res, 0);
	if (err) {
		res->fi = NULL;
		res->table = NULL;
		if (fl4->flowi4_oif &&
		    (ipv4_is_multicast(fl4->daddr) ||
		     !netif_index_is_l3_master(net, fl4->flowi4_oif))) {
			/* Apparently, routing tables are wrong. Assume,
			   that the destination is on link.

			   WHY? DW.
			   Because we are allowed to send to iface
			   even if it has NO routes and NO assigned
			   addresses. When oif is specified, routing
			   tables are looked up with only one purpose:
			   to catch if destination is gatewayed, rather than
			   direct. Moreover, if MSG_DONTROUTE is set,
			   we send packet, ignoring both routing tables
			   and ifaddr state. --ANK

			   We could make it even if oif is unknown,
			   likely IPv6, but we do not.
			 */

			if (fl4->saddr == 0)
				fl4->saddr = inet_select_addr(dev_out, 0,
							      RT_SCOPE_LINK);
			res->type = RTN_UNICAST;
			goto make_route;
		}
		rth = ERR_PTR(err);
		goto out;
	}

	if (res->type == RTN_LOCAL) {
		if (!fl4->saddr) {
			if (res->fi->fib_prefsrc)
				fl4->saddr = res->fi->fib_prefsrc;
			else
				fl4->saddr = fl4->daddr;
		}

		/* L3 master device is the loopback for that domain */
		dev_out = l3mdev_master_dev_rcu(FIB_RES_DEV(*res)) ? :
			net->loopback_dev;

		/* make sure orig_oif points to fib result device even
		 * though packet rx/tx happens over loopback or l3mdev
		 */
		orig_oif = FIB_RES_OIF(*res);

		fl4->flowi4_oif = dev_out->ifindex;
		flags |= RTCF_LOCAL;
		goto make_route;
	}

	fib_select_path(net, res, fl4, skb);

	dev_out = FIB_RES_DEV(*res);
	fl4->flowi4_oif = dev_out->ifindex;

make_route:
	rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags);

out:
	return rth;
}
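
/* Illustrative only (not part of the original file): the "special hack"
 * above means a userspace sender can steer multicast or limited
 * broadcast out of a specific interface simply by binding to one of
 * that interface's addresses, without IP_MULTICAST_IF.  Rough sketch
 * (addr_on_eth0 is a placeholder; details such as SO_BROADCAST for
 * 255.255.255.255 are omitted):
 *
 *	struct sockaddr_in local = {
 *		.sin_family = AF_INET,
 *		.sin_addr.s_addr = addr_on_eth0,
 *	};
 *	bind(fd, (struct sockaddr *)&local, sizeof(local));
 *	// sendto() toward 224.0.0.x or 255.255.255.255 now leaves eth0
 */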
static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
{
	return NULL;
}

static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
					  struct sk_buff *skb, u32 mtu)
{
}

static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
				       struct sk_buff *skb)
{
}

static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
					  unsigned long old)
{
	return NULL;
}

static struct dst_ops ipv4_dst_blackhole_ops = {
	.family			= AF_INET,
	.check			= ipv4_blackhole_dst_check,
	.mtu			= ipv4_blackhole_mtu,
	.default_advmss		= ipv4_default_advmss,
	.update_pmtu		= ipv4_rt_blackhole_update_pmtu,
	.redirect		= ipv4_rt_blackhole_redirect,
	.cow_metrics		= ipv4_rt_blackhole_cow_metrics,
	.neigh_lookup		= ipv4_neigh_lookup,
};

struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
{
	struct rtable *ort = (struct rtable *) dst_orig;
	struct rtable *rt;

	rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_DEAD, 0);
	if (rt) {
		struct dst_entry *new = &rt->dst;

		new->__use = 1;
		new->input = dst_discard;
		new->output = dst_discard_out;

		new->dev = net->loopback_dev;
		if (new->dev)
			dev_hold(new->dev);

		rt->rt_is_input = ort->rt_is_input;
		rt->rt_iif = ort->rt_iif;
		rt->rt_pmtu = ort->rt_pmtu;
		rt->rt_mtu_locked = ort->rt_mtu_locked;

		rt->rt_genid = rt_genid_ipv4(net);
		rt->rt_flags = ort->rt_flags;
		rt->rt_type = ort->rt_type;
		rt->rt_uses_gateway = ort->rt_uses_gateway;
		rt->rt_gw_family = ort->rt_gw_family;
		if (rt->rt_gw_family == AF_INET)
			rt->rt_gw4 = ort->rt_gw4;
		else if (rt->rt_gw_family == AF_INET6)
			rt->rt_gw6 = ort->rt_gw6;

		INIT_LIST_HEAD(&rt->rt_uncached);
	}

	dst_release(dst_orig);

	return rt ? &rt->dst : ERR_PTR(-ENOMEM);
}
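
/* A blackhole route is a minimal stand-in dst: dst_check always fails,
 * input/output discard every packet, and PMTU/redirect updates are
 * no-ops.  It is handed out (e.g. by the xfrm code) when a caller must
 * receive a valid dst_entry even though the real route cannot be used,
 * so traffic is silently dropped instead of touching stale state.
 */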
struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
				    const struct sock *sk)
{
	struct rtable *rt = __ip_route_output_key(net, flp4);

	if (IS_ERR(rt))
		return rt;

	if (flp4->flowi4_proto)
		rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
							flowi4_to_flowi(flp4),
							sk, 0);

	return rt;
}
EXPORT_SYMBOL_GPL(ip_route_output_flow);
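
/* Note: xfrm_lookup_route() is only consulted when the flow key carries
 * a transport protocol (IPsec policy selectors can match on protocol
 * and ports); callers that pass flowi4_proto == 0 simply get the plain
 * route back.
 */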
/* called with rcu_read_lock held */
static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
			struct rtable *rt, u32 table_id, struct flowi4 *fl4,
			struct sk_buff *skb, u32 portid, u32 seq,
			unsigned int flags)
{
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	unsigned long expires = 0;
	u32 error;
	u32 metrics[RTAX_MAX];

	nlh = nlmsg_put(skb, portid, seq, RTM_NEWROUTE, sizeof(*r), flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family	 = AF_INET;
	r->rtm_dst_len	= 32;
	r->rtm_src_len	= 0;
	r->rtm_tos	= fl4 ? fl4->flowi4_tos : 0;
	r->rtm_table	= table_id < 256 ? table_id : RT_TABLE_COMPAT;
	if (nla_put_u32(skb, RTA_TABLE, table_id))
		goto nla_put_failure;
	r->rtm_type	= rt->rt_type;
	r->rtm_scope	= RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;
	r->rtm_flags	= (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;
	if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
		r->rtm_flags |= RTCF_DOREDIRECT;

	if (nla_put_in_addr(skb, RTA_DST, dst))
		goto nla_put_failure;
	if (src) {
		r->rtm_src_len = 32;
		if (nla_put_in_addr(skb, RTA_SRC, src))
			goto nla_put_failure;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
		goto nla_put_failure;
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (rt->dst.tclassid &&
	    nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
		goto nla_put_failure;
#endif
	if (fl4 && !rt_is_input_route(rt) &&
	    fl4->saddr != src) {
		if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
			goto nla_put_failure;
	}
	if (rt->rt_uses_gateway) {
		if (rt->rt_gw_family == AF_INET &&
		    nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gw4)) {
			goto nla_put_failure;
		} else if (rt->rt_gw_family == AF_INET6) {
			int alen = sizeof(struct in6_addr);
			struct nlattr *nla;
			struct rtvia *via;

			nla = nla_reserve(skb, RTA_VIA, alen + 2);
			if (!nla)
				goto nla_put_failure;

			via = nla_data(nla);
			via->rtvia_family = AF_INET6;
			memcpy(via->rtvia_addr, &rt->rt_gw6, alen);
		}
	}

	expires = rt->dst.expires;
	if (expires) {
		unsigned long now = jiffies;

		if (time_before(now, expires))
			expires -= now;
		else
			expires = 0;
	}

	memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
	if (rt->rt_pmtu && expires)
		metrics[RTAX_MTU - 1] = rt->rt_pmtu;
	if (rt->rt_mtu_locked && expires)
		metrics[RTAX_LOCK - 1] |= BIT(RTAX_MTU);
	if (rtnetlink_put_metrics(skb, metrics) < 0)
		goto nla_put_failure;

	if (fl4) {
		if (fl4->flowi4_mark &&
		    nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
			goto nla_put_failure;

		if (!uid_eq(fl4->flowi4_uid, INVALID_UID) &&
		    nla_put_u32(skb, RTA_UID,
				from_kuid_munged(current_user_ns(),
						 fl4->flowi4_uid)))
			goto nla_put_failure;

		if (rt_is_input_route(rt)) {
#ifdef CONFIG_IP_MROUTE
			if (ipv4_is_multicast(dst) &&
			    !ipv4_is_local_multicast(dst) &&
			    IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
				int err = ipmr_get_route(net, skb,
							 fl4->saddr, fl4->daddr,
							 r, portid);

				if (err <= 0) {
					if (err == 0)
						return 0;
					goto nla_put_failure;
				}
			} else
#endif
				if (nla_put_u32(skb, RTA_IIF, fl4->flowi4_iif))
					goto nla_put_failure;
		}
	}

	error = rt->dst.error;

	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
		goto nla_put_failure;

	nlmsg_end(skb, nlh);
	return 0;

nla_put_failure:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}
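
/* Illustrative only (not part of the original file): this is the fill
 * routine behind RTM_GETROUTE replies, i.e. what userspace sees for:
 *
 *	$ ip route get 192.0.2.1
 *	192.0.2.1 via 198.51.100.1 dev eth0 src 198.51.100.10 uid 1000
 *
 * The via/dev/src/uid fields correspond to the RTA_GATEWAY, RTA_OIF,
 * RTA_PREFSRC and RTA_UID attributes emitted above (addresses are
 * placeholders).
 */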
static int fnhe_dump_bucket(struct net *net, struct sk_buff *skb,
			    struct netlink_callback *cb, u32 table_id,
			    struct fnhe_hash_bucket *bucket, int genid,
			    int *fa_index, int fa_start, unsigned int flags)
{
	int i;

	for (i = 0; i < FNHE_HASH_SIZE; i++) {
		struct fib_nh_exception *fnhe;

		for (fnhe = rcu_dereference(bucket[i].chain); fnhe;
		     fnhe = rcu_dereference(fnhe->fnhe_next)) {
			struct rtable *rt;
			int err;

			if (*fa_index < fa_start)
				goto next;

			if (fnhe->fnhe_genid != genid)
				goto next;

			if (fnhe->fnhe_expires &&
			    time_after(jiffies, fnhe->fnhe_expires))
				goto next;

			rt = rcu_dereference(fnhe->fnhe_rth_input);
			if (!rt)
				rt = rcu_dereference(fnhe->fnhe_rth_output);
			if (!rt)
				goto next;

			err = rt_fill_info(net, fnhe->fnhe_daddr, 0, rt,
					   table_id, NULL, skb,
					   NETLINK_CB(cb->skb).portid,
					   cb->nlh->nlmsg_seq, flags);
			if (err)
				return err;
next:
			(*fa_index)++;
		}
	}

	return 0;
}
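
/* fa_index/fa_start implement dump resumption: the caller remembers how
 * many entries earlier netlink dump rounds already emitted, and
 * everything below fa_start is skipped (but still counted) on the next
 * pass, so an interrupted dump continues where it left off.
 */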
int fib_dump_info_fnhe(struct sk_buff *skb, struct netlink_callback *cb,
		       u32 table_id, struct fib_info *fi,
		       int *fa_index, int fa_start, unsigned int flags)
{
	struct net *net = sock_net(cb->skb->sk);
	int nhsel, genid = fnhe_genid(net);

	for (nhsel = 0; nhsel < fib_info_num_path(fi); nhsel++) {
		struct fib_nh_common *nhc = fib_info_nhc(fi, nhsel);
		struct fnhe_hash_bucket *bucket;
		int err;

		if (nhc->nhc_flags & RTNH_F_DEAD)
			continue;

		rcu_read_lock();
		bucket = rcu_dereference(nhc->nhc_exceptions);
		err = 0;
		if (bucket)
			err = fnhe_dump_bucket(net, skb, cb, table_id, bucket,
					       genid, fa_index, fa_start,
					       flags);
		rcu_read_unlock();
		if (err)
			return err;
	}

	return 0;
}
static struct sk_buff *inet_rtm_getroute_build_skb(__be32 src, __be32 dst,
						   u8 ip_proto, __be16 sport,
						   __be16 dport)
{
	struct sk_buff *skb;
	struct iphdr *iph;

	skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return NULL;

	/* Reserve room for dummy headers, this skb can pass
	 * through good chunk of routing engine.
	 */
	skb_reset_mac_header(skb);
	skb_reset_network_header(skb);
	skb->protocol = htons(ETH_P_IP);
	iph = skb_put(skb, sizeof(struct iphdr));
	iph->protocol = ip_proto;
	iph->saddr = src;
	iph->daddr = dst;
	iph->version = 0x4;
	iph->frag_off = 0;
	iph->ihl = 0x5;
	skb_set_transport_header(skb, skb->len);

	switch (iph->protocol) {
	case IPPROTO_UDP: {
		struct udphdr *udph;

		udph = skb_put_zero(skb, sizeof(struct udphdr));
		udph->source = sport;
		udph->dest = dport;
		udph->len = sizeof(struct udphdr);
		udph->check = 0;
		break;
	}
	case IPPROTO_TCP: {
		struct tcphdr *tcph;

		tcph = skb_put_zero(skb, sizeof(struct tcphdr));
		tcph->source	= sport;
		tcph->dest	= dport;
		tcph->doff	= sizeof(struct tcphdr) / 4;
		tcph->rst = 1;
		tcph->check = ~tcp_v4_check(sizeof(struct tcphdr),
					    src, dst, 0);
		break;
	}
	case IPPROTO_ICMP: {
		struct icmphdr *icmph;

		icmph = skb_put_zero(skb, sizeof(struct icmphdr));
		icmph->type = ICMP_ECHO;
		icmph->code = 0;
	}
	}

	return skb;
}
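
/* Illustrative only (not part of the original file): the dummy headers
 * built above let a route query run through the real input path, e.g.:
 *
 *	$ ip route get 10.0.0.1 from 10.0.0.2 iif eth0
 *
 * ends up in ip_route_input_rcu() on this synthetic skb, as if such a
 * packet had actually arrived on eth0 (addresses are placeholders).
 */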
static int inet_rtm_valid_getroute_req(struct sk_buff *skb,
				       const struct nlmsghdr *nlh,
				       struct nlattr **tb,
				       struct netlink_ext_ack *extack)
{
	struct rtmsg *rtm;
	int i, err;

	if (nlh->nlmsg_len < nlmsg_msg_size(sizeof(*rtm))) {
		NL_SET_ERR_MSG(extack,
			       "ipv4: Invalid header for route get request");
		return -EINVAL;
	}

	if (!netlink_strict_get_check(skb))
		return nlmsg_parse_deprecated(nlh, sizeof(*rtm), tb, RTA_MAX,
					      rtm_ipv4_policy, extack);

	rtm = nlmsg_data(nlh);
	if ((rtm->rtm_src_len && rtm->rtm_src_len != 32) ||
	    (rtm->rtm_dst_len && rtm->rtm_dst_len != 32) ||
	    rtm->rtm_table || rtm->rtm_protocol ||
	    rtm->rtm_scope || rtm->rtm_type) {
		NL_SET_ERR_MSG(extack, "ipv4: Invalid values in header for route get request");
		return -EINVAL;
	}

	if (rtm->rtm_flags & ~(RTM_F_NOTIFY |
			       RTM_F_LOOKUP_TABLE |
			       RTM_F_FIB_MATCH)) {
		NL_SET_ERR_MSG(extack, "ipv4: Unsupported rtm_flags for route get request");
		return -EINVAL;
	}

	err = nlmsg_parse_deprecated_strict(nlh, sizeof(*rtm), tb, RTA_MAX,
					    rtm_ipv4_policy, extack);
	if (err)
		return err;

	if ((tb[RTA_SRC] && !rtm->rtm_src_len) ||
	    (tb[RTA_DST] && !rtm->rtm_dst_len)) {
		NL_SET_ERR_MSG(extack, "ipv4: rtm_src_len and rtm_dst_len must be 32 for IPv4");
		return -EINVAL;
	}

	for (i = 0; i <= RTA_MAX; i++) {
		if (!tb[i])
			continue;

		switch (i) {
		case RTA_IIF:
		case RTA_OIF:
		case RTA_SRC:
		case RTA_DST:
		case RTA_IP_PROTO:
		case RTA_SPORT:
		case RTA_DPORT:
		case RTA_MARK:
		case RTA_UID:
			break;
		default:
			NL_SET_ERR_MSG(extack, "ipv4: Unsupported attribute in route get request");
			return -EINVAL;
		}
	}

	return 0;
}
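
/* For strict-checking sockets this validates the getroute request
 * exhaustively: header fields that have no meaning for a lookup must be
 * zero, only the documented rtm_flags may be set, and any attribute
 * outside the allowed set is rejected.  Legacy sockets keep the old
 * permissive nlmsg_parse_deprecated() behaviour for compatibility.
 */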
static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh,
			     struct netlink_ext_ack *extack)
{
	struct net *net = sock_net(in_skb->sk);
	struct nlattr *tb[RTA_MAX+1];
	u32 table_id = RT_TABLE_MAIN;
	__be16 sport = 0, dport = 0;
	struct fib_result res = {};
	u8 ip_proto = IPPROTO_UDP;
	struct rtable *rt = NULL;
	struct sk_buff *skb;
	struct rtmsg *rtm;
	struct flowi4 fl4 = {};
	__be32 dst = 0;
	__be32 src = 0;
	kuid_t uid;
	u32 iif;
	int err;
	int mark;

	err = inet_rtm_valid_getroute_req(in_skb, nlh, tb, extack);
	if (err < 0)
		return err;

	rtm = nlmsg_data(nlh);
	src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
	dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
	iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
	mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
	if (tb[RTA_UID])
		uid = make_kuid(current_user_ns(), nla_get_u32(tb[RTA_UID]));
	else
		uid = (iif ? INVALID_UID : current_uid());

	if (tb[RTA_IP_PROTO]) {
		err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
						  &ip_proto, AF_INET, extack);
		if (err)
			return err;
	}

	if (tb[RTA_SPORT])
		sport = nla_get_be16(tb[RTA_SPORT]);

	if (tb[RTA_DPORT])
		dport = nla_get_be16(tb[RTA_DPORT]);

	skb = inet_rtm_getroute_build_skb(src, dst, ip_proto, sport, dport);
	if (!skb)
		return -ENOBUFS;

	fl4.daddr = dst;
	fl4.saddr = src;
	fl4.flowi4_tos = rtm->rtm_tos;
	fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
	fl4.flowi4_mark = mark;
	fl4.flowi4_uid = uid;
	if (sport)
		fl4.fl4_sport = sport;
	if (dport)
		fl4.fl4_dport = dport;
	fl4.flowi4_proto = ip_proto;

	rcu_read_lock();

	if (iif) {
		struct net_device *dev;

		dev = dev_get_by_index_rcu(net, iif);
		if (!dev) {
			err = -ENODEV;
			goto errout_rcu;
		}

		fl4.flowi4_iif = iif; /* for rt_fill_info */
		skb->dev	= dev;
		skb->mark	= mark;
		err = ip_route_input_rcu(skb, dst, src, rtm->rtm_tos,
					 dev, &res);

		rt = skb_rtable(skb);
		if (err == 0 && rt->dst.error)
			err = -rt->dst.error;
	} else {
		fl4.flowi4_iif = LOOPBACK_IFINDEX;
		skb->dev = net->loopback_dev;
		rt = ip_route_output_key_hash_rcu(net, &fl4, &res, skb);
		err = 0;
		if (IS_ERR(rt))
			err = PTR_ERR(rt);
		else
			skb_dst_set(skb, &rt->dst);
	}

	if (err)
		goto errout_rcu;

	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	if (rtm->rtm_flags & RTM_F_LOOKUP_TABLE)
		table_id = res.table ? res.table->tb_id : 0;

	/* reset skb for netlink reply msg */
	skb_trim(skb, 0);
	skb_reset_network_header(skb);
	skb_reset_transport_header(skb);
	skb_reset_mac_header(skb);

	if (rtm->rtm_flags & RTM_F_FIB_MATCH) {
		if (!res.fi) {
			err = fib_props[res.type].error;
			if (!err)
				err = -EHOSTUNREACH;
			goto errout_rcu;
		}
		err = fib_dump_info(skb, NETLINK_CB(in_skb).portid,
				    nlh->nlmsg_seq, RTM_NEWROUTE, table_id,
				    rt->rt_type, res.prefix, res.prefixlen,
				    fl4.flowi4_tos, res.fi, 0);
	} else {
		err = rt_fill_info(net, dst, src, rt, table_id, &fl4, skb,
				   NETLINK_CB(in_skb).portid,
				   nlh->nlmsg_seq, 0);
	}
	if (err < 0)
		goto errout_rcu;

	rcu_read_unlock();

	err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);

errout_free:
	return err;

errout_rcu:
	rcu_read_unlock();
	kfree_skb(skb);
	goto errout_free;
}
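
/* Illustrative only (not part of the original file): RTM_F_FIB_MATCH
 * above corresponds to iproute2's fibmatch option:
 *
 *	$ ip route get 192.0.2.1 fibmatch
 *	default via 198.51.100.1 dev eth0
 *
 * i.e. the reply describes the FIB entry that matched rather than the
 * fully resolved per-destination route (addresses are placeholders).
 */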
void ip_rt_multicast_event(struct in_device *in_dev)
{
	rt_cache_flush(dev_net(in_dev->dev));
}

#ifdef CONFIG_SYSCTL
static int ip_rt_gc_interval __read_mostly	= 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly	= HZ / 2;
static int ip_rt_gc_elasticity __read_mostly	= 8;
static int ip_min_valid_pmtu __read_mostly	= IPV4_MIN_MTU;

static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
				     void __user *buffer,
				     size_t *lenp, loff_t *ppos)
{
	struct net *net = (struct net *)__ctl->extra1;

	if (write) {
		rt_cache_flush(net);
		fnhe_genid_bump(net);
		return 0;
	}

	return -EINVAL;
}
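
/* Illustrative only (not part of the original file): the handler above
 * is write-only; any write flushes the cache and invalidates all
 * nexthop exceptions via the genid bump, e.g.:
 *
 *	# sysctl -w net.ipv4.route.flush=1
 */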
static struct ctl_table ipv4_route_table[] = {
	{
		.procname	= "gc_thresh",
		.data		= &ipv4_dst_ops.gc_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "max_size",
		.data		= &ip_rt_max_size,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		/* Deprecated. Use gc_min_interval_ms */

		.procname	= "gc_min_interval",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_min_interval_ms",
		.data		= &ip_rt_gc_min_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_ms_jiffies,
	},
	{
		.procname	= "gc_timeout",
		.data		= &ip_rt_gc_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "gc_interval",
		.data		= &ip_rt_gc_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "redirect_load",
		.data		= &ip_rt_redirect_load,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_number",
		.data		= &ip_rt_redirect_number,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "redirect_silence",
		.data		= &ip_rt_redirect_silence,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_cost",
		.data		= &ip_rt_error_cost,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "error_burst",
		.data		= &ip_rt_error_burst,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "gc_elasticity",
		.data		= &ip_rt_gc_elasticity,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mtu_expires",
		.data		= &ip_rt_mtu_expires,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "min_pmtu",
		.data		= &ip_rt_min_pmtu,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &ip_min_valid_pmtu,
	},
	{
		.procname	= "min_adv_mss",
		.data		= &ip_rt_min_advmss,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static const char ipv4_route_flush_procname[] = "flush";

static struct ctl_table ipv4_route_flush_table[] = {
	{
		.procname	= ipv4_route_flush_procname,
		.maxlen		= sizeof(int),
		.mode		= 0200,
		.proc_handler	= ipv4_sysctl_rtcache_flush,
	},
	{ },
};
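
/* The "flush" table is registered per network namespace (see below):
 * each non-init netns gets its own kmemdup'd copy so that extra1 can
 * point at that namespace's struct net, and non-whitelisted entries
 * are hidden from namespaces owned by unprivileged user namespaces.
 */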
static __net_init int sysctl_route_net_init(struct net *net)
{
	struct ctl_table *tbl;

	tbl = ipv4_route_flush_table;
	if (!net_eq(net, &init_net)) {
		tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
		if (!tbl)
			goto err_dup;

		/* Don't export non-whitelisted sysctls to unprivileged users */
		if (net->user_ns != &init_user_ns) {
			if (tbl[0].procname != ipv4_route_flush_procname)
				tbl[0].procname = NULL;
		}
	}
	tbl[0].extra1 = net;

	net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
	if (!net->ipv4.route_hdr)
		goto err_reg;
	return 0;

err_reg:
	if (tbl != ipv4_route_flush_table)
		kfree(tbl);
err_dup:
	return -ENOMEM;
}

static __net_exit void sysctl_route_net_exit(struct net *net)
{
	struct ctl_table *tbl;

	tbl = net->ipv4.route_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.route_hdr);
	BUG_ON(tbl == ipv4_route_flush_table);
	kfree(tbl);
}

static __net_initdata struct pernet_operations sysctl_route_ops = {
	.init = sysctl_route_net_init,
	.exit = sysctl_route_net_exit,
};
#endif
static __net_init int rt_genid_init(struct net *net)
{
	atomic_set(&net->ipv4.rt_genid, 0);
	atomic_set(&net->fnhe_genid, 0);
	atomic_set(&net->ipv4.dev_addr_genid, get_random_int());
	return 0;
}

static __net_initdata struct pernet_operations rt_genid_ops = {
	.init = rt_genid_init,
};

static int __net_init ipv4_inetpeer_init(struct net *net)
{
	struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);

	if (!bp)
		return -ENOMEM;
	inet_peer_base_init(bp);
	net->ipv4.peers = bp;
	return 0;
}

static void __net_exit ipv4_inetpeer_exit(struct net *net)
{
	struct inet_peer_base *bp = net->ipv4.peers;

	net->ipv4.peers = NULL;
	inetpeer_invalidate_tree(bp);
	kfree(bp);
}

static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
	.init = ipv4_inetpeer_init,
	.exit = ipv4_inetpeer_exit,
};
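
/* ip_rt_acct provides the per-cpu, per-tclassid packet/byte counters
 * behind CONFIG_IP_ROUTE_CLASSID (exposed via /proc/net/rt_acct); it
 * is allocated once at boot in ip_rt_init() below.
 */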
#ifdef CONFIG_IP_ROUTE_CLASSID
struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
#endif /* CONFIG_IP_ROUTE_CLASSID */

int __init ip_rt_init(void)
{
	int cpu;

	ip_idents = kmalloc_array(IP_IDENTS_SZ, sizeof(*ip_idents),
				  GFP_KERNEL);
	if (!ip_idents)
		panic("IP: failed to allocate ip_idents\n");

	prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));

	ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
	if (!ip_tstamps)
		panic("IP: failed to allocate ip_tstamps\n");

	for_each_possible_cpu(cpu) {
		struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);

		INIT_LIST_HEAD(&ul->head);
		spin_lock_init(&ul->lock);
	}
#ifdef CONFIG_IP_ROUTE_CLASSID
	ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
	if (!ip_rt_acct)
		panic("IP: failed to allocate ip_rt_acct\n");
#endif

	ipv4_dst_ops.kmem_cachep =
		kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;

	if (dst_entries_init(&ipv4_dst_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_ops counter\n");

	if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
		panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");

	ipv4_dst_ops.gc_thresh = ~0;
	ip_rt_max_size = INT_MAX;

	devinet_init();
	ip_fib_init();

	if (ip_rt_proc_init())
		pr_err("Unable to create route proc files\n");
#ifdef CONFIG_XFRM
	xfrm_init();
	xfrm4_init();
#endif
	rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL,
		      RTNL_FLAG_DOIT_UNLOCKED);

#ifdef CONFIG_SYSCTL
	register_pernet_subsys(&sysctl_route_ops);
#endif
	register_pernet_subsys(&rt_genid_ops);
	register_pernet_subsys(&ipv4_inetpeer_ops);
	return 0;
}

#ifdef CONFIG_SYSCTL
/*
 * We really need to sanitize the damn ipv4 init order, then all
 * this nonsense will go away.
 */
void __init ip_static_sysctl_init(void)
{
	register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);
}
#endif