2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * ROUTE - implementation of the IP router.
15 * Alan Cox : Verify area fixes.
16 * Alan Cox : cli() protects routing changes
17 * Rui Oliveira : ICMP routing table updates
19 * Linus Torvalds : Rewrote bits to be sensible
20 * Alan Cox : Added BSD route gw semantics
21 * Alan Cox : Super /proc >4K
22 * Alan Cox : MTU in route table
23 * Alan Cox : MSS actually. Also added the window
25 * Sam Lantinga : Fixed route matching in rt_del()
26 * Alan Cox : Routing cache support.
27 * Alan Cox : Removed compatibility cruft.
28 * Alan Cox : RTF_REJECT support.
29 * Alan Cox : TCP irtt support.
30 * Jonathan Naylor : Added Metric support.
31 * Miquel van Smoorenburg : BSD API fixes.
32 * Miquel van Smoorenburg : Metrics.
33 * Alan Cox : Use __u32 properly
34 * Alan Cox : Aligned routing errors more closely with BSD
35 * our system is still very different.
36 * Alan Cox : Faster /proc handling
37 * Alexey Kuznetsov : Massive rework to support tree based routing,
38 * routing caches and better behaviour.
40 * Olaf Erb : irtt wasn't being copied right.
41 * Bjorn Ekwall : Kerneld route support.
42 * Alan Cox : Multicast fixed (I hope)
43 * Pavel Krauz : Limited broadcast fixed
44 * Mike McLagan : Routing by source
45 * Alexey Kuznetsov : End of old history. Split to fib.c and
46 * route.c and rewritten from scratch.
47 * Andi Kleen : Load-limit warning messages.
48 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
49 * Vitaly E. Lavrov : Race condition in ip_route_input_slow.
50 * Tobias Ringstrom : Uninitialized res.type in ip_route_output_slow.
51 * Vladimir V. Ivanov : IP rule info (flowid) is really useful.
52 * Marc Boucher : routing by fwmark
53 * Robert Olsson : Added rt_cache statistics
54 * Arnaldo C. Melo : Convert proc stuff to seq_file
55 * Eric Dumazet : hashed spinlocks and rt_check_expire() fixes.
56 * Ilia Sotnikov : Ignore TOS on PMTUD and Redirect
57 * Ilia Sotnikov : Removed TOS from hash calculations
59 * This program is free software; you can redistribute it and/or
60 * modify it under the terms of the GNU General Public License
61 * as published by the Free Software Foundation; either version
62 * 2 of the License, or (at your option) any later version.
65 #define pr_fmt(fmt) "IPv4: " fmt
67 #include <linux/module.h>
68 #include <asm/uaccess.h>
69 #include <linux/bitops.h>
70 #include <linux/types.h>
71 #include <linux/kernel.h>
73 #include <linux/string.h>
74 #include <linux/socket.h>
75 #include <linux/sockios.h>
76 #include <linux/errno.h>
78 #include <linux/inet.h>
79 #include <linux/netdevice.h>
80 #include <linux/proc_fs.h>
81 #include <linux/init.h>
82 #include <linux/skbuff.h>
83 #include <linux/inetdevice.h>
84 #include <linux/igmp.h>
85 #include <linux/pkt_sched.h>
86 #include <linux/mroute.h>
87 #include <linux/netfilter_ipv4.h>
88 #include <linux/random.h>
89 #include <linux/rcupdate.h>
90 #include <linux/times.h>
91 #include <linux/slab.h>
92 #include <linux/jhash.h>
94 #include <net/dst_metadata.h>
95 #include <net/net_namespace.h>
96 #include <net/protocol.h>
98 #include <net/route.h>
99 #include <net/inetpeer.h>
100 #include <net/sock.h>
101 #include <net/ip_fib.h>
104 #include <net/icmp.h>
105 #include <net/xfrm.h>
106 #include <net/lwtunnel.h>
107 #include <net/netevent.h>
108 #include <net/rtnetlink.h>
110 #include <linux/sysctl.h>
111 #include <linux/kmemleak.h>
113 #include <net/secure_seq.h>
114 #include <net/ip_tunnels.h>
117 #define RT_FL_TOS(oldflp4) \
118 ((oldflp4)->flowi4_tos & (IPTOS_RT_MASK | RTO_ONLINK))
120 #define RT_GC_TIMEOUT (300*HZ)
122 static int ip_rt_max_size;
123 static int ip_rt_redirect_number __read_mostly = 9;
124 static int ip_rt_redirect_load __read_mostly = HZ / 50;
125 static int ip_rt_redirect_silence __read_mostly = ((HZ / 50) << (9 + 1));
126 static int ip_rt_error_cost __read_mostly = HZ;
127 static int ip_rt_error_burst __read_mostly = 5 * HZ;
128 static int ip_rt_mtu_expires __read_mostly = 10 * 60 * HZ;
129 static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
130 static int ip_rt_min_advmss __read_mostly = 256;
133 * Interface to generic destination cache.
136 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie);
137 static unsigned int ipv4_default_advmss(const struct dst_entry *dst);
138 static unsigned int ipv4_mtu(const struct dst_entry *dst);
139 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst);
140 static void ipv4_link_failure(struct sk_buff *skb);
141 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
142 struct sk_buff *skb, u32 mtu);
143 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk,
144 struct sk_buff *skb);
145 static void ipv4_dst_destroy(struct dst_entry *dst);
147 static u32 *ipv4_cow_metrics(struct dst_entry *dst, unsigned long old)
153 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
157 static struct dst_ops ipv4_dst_ops = {
159 .check = ipv4_dst_check,
160 .default_advmss = ipv4_default_advmss,
162 .cow_metrics = ipv4_cow_metrics,
163 .destroy = ipv4_dst_destroy,
164 .negative_advice = ipv4_negative_advice,
165 .link_failure = ipv4_link_failure,
166 .update_pmtu = ip_rt_update_pmtu,
167 .redirect = ip_do_redirect,
168 .local_out = __ip_local_out,
169 .neigh_lookup = ipv4_neigh_lookup,
172 #define ECN_OR_COST(class) TC_PRIO_##class
174 const __u8 ip_tos2prio[16] = {
176 ECN_OR_COST(BESTEFFORT),
178 ECN_OR_COST(BESTEFFORT),
184 ECN_OR_COST(INTERACTIVE),
186 ECN_OR_COST(INTERACTIVE),
187 TC_PRIO_INTERACTIVE_BULK,
188 ECN_OR_COST(INTERACTIVE_BULK),
189 TC_PRIO_INTERACTIVE_BULK,
190 ECN_OR_COST(INTERACTIVE_BULK)
192 EXPORT_SYMBOL(ip_tos2prio);
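/* Illustrative sketch (assumed helper, mirroring what rt_tos2priority()
 * in <net/route.h> does): the four TOS bits select one of the sixteen
 * entries above, e.g. IPTOS_LOWDELAY (0x10) maps to index 8 and thus
 * TC_PRIO_INTERACTIVE.
 */
static inline __u8 example_tos2priority(__u8 tos)
{
	return ip_tos2prio[(tos & 0x1e) >> 1];	/* 0x1e == IPTOS_TOS_MASK */
}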
194 static DEFINE_PER_CPU(struct rt_cache_stat, rt_cache_stat);
195 #define RT_CACHE_STAT_INC(field) raw_cpu_inc(rt_cache_stat.field)
197 #ifdef CONFIG_PROC_FS
198 static void *rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
202 return SEQ_START_TOKEN;
205 static void *rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
211 static void rt_cache_seq_stop(struct seq_file *seq, void *v)
215 static int rt_cache_seq_show(struct seq_file *seq, void *v)
217 if (v == SEQ_START_TOKEN)
218 seq_printf(seq, "%-127s\n",
219 "Iface\tDestination\tGateway \tFlags\t\tRefCnt\tUse\t"
220 "Metric\tSource\t\tMTU\tWindow\tIRTT\tTOS\tHHRef\t"
225 static const struct seq_operations rt_cache_seq_ops = {
226 .start = rt_cache_seq_start,
227 .next = rt_cache_seq_next,
228 .stop = rt_cache_seq_stop,
229 .show = rt_cache_seq_show,
232 static int rt_cache_seq_open(struct inode *inode, struct file *file)
234 return seq_open(file, &rt_cache_seq_ops);
237 static const struct file_operations rt_cache_seq_fops = {
238 .owner = THIS_MODULE,
239 .open = rt_cache_seq_open,
242 .release = seq_release,
246 static void *rt_cpu_seq_start(struct seq_file *seq, loff_t *pos)
251 return SEQ_START_TOKEN;
253 for (cpu = *pos-1; cpu < nr_cpu_ids; ++cpu) {
254 if (!cpu_possible(cpu))
257 return &per_cpu(rt_cache_stat, cpu);
262 static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
266 for (cpu = *pos; cpu < nr_cpu_ids; ++cpu) {
267 if (!cpu_possible(cpu))
270 return &per_cpu(rt_cache_stat, cpu);
276 static void rt_cpu_seq_stop(struct seq_file *seq, void *v)
281 static int rt_cpu_seq_show(struct seq_file *seq, void *v)
283 struct rt_cache_stat *st = v;
285 if (v == SEQ_START_TOKEN) {
286 seq_printf(seq, "entries in_hit in_slow_tot in_slow_mc in_no_route in_brd in_martian_dst in_martian_src out_hit out_slow_tot out_slow_mc gc_total gc_ignored gc_goal_miss gc_dst_overflow in_hlist_search out_hlist_search\n");
290 seq_printf(seq,"%08x %08x %08x %08x %08x %08x %08x %08x "
291 " %08x %08x %08x %08x %08x %08x %08x %08x %08x \n",
292 dst_entries_get_slow(&ipv4_dst_ops),
305 0, /* st->gc_total */
306 0, /* st->gc_ignored */
307 0, /* st->gc_goal_miss */
308 0, /* st->gc_dst_overflow */
309 0, /* st->in_hlist_search */
310 0 /* st->out_hlist_search */
315 static const struct seq_operations rt_cpu_seq_ops = {
316 .start = rt_cpu_seq_start,
317 .next = rt_cpu_seq_next,
318 .stop = rt_cpu_seq_stop,
319 .show = rt_cpu_seq_show,
323 static int rt_cpu_seq_open(struct inode *inode, struct file *file)
325 return seq_open(file, &rt_cpu_seq_ops);
328 static const struct file_operations rt_cpu_seq_fops = {
329 .owner = THIS_MODULE,
330 .open = rt_cpu_seq_open,
333 .release = seq_release,
336 #ifdef CONFIG_IP_ROUTE_CLASSID
337 static int rt_acct_proc_show(struct seq_file *m, void *v)
339 struct ip_rt_acct *dst, *src;
342 dst = kcalloc(256, sizeof(struct ip_rt_acct), GFP_KERNEL);
346 for_each_possible_cpu(i) {
347 src = (struct ip_rt_acct *)per_cpu_ptr(ip_rt_acct, i);
348 for (j = 0; j < 256; j++) {
349 dst[j].o_bytes += src[j].o_bytes;
350 dst[j].o_packets += src[j].o_packets;
351 dst[j].i_bytes += src[j].i_bytes;
352 dst[j].i_packets += src[j].i_packets;
356 seq_write(m, dst, 256 * sizeof(struct ip_rt_acct));
361 static int rt_acct_proc_open(struct inode *inode, struct file *file)
363 return single_open(file, rt_acct_proc_show, NULL);
366 static const struct file_operations rt_acct_proc_fops = {
367 .owner = THIS_MODULE,
368 .open = rt_acct_proc_open,
371 .release = single_release,
375 static int __net_init ip_rt_do_proc_init(struct net *net)
377 struct proc_dir_entry *pde;
379 pde = proc_create("rt_cache", S_IRUGO, net->proc_net,
384 pde = proc_create("rt_cache", S_IRUGO,
385 net->proc_net_stat, &rt_cpu_seq_fops);
389 #ifdef CONFIG_IP_ROUTE_CLASSID
390 pde = proc_create("rt_acct", 0, net->proc_net, &rt_acct_proc_fops);
396 #ifdef CONFIG_IP_ROUTE_CLASSID
398 remove_proc_entry("rt_cache", net->proc_net_stat);
401 remove_proc_entry("rt_cache", net->proc_net);
406 static void __net_exit ip_rt_do_proc_exit(struct net *net)
408 remove_proc_entry("rt_cache", net->proc_net_stat);
409 remove_proc_entry("rt_cache", net->proc_net);
410 #ifdef CONFIG_IP_ROUTE_CLASSID
411 remove_proc_entry("rt_acct", net->proc_net);
415 static struct pernet_operations ip_rt_proc_ops __net_initdata = {
416 .init = ip_rt_do_proc_init,
417 .exit = ip_rt_do_proc_exit,
420 static int __init ip_rt_proc_init(void)
422 return register_pernet_subsys(&ip_rt_proc_ops);
426 static inline int ip_rt_proc_init(void)
430 #endif /* CONFIG_PROC_FS */
432 static inline bool rt_is_expired(const struct rtable *rth)
434 return rth->rt_genid != rt_genid_ipv4(dev_net(rth->dst.dev));
437 void rt_cache_flush(struct net *net)
439 rt_genid_bump_ipv4(net);
442 static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
446 struct net_device *dev = dst->dev;
447 const __be32 *pkey = daddr;
448 const struct rtable *rt;
451 rt = (const struct rtable *) dst;
453 pkey = (const __be32 *) &rt->rt_gateway;
455 pkey = &ip_hdr(skb)->daddr;
457 n = __ipv4_neigh_lookup(dev, *(__force u32 *)pkey);
460 return neigh_create(&arp_tbl, pkey, dev);
463 #define IP_IDENTS_SZ 2048u
465 static atomic_t *ip_idents __read_mostly;
466 static u32 *ip_tstamps __read_mostly;
468 /* In order to protect privacy, we add a perturbation to identifiers
469 * if one generator is seldom used. This makes it hard for an attacker
470 * to infer how many packets were sent between two points in time.
472 u32 ip_idents_reserve(u32 hash, int segs)
474 u32 *p_tstamp = ip_tstamps + hash % IP_IDENTS_SZ;
475 atomic_t *p_id = ip_idents + hash % IP_IDENTS_SZ;
476 u32 old = ACCESS_ONCE(*p_tstamp);
477 u32 now = (u32)jiffies;
480 if (old != now && cmpxchg(p_tstamp, old, now) == old)
481 delta = prandom_u32_max(now - old);
483 return atomic_add_return(segs + delta, p_id) - segs;
485 EXPORT_SYMBOL(ip_idents_reserve);
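/* Worked example of the perturbation above (numbers illustrative): if
 * the bucket selected by the hash was last used 1000 jiffies ago, the
 * cmpxchg() moves its timestamp forward and delta becomes
 * prandom_u32_max(1000), a random value in [0, 1000).  The returned
 * base ID therefore jumps ahead by an unpredictable amount after an
 * idle period instead of advancing strictly by the segment count.
 */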
487 void __ip_select_ident(struct net *net, struct iphdr *iph, int segs)
489 static u32 ip_idents_hashrnd __read_mostly;
492 net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
494 hash = jhash_3words((__force u32)iph->daddr,
495 (__force u32)iph->saddr,
496 iph->protocol ^ net_hash_mix(net),
498 id = ip_idents_reserve(hash, segs);
501 EXPORT_SYMBOL(__ip_select_ident);
503 static void __build_flow_key(struct flowi4 *fl4, const struct sock *sk,
504 const struct iphdr *iph,
506 u8 prot, u32 mark, int flow_flags)
509 const struct inet_sock *inet = inet_sk(sk);
511 oif = sk->sk_bound_dev_if;
513 tos = RT_CONN_FLAGS(sk);
514 prot = inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol;
516 flowi4_init_output(fl4, oif, mark, tos,
517 RT_SCOPE_UNIVERSE, prot,
519 iph->daddr, iph->saddr, 0, 0);
522 static void build_skb_flow_key(struct flowi4 *fl4, const struct sk_buff *skb,
523 const struct sock *sk)
525 const struct iphdr *iph = ip_hdr(skb);
526 int oif = skb->dev->ifindex;
527 u8 tos = RT_TOS(iph->tos);
528 u8 prot = iph->protocol;
529 u32 mark = skb->mark;
531 __build_flow_key(fl4, sk, iph, oif, tos, prot, mark, 0);
534 static void build_sk_flow_key(struct flowi4 *fl4, const struct sock *sk)
536 const struct inet_sock *inet = inet_sk(sk);
537 const struct ip_options_rcu *inet_opt;
538 __be32 daddr = inet->inet_daddr;
541 inet_opt = rcu_dereference(inet->inet_opt);
542 if (inet_opt && inet_opt->opt.srr)
543 daddr = inet_opt->opt.faddr;
544 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
545 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
546 inet->hdrincl ? IPPROTO_RAW : sk->sk_protocol,
547 inet_sk_flowi_flags(sk),
548 daddr, inet->inet_saddr, 0, 0);
552 static void ip_rt_build_flow_key(struct flowi4 *fl4, const struct sock *sk,
553 const struct sk_buff *skb)
556 build_skb_flow_key(fl4, skb, sk);
558 build_sk_flow_key(fl4, sk);
561 static inline void rt_free(struct rtable *rt)
563 call_rcu(&rt->dst.rcu_head, dst_rcu_free);
566 static DEFINE_SPINLOCK(fnhe_lock);
568 static void fnhe_flush_routes(struct fib_nh_exception *fnhe)
572 rt = rcu_dereference(fnhe->fnhe_rth_input);
574 RCU_INIT_POINTER(fnhe->fnhe_rth_input, NULL);
577 rt = rcu_dereference(fnhe->fnhe_rth_output);
579 RCU_INIT_POINTER(fnhe->fnhe_rth_output, NULL);
584 static struct fib_nh_exception *fnhe_oldest(struct fnhe_hash_bucket *hash)
586 struct fib_nh_exception *fnhe, *oldest;
588 oldest = rcu_dereference(hash->chain);
589 for (fnhe = rcu_dereference(oldest->fnhe_next); fnhe;
590 fnhe = rcu_dereference(fnhe->fnhe_next)) {
591 if (time_before(fnhe->fnhe_stamp, oldest->fnhe_stamp))
594 fnhe_flush_routes(oldest);
598 static inline u32 fnhe_hashfun(__be32 daddr)
600 static u32 fnhe_hashrnd __read_mostly;
603 net_get_random_once(&fnhe_hashrnd, sizeof(fnhe_hashrnd));
604 hval = jhash_1word((__force u32) daddr, fnhe_hashrnd);
605 return hash_32(hval, FNHE_HASH_SHIFT);
608 static void fill_route_from_fnhe(struct rtable *rt, struct fib_nh_exception *fnhe)
610 rt->rt_pmtu = fnhe->fnhe_pmtu;
611 rt->dst.expires = fnhe->fnhe_expires;
614 rt->rt_flags |= RTCF_REDIRECTED;
615 rt->rt_gateway = fnhe->fnhe_gw;
616 rt->rt_uses_gateway = 1;
620 static void update_or_create_fnhe(struct fib_nh *nh, __be32 daddr, __be32 gw,
621 u32 pmtu, unsigned long expires)
623 struct fnhe_hash_bucket *hash;
624 struct fib_nh_exception *fnhe;
628 u32 hval = fnhe_hashfun(daddr);
630 spin_lock_bh(&fnhe_lock);
632 hash = rcu_dereference(nh->nh_exceptions);
634 hash = kzalloc(FNHE_HASH_SIZE * sizeof(*hash), GFP_ATOMIC);
637 rcu_assign_pointer(nh->nh_exceptions, hash);
643 for (fnhe = rcu_dereference(hash->chain); fnhe;
644 fnhe = rcu_dereference(fnhe->fnhe_next)) {
645 if (fnhe->fnhe_daddr == daddr)
654 fnhe->fnhe_pmtu = pmtu;
655 fnhe->fnhe_expires = max(1UL, expires);
657 /* Update all cached dsts too */
658 rt = rcu_dereference(fnhe->fnhe_rth_input);
660 fill_route_from_fnhe(rt, fnhe);
661 rt = rcu_dereference(fnhe->fnhe_rth_output);
663 fill_route_from_fnhe(rt, fnhe);
665 if (depth > FNHE_RECLAIM_DEPTH)
666 fnhe = fnhe_oldest(hash);
668 fnhe = kzalloc(sizeof(*fnhe), GFP_ATOMIC);
672 fnhe->fnhe_next = hash->chain;
673 rcu_assign_pointer(hash->chain, fnhe);
675 fnhe->fnhe_genid = fnhe_genid(dev_net(nh->nh_dev));
676 fnhe->fnhe_daddr = daddr;
678 fnhe->fnhe_pmtu = pmtu;
679 fnhe->fnhe_expires = expires;
681 /* Exception created; mark the cached routes for the nexthop
682 * stale, so anyone caching it rechecks if this exception
683 * applies to them.
685 rt = rcu_dereference(nh->nh_rth_input);
687 rt->dst.obsolete = DST_OBSOLETE_KILL;
689 for_each_possible_cpu(i) {
690 struct rtable __rcu **prt;
691 prt = per_cpu_ptr(nh->nh_pcpu_rth_output, i);
692 rt = rcu_dereference(*prt);
694 rt->dst.obsolete = DST_OBSOLETE_KILL;
698 fnhe->fnhe_stamp = jiffies;
701 spin_unlock_bh(&fnhe_lock);
704 static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flowi4 *fl4,
707 __be32 new_gw = icmp_hdr(skb)->un.gateway;
708 __be32 old_gw = ip_hdr(skb)->saddr;
709 struct net_device *dev = skb->dev;
710 struct in_device *in_dev;
711 struct fib_result res;
715 switch (icmp_hdr(skb)->code & 7) {
717 case ICMP_REDIR_NETTOS:
718 case ICMP_REDIR_HOST:
719 case ICMP_REDIR_HOSTTOS:
726 if (rt->rt_gateway != old_gw)
729 in_dev = __in_dev_get_rcu(dev);
734 if (new_gw == old_gw || !IN_DEV_RX_REDIRECTS(in_dev) ||
735 ipv4_is_multicast(new_gw) || ipv4_is_lbcast(new_gw) ||
736 ipv4_is_zeronet(new_gw))
737 goto reject_redirect;
739 if (!IN_DEV_SHARED_MEDIA(in_dev)) {
740 if (!inet_addr_onlink(in_dev, new_gw, old_gw))
741 goto reject_redirect;
742 if (IN_DEV_SEC_REDIRECTS(in_dev) && ip_fib_check_default(new_gw, dev))
743 goto reject_redirect;
745 if (inet_addr_type(net, new_gw) != RTN_UNICAST)
746 goto reject_redirect;
749 n = ipv4_neigh_lookup(&rt->dst, NULL, &new_gw);
751 if (!(n->nud_state & NUD_VALID)) {
752 neigh_event_send(n, NULL);
754 if (fib_lookup(net, fl4, &res, 0) == 0) {
755 struct fib_nh *nh = &FIB_RES_NH(res);
757 update_or_create_fnhe(nh, fl4->daddr, new_gw,
761 rt->dst.obsolete = DST_OBSOLETE_KILL;
762 call_netevent_notifiers(NETEVENT_NEIGH_UPDATE, n);
769 #ifdef CONFIG_IP_ROUTE_VERBOSE
770 if (IN_DEV_LOG_MARTIANS(in_dev)) {
771 const struct iphdr *iph = (const struct iphdr *) skb->data;
772 __be32 daddr = iph->daddr;
773 __be32 saddr = iph->saddr;
775 net_info_ratelimited("Redirect from %pI4 on %s about %pI4 ignored\n"
776 " Advised path = %pI4 -> %pI4\n",
777 &old_gw, dev->name, &new_gw,
784 static void ip_do_redirect(struct dst_entry *dst, struct sock *sk, struct sk_buff *skb)
788 const struct iphdr *iph = (const struct iphdr *) skb->data;
789 int oif = skb->dev->ifindex;
790 u8 tos = RT_TOS(iph->tos);
791 u8 prot = iph->protocol;
792 u32 mark = skb->mark;
794 rt = (struct rtable *) dst;
796 __build_flow_key(&fl4, sk, iph, oif, tos, prot, mark, 0);
797 __ip_do_redirect(rt, skb, &fl4, true);
800 static struct dst_entry *ipv4_negative_advice(struct dst_entry *dst)
802 struct rtable *rt = (struct rtable *)dst;
803 struct dst_entry *ret = dst;
806 if (dst->obsolete > 0) {
809 } else if ((rt->rt_flags & RTCF_REDIRECTED) ||
820 * 1. The first ip_rt_redirect_number redirects are sent
821 * with exponential backoff, then we stop sending them at all,
822 * assuming that the host ignores our redirects.
823 * 2. If we did not see packets requiring redirects
824 * during ip_rt_redirect_silence, we assume that the host
825 * has forgotten the redirected route and start sending redirects again.
827 * This algorithm is much cheaper and more intelligent than dumb load limiting
830 * NOTE. Do not forget to inhibit load limiting for redirects (redundant)
831 * and "frag. need" (breaks PMTU discovery) in icmp.c.
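/* Worked example with the default values above: ip_rt_redirect_load is
 * HZ/50, i.e. 20 ms, and peer->rate_tokens counts redirects already
 * sent to this peer.  After k redirects the next one is only emitted
 * once 20 ms << k have elapsed since the last (40 ms, 80 ms, ... up to
 * ~5.1 s), and after ip_rt_redirect_number (9) of them we stop
 * entirely until roughly 20.5 s (ip_rt_redirect_silence) pass without
 * packets that would need a redirect, which resets the counter.
 */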
834 void ip_rt_send_redirect(struct sk_buff *skb)
836 struct rtable *rt = skb_rtable(skb);
837 struct in_device *in_dev;
838 struct inet_peer *peer;
843 in_dev = __in_dev_get_rcu(rt->dst.dev);
844 if (!in_dev || !IN_DEV_TX_REDIRECTS(in_dev)) {
848 log_martians = IN_DEV_LOG_MARTIANS(in_dev);
851 net = dev_net(rt->dst.dev);
852 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
854 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST,
855 rt_nexthop(rt, ip_hdr(skb)->daddr));
859 /* No redirected packets during ip_rt_redirect_silence;
860 * reset the algorithm.
862 if (time_after(jiffies, peer->rate_last + ip_rt_redirect_silence))
863 peer->rate_tokens = 0;
865 /* Too many ignored redirects; do not send anything,
866 * just set peer->rate_last to the last seen redirected packet.
868 if (peer->rate_tokens >= ip_rt_redirect_number) {
869 peer->rate_last = jiffies;
873 /* Check for load limit; set rate_last to the latest sent
874 * redirect.
876 if (peer->rate_tokens == 0 ||
879 (ip_rt_redirect_load << peer->rate_tokens)))) {
880 __be32 gw = rt_nexthop(rt, ip_hdr(skb)->daddr);
882 icmp_send(skb, ICMP_REDIRECT, ICMP_REDIR_HOST, gw);
883 peer->rate_last = jiffies;
885 #ifdef CONFIG_IP_ROUTE_VERBOSE
887 peer->rate_tokens == ip_rt_redirect_number)
888 net_warn_ratelimited("host %pI4/if%d ignores redirects for %pI4 to %pI4\n",
889 &ip_hdr(skb)->saddr, inet_iif(skb),
890 &ip_hdr(skb)->daddr, &gw);
897 static int ip_error(struct sk_buff *skb)
899 struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
900 struct rtable *rt = skb_rtable(skb);
901 struct inet_peer *peer;
907 /* IP on this device is disabled. */
911 net = dev_net(rt->dst.dev);
912 if (!IN_DEV_FORWARD(in_dev)) {
913 switch (rt->dst.error) {
915 IP_INC_STATS_BH(net, IPSTATS_MIB_INADDRERRORS);
919 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
925 switch (rt->dst.error) {
930 code = ICMP_HOST_UNREACH;
933 code = ICMP_NET_UNREACH;
934 IP_INC_STATS_BH(net, IPSTATS_MIB_INNOROUTES);
937 code = ICMP_PKT_FILTERED;
941 peer = inet_getpeer_v4(net->ipv4.peers, ip_hdr(skb)->saddr, 1);
946 peer->rate_tokens += now - peer->rate_last;
947 if (peer->rate_tokens > ip_rt_error_burst)
948 peer->rate_tokens = ip_rt_error_burst;
949 peer->rate_last = now;
950 if (peer->rate_tokens >= ip_rt_error_cost)
951 peer->rate_tokens -= ip_rt_error_cost;
957 icmp_send(skb, ICMP_DEST_UNREACH, code, 0);
963 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
965 struct dst_entry *dst = &rt->dst;
966 struct fib_result res;
968 if (dst_metric_locked(dst, RTAX_MTU))
971 if (ipv4_mtu(dst) < mtu)
974 if (mtu < ip_rt_min_pmtu)
975 mtu = ip_rt_min_pmtu;
977 if (rt->rt_pmtu == mtu &&
978 time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
982 if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) {
983 struct fib_nh *nh = &FIB_RES_NH(res);
985 update_or_create_fnhe(nh, fl4->daddr, 0, mtu,
986 jiffies + ip_rt_mtu_expires);
991 static void ip_rt_update_pmtu(struct dst_entry *dst, struct sock *sk,
992 struct sk_buff *skb, u32 mtu)
994 struct rtable *rt = (struct rtable *) dst;
997 ip_rt_build_flow_key(&fl4, sk, skb);
998 __ip_rt_update_pmtu(rt, &fl4, mtu);
1001 void ipv4_update_pmtu(struct sk_buff *skb, struct net *net, u32 mtu,
1002 int oif, u32 mark, u8 protocol, int flow_flags)
1004 const struct iphdr *iph = (const struct iphdr *) skb->data;
1009 mark = IP4_REPLY_MARK(net, skb->mark);
1011 __build_flow_key(&fl4, NULL, iph, oif,
1012 RT_TOS(iph->tos), protocol, mark, flow_flags);
1013 rt = __ip_route_output_key(net, &fl4);
1015 __ip_rt_update_pmtu(rt, &fl4, mtu);
1019 EXPORT_SYMBOL_GPL(ipv4_update_pmtu);
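/* Caller sketch (hypothetical helper; the real callers are the tunnel
 * and transport protocols' ICMP error handlers): on an ICMP
 * "fragmentation needed" error the advertised next-hop MTU is fed back
 * like this, with oif, mark and flow_flags left at zero so the flow
 * key is rebuilt from the embedded IP header alone.
 */
static inline void example_handle_frag_needed(struct sk_buff *skb,
					      struct net *net, u32 mtu,
					      u8 protocol)
{
	ipv4_update_pmtu(skb, net, mtu, 0, 0, protocol, 0);
}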
1021 static void __ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1023 const struct iphdr *iph = (const struct iphdr *) skb->data;
1027 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1029 if (!fl4.flowi4_mark)
1030 fl4.flowi4_mark = IP4_REPLY_MARK(sock_net(sk), skb->mark);
1032 rt = __ip_route_output_key(sock_net(sk), &fl4);
1034 __ip_rt_update_pmtu(rt, &fl4, mtu);
1039 void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1041 const struct iphdr *iph = (const struct iphdr *) skb->data;
1044 struct dst_entry *odst = NULL;
1049 if (!ip_sk_accept_pmtu(sk))
1052 odst = sk_dst_get(sk);
1054 if (sock_owned_by_user(sk) || !odst) {
1055 __ipv4_sk_update_pmtu(skb, sk, mtu);
1059 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1061 rt = (struct rtable *)odst;
1062 if (odst->obsolete && !odst->ops->check(odst, 0)) {
1063 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1070 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1072 if (!dst_check(&rt->dst, 0)) {
1074 dst_release(&rt->dst);
1076 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1084 sk_dst_set(sk, &rt->dst);
1090 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1092 void ipv4_redirect(struct sk_buff *skb, struct net *net,
1093 int oif, u32 mark, u8 protocol, int flow_flags)
1095 const struct iphdr *iph = (const struct iphdr *) skb->data;
1099 __build_flow_key(&fl4, NULL, iph, oif,
1100 RT_TOS(iph->tos), protocol, mark, flow_flags);
1101 rt = __ip_route_output_key(net, &fl4);
1103 __ip_do_redirect(rt, skb, &fl4, false);
1107 EXPORT_SYMBOL_GPL(ipv4_redirect);
1109 void ipv4_sk_redirect(struct sk_buff *skb, struct sock *sk)
1111 const struct iphdr *iph = (const struct iphdr *) skb->data;
1115 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1116 rt = __ip_route_output_key(sock_net(sk), &fl4);
1118 __ip_do_redirect(rt, skb, &fl4, false);
1122 EXPORT_SYMBOL_GPL(ipv4_sk_redirect);
1124 static struct dst_entry *ipv4_dst_check(struct dst_entry *dst, u32 cookie)
1126 struct rtable *rt = (struct rtable *) dst;
1128 /* All IPV4 dsts are created with ->obsolete set to the value
1129 * DST_OBSOLETE_FORCE_CHK which forces validation calls down
1130 * into this function always.
1132 * When a PMTU/redirect information update invalidates a route,
1133 * this is indicated by setting obsolete to DST_OBSOLETE_KILL or
1134 * DST_OBSOLETE_DEAD by dst_free().
1136 if (dst->obsolete != DST_OBSOLETE_FORCE_CHK || rt_is_expired(rt))
1141 static void ipv4_link_failure(struct sk_buff *skb)
1145 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
1147 rt = skb_rtable(skb);
1149 dst_set_expires(&rt->dst, 0);
1152 static int ip_rt_bug(struct sock *sk, struct sk_buff *skb)
1154 pr_debug("%s: %pI4 -> %pI4, %s\n",
1155 __func__, &ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr,
1156 skb->dev ? skb->dev->name : "?");
1163 We do not cache the source address of the outgoing interface,
1164 because it is used only by the IP RR, TS and SRR options,
1165 so it is out of the fast path.
1167 BTW remember: "addr" is allowed to be unaligned
1168 in IP options!
1171 void ip_rt_get_source(u8 *addr, struct sk_buff *skb, struct rtable *rt)
1175 if (rt_is_output_route(rt))
1176 src = ip_hdr(skb)->saddr;
1178 struct fib_result res;
1184 memset(&fl4, 0, sizeof(fl4));
1185 fl4.daddr = iph->daddr;
1186 fl4.saddr = iph->saddr;
1187 fl4.flowi4_tos = RT_TOS(iph->tos);
1188 fl4.flowi4_oif = rt->dst.dev->ifindex;
1189 fl4.flowi4_iif = skb->dev->ifindex;
1190 fl4.flowi4_mark = skb->mark;
1193 if (fib_lookup(dev_net(rt->dst.dev), &fl4, &res, 0) == 0)
1194 src = FIB_RES_PREFSRC(dev_net(rt->dst.dev), res);
1196 src = inet_select_addr(rt->dst.dev,
1197 rt_nexthop(rt, iph->daddr),
1201 memcpy(addr, &src, 4);
1204 #ifdef CONFIG_IP_ROUTE_CLASSID
1205 static void set_class_tag(struct rtable *rt, u32 tag)
1207 if (!(rt->dst.tclassid & 0xFFFF))
1208 rt->dst.tclassid |= tag & 0xFFFF;
1209 if (!(rt->dst.tclassid & 0xFFFF0000))
1210 rt->dst.tclassid |= tag & 0xFFFF0000;
1214 static unsigned int ipv4_default_advmss(const struct dst_entry *dst)
1216 unsigned int advmss = dst_metric_raw(dst, RTAX_ADVMSS);
1219 advmss = max_t(unsigned int, dst->dev->mtu - 40,
1221 if (advmss > 65535 - 40)
1222 advmss = 65535 - 40;
1227 static unsigned int ipv4_mtu(const struct dst_entry *dst)
1229 const struct rtable *rt = (const struct rtable *) dst;
1230 unsigned int mtu = rt->rt_pmtu;
1232 if (!mtu || time_after_eq(jiffies, rt->dst.expires))
1233 mtu = dst_metric_raw(dst, RTAX_MTU);
1238 mtu = dst->dev->mtu;
1240 if (unlikely(dst_metric_locked(dst, RTAX_MTU))) {
1241 if (rt->rt_uses_gateway && mtu > 576)
1245 return min_t(unsigned int, mtu, IP_MAX_MTU);
1248 static struct fib_nh_exception *find_exception(struct fib_nh *nh, __be32 daddr)
1250 struct fnhe_hash_bucket *hash = rcu_dereference(nh->nh_exceptions);
1251 struct fib_nh_exception *fnhe;
1257 hval = fnhe_hashfun(daddr);
1259 for (fnhe = rcu_dereference(hash[hval].chain); fnhe;
1260 fnhe = rcu_dereference(fnhe->fnhe_next)) {
1261 if (fnhe->fnhe_daddr == daddr)
1267 static bool rt_bind_exception(struct rtable *rt, struct fib_nh_exception *fnhe,
1272 spin_lock_bh(&fnhe_lock);
1274 if (daddr == fnhe->fnhe_daddr) {
1275 struct rtable __rcu **porig;
1276 struct rtable *orig;
1277 int genid = fnhe_genid(dev_net(rt->dst.dev));
1279 if (rt_is_input_route(rt))
1280 porig = &fnhe->fnhe_rth_input;
1282 porig = &fnhe->fnhe_rth_output;
1283 orig = rcu_dereference(*porig);
1285 if (fnhe->fnhe_genid != genid) {
1286 fnhe->fnhe_genid = genid;
1288 fnhe->fnhe_pmtu = 0;
1289 fnhe->fnhe_expires = 0;
1290 fnhe_flush_routes(fnhe);
1293 fill_route_from_fnhe(rt, fnhe);
1294 if (!rt->rt_gateway)
1295 rt->rt_gateway = daddr;
1297 if (!(rt->dst.flags & DST_NOCACHE)) {
1298 rcu_assign_pointer(*porig, rt);
1304 fnhe->fnhe_stamp = jiffies;
1306 spin_unlock_bh(&fnhe_lock);
1311 static bool rt_cache_route(struct fib_nh *nh, struct rtable *rt)
1313 struct rtable *orig, *prev, **p;
1316 if (rt_is_input_route(rt)) {
1317 p = (struct rtable **)&nh->nh_rth_input;
1319 p = (struct rtable **)raw_cpu_ptr(nh->nh_pcpu_rth_output);
1323 prev = cmpxchg(p, orig, rt);
1333 struct uncached_list {
1335 struct list_head head;
1338 static DEFINE_PER_CPU_ALIGNED(struct uncached_list, rt_uncached_list);
1340 static void rt_add_uncached_list(struct rtable *rt)
1342 struct uncached_list *ul = raw_cpu_ptr(&rt_uncached_list);
1344 rt->rt_uncached_list = ul;
1346 spin_lock_bh(&ul->lock);
1347 list_add_tail(&rt->rt_uncached, &ul->head);
1348 spin_unlock_bh(&ul->lock);
1351 static void ipv4_dst_destroy(struct dst_entry *dst)
1353 struct rtable *rt = (struct rtable *) dst;
1355 if (!list_empty(&rt->rt_uncached)) {
1356 struct uncached_list *ul = rt->rt_uncached_list;
1358 spin_lock_bh(&ul->lock);
1359 list_del(&rt->rt_uncached);
1360 spin_unlock_bh(&ul->lock);
1364 void rt_flush_dev(struct net_device *dev)
1366 struct net *net = dev_net(dev);
1370 for_each_possible_cpu(cpu) {
1371 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
1373 spin_lock_bh(&ul->lock);
1374 list_for_each_entry(rt, &ul->head, rt_uncached) {
1375 if (rt->dst.dev != dev)
1377 rt->dst.dev = net->loopback_dev;
1378 dev_hold(rt->dst.dev);
1381 spin_unlock_bh(&ul->lock);
1385 static bool rt_cache_valid(const struct rtable *rt)
1388 rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
1392 static void rt_set_nexthop(struct rtable *rt, __be32 daddr,
1393 const struct fib_result *res,
1394 struct fib_nh_exception *fnhe,
1395 struct fib_info *fi, u16 type, u32 itag)
1397 bool cached = false;
1400 struct fib_nh *nh = &FIB_RES_NH(*res);
1402 if (nh->nh_gw && nh->nh_scope == RT_SCOPE_LINK) {
1403 rt->rt_gateway = nh->nh_gw;
1404 rt->rt_uses_gateway = 1;
1406 dst_init_metrics(&rt->dst, fi->fib_metrics, true);
1407 #ifdef CONFIG_IP_ROUTE_CLASSID
1408 rt->dst.tclassid = nh->nh_tclassid;
1410 rt->dst.lwtstate = lwtstate_get(nh->nh_lwtstate);
1412 cached = rt_bind_exception(rt, fnhe, daddr);
1413 else if (!(rt->dst.flags & DST_NOCACHE))
1414 cached = rt_cache_route(nh, rt);
1415 if (unlikely(!cached)) {
1416 /* Routes we intend to cache in a nexthop exception or
1417 * FIB nexthop have the DST_NOCACHE bit clear.
1418 * However, if we are unsuccessful at storing this
1419 * route into the cache, we really need to set it.
1421 rt->dst.flags |= DST_NOCACHE;
1422 if (!rt->rt_gateway)
1423 rt->rt_gateway = daddr;
1424 rt_add_uncached_list(rt);
1427 rt_add_uncached_list(rt);
1429 #ifdef CONFIG_IP_ROUTE_CLASSID
1430 #ifdef CONFIG_IP_MULTIPLE_TABLES
1431 set_class_tag(rt, res->tclassid);
1433 set_class_tag(rt, itag);
1437 static struct rtable *rt_dst_alloc(struct net_device *dev,
1438 bool nopolicy, bool noxfrm, bool will_cache)
1440 return dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK,
1441 (will_cache ? 0 : (DST_HOST | DST_NOCACHE)) |
1442 (nopolicy ? DST_NOPOLICY : 0) |
1443 (noxfrm ? DST_NOXFRM : 0));
1446 /* called in rcu_read_lock() section */
1447 static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1448 u8 tos, struct net_device *dev, int our)
1451 struct in_device *in_dev = __in_dev_get_rcu(dev);
1455 /* Primary sanity checks. */
1460 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr) ||
1461 skb->protocol != htons(ETH_P_IP))
1464 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1465 if (ipv4_is_loopback(saddr))
1468 if (ipv4_is_zeronet(saddr)) {
1469 if (!ipv4_is_local_multicast(daddr))
1472 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1477 rth = rt_dst_alloc(dev_net(dev)->loopback_dev,
1478 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, false);
1482 #ifdef CONFIG_IP_ROUTE_CLASSID
1483 rth->dst.tclassid = itag;
1485 rth->dst.output = ip_rt_bug;
1487 rth->rt_genid = rt_genid_ipv4(dev_net(dev));
1488 rth->rt_flags = RTCF_MULTICAST;
1489 rth->rt_type = RTN_MULTICAST;
1490 rth->rt_is_input = 1;
1493 rth->rt_gateway = 0;
1494 rth->rt_uses_gateway = 0;
1495 INIT_LIST_HEAD(&rth->rt_uncached);
1497 rth->dst.input = ip_local_deliver;
1498 rth->rt_flags |= RTCF_LOCAL;
1501 #ifdef CONFIG_IP_MROUTE
1502 if (!ipv4_is_local_multicast(daddr) && IN_DEV_MFORWARD(in_dev))
1503 rth->dst.input = ip_mr_input;
1505 RT_CACHE_STAT_INC(in_slow_mc);
1507 skb_dst_set(skb, &rth->dst);
1519 static void ip_handle_martian_source(struct net_device *dev,
1520 struct in_device *in_dev,
1521 struct sk_buff *skb,
1525 RT_CACHE_STAT_INC(in_martian_src);
1526 #ifdef CONFIG_IP_ROUTE_VERBOSE
1527 if (IN_DEV_LOG_MARTIANS(in_dev) && net_ratelimit()) {
1529 * Per the RFC1812 recommendation, if the source is martian,
1530 * the only hint is the MAC header.
1532 pr_warn("martian source %pI4 from %pI4, on dev %s\n",
1533 &daddr, &saddr, dev->name);
1534 if (dev->hard_header_len && skb_mac_header_was_set(skb)) {
1535 print_hex_dump(KERN_WARNING, "ll header: ",
1536 DUMP_PREFIX_OFFSET, 16, 1,
1537 skb_mac_header(skb),
1538 dev->hard_header_len, true);
1544 /* called in rcu_read_lock() section */
1545 static int __mkroute_input(struct sk_buff *skb,
1546 const struct fib_result *res,
1547 struct in_device *in_dev,
1548 __be32 daddr, __be32 saddr, u32 tos)
1550 struct fib_nh_exception *fnhe;
1553 struct in_device *out_dev;
1557 /* get a working reference to the output device */
1558 out_dev = __in_dev_get_rcu(FIB_RES_DEV(*res));
1560 net_crit_ratelimited("Bug in ip_route_input_slow(). Please report.\n");
1564 err = fib_validate_source(skb, saddr, daddr, tos, FIB_RES_OIF(*res),
1565 in_dev->dev, in_dev, &itag);
1567 ip_handle_martian_source(in_dev->dev, in_dev, skb, daddr,
1573 do_cache = res->fi && !itag;
1574 if (out_dev == in_dev && err && IN_DEV_TX_REDIRECTS(out_dev) &&
1575 skb->protocol == htons(ETH_P_IP) &&
1576 (IN_DEV_SHARED_MEDIA(out_dev) ||
1577 inet_addr_onlink(out_dev, saddr, FIB_RES_GW(*res))))
1578 IPCB(skb)->flags |= IPSKB_DOREDIRECT;
1580 if (skb->protocol != htons(ETH_P_IP)) {
1581 /* Not IP (i.e. ARP). Do not create a route if it is
1582 * invalid for proxy arp. DNAT routes are always valid.
1584 * The proxy arp feature has been extended to allow ARP
1585 * replies back on the same interface, to support
1586 * Private VLAN switch technologies. See arp.c.
1588 if (out_dev == in_dev &&
1589 IN_DEV_PROXY_ARP_PVLAN(in_dev) == 0) {
1595 fnhe = find_exception(&FIB_RES_NH(*res), daddr);
1598 rth = rcu_dereference(fnhe->fnhe_rth_input);
1600 rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
1602 if (rt_cache_valid(rth)) {
1603 skb_dst_set_noref(skb, &rth->dst);
1608 rth = rt_dst_alloc(out_dev->dev,
1609 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1610 IN_DEV_CONF_GET(out_dev, NOXFRM), do_cache);
1616 rth->rt_genid = rt_genid_ipv4(dev_net(rth->dst.dev));
1618 rth->rt_type = res->type;
1619 rth->rt_is_input = 1;
1622 rth->rt_gateway = 0;
1623 rth->rt_uses_gateway = 0;
1624 INIT_LIST_HEAD(&rth->rt_uncached);
1625 RT_CACHE_STAT_INC(in_slow_tot);
1627 rth->dst.input = ip_forward;
1628 rth->dst.output = ip_output;
1630 rt_set_nexthop(rth, daddr, res, fnhe, res->fi, res->type, itag);
1631 if (lwtunnel_output_redirect(rth->dst.lwtstate)) {
1632 rth->dst.lwtstate->orig_output = rth->dst.output;
1633 rth->dst.output = lwtunnel_output;
1635 if (lwtunnel_input_redirect(rth->dst.lwtstate)) {
1636 rth->dst.lwtstate->orig_input = rth->dst.input;
1637 rth->dst.input = lwtunnel_input;
1639 skb_dst_set(skb, &rth->dst);
1646 static int ip_mkroute_input(struct sk_buff *skb,
1647 struct fib_result *res,
1648 const struct flowi4 *fl4,
1649 struct in_device *in_dev,
1650 __be32 daddr, __be32 saddr, u32 tos)
1652 #ifdef CONFIG_IP_ROUTE_MULTIPATH
1653 if (res->fi && res->fi->fib_nhs > 1)
1654 fib_select_multipath(res);
1657 /* create a routing cache entry */
1658 return __mkroute_input(skb, res, in_dev, daddr, saddr, tos);
1662 * NOTE. We drop all packets that have local source
1663 * addresses, because every properly looped-back packet
1664 * must already have the correct destination attached by the output routine.
1666 * This approach solves two big problems:
1667 * 1. Non-simplex devices are handled properly.
1668 * 2. IP spoofing attempts are filtered with a 100% guarantee.
1669 * called with rcu_read_lock()
1672 static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1673 u8 tos, struct net_device *dev)
1675 struct fib_result res;
1676 struct in_device *in_dev = __in_dev_get_rcu(dev);
1677 struct ip_tunnel_info *tun_info;
1679 unsigned int flags = 0;
1683 struct net *net = dev_net(dev);
1686 /* IP on this device is disabled. */
1691 /* Check for the most weird martians, which can be not detected
1695 tun_info = skb_tunnel_info(skb);
1696 if (tun_info && tun_info->mode == IP_TUNNEL_INFO_RX)
1697 fl4.flowi4_tun_key.tun_id = tun_info->key.tun_id;
1699 fl4.flowi4_tun_key.tun_id = 0;
1702 if (ipv4_is_multicast(saddr) || ipv4_is_lbcast(saddr))
1703 goto martian_source;
1706 if (ipv4_is_lbcast(daddr) || (saddr == 0 && daddr == 0))
1709 /* Accept zero addresses only to limited broadcast;
1710 * I am not even sure whether to fix this or not. Waiting for complaints :-)
1712 if (ipv4_is_zeronet(saddr))
1713 goto martian_source;
1715 if (ipv4_is_zeronet(daddr))
1716 goto martian_destination;
1718 /* The following code tries to avoid calling IN_DEV_NET_ROUTE_LOCALNET(),
1719 * and calls it at most once, when daddr and/or saddr are loopback addresses
1721 if (ipv4_is_loopback(daddr)) {
1722 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1723 goto martian_destination;
1724 } else if (ipv4_is_loopback(saddr)) {
1725 if (!IN_DEV_NET_ROUTE_LOCALNET(in_dev, net))
1726 goto martian_source;
1730 * Now we are ready to route the packet.
1733 fl4.flowi4_iif = vrf_master_ifindex_rcu(dev) ? : dev->ifindex;
1734 fl4.flowi4_mark = skb->mark;
1735 fl4.flowi4_tos = tos;
1736 fl4.flowi4_scope = RT_SCOPE_UNIVERSE;
1739 err = fib_lookup(net, &fl4, &res, 0);
1741 if (!IN_DEV_FORWARD(in_dev))
1742 err = -EHOSTUNREACH;
1746 if (res.type == RTN_BROADCAST)
1749 if (res.type == RTN_LOCAL) {
1750 err = fib_validate_source(skb, saddr, daddr, tos,
1751 0, dev, in_dev, &itag);
1753 goto martian_source_keep_err;
1757 if (!IN_DEV_FORWARD(in_dev)) {
1758 err = -EHOSTUNREACH;
1761 if (res.type != RTN_UNICAST)
1762 goto martian_destination;
1764 err = ip_mkroute_input(skb, &res, &fl4, in_dev, daddr, saddr, tos);
1768 if (skb->protocol != htons(ETH_P_IP))
1771 if (!ipv4_is_zeronet(saddr)) {
1772 err = fib_validate_source(skb, saddr, 0, tos, 0, dev,
1775 goto martian_source_keep_err;
1777 flags |= RTCF_BROADCAST;
1778 res.type = RTN_BROADCAST;
1779 RT_CACHE_STAT_INC(in_brd);
1785 rth = rcu_dereference(FIB_RES_NH(res).nh_rth_input);
1786 if (rt_cache_valid(rth)) {
1787 skb_dst_set_noref(skb, &rth->dst);
1795 rth = rt_dst_alloc(net->loopback_dev,
1796 IN_DEV_CONF_GET(in_dev, NOPOLICY), false, do_cache);
1800 rth->dst.input = ip_local_deliver;
1801 rth->dst.output = ip_rt_bug;
1802 #ifdef CONFIG_IP_ROUTE_CLASSID
1803 rth->dst.tclassid = itag;
1806 rth->rt_genid = rt_genid_ipv4(net);
1807 rth->rt_flags = flags|RTCF_LOCAL;
1808 rth->rt_type = res.type;
1809 rth->rt_is_input = 1;
1812 rth->rt_gateway = 0;
1813 rth->rt_uses_gateway = 0;
1814 INIT_LIST_HEAD(&rth->rt_uncached);
1816 RT_CACHE_STAT_INC(in_slow_tot);
1817 if (res.type == RTN_UNREACHABLE) {
1818 rth->dst.input = ip_error;
1819 rth->dst.error = -err;
1820 rth->rt_flags &= ~RTCF_LOCAL;
1823 if (unlikely(!rt_cache_route(&FIB_RES_NH(res), rth))) {
1824 rth->dst.flags |= DST_NOCACHE;
1825 rt_add_uncached_list(rth);
1828 skb_dst_set(skb, &rth->dst);
1833 RT_CACHE_STAT_INC(in_no_route);
1834 res.type = RTN_UNREACHABLE;
1839 * Do not cache martian addresses: they should be logged (RFC1812)
1841 martian_destination:
1842 RT_CACHE_STAT_INC(in_martian_dst);
1843 #ifdef CONFIG_IP_ROUTE_VERBOSE
1844 if (IN_DEV_LOG_MARTIANS(in_dev))
1845 net_warn_ratelimited("martian destination %pI4 from %pI4, dev %s\n",
1846 &daddr, &saddr, dev->name);
1859 martian_source_keep_err:
1860 ip_handle_martian_source(dev, in_dev, skb, daddr, saddr);
1864 int ip_route_input_noref(struct sk_buff *skb, __be32 daddr, __be32 saddr,
1865 u8 tos, struct net_device *dev)
1871 /* Multicast recognition logic was moved from the route cache to here.
1872 The problem was that too many Ethernet cards have broken/missing
1873 hardware multicast filters :-( As a result, a host on a multicast
1874 network acquires a lot of useless route cache entries, e.g. for
1875 SDR messages from all over the world. Now we try to get rid of them.
1876 Really, provided the software IP multicast filter is organized
1877 reasonably (at least, hashed), it does not result in a slowdown
1878 compared with route cache reject entries.
1879 Note that multicast routers are not affected, because a
1880 route cache entry is created eventually.
1882 if (ipv4_is_multicast(daddr)) {
1883 struct in_device *in_dev = __in_dev_get_rcu(dev);
1886 int our = ip_check_mc_rcu(in_dev, daddr, saddr,
1887 ip_hdr(skb)->protocol);
1889 #ifdef CONFIG_IP_MROUTE
1891 (!ipv4_is_local_multicast(daddr) &&
1892 IN_DEV_MFORWARD(in_dev))
1895 int res = ip_route_input_mc(skb, daddr, saddr,
1904 res = ip_route_input_slow(skb, daddr, saddr, tos, dev);
1908 EXPORT_SYMBOL(ip_route_input_noref);
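/* Caller sketch: ip_rcv_finish() in ip_input.c attaches a route to a
 * freshly received, not-yet-routed packet in essentially this
 * (simplified) way.
 */
static inline int example_route_incoming(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);

	return ip_route_input_noref(skb, iph->daddr, iph->saddr,
				    iph->tos, skb->dev);
}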
1910 /* called with rcu_read_lock() */
1911 static struct rtable *__mkroute_output(const struct fib_result *res,
1912 const struct flowi4 *fl4, int orig_oif,
1913 struct net_device *dev_out,
1916 struct fib_info *fi = res->fi;
1917 struct fib_nh_exception *fnhe;
1918 struct in_device *in_dev;
1919 u16 type = res->type;
1923 in_dev = __in_dev_get_rcu(dev_out);
1925 return ERR_PTR(-EINVAL);
1927 if (likely(!IN_DEV_ROUTE_LOCALNET(in_dev)))
1928 if (ipv4_is_loopback(fl4->saddr) && !(dev_out->flags & IFF_LOOPBACK))
1929 return ERR_PTR(-EINVAL);
1931 if (ipv4_is_lbcast(fl4->daddr))
1932 type = RTN_BROADCAST;
1933 else if (ipv4_is_multicast(fl4->daddr))
1934 type = RTN_MULTICAST;
1935 else if (ipv4_is_zeronet(fl4->daddr))
1936 return ERR_PTR(-EINVAL);
1938 if (dev_out->flags & IFF_LOOPBACK)
1939 flags |= RTCF_LOCAL;
1942 if (type == RTN_BROADCAST) {
1943 flags |= RTCF_BROADCAST | RTCF_LOCAL;
1945 } else if (type == RTN_MULTICAST) {
1946 flags |= RTCF_MULTICAST | RTCF_LOCAL;
1947 if (!ip_check_mc_rcu(in_dev, fl4->daddr, fl4->saddr,
1949 flags &= ~RTCF_LOCAL;
1952 /* If a multicast route does not exist, use the
1953 * default one, but do not use a gateway in this case.
1956 if (fi && res->prefixlen < 4)
1961 do_cache &= fi != NULL;
1963 struct rtable __rcu **prth;
1964 struct fib_nh *nh = &FIB_RES_NH(*res);
1966 fnhe = find_exception(nh, fl4->daddr);
1968 prth = &fnhe->fnhe_rth_output;
1970 if (unlikely(fl4->flowi4_flags &
1971 FLOWI_FLAG_KNOWN_NH &&
1973 nh->nh_scope == RT_SCOPE_LINK))) {
1977 prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
1979 rth = rcu_dereference(*prth);
1980 if (rt_cache_valid(rth)) {
1981 dst_hold(&rth->dst);
1987 rth = rt_dst_alloc(dev_out,
1988 IN_DEV_CONF_GET(in_dev, NOPOLICY),
1989 IN_DEV_CONF_GET(in_dev, NOXFRM),
1992 return ERR_PTR(-ENOBUFS);
1994 rth->dst.output = ip_output;
1996 rth->rt_genid = rt_genid_ipv4(dev_net(dev_out));
1997 rth->rt_flags = flags;
1998 rth->rt_type = type;
1999 rth->rt_is_input = 0;
2000 rth->rt_iif = orig_oif ? : 0;
2002 rth->rt_gateway = 0;
2003 rth->rt_uses_gateway = 0;
2004 INIT_LIST_HEAD(&rth->rt_uncached);
2005 RT_CACHE_STAT_INC(out_slow_tot);
2007 if (flags & RTCF_LOCAL)
2008 rth->dst.input = ip_local_deliver;
2009 if (flags & (RTCF_BROADCAST | RTCF_MULTICAST)) {
2010 if (flags & RTCF_LOCAL &&
2011 !(dev_out->flags & IFF_LOOPBACK)) {
2012 rth->dst.output = ip_mc_output;
2013 RT_CACHE_STAT_INC(out_slow_mc);
2015 #ifdef CONFIG_IP_MROUTE
2016 if (type == RTN_MULTICAST) {
2017 if (IN_DEV_MFORWARD(in_dev) &&
2018 !ipv4_is_local_multicast(fl4->daddr)) {
2019 rth->dst.input = ip_mr_input;
2020 rth->dst.output = ip_mc_output;
2026 rt_set_nexthop(rth, fl4->daddr, res, fnhe, fi, type, 0);
2027 if (lwtunnel_output_redirect(rth->dst.lwtstate))
2028 rth->dst.output = lwtunnel_output;
2034 * Major route resolver routine.
2037 struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2039 struct net_device *dev_out = NULL;
2040 __u8 tos = RT_FL_TOS(fl4);
2041 unsigned int flags = 0;
2042 struct fib_result res;
2050 orig_oif = fl4->flowi4_oif;
2052 fl4->flowi4_iif = LOOPBACK_IFINDEX;
2053 fl4->flowi4_tos = tos & IPTOS_RT_MASK;
2054 fl4->flowi4_scope = ((tos & RTO_ONLINK) ?
2055 RT_SCOPE_LINK : RT_SCOPE_UNIVERSE);
2059 rth = ERR_PTR(-EINVAL);
2060 if (ipv4_is_multicast(fl4->saddr) ||
2061 ipv4_is_lbcast(fl4->saddr) ||
2062 ipv4_is_zeronet(fl4->saddr))
2065 /* I removed the check for oif == dev_out->oif here.
2066 It was wrong for two reasons:
2067 1. ip_dev_find(net, saddr) can return the wrong iface, if saddr
2068 is assigned to multiple interfaces.
2069 2. Moreover, we are allowed to send packets with the saddr
2070 of another iface. --ANK
2073 if (fl4->flowi4_oif == 0 &&
2074 (ipv4_is_multicast(fl4->daddr) ||
2075 ipv4_is_lbcast(fl4->daddr))) {
2076 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2077 dev_out = __ip_dev_find(net, fl4->saddr, false);
2081 /* Special hack: the user can direct multicasts
2082 and limited broadcasts via the necessary interface
2083 without fiddling with IP_MULTICAST_IF or IP_PKTINFO.
2084 This hack is not just for fun, it allows
2085 vic, vat and friends to work.
2086 They bind the socket to loopback, set the ttl to zero
2087 and expect that it will work.
2088 From the viewpoint of the routing cache they are broken,
2089 because we are not allowed to build a multicast path
2090 with a loopback source addr (look, the routing cache
2091 cannot know that the ttl is zero, so the packet
2092 will not leave this host and the route is valid).
2093 Luckily, this hack is a good workaround.
2096 fl4->flowi4_oif = dev_out->ifindex;
2100 if (!(fl4->flowi4_flags & FLOWI_FLAG_ANYSRC)) {
2101 /* It is equivalent to inet_addr_type(saddr) == RTN_LOCAL */
2102 if (!__ip_dev_find(net, fl4->saddr, false))
2108 if (fl4->flowi4_oif) {
2109 dev_out = dev_get_by_index_rcu(net, fl4->flowi4_oif);
2110 rth = ERR_PTR(-ENODEV);
2114 /* RACE: Check return value of inet_select_addr instead. */
2115 if (!(dev_out->flags & IFF_UP) || !__in_dev_get_rcu(dev_out)) {
2116 rth = ERR_PTR(-ENETUNREACH);
2119 if (ipv4_is_local_multicast(fl4->daddr) ||
2120 ipv4_is_lbcast(fl4->daddr) ||
2121 fl4->flowi4_proto == IPPROTO_IGMP) {
2123 fl4->saddr = inet_select_addr(dev_out, 0,
2128 if (ipv4_is_multicast(fl4->daddr))
2129 fl4->saddr = inet_select_addr(dev_out, 0,
2131 else if (!fl4->daddr)
2132 fl4->saddr = inet_select_addr(dev_out, 0,
2135 if (netif_is_vrf(dev_out) &&
2136 !(fl4->flowi4_flags & FLOWI_FLAG_VRFSRC)) {
2137 rth = vrf_dev_get_rth(dev_out);
2143 fl4->daddr = fl4->saddr;
2145 fl4->daddr = fl4->saddr = htonl(INADDR_LOOPBACK);
2146 dev_out = net->loopback_dev;
2147 fl4->flowi4_oif = LOOPBACK_IFINDEX;
2148 res.type = RTN_LOCAL;
2149 flags |= RTCF_LOCAL;
2153 if (fib_lookup(net, fl4, &res, 0)) {
2156 if (fl4->flowi4_oif) {
2157 /* Apparently, the routing tables are wrong. Assume
2158 that the destination is on-link.
2161 Because we are allowed to send to an iface
2162 even if it has NO routes and NO assigned
2163 addresses. When oif is specified, the routing
2164 tables are looked up with only one purpose:
2165 to catch whether the destination is gatewayed rather than
2166 direct. Moreover, if MSG_DONTROUTE is set,
2167 we send the packet, ignoring both routing tables
2168 and ifaddr state. --ANK
2171 We could do this even if oif is unknown,
2172 as IPv6 likely does, but we do not.
2175 if (fl4->saddr == 0)
2176 fl4->saddr = inet_select_addr(dev_out, 0,
2178 res.type = RTN_UNICAST;
2181 rth = ERR_PTR(-ENETUNREACH);
2185 if (res.type == RTN_LOCAL) {
2187 if (res.fi->fib_prefsrc)
2188 fl4->saddr = res.fi->fib_prefsrc;
2190 fl4->saddr = fl4->daddr;
2192 dev_out = net->loopback_dev;
2193 fl4->flowi4_oif = dev_out->ifindex;
2194 flags |= RTCF_LOCAL;
2198 #ifdef CONFIG_IP_ROUTE_MULTIPATH
2199 if (res.fi->fib_nhs > 1 && fl4->flowi4_oif == 0)
2200 fib_select_multipath(&res);
2203 if (!res.prefixlen &&
2204 res.table->tb_num_default > 1 &&
2205 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2206 fib_select_default(fl4, &res);
2209 fl4->saddr = FIB_RES_PREFSRC(net, res);
2211 dev_out = FIB_RES_DEV(res);
2212 fl4->flowi4_oif = dev_out->ifindex;
2216 rth = __mkroute_output(&res, fl4, orig_oif, dev_out, flags);
2222 EXPORT_SYMBOL_GPL(__ip_route_output_key);
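/* Usage sketch (hypothetical helper): fill in a flow key and resolve an
 * output route; inet_rtm_getroute() below builds its flow key for
 * RTM_GETROUTE requests in much the same way.
 */
static inline struct rtable *example_output_route(struct net *net,
						  __be32 daddr, __be32 saddr,
						  u8 tos, int oif)
{
	struct flowi4 fl4;

	memset(&fl4, 0, sizeof(fl4));
	fl4.daddr = daddr;
	fl4.saddr = saddr;
	fl4.flowi4_tos = tos;
	fl4.flowi4_oif = oif;

	return __ip_route_output_key(net, &fl4);
}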
2224 static struct dst_entry *ipv4_blackhole_dst_check(struct dst_entry *dst, u32 cookie)
2229 static unsigned int ipv4_blackhole_mtu(const struct dst_entry *dst)
2231 unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);
2233 return mtu ? : dst->dev->mtu;
2236 static void ipv4_rt_blackhole_update_pmtu(struct dst_entry *dst, struct sock *sk,
2237 struct sk_buff *skb, u32 mtu)
2241 static void ipv4_rt_blackhole_redirect(struct dst_entry *dst, struct sock *sk,
2242 struct sk_buff *skb)
2246 static u32 *ipv4_rt_blackhole_cow_metrics(struct dst_entry *dst,
2252 static struct dst_ops ipv4_dst_blackhole_ops = {
2254 .check = ipv4_blackhole_dst_check,
2255 .mtu = ipv4_blackhole_mtu,
2256 .default_advmss = ipv4_default_advmss,
2257 .update_pmtu = ipv4_rt_blackhole_update_pmtu,
2258 .redirect = ipv4_rt_blackhole_redirect,
2259 .cow_metrics = ipv4_rt_blackhole_cow_metrics,
2260 .neigh_lookup = ipv4_neigh_lookup,
2263 struct dst_entry *ipv4_blackhole_route(struct net *net, struct dst_entry *dst_orig)
2265 struct rtable *ort = (struct rtable *) dst_orig;
2268 rt = dst_alloc(&ipv4_dst_blackhole_ops, NULL, 1, DST_OBSOLETE_NONE, 0);
2270 struct dst_entry *new = &rt->dst;
2273 new->input = dst_discard;
2274 new->output = dst_discard_sk;
2276 new->dev = ort->dst.dev;
2280 rt->rt_is_input = ort->rt_is_input;
2281 rt->rt_iif = ort->rt_iif;
2282 rt->rt_pmtu = ort->rt_pmtu;
2284 rt->rt_genid = rt_genid_ipv4(net);
2285 rt->rt_flags = ort->rt_flags;
2286 rt->rt_type = ort->rt_type;
2287 rt->rt_gateway = ort->rt_gateway;
2288 rt->rt_uses_gateway = ort->rt_uses_gateway;
2290 INIT_LIST_HEAD(&rt->rt_uncached);
2294 dst_release(dst_orig);
2296 return rt ? &rt->dst : ERR_PTR(-ENOMEM);
2299 struct rtable *ip_route_output_flow(struct net *net, struct flowi4 *flp4,
2302 struct rtable *rt = __ip_route_output_key(net, flp4);
2307 if (flp4->flowi4_proto)
2308 rt = (struct rtable *)xfrm_lookup_route(net, &rt->dst,
2309 flowi4_to_flowi(flp4),
2314 EXPORT_SYMBOL_GPL(ip_route_output_flow);
2316 static int rt_fill_info(struct net *net, __be32 dst, __be32 src,
2317 struct flowi4 *fl4, struct sk_buff *skb, u32 portid,
2318 u32 seq, int event, int nowait, unsigned int flags)
2320 struct rtable *rt = skb_rtable(skb);
2322 struct nlmsghdr *nlh;
2323 unsigned long expires = 0;
2325 u32 metrics[RTAX_MAX];
2327 nlh = nlmsg_put(skb, portid, seq, event, sizeof(*r), flags);
2331 r = nlmsg_data(nlh);
2332 r->rtm_family = AF_INET;
2333 r->rtm_dst_len = 32;
2335 r->rtm_tos = fl4->flowi4_tos;
2336 r->rtm_table = RT_TABLE_MAIN;
2337 if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN))
2338 goto nla_put_failure;
2339 r->rtm_type = rt->rt_type;
2340 r->rtm_scope = RT_SCOPE_UNIVERSE;
2341 r->rtm_protocol = RTPROT_UNSPEC;
2342 r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
2343 if (rt->rt_flags & RTCF_NOTIFY)
2344 r->rtm_flags |= RTM_F_NOTIFY;
2345 if (IPCB(skb)->flags & IPSKB_DOREDIRECT)
2346 r->rtm_flags |= RTCF_DOREDIRECT;
2348 if (nla_put_in_addr(skb, RTA_DST, dst))
2349 goto nla_put_failure;
2351 r->rtm_src_len = 32;
2352 if (nla_put_in_addr(skb, RTA_SRC, src))
2353 goto nla_put_failure;
2356 nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex))
2357 goto nla_put_failure;
2358 #ifdef CONFIG_IP_ROUTE_CLASSID
2359 if (rt->dst.tclassid &&
2360 nla_put_u32(skb, RTA_FLOW, rt->dst.tclassid))
2361 goto nla_put_failure;
2363 if (!rt_is_input_route(rt) &&
2364 fl4->saddr != src) {
2365 if (nla_put_in_addr(skb, RTA_PREFSRC, fl4->saddr))
2366 goto nla_put_failure;
2368 if (rt->rt_uses_gateway &&
2369 nla_put_in_addr(skb, RTA_GATEWAY, rt->rt_gateway))
2370 goto nla_put_failure;
2372 expires = rt->dst.expires;
2374 unsigned long now = jiffies;
2376 if (time_before(now, expires))
2382 memcpy(metrics, dst_metrics_ptr(&rt->dst), sizeof(metrics));
2383 if (rt->rt_pmtu && expires)
2384 metrics[RTAX_MTU - 1] = rt->rt_pmtu;
2385 if (rtnetlink_put_metrics(skb, metrics) < 0)
2386 goto nla_put_failure;
2388 if (fl4->flowi4_mark &&
2389 nla_put_u32(skb, RTA_MARK, fl4->flowi4_mark))
2390 goto nla_put_failure;
2392 error = rt->dst.error;
2394 if (rt_is_input_route(rt)) {
2395 #ifdef CONFIG_IP_MROUTE
2396 if (ipv4_is_multicast(dst) && !ipv4_is_local_multicast(dst) &&
2397 IPV4_DEVCONF_ALL(net, MC_FORWARDING)) {
2398 int err = ipmr_get_route(net, skb,
2399 fl4->saddr, fl4->daddr,
2405 goto nla_put_failure;
2407 if (err == -EMSGSIZE)
2408 goto nla_put_failure;
2414 if (nla_put_u32(skb, RTA_IIF, skb->dev->ifindex))
2415 goto nla_put_failure;
2418 if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires, error) < 0)
2419 goto nla_put_failure;
2421 nlmsg_end(skb, nlh);
2425 nlmsg_cancel(skb, nlh);
2429 static int inet_rtm_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh)
2431 struct net *net = sock_net(in_skb->sk);
2433 struct nlattr *tb[RTA_MAX+1];
2434 struct rtable *rt = NULL;
2441 struct sk_buff *skb;
2443 err = nlmsg_parse(nlh, sizeof(*rtm), tb, RTA_MAX, rtm_ipv4_policy);
2447 rtm = nlmsg_data(nlh);
2449 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
2455 /* Reserve room for dummy headers; this skb can pass
2456 through a good chunk of the routing engine.
2458 skb_reset_mac_header(skb);
2459 skb_reset_network_header(skb);
2461 /* Bugfix: need to give ip_route_input enough of an IP header to not gag. */
2462 ip_hdr(skb)->protocol = IPPROTO_ICMP;
2463 skb_reserve(skb, MAX_HEADER + sizeof(struct iphdr));
2465 src = tb[RTA_SRC] ? nla_get_in_addr(tb[RTA_SRC]) : 0;
2466 dst = tb[RTA_DST] ? nla_get_in_addr(tb[RTA_DST]) : 0;
2467 iif = tb[RTA_IIF] ? nla_get_u32(tb[RTA_IIF]) : 0;
2468 mark = tb[RTA_MARK] ? nla_get_u32(tb[RTA_MARK]) : 0;
2470 memset(&fl4, 0, sizeof(fl4));
2473 fl4.flowi4_tos = rtm->rtm_tos;
2474 fl4.flowi4_oif = tb[RTA_OIF] ? nla_get_u32(tb[RTA_OIF]) : 0;
2475 fl4.flowi4_mark = mark;
2478 struct net_device *dev;
2480 dev = __dev_get_by_index(net, iif);
2486 skb->protocol = htons(ETH_P_IP);
2490 err = ip_route_input(skb, dst, src, rtm->rtm_tos, dev);
2493 rt = skb_rtable(skb);
2494 if (err == 0 && rt->dst.error)
2495 err = -rt->dst.error;
2497 rt = ip_route_output_key(net, &fl4);
2507 skb_dst_set(skb, &rt->dst);
2508 if (rtm->rtm_flags & RTM_F_NOTIFY)
2509 rt->rt_flags |= RTCF_NOTIFY;
2511 err = rt_fill_info(net, dst, src, &fl4, skb,
2512 NETLINK_CB(in_skb).portid, nlh->nlmsg_seq,
2513 RTM_NEWROUTE, 0, 0);
2517 err = rtnl_unicast(skb, net, NETLINK_CB(in_skb).portid);
2526 void ip_rt_multicast_event(struct in_device *in_dev)
2528 rt_cache_flush(dev_net(in_dev->dev));
2531 #ifdef CONFIG_SYSCTL
2532 static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
2533 static int ip_rt_gc_interval __read_mostly = 60 * HZ;
2534 static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
2535 static int ip_rt_gc_elasticity __read_mostly = 8;
2537 static int ipv4_sysctl_rtcache_flush(struct ctl_table *__ctl, int write,
2538 void __user *buffer,
2539 size_t *lenp, loff_t *ppos)
2541 struct net *net = (struct net *)__ctl->extra1;
2544 rt_cache_flush(net);
2545 fnhe_genid_bump(net);
2552 static struct ctl_table ipv4_route_table[] = {
2554 .procname = "gc_thresh",
2555 .data = &ipv4_dst_ops.gc_thresh,
2556 .maxlen = sizeof(int),
2558 .proc_handler = proc_dointvec,
2561 .procname = "max_size",
2562 .data = &ip_rt_max_size,
2563 .maxlen = sizeof(int),
2565 .proc_handler = proc_dointvec,
2568 /* Deprecated. Use gc_min_interval_ms */
2570 .procname = "gc_min_interval",
2571 .data = &ip_rt_gc_min_interval,
2572 .maxlen = sizeof(int),
2574 .proc_handler = proc_dointvec_jiffies,
2577 .procname = "gc_min_interval_ms",
2578 .data = &ip_rt_gc_min_interval,
2579 .maxlen = sizeof(int),
2581 .proc_handler = proc_dointvec_ms_jiffies,
2584 .procname = "gc_timeout",
2585 .data = &ip_rt_gc_timeout,
2586 .maxlen = sizeof(int),
2588 .proc_handler = proc_dointvec_jiffies,
2591 .procname = "gc_interval",
2592 .data = &ip_rt_gc_interval,
2593 .maxlen = sizeof(int),
2595 .proc_handler = proc_dointvec_jiffies,
2598 .procname = "redirect_load",
2599 .data = &ip_rt_redirect_load,
2600 .maxlen = sizeof(int),
2602 .proc_handler = proc_dointvec,
2605 .procname = "redirect_number",
2606 .data = &ip_rt_redirect_number,
2607 .maxlen = sizeof(int),
2609 .proc_handler = proc_dointvec,
2612 .procname = "redirect_silence",
2613 .data = &ip_rt_redirect_silence,
2614 .maxlen = sizeof(int),
2616 .proc_handler = proc_dointvec,
2619 .procname = "error_cost",
2620 .data = &ip_rt_error_cost,
2621 .maxlen = sizeof(int),
2623 .proc_handler = proc_dointvec,
2626 .procname = "error_burst",
2627 .data = &ip_rt_error_burst,
2628 .maxlen = sizeof(int),
2630 .proc_handler = proc_dointvec,
2633 .procname = "gc_elasticity",
2634 .data = &ip_rt_gc_elasticity,
2635 .maxlen = sizeof(int),
2637 .proc_handler = proc_dointvec,
2640 .procname = "mtu_expires",
2641 .data = &ip_rt_mtu_expires,
2642 .maxlen = sizeof(int),
2644 .proc_handler = proc_dointvec_jiffies,
2647 .procname = "min_pmtu",
2648 .data = &ip_rt_min_pmtu,
2649 .maxlen = sizeof(int),
2651 .proc_handler = proc_dointvec,
2654 .procname = "min_adv_mss",
2655 .data = &ip_rt_min_advmss,
2656 .maxlen = sizeof(int),
2658 .proc_handler = proc_dointvec,
2663 static struct ctl_table ipv4_route_flush_table[] = {
2665 .procname = "flush",
2666 .maxlen = sizeof(int),
2668 .proc_handler = ipv4_sysctl_rtcache_flush,
2673 static __net_init int sysctl_route_net_init(struct net *net)
2675 struct ctl_table *tbl;
2677 tbl = ipv4_route_flush_table;
2678 if (!net_eq(net, &init_net)) {
2679 tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
2683 /* Don't export sysctls to unprivileged users */
2684 if (net->user_ns != &init_user_ns)
2685 tbl[0].procname = NULL;
2687 tbl[0].extra1 = net;
2689 net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
2690 if (!net->ipv4.route_hdr)
2695 if (tbl != ipv4_route_flush_table)
2701 static __net_exit void sysctl_route_net_exit(struct net *net)
2703 struct ctl_table *tbl;
2705 tbl = net->ipv4.route_hdr->ctl_table_arg;
2706 unregister_net_sysctl_table(net->ipv4.route_hdr);
2707 BUG_ON(tbl == ipv4_route_flush_table);
2711 static __net_initdata struct pernet_operations sysctl_route_ops = {
2712 .init = sysctl_route_net_init,
2713 .exit = sysctl_route_net_exit,
2717 static __net_init int rt_genid_init(struct net *net)
2719 atomic_set(&net->ipv4.rt_genid, 0);
2720 atomic_set(&net->fnhe_genid, 0);
2721 get_random_bytes(&net->ipv4.dev_addr_genid,
2722 sizeof(net->ipv4.dev_addr_genid));
2726 static __net_initdata struct pernet_operations rt_genid_ops = {
2727 .init = rt_genid_init,
2730 static int __net_init ipv4_inetpeer_init(struct net *net)
2732 struct inet_peer_base *bp = kmalloc(sizeof(*bp), GFP_KERNEL);
2736 inet_peer_base_init(bp);
2737 net->ipv4.peers = bp;
2741 static void __net_exit ipv4_inetpeer_exit(struct net *net)
2743 struct inet_peer_base *bp = net->ipv4.peers;
2745 net->ipv4.peers = NULL;
2746 inetpeer_invalidate_tree(bp);
2750 static __net_initdata struct pernet_operations ipv4_inetpeer_ops = {
2751 .init = ipv4_inetpeer_init,
2752 .exit = ipv4_inetpeer_exit,
2755 #ifdef CONFIG_IP_ROUTE_CLASSID
2756 struct ip_rt_acct __percpu *ip_rt_acct __read_mostly;
2757 #endif /* CONFIG_IP_ROUTE_CLASSID */
2759 int __init ip_rt_init(void)
2764 ip_idents = kmalloc(IP_IDENTS_SZ * sizeof(*ip_idents), GFP_KERNEL);
2766 panic("IP: failed to allocate ip_idents\n");
2768 prandom_bytes(ip_idents, IP_IDENTS_SZ * sizeof(*ip_idents));
2770 ip_tstamps = kcalloc(IP_IDENTS_SZ, sizeof(*ip_tstamps), GFP_KERNEL);
2772 panic("IP: failed to allocate ip_tstamps\n");
2774 for_each_possible_cpu(cpu) {
2775 struct uncached_list *ul = &per_cpu(rt_uncached_list, cpu);
2777 INIT_LIST_HEAD(&ul->head);
2778 spin_lock_init(&ul->lock);
2780 #ifdef CONFIG_IP_ROUTE_CLASSID
2781 ip_rt_acct = __alloc_percpu(256 * sizeof(struct ip_rt_acct), __alignof__(struct ip_rt_acct));
2783 panic("IP: failed to allocate ip_rt_acct\n");
2786 ipv4_dst_ops.kmem_cachep =
2787 kmem_cache_create("ip_dst_cache", sizeof(struct rtable), 0,
2788 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2790 ipv4_dst_blackhole_ops.kmem_cachep = ipv4_dst_ops.kmem_cachep;
2792 if (dst_entries_init(&ipv4_dst_ops) < 0)
2793 panic("IP: failed to allocate ipv4_dst_ops counter\n");
2795 if (dst_entries_init(&ipv4_dst_blackhole_ops) < 0)
2796 panic("IP: failed to allocate ipv4_dst_blackhole_ops counter\n");
2798 ipv4_dst_ops.gc_thresh = ~0;
2799 ip_rt_max_size = INT_MAX;
2804 if (ip_rt_proc_init())
2805 pr_err("Unable to create route proc files\n");
2810 rtnl_register(PF_INET, RTM_GETROUTE, inet_rtm_getroute, NULL, NULL);
2812 #ifdef CONFIG_SYSCTL
2813 register_pernet_subsys(&sysctl_route_ops);
2815 register_pernet_subsys(&rt_genid_ops);
2816 register_pernet_subsys(&ipv4_inetpeer_ops);
2820 #ifdef CONFIG_SYSCTL
2822 * We really need to sanitize the damn ipv4 init order, then all
2823 * this nonsense will go away.
2825 void __init ip_static_sysctl_init(void)
2827 register_net_sysctl(&init_net, "net/ipv4/route", ipv4_route_table);