// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) module.
 *
 * Fixes:
 *		Alan Cox	:	Commented a couple of minor bits of surplus code
 *		Alan Cox	:	Undefining IP_FORWARD doesn't include the code
 *					(just stops a compiler warning).
 *		Alan Cox	:	Frames with >=MAX_ROUTE record routes, strict routes or loose routes
 *					are junked rather than corrupting things.
 *		Alan Cox	:	Frames to bad broadcast subnets are dumped
 *					We used to process them non broadcast and
 *					boy could that cause havoc.
 *		Alan Cox	:	ip_forward sets the free flag on the
 *					new frame it queues. Still crap because
 *					it copies the frame but at least it
 *					doesn't eat memory too.
 *		Alan Cox	:	Generic queue code and memory fixes.
 *		Fred Van Kempen	:	IP fragment support (borrowed from NET2E)
 *		Gerhard Koerting:	Forward fragmented frames correctly.
 *		Gerhard Koerting:	Fixes to my fix of the above 8-).
 *		Gerhard Koerting:	IP interface addressing fix.
 *		Linus Torvalds	:	More robustness checks
 *		Alan Cox	:	Even more checks: Still not as robust as it ought to be
 *		Alan Cox	:	Save IP header pointer for later
 *		Alan Cox	:	ip option setting
 *		Alan Cox	:	Use ip_tos/ip_ttl settings
 *		Alan Cox	:	Fragmentation bogosity removed
 *		Dmitry Gorodchanin	:	Send of a raw packet crash fix.
 *		Alan Cox	:	Silly ip bug when an overlength
 *					fragment turns up. Now frees the
 *					queue entry properly too.
 *		Linus Torvalds/	:	Memory leakage on fragmentation
 *		Alan Cox	:	handling.
 *		Gerhard Koerting:	Forwarding uses IP priority hints
 *		Teemu Rantanen	:	Fragment problems.
 *		Alan Cox	:	General cleanup, comments and reformat
 *		Alan Cox	:	SNMP statistics
 *		Alan Cox	:	BSD address rule semantics. Also see
 *					UDP as there is a nasty checksum issue
 *					if you do things the wrong way.
 *		Alan Cox	:	Always defrag, moved IP_FORWARD to the config.in file
 *		Alan Cox	:	IP options adjust sk->priority.
 *		Pedro Roque	:	Fix mtu/length error in ip_forward.
 *		Alan Cox	:	Avoid ip_chk_addr when possible.
 *		Richard Underwood	:	IP multicasting.
 *		Alan Cox	:	Cleaned up multicast handlers.
 *		Alan Cox	:	RAW sockets demultiplex in the BSD style.
 *		Gunther Mayer	:	Fix the SNMP reporting typo
 *		Alan Cox	:	Always in group 224.0.0.1
 *		Pauline Middelink	:	Fast ip_checksum update when forwarding
 *					Masquerading support.
 *		Alan Cox	:	Multicast loopback error for 224.0.0.1
 *		Alan Cox	:	IP_MULTICAST_LOOP option.
 *		Alan Cox	:	Use notifiers.
 *		Bjorn Ekwall	:	Removed ip_csum (from slhc.c too)
 *		Bjorn Ekwall	:	Moved ip_fast_csum to ip.h (inline!)
 *		Stefan Becker	:	Send out ICMP HOST REDIRECT
 *		Arnt Gulbrandsen	:	ip_build_xmit
 *		Alan Cox	:	Per socket routing cache
 *		Alan Cox	:	Fixed routing cache, added header cache.
 *		Alan Cox	:	Loopback didn't work right in original ip_build_xmit - fixed it.
 *		Alan Cox	:	Only send ICMP_REDIRECT if src/dest are the same net.
 *		Alan Cox	:	Incoming IP option handling.
 *		Alan Cox	:	Set saddr on raw output frames as per BSD.
 *		Alan Cox	:	Stopped broadcast source route explosions.
 *		Alan Cox	:	Can disable source routing
 *		Takeshi Sone	:	Masquerading didn't work.
 *		Dave Bonn,Alan Cox	:	Faster IP forwarding whenever possible.
 *		Alan Cox	:	Memory leaks, tramples, misc debugging.
 *		Alan Cox	:	Fixed multicast (by popular demand 8))
 *		Alan Cox	:	Fixed forwarding (by even more popular demand 8))
 *		Alan Cox	:	Fixed SNMP statistics [I think]
 *		Gerhard Koerting	:	IP fragmentation forwarding fix
 *		Alan Cox	:	Device lock against page fault.
 *		Alan Cox	:	IP_HDRINCL facility.
 *		Werner Almesberger	:	Zero fragment bug
 *		Alan Cox	:	RAW IP frame length bug
 *		Alan Cox	:	Outgoing firewall on build_xmit
 *		A.N.Kuznetsov	:	IP_OPTIONS support throughout the kernel
 *		Alan Cox	:	Multicast routing hooks
 *		Jos Vos		:	Do accounting *before* call_in_firewall
 *		Willy Konynenberg	:	Transparent proxying support
 *
 *	To Fix:
 *		IP fragmentation wants rewriting cleanly. The RFC815 algorithm is much more efficient
 *		and could be made very efficient with the addition of some virtual memory hacks to permit
 *		the allocation of a buffer that can then be 'grown' by twiddling page tables.
 *		Output fragmentation wants updating along with the buffer management to use a single
 *		interleaved copy algorithm so that fragmenting has a one copy overhead. Actual packet
 *		output should probably do its own fragmentation at the UDP/RAW layer. TCP shouldn't cause
 *		fragmentation anyway.
 */

#define pr_fmt(fmt) "IPv4: " fmt

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/slab.h>

#include <linux/net.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/inetdevice.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/indirect_call_wrapper.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inet_ecn.h>
#include <linux/netfilter_ipv4.h>
#include <net/xfrm.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <net/dst_metadata.h>

/*
 *	Process Router Attention IP option (RFC 2113)
 */
bool ip_call_ra_chain(struct sk_buff *skb)
{
	struct ip_ra_chain *ra;
	u8 protocol = ip_hdr(skb)->protocol;
	struct sock *last = NULL;
	struct net_device *dev = skb->dev;
	struct net *net = dev_net(dev);

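	/* Walk the per-netns list of sockets that enabled IP_ROUTER_ALERT:
	 * every matching raw socket but the last gets a clone of the frame,
	 * and the final match receives the original via raw_rcv().
	 */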
	for (ra = rcu_dereference(net->ipv4.ra_chain); ra; ra = rcu_dereference(ra->next)) {
		struct sock *sk = ra->sk;

		/* If socket is bound to an interface, only report
		 * the packet if it came from that interface.
		 */
		if (sk && inet_sk(sk)->inet_num == protocol &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == dev->ifindex)) {
			if (ip_is_fragment(ip_hdr(skb))) {
				if (ip_defrag(net, skb, IP_DEFRAG_CALL_RA_CHAIN))
					return true;
			}
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);

				if (skb2)
					raw_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		raw_rcv(last, skb);
		return true;
	}
	return false;
}

INDIRECT_CALLABLE_DECLARE(int udp_rcv(struct sk_buff *));
INDIRECT_CALLABLE_DECLARE(int tcp_v4_rcv(struct sk_buff *));
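/* Declaring the TCP/UDP receive handlers here lets INDIRECT_CALL_2() below
 * compare ipprot->handler against them and call them directly, which avoids
 * a retpoline-protected indirect call on the two hottest protocol paths.
 */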
void ip_protocol_deliver_rcu(struct net *net, struct sk_buff *skb, int protocol)
{
	const struct net_protocol *ipprot;
	int raw, ret;

resubmit:
	raw = raw_local_deliver(skb, protocol);

	ipprot = rcu_dereference(inet_protos[protocol]);
	if (ipprot) {
		if (!ipprot->no_policy) {
			if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				kfree_skb_reason(skb,
						 SKB_DROP_REASON_XFRM_POLICY);
				return;
			}
			nf_reset_ct(skb);
		}
		ret = INDIRECT_CALL_2(ipprot->handler, tcp_v4_rcv, udp_rcv,
				      skb);
		if (ret < 0) {
			protocol = -ret;
			goto resubmit;
		}
		__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
	} else {
		if (!raw) {
			if (xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
				__IP_INC_STATS(net, IPSTATS_MIB_INUNKNOWNPROTOS);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_PROT_UNREACH, 0);
			}
			kfree_skb_reason(skb, SKB_DROP_REASON_IP_NOPROTO);
		} else {
			__IP_INC_STATS(net, IPSTATS_MIB_INDELIVERS);
			consume_skb(skb);
		}
	}
}

static int ip_local_deliver_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	skb_clear_delivery_time(skb);
	__skb_pull(skb, skb_network_header_len(skb));

	rcu_read_lock();
	ip_protocol_deliver_rcu(net, skb, ip_hdr(skb)->protocol);
	rcu_read_unlock();

	return 0;
}

/*
 *	Deliver IP Packets to the higher protocol layers.
 */
int ip_local_deliver(struct sk_buff *skb)
{
	/*
	 *	Reassemble IP fragments.
	 */
	struct net *net = dev_net(skb->dev);

	if (ip_is_fragment(ip_hdr(skb))) {
		if (ip_defrag(net, skb, IP_DEFRAG_LOCAL_DELIVER))
			return 0;
	}

	return NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_IN,
		       net, NULL, skb, skb->dev, NULL,
		       ip_local_deliver_finish);
}
EXPORT_SYMBOL(ip_local_deliver);

static inline bool ip_rcv_options(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_options *opt;
	const struct iphdr *iph;

	/* It looks as overkill, because not all
	 * IP options require packet mangling.
	 * But it is the easiest for now, especially taking
	 * into account that combination of IP options
	 * and running sniffer is extremely rare condition.
	 */
	if (skb_cow(skb, skb_headroom(skb))) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	opt = &(IPCB(skb)->opt);
	opt->optlen = iph->ihl*4 - sizeof(struct iphdr);

	if (ip_options_compile(dev_net(dev), opt, skb)) {
		__IP_INC_STATS(dev_net(dev), IPSTATS_MIB_INHDRERRORS);
		goto drop;
	}

	if (unlikely(opt->srr)) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev) {
			if (!IN_DEV_SOURCE_ROUTE(in_dev)) {
				if (IN_DEV_LOG_MARTIANS(in_dev))
					net_info_ratelimited("source route option %pI4 -> %pI4\n",
							     &iph->saddr,
							     &iph->daddr);
				goto drop;
			}
		}

		if (ip_options_rcv_srr(skb, dev))
			goto drop;
	}

	return false;
drop:
	return true;
}
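
/* A routing "hint" is an earlier skb from the same receive batch: if the new
 * packet has the same destination address and TOS and no dst attached yet,
 * the hint's routing decision can be reused instead of doing a fresh lookup.
 */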
static bool ip_can_use_hint(const struct sk_buff *skb, const struct iphdr *iph,
			    const struct sk_buff *hint)
{
	return hint && !skb_dst(skb) && ip_hdr(hint)->daddr == iph->daddr &&
	       ip_hdr(hint)->tos == iph->tos;
}

int tcp_v4_early_demux(struct sk_buff *skb);
int udp_v4_early_demux(struct sk_buff *skb);

static int ip_rcv_finish_core(struct net *net, struct sock *sk,
			      struct sk_buff *skb, struct net_device *dev,
			      const struct sk_buff *hint)
{
	const struct iphdr *iph = ip_hdr(skb);
	int err, drop_reason;
	struct rtable *rt;

	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;

	if (ip_can_use_hint(skb, iph, hint)) {
		err = ip_route_use_hint(skb, iph->daddr, iph->saddr, iph->tos,
					dev, hint);
		if (unlikely(err))
			goto drop_error;
	}

	if (READ_ONCE(net->ipv4.sysctl_ip_early_demux) &&
	    !skb_dst(skb) &&
	    !skb->sk &&
	    !ip_is_fragment(iph)) {
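		/* Early demux: look the receiving socket up before routing so
		 * that an established socket's cached dst can be reused and
		 * the full route lookup below skipped in the common case.
		 */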
		switch (iph->protocol) {
		case IPPROTO_TCP:
			if (READ_ONCE(net->ipv4.sysctl_tcp_early_demux)) {
				tcp_v4_early_demux(skb);

				/* must reload iph, skb->head might have changed */
				iph = ip_hdr(skb);
			}
			break;
		case IPPROTO_UDP:
			if (READ_ONCE(net->ipv4.sysctl_udp_early_demux)) {
				err = udp_v4_early_demux(skb);
				if (unlikely(err))
					goto drop_error;

				/* must reload iph, skb->head might have changed */
				iph = ip_hdr(skb);
			}
			break;
		}
	}

	/*
	 *	Initialise the virtual path cache for the packet. It describes
	 *	how the packet travels inside Linux networking.
	 */
	if (!skb_valid_dst(skb)) {
		err = ip_route_input_noref(skb, iph->daddr, iph->saddr,
					   iph->tos, dev);
		if (unlikely(err))
			goto drop_error;
	} else {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		if (in_dev && IN_DEV_ORCONF(in_dev, NOPOLICY))
			IPCB(skb)->flags |= IPSKB_NOPOLICY;
	}
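
	/* Route realm accounting: the dst's tclassid packs two 8-bit realm
	 * indices, the low byte indexing the output counters and bits 16-23
	 * the input counters of the per-CPU ip_rt_acct table.
	 */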
#ifdef CONFIG_IP_ROUTE_CLASSID
	if (unlikely(skb_dst(skb)->tclassid)) {
		struct ip_rt_acct *st = this_cpu_ptr(ip_rt_acct);
		u32 idx = skb_dst(skb)->tclassid;
		st[idx&0xFF].o_packets++;
		st[idx&0xFF].o_bytes += skb->len;
		st[(idx>>16)&0xFF].i_packets++;
		st[(idx>>16)&0xFF].i_bytes += skb->len;
	}
#endif

	if (iph->ihl > 5 && ip_rcv_options(skb, dev))
		goto drop;

	rt = skb_rtable(skb);
	if (rt->rt_type == RTN_MULTICAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST) {
		__IP_UPD_PO_STATS(net, IPSTATS_MIB_INBCAST, skb->len);
	} else if (skb->pkt_type == PACKET_BROADCAST ||
		   skb->pkt_type == PACKET_MULTICAST) {
		struct in_device *in_dev = __in_dev_get_rcu(dev);

		/* RFC 1122 3.3.6:
		 *
		 *   When a host sends a datagram to a link-layer broadcast
		 *   address, the IP destination address MUST be a legal IP
		 *   broadcast or IP multicast address.
		 *
		 *   A host SHOULD silently discard a datagram that is received
		 *   via a link-layer broadcast (see Section 2.4) but does not
		 *   specify an IP multicast or broadcast destination address.
		 *
		 * This doesn't explicitly say L2 *broadcast*, but broadcast is
		 * in a way a form of multicast and the most common use case for
		 * this is 802.11 protecting against cross-station spoofing (the
		 * so-called "hole-196" attack) so do it for both.
		 */
		if (in_dev &&
		    IN_DEV_ORCONF(in_dev, DROP_UNICAST_IN_L2_MULTICAST)) {
			drop_reason = SKB_DROP_REASON_UNICAST_IN_L2_MULTICAST;
			goto drop;
		}
	}

	return NET_RX_SUCCESS;

drop:
	kfree_skb_reason(skb, drop_reason);
	return NET_RX_DROP;

drop_error:
	if (err == -EXDEV) {
		drop_reason = SKB_DROP_REASON_IP_RPFILTER;
		__NET_INC_STATS(net, LINUX_MIB_IPRPFILTER);
	}
	goto drop;
}

static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	int ret;

	/* if ingress device is enslaved to an L3 master device pass the
	 * skb to its handler for processing
	 */
	skb = l3mdev_ip_rcv(skb);
	if (!skb)
		return NET_RX_SUCCESS;

	ret = ip_rcv_finish_core(net, sk, skb, dev, NULL);
	if (ret != NET_RX_DROP)
		ret = dst_input(skb);
	return ret;
}

/*
 *	Main IP Receive routine.
 */
static struct sk_buff *ip_rcv_core(struct sk_buff *skb, struct net *net)
{
	const struct iphdr *iph;
	int drop_reason;
	u32 len;

	/* When the interface is in promisc. mode, drop all the crap
	 * that it receives, do not try to analyse it.
	 */
	if (skb->pkt_type == PACKET_OTHERHOST) {
		dev_core_stats_rx_otherhost_dropped_inc(skb->dev);
		drop_reason = SKB_DROP_REASON_OTHERHOST;
		goto drop;
	}

	__IP_UPD_PO_STATS(net, IPSTATS_MIB_IN, skb->len);

	skb = skb_share_check(skb, GFP_ATOMIC);
	if (!skb) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto out;
	}

	drop_reason = SKB_DROP_REASON_NOT_SPECIFIED;
	if (!pskb_may_pull(skb, sizeof(struct iphdr)))
		goto inhdr_error;

	iph = ip_hdr(skb);

	/*
	 *	RFC1122: 3.2.1.2 MUST silently discard any IP frame that fails the checksum.
	 *
	 *	Is the datagram acceptable?
	 *
	 *	1.	Length at least the size of an ip header
	 *	2.	Version of 4
	 *	3.	Checksums correctly. [Speed optimisation for later, skip loopback checksums]
	 *	4.	Doesn't have a bogus length
	 */

	if (iph->ihl < 5 || iph->version != 4)
		goto inhdr_error;
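
	/* The two ECN bits of iph->tos map 1:1 onto the per-codepoint MIB
	 * counters (the BUILD_BUG_ONs below pin that layout), so they are used
	 * directly as an offset from IPSTATS_MIB_NOECTPKTS; GRO'd packets are
	 * counted once per segment.
	 */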
	BUILD_BUG_ON(IPSTATS_MIB_ECT1PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_1);
	BUILD_BUG_ON(IPSTATS_MIB_ECT0PKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_ECT_0);
	BUILD_BUG_ON(IPSTATS_MIB_CEPKTS != IPSTATS_MIB_NOECTPKTS + INET_ECN_CE);
	__IP_ADD_STATS(net,
		       IPSTATS_MIB_NOECTPKTS + (iph->tos & INET_ECN_MASK),
		       max_t(unsigned short, 1, skb_shinfo(skb)->gso_segs));

	if (!pskb_may_pull(skb, iph->ihl*4))
		goto inhdr_error;

	iph = ip_hdr(skb);

	if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
		goto csum_error;

	len = iph_totlen(skb, iph);
	if (skb->len < len) {
		drop_reason = SKB_DROP_REASON_PKT_TOO_SMALL;
		__IP_INC_STATS(net, IPSTATS_MIB_INTRUNCATEDPKTS);
		goto drop;
	} else if (len < (iph->ihl*4))
		goto inhdr_error;

	/* Our transport medium may have padded the buffer out. Now we know it
	 * is IP we can trim to the true length of the frame.
	 * Note this now means skb->len holds ntohs(iph->tot_len).
	 */
	if (pskb_trim_rcsum(skb, len)) {
		__IP_INC_STATS(net, IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	iph = ip_hdr(skb);
	skb->transport_header = skb->network_header + iph->ihl*4;

	/* Remove any debris in the socket control block */
	memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
	IPCB(skb)->iif = skb->skb_iif;

	/* Must drop socket now because of tproxy. */
	if (!skb_sk_is_prefetched(skb))
		skb_orphan(skb);

	return skb;

csum_error:
	drop_reason = SKB_DROP_REASON_IP_CSUM;
	__IP_INC_STATS(net, IPSTATS_MIB_CSUMERRORS);
inhdr_error:
	if (drop_reason == SKB_DROP_REASON_NOT_SPECIFIED)
		drop_reason = SKB_DROP_REASON_IP_INHDR;
	__IP_INC_STATS(net, IPSTATS_MIB_INHDRERRORS);
drop:
	kfree_skb_reason(skb, drop_reason);
out:
	return NULL;
}

/*
 * IP receive entry point
 */
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
	   struct net_device *orig_dev)
{
	struct net *net = dev_net(dev);

	skb = ip_rcv_core(skb, net);
	if (skb == NULL)
		return NET_RX_DROP;

	return NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING,
		       net, NULL, skb, dev, NULL,
		       ip_rcv_finish);
}

static void ip_sublist_rcv_finish(struct list_head *head)
{
	struct sk_buff *skb, *next;

	list_for_each_entry_safe(skb, next, head, list) {
		skb_list_del_init(skb);
		dst_input(skb);
	}
}
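
/* With custom FIB rules the routing decision can depend on more than the
 * destination address and TOS (e.g. iif or fwmark), and broadcast dsts are
 * not reused either, so in those cases no hint is handed to the following
 * packets and each one goes through a full route lookup.
 */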
static struct sk_buff *ip_extract_route_hint(const struct net *net,
					     struct sk_buff *skb, int rt_type)
{
	if (fib4_has_custom_rules(net) || rt_type == RTN_BROADCAST)
		return NULL;

	return skb;
}
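
/* Consecutive packets that resolved to the same dst stay on one sublist; a
 * dst change flushes the sublist to ip_sublist_rcv_finish() and, via
 * ip_extract_route_hint(), supplies the hint used to short-cut routing for
 * the packets that follow.
 */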
static void ip_list_rcv_finish(struct net *net, struct sock *sk,
			       struct list_head *head)
{
	struct sk_buff *skb, *next, *hint = NULL;
	struct dst_entry *curr_dst = NULL;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct dst_entry *dst;

		skb_list_del_init(skb);
		/* if ingress device is enslaved to an L3 master device pass the
		 * skb to its handler for processing
		 */
		skb = l3mdev_ip_rcv(skb);
		if (!skb)
			continue;
		if (ip_rcv_finish_core(net, sk, skb, dev, hint) == NET_RX_DROP)
			continue;

		dst = skb_dst(skb);
		if (curr_dst != dst) {
			hint = ip_extract_route_hint(net, skb,
						     ((struct rtable *)dst)->rt_type);

			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv_finish(&sublist);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dst = dst;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	ip_sublist_rcv_finish(&sublist);
}
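
/* Run the PRE_ROUTING netfilter hooks once over the whole sublist; entries
 * the hooks drop or steal fall off the list, and the survivors are then
 * routed and delivered in one batch by ip_list_rcv_finish().
 */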
static void ip_sublist_rcv(struct list_head *head, struct net_device *dev,
			   struct net *net)
{
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     head, dev, NULL, ip_rcv_finish);
	ip_list_rcv_finish(net, NULL, head);
}

/* Receive a list of IP packets */
void ip_list_rcv(struct list_head *head, struct packet_type *pt,
		 struct net_device *orig_dev)
{
	struct net_device *curr_dev = NULL;
	struct net *curr_net = NULL;
	struct sk_buff *skb, *next;
	struct list_head sublist;

	INIT_LIST_HEAD(&sublist);
	list_for_each_entry_safe(skb, next, head, list) {
		struct net_device *dev = skb->dev;
		struct net *net = dev_net(dev);

		skb_list_del_init(skb);
		skb = ip_rcv_core(skb, net);
		if (skb == NULL)
			continue;

		if (curr_dev != dev || curr_net != net) {
			/* dispatch old sublist */
			if (!list_empty(&sublist))
				ip_sublist_rcv(&sublist, curr_dev, curr_net);
			/* start new sublist */
			INIT_LIST_HEAD(&sublist);
			curr_dev = dev;
			curr_net = net;
		}
		list_add_tail(&skb->list, &sublist);
	}
	/* dispatch final sublist */
	if (!list_empty(&sublist))
		ip_sublist_rcv(&sublist, curr_dev, curr_net);
}