/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the IP module.
 *
 * Version:	@(#)ip.h	1.0.2	05/07/93
 *
 * Changes:
 *		Mike McLagan	:	Routing by source
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _IP_H
#define _IP_H
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/skbuff.h>

#include <net/inet_sock.h>
#include <net/route.h>
#include <net/snmp.h>
#include <net/flow.h>
#include <net/flow_dissector.h>
struct inet_skb_parm {
        int                     iif;
        struct ip_options       opt;            /* Compiled IP options */
        unsigned char           flags;

#define IPSKB_FORWARDED         BIT(0)
#define IPSKB_XFRM_TUNNEL_SIZE  BIT(1)
#define IPSKB_XFRM_TRANSFORMED  BIT(2)
#define IPSKB_FRAG_COMPLETE     BIT(3)
#define IPSKB_REROUTED          BIT(4)
#define IPSKB_DOREDIRECT        BIT(5)
#define IPSKB_FRAG_PMTU         BIT(6)
#define IPSKB_FRAG_SEGS         BIT(7)

        u16                     frag_max_size;
};
static inline unsigned int ip_hdrlen(const struct sk_buff *skb)
{
        return ip_hdr(skb)->ihl * 4;
}
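
/*
 * Illustrative note (not part of the original header): ihl counts
 * 32-bit words, so ip_hdrlen() yields 20 bytes for an option-less
 * header and up to 60 bytes with options.  A minimal sketch of
 * locating the transport header of a linear skb:
 *
 *	struct tcphdr *th = (struct tcphdr *)
 *		(skb_network_header(skb) + ip_hdrlen(skb));
 */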
struct ipcm_cookie {
        struct sockcm_cookie    sockc;
        __be32                  addr;
        int                     oif;
        struct ip_options_rcu   *opt;
        __u8                    tx_flags;
        __u8                    ttl;
        __s16                   tos;
        char                    priority;
};
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
struct ip_ra_chain {
        struct ip_ra_chain __rcu *next;
        struct sock             *sk;
        union {
                void                    (*destructor)(struct sock *);
                struct sock             *saved_sk;
        };
        struct rcu_head         rcu;
};
extern struct ip_ra_chain __rcu *ip_ra_chain;
#define IP_CE           0x8000          /* Flag: "Congestion"           */
#define IP_DF           0x4000          /* Flag: "Don't Fragment"       */
#define IP_MF           0x2000          /* Flag: "More Fragments"       */
#define IP_OFFSET       0x1FFF          /* "Fragment Offset" part       */

#define IP_FRAG_TIME    (30 * HZ)       /* fragment lifetime            */
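
/*
 * Illustrative sketch (not part of the original header): frag_off
 * holds the flag bits plus the 13-bit fragment offset in network
 * byte order, and the offset counts 8-byte units:
 *
 *	unsigned int frag_bytes = (ntohs(iph->frag_off) & IP_OFFSET) * 8;
 *	bool more_frags = (iph->frag_off & htons(IP_MF)) != 0;
 */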
struct msghdr;
struct net_device;
struct packet_type;
struct rtable;
struct sockaddr;

int igmp_mc_init(void);
/*
 *	Functions provided by ip.c
 */
int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
                          __be32 saddr, __be32 daddr,
                          struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
           struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_do_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
                   int (*output)(struct net *, struct sock *, struct sk_buff *));
void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);

int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
                   int getfrag(void *from, char *to, int offset, int len,
                               int odd, struct sk_buff *skb),
                   void *from, int len, int protolen,
                   struct ipcm_cookie *ipc,
                   struct rtable **rt,
                   unsigned int flags);
int ip_generic_getfrag(void *from, char *to, int offset, int len, int odd,
                       struct sk_buff *skb);
ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
                       int offset, size_t size, int flags);
struct sk_buff *__ip_make_skb(struct sock *sk, struct flowi4 *fl4,
                              struct sk_buff_head *queue,
                              struct inet_cork *cork);
int ip_send_skb(struct net *net, struct sk_buff *skb);
int ip_push_pending_frames(struct sock *sk, struct flowi4 *fl4);
void ip_flush_pending_frames(struct sock *sk);
struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
                            int getfrag(void *from, char *to, int offset,
                                        int len, int odd, struct sk_buff *skb),
                            void *from, int length, int transhdrlen,
                            struct ipcm_cookie *ipc, struct rtable **rtp,
                            unsigned int flags);
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
        return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
static inline __u8 get_rttos(struct ipcm_cookie* ipc, struct inet_sock *inet)
{
        return (ipc->tos != -1) ? RT_TOS(ipc->tos) : RT_TOS(inet->tos);
}

static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
{
        return (ipc->tos != -1) ? RT_CONN_FLAGS_TOS(sk, ipc->tos) : RT_CONN_FLAGS(sk);
}
int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);

void ip4_datagram_release_cb(struct sock *sk);
struct ip_reply_arg {
        struct kvec iov[1];
        int         flags;
        __wsum      csum;
        int         csumoffset; /* u16 offset of csum in iov[0].iov_base */
                                /* -1 if not needed */
        int         bound_dev_if;
        u8          tos;
};
#define IP_REPLY_ARG_NOSRCCHECK 1

static inline __u8 ip_reply_arg_flowi_flags(const struct ip_reply_arg *arg)
{
        return (arg->flags & IP_REPLY_ARG_NOSRCCHECK) ? FLOWI_FLAG_ANYSRC : 0;
}
void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb,
                           const struct ip_options *sopt,
                           __be32 daddr, __be32 saddr,
                           const struct ip_reply_arg *arg,
                           unsigned int len);
#define IP_INC_STATS(net, field)	SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define __IP_INC_STATS(net, field)	__SNMP_INC_STATS64((net)->mib.ip_statistics, field)
#define IP_ADD_STATS(net, field, val)	SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_ADD_STATS(net, field, val)	__SNMP_ADD_STATS64((net)->mib.ip_statistics, field, val)
#define IP_UPD_PO_STATS(net, field, val) SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define __IP_UPD_PO_STATS(net, field, val) __SNMP_UPD_PO_STATS64((net)->mib.ip_statistics, field, val)
#define NET_INC_STATS(net, field)	SNMP_INC_STATS((net)->mib.net_statistics, field)
#define __NET_INC_STATS(net, field)	__SNMP_INC_STATS((net)->mib.net_statistics, field)
#define NET_ADD_STATS(net, field, adnd)	SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
#define __NET_ADD_STATS(net, field, adnd) __SNMP_ADD_STATS((net)->mib.net_statistics, field, adnd)
u64 snmp_get_cpu_field(void __percpu *mib, int cpu, int offct);
unsigned long snmp_fold_field(void __percpu *mib, int offt);
#if BITS_PER_LONG==32
u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
                         size_t syncp_offset);
u64 snmp_fold_field64(void __percpu *mib, int offt, size_t sync_off);
#else
static inline u64 snmp_get_cpu_field64(void __percpu *mib, int cpu, int offct,
                                       size_t syncp_offset)
{
        return snmp_get_cpu_field(mib, cpu, offct);
}

static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_off)
{
        return snmp_fold_field(mib, offt);
}
#endif
#define snmp_get_cpu_field64_batch(buff64, stats_list, mib_statistic, offset) \
{ \
        int i, c; \
        for_each_possible_cpu(c) { \
                for (i = 0; stats_list[i].name; i++) \
                        buff64[i] += snmp_get_cpu_field64( \
                                        mib_statistic, \
                                        c, stats_list[i].entry, \
                                        offset); \
        } \
}

#define snmp_get_cpu_field_batch(buff, stats_list, mib_statistic) \
{ \
        int i, c; \
        for_each_possible_cpu(c) { \
                for (i = 0; stats_list[i].name; i++) \
                        buff[i] += snmp_get_cpu_field( \
                                        mib_statistic, \
                                        c, stats_list[i].entry); \
        } \
}
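
/*
 * Illustrative usage sketch (not part of the original header),
 * modelled on the /proc/net/snmp seq-file code; snmp4_ipstats_list
 * stands in for a NULL-terminated {name, entry} descriptor table:
 *
 *	u64 buff64[IPSTATS_MIB_MAX] = { 0 };
 *
 *	snmp_get_cpu_field64_batch(buff64, snmp4_ipstats_list,
 *				   net->mib.ip_statistics,
 *				   offsetof(struct ipstats_mib, syncp));
 */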
void inet_get_local_port_range(struct net *net, int *low, int *high);

#ifdef CONFIG_SYSCTL
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
        if (!net->ipv4.sysctl_local_reserved_ports)
                return 0;
        return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}

static inline bool sysctl_dev_name_is_allowed(const char *name)
{
        return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}

#else
static inline int inet_is_local_reserved_port(struct net *net, int port)
{
        return 0;
}
#endif
__be32 inet_current_timestamp(void);
/* From inetpeer.c */
extern int inet_peer_threshold;
extern int inet_peer_minttl;
extern int inet_peer_maxttl;
void ipfrag_init(void);

void ip_static_sysctl_init(void);
#define IP4_REPLY_MARK(net, mark) \
        ((net)->ipv4.sysctl_fwmark_reflect ? (mark) : 0)
static inline bool ip_is_fragment(const struct iphdr *iph)
{
        return (iph->frag_off & htons(IP_MF | IP_OFFSET)) != 0;
}
/* The function in 2.2 was invalid, producing wrong result for
 * check=0xFEFF. It was noticed by Arthur Skawina _year_ ago. --ANK(000625)
 */
static inline
int ip_decrease_ttl(struct iphdr *iph)
{
        u32 check = (__force u32)iph->check;

        check += (__force u32)htons(0x0100);
        iph->check = (__force __sum16)(check + (check >= 0xFFFF));
        return --iph->ttl;
}
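
/*
 * Illustrative note (not part of the original header): decrementing
 * TTL adds 0x0100 back into the ones'-complement header checksum
 * (RFC 1141/1624 incremental update), and "check >= 0xFFFF" folds the
 * end-around carry.  Worked example in big-endian terms for the case
 * called out above: check = 0xFEFF gives 0xFEFF + 0x0100 = 0xFFFF,
 * the condition adds 1, and truncation to 16 bits stores 0x0000.
 */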
static inline
int ip_dont_fragment(const struct sock *sk, const struct dst_entry *dst)
{
        u8 pmtudisc = READ_ONCE(inet_sk(sk)->pmtudisc);

        return pmtudisc == IP_PMTUDISC_DO ||
               (pmtudisc == IP_PMTUDISC_WANT &&
                !(dst_metric_locked(dst, RTAX_MTU)));
}
static inline bool ip_sk_accept_pmtu(const struct sock *sk)
{
        return inet_sk(sk)->pmtudisc != IP_PMTUDISC_INTERFACE &&
               inet_sk(sk)->pmtudisc != IP_PMTUDISC_OMIT;
}

static inline bool ip_sk_use_pmtu(const struct sock *sk)
{
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_PROBE;
}

static inline bool ip_sk_ignore_df(const struct sock *sk)
{
        return inet_sk(sk)->pmtudisc < IP_PMTUDISC_DO ||
               inet_sk(sk)->pmtudisc == IP_PMTUDISC_OMIT;
}
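
/*
 * Illustrative summary (not part of the original header) of how the
 * helpers above read IP_MTU_DISCOVER modes, whose values increase
 * from DONT through WANT, DO, PROBE, INTERFACE to OMIT: DONT and
 * WANT (plus OMIT) may ignore DF, INTERFACE and OMIT refuse learned
 * PMTU values, and DONT/WANT/DO still consult cached path MTU data.
 */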
static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
                                                    bool forwarding)
{
        struct net *net = dev_net(dst->dev);

        if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
            dst_metric_locked(dst, RTAX_MTU) ||
            !forwarding)
                return dst_mtu(dst);

        return min(dst->dev->mtu, IP_MAX_MTU);
}
static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
                                          const struct sk_buff *skb)
{
        if (!sk || !sk_fullsock(sk) || ip_sk_use_pmtu(sk)) {
                bool forwarding = IPCB(skb)->flags & IPSKB_FORWARDED;

                return ip_dst_mtu_maybe_forward(skb_dst(skb), forwarding);
        }

        return min(skb_dst(skb)->dev->mtu, IP_MAX_MTU);
}
u32 ip_idents_reserve(u32 hash, int segs);
void __ip_select_ident(struct net *net, struct iphdr *iph, int segs);
static inline void ip_select_ident_segs(struct net *net, struct sk_buff *skb,
                                        struct sock *sk, int segs)
{
        struct iphdr *iph = ip_hdr(skb);

        if ((iph->frag_off & htons(IP_DF)) && !skb->ignore_df) {
                /* This is only to work around buggy Windows95/2000
                 * VJ compression implementations.  If the ID field
                 * does not change, they drop every other packet in
                 * a TCP stream using header compression.
                 */
                if (sk && inet_sk(sk)->inet_daddr) {
                        iph->id = htons(inet_sk(sk)->inet_id);
                        inet_sk(sk)->inet_id += segs;
                } else {
                        iph->id = 0;
                }
        } else {
                __ip_select_ident(net, iph, segs);
        }
}

static inline void ip_select_ident(struct net *net, struct sk_buff *skb,
                                   struct sock *sk)
{
        ip_select_ident_segs(net, skb, sk, 1);
}
static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
{
        return csum_tcpudp_nofold(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
                                  skb->len, proto, 0);
}
/* copy IPv4 saddr & daddr to flow_keys, possibly using 64bit load/store
 * Equivalent to :	flow->v4addrs.src = iph->saddr;
 *			flow->v4addrs.dst = iph->daddr;
 */
static inline void iph_to_flow_copy_v4addrs(struct flow_keys *flow,
                                            const struct iphdr *iph)
{
        BUILD_BUG_ON(offsetof(typeof(flow->addrs), v4addrs.dst) !=
                     offsetof(typeof(flow->addrs), v4addrs.src) +
                     sizeof(flow->addrs.v4addrs.src));
        memcpy(&flow->addrs.v4addrs, &iph->saddr, sizeof(flow->addrs.v4addrs));
        flow->control.addr_type = FLOW_DISSECTOR_KEY_IPV4_ADDRS;
}
static inline __wsum inet_gro_compute_pseudo(struct sk_buff *skb, int proto)
{
        const struct iphdr *iph = skb_gro_network_header(skb);

        return csum_tcpudp_nofold(iph->saddr, iph->daddr,
                                  skb_gro_len(skb), proto, 0);
}
/*
 *	Map a multicast IP onto multicast MAC for type ethernet.
 */
static inline void ip_eth_mc_map(__be32 naddr, char *buf)
{
        __u32 addr = ntohl(naddr);

        buf[0] = 0x01;                  /* fixed 01:00:5e OUI prefix */
        buf[1] = 0x00;
        buf[2] = 0x5e;
        buf[3] = (addr >> 16) & 0x7F;   /* low 23 bits of group address */
        buf[4] = (addr >> 8) & 0xFF;
        buf[5] = addr & 0xFF;
}
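
/*
 * Illustrative example (not part of the original header): only the
 * low 23 bits of the group address survive, so 224.1.2.3
 * (0xe0010203) maps to 01:00:5e:01:02:03, and 32 IPv4 groups
 * (e.g. 224.1.2.3 and 225.129.2.3) share each multicast MAC.
 */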
/*
 *	Map a multicast IP onto multicast MAC for type IP-over-InfiniBand.
 *	Leave P_Key as 0 to be filled in by driver.
 */
static inline void ip_ib_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
        __u32 addr = ntohl(naddr);
        unsigned char scope = broadcast[5] & 0xF;

        buf[0] = 0;             /* Reserved */
        buf[1] = 0xff;          /* Multicast QPN */
        buf[2] = 0xff;
        buf[3] = 0xff;
        buf[4] = 0xff;
        buf[5] = 0x10 | scope;  /* scope from broadcast address */
        buf[6] = 0x40;          /* IPv4 signature */
        buf[7] = 0x1b;
        buf[8] = broadcast[8];  /* P_Key */
        buf[9] = broadcast[9];
        memset(buf + 10, 0, 6); /* bytes 10-15 of the group ID are zero */
        buf[19] = addr & 0xff;
        addr >>= 8;
        buf[18] = addr & 0xff;
        addr >>= 8;
        buf[17] = addr & 0xff;
        addr >>= 8;
        buf[16] = addr & 0x0f;
}
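
/*
 * Illustrative note (not part of the original header): buf[] is the
 * 20-byte IPoIB hardware address: a reserved byte, the 0xffffff
 * multicast QPN, then the 16-byte multicast GID whose last four
 * bytes carry the low 28 bits of the IPv4 group address.
 */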
static inline void ip_ipgre_mc_map(__be32 naddr, const unsigned char *broadcast, char *buf)
{
        if ((broadcast[0] | broadcast[1] | broadcast[2] | broadcast[3]) != 0)
                memcpy(buf, broadcast, 4);
        else
                memcpy(buf, &naddr, sizeof(naddr));
}
#if IS_ENABLED(CONFIG_IPV6)
#include <linux/ipv6.h>
#endif
static __inline__ void inet_reset_saddr(struct sock *sk)
{
        inet_sk(sk)->inet_rcv_saddr = inet_sk(sk)->inet_saddr = 0;
#if IS_ENABLED(CONFIG_IPV6)
        if (sk->sk_family == PF_INET6) {
                struct ipv6_pinfo *np = inet6_sk(sk);

                memset(&np->saddr, 0, sizeof(np->saddr));
                memset(&sk->sk_v6_rcv_saddr, 0, sizeof(sk->sk_v6_rcv_saddr));
        }
#endif
}
static inline unsigned int ipv4_addr_hash(__be32 ip)
{
        return (__force unsigned int) ip;
}
bool ip_call_ra_chain(struct sk_buff *skb);
/*
 *	Functions provided by ip_fragment.c
 */
enum ip_defrag_users {
        IP_DEFRAG_LOCAL_DELIVER,
        IP_DEFRAG_CALL_RA_CHAIN,
        IP_DEFRAG_CONNTRACK_IN,
        __IP_DEFRAG_CONNTRACK_IN_END    = IP_DEFRAG_CONNTRACK_IN + USHRT_MAX,
        IP_DEFRAG_CONNTRACK_OUT,
        __IP_DEFRAG_CONNTRACK_OUT_END   = IP_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
        IP_DEFRAG_CONNTRACK_BRIDGE_IN,
        __IP_DEFRAG_CONNTRACK_BRIDGE_IN = IP_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
        IP_DEFRAG_VS_IN,
        IP_DEFRAG_VS_OUT,
        IP_DEFRAG_VS_FWD,
        IP_DEFRAG_AF_PACKET,
        IP_DEFRAG_MACVLAN,
};
/* Return true if the value of 'user' is between 'lower_bond'
 * and 'upper_bond' inclusively.
 */
static inline bool ip_defrag_user_in_between(u32 user,
                                             enum ip_defrag_users lower_bond,
                                             enum ip_defrag_users upper_bond)
{
        return user >= lower_bond && user <= upper_bond;
}
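
/*
 * Illustrative usage sketch (not part of the original header): each
 * conntrack enumerator above reserves USHRT_MAX slots for per-zone
 * offsets, so a zone-aware caller can classify a whole range:
 *
 *	if (ip_defrag_user_in_between(user, IP_DEFRAG_CONNTRACK_IN,
 *				      __IP_DEFRAG_CONNTRACK_IN_END))
 *		handle_conntrack_input_defrag(skb);  // hypothetical helper
 */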
int ip_defrag(struct net *net, struct sk_buff *skb, u32 user);
#ifdef CONFIG_INET
struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user);
#else
static inline struct sk_buff *ip_check_defrag(struct net *net, struct sk_buff *skb, u32 user)
{
        return skb;
}
#endif
int ip_frag_mem(struct net *net);
/*
 *	Functions provided by ip_forward.c
 */

int ip_forward(struct sk_buff *skb);
/*
 *	Functions provided by ip_options.c
 */

void ip_options_build(struct sk_buff *skb, struct ip_options *opt,
                      __be32 daddr, struct rtable *rt, int is_frag);

int __ip_options_echo(struct ip_options *dopt, struct sk_buff *skb,
                      const struct ip_options *sopt);
static inline int ip_options_echo(struct ip_options *dopt, struct sk_buff *skb)
{
        return __ip_options_echo(dopt, skb, &IPCB(skb)->opt);
}

void ip_options_fragment(struct sk_buff *skb);
int ip_options_compile(struct net *net, struct ip_options *opt,
                       struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
                   unsigned char *data, int optlen);
int ip_options_get_from_user(struct net *net, struct ip_options_rcu **optp,
                             unsigned char __user *data, int optlen);
void ip_options_undo(struct ip_options *opt);
void ip_forward_options(struct sk_buff *skb);
int ip_options_rcv_srr(struct sk_buff *skb);
/*
 *	Functions provided by ip_sockglue.c
 */

void ipv4_pktinfo_prepare(const struct sock *sk, struct sk_buff *skb);
void ip_cmsg_recv_offset(struct msghdr *msg, struct sk_buff *skb, int offset);
int ip_cmsg_send(struct sock *sk, struct msghdr *msg,
                 struct ipcm_cookie *ipc, bool allow_ipv6);
int ip_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
                  unsigned int optlen);
int ip_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
                  int __user *optlen);
int compat_ip_setsockopt(struct sock *sk, int level, int optname,
                         char __user *optval, unsigned int optlen);
int compat_ip_getsockopt(struct sock *sk, int level, int optname,
                         char __user *optval, int __user *optlen);
int ip_ra_control(struct sock *sk, unsigned char on,
                  void (*destructor)(struct sock *));

int ip_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len);
void ip_icmp_error(struct sock *sk, struct sk_buff *skb, int err, __be16 port,
                   u32 info, u8 *payload);
void ip_local_error(struct sock *sk, int err, __be32 daddr, __be16 dport,
                    u32 info);

static inline void ip_cmsg_recv(struct msghdr *msg, struct sk_buff *skb)
{
        ip_cmsg_recv_offset(msg, skb, 0);
}
bool icmp_global_allow(void);
extern int sysctl_icmp_msgs_per_sec;
extern int sysctl_icmp_msgs_burst;
#ifdef CONFIG_PROC_FS
int ip_misc_proc_init(void);
#endif

#endif	/* _IP_H */