/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 *		IPv4 specific functions
 *
 *		code split from:
 *		linux/ipv4/tcp_input.c
 *		linux/ipv4/tcp_output.c
 *
 *		See tcp.c for author information
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 */

/*
 * Changes:
 *		David S. Miller	:	New socket lookup architecture.
 *					This code is dedicated to John Dyson.
 *		David S. Miller :	Change semantics of established hash,
 *					half is devoted to TIME_WAIT sockets
 *					and the rest go in the other half.
 *		Andi Kleen :		Add support for syncookies and fixed
 *					some bugs: ip options weren't passed to
 *					the TCP layer, missed a check for an
 *					ACK bit.
 *		Andi Kleen :		Implemented fast path mtu discovery.
 *					Fixed many serious bugs in the
 *					request_sock handling and moved
 *					most of it into the af independent code.
 *					Added tail drop and some other bugfixes.
 *					Added new listen semantics.
 *		Mike McLagan	:	Routing by source
 *	Juan Jose Ciarlante :		ip_dynaddr bits
 *		Andi Kleen :		various fixes.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year
 *					coma.
 *	Andi Kleen		:	Fix new listen.
 *	Andi Kleen		:	Fix accept error reporting.
 *	YOSHIFUJI Hideaki @USAGI and:	Support IPV6_V6ONLY socket option, which
 *	Alexey Kuznetsov		allow both IPv4 and IPv6 sockets to bind
 *					a single port at the same time.
 */
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/bottom_half.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/cache.h>
#include <linux/jhash.h>
#include <linux/init.h>
#include <linux/times.h>
#include <linux/slab.h>

#include <net/net_namespace.h>
#include <net/icmp.h>
#include <net/inet_hashtables.h>
#include <net/tcp.h>
#include <net/transp_v6.h>
#include <net/ipv6.h>
#include <net/inet_common.h>
#include <net/timewait_sock.h>
#include <net/xfrm.h>
#include <net/netdma.h>
#include <net/secure_seq.h>
#include <net/tcp_memcontrol.h>

#include <linux/inet.h>
#include <linux/ipv6.h>
#include <linux/stddef.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <linux/crypto.h>
#include <linux/scatterlist.h>
int sysctl_tcp_tw_reuse __read_mostly;
int sysctl_tcp_low_latency __read_mostly;
EXPORT_SYMBOL(sysctl_tcp_low_latency);

#ifdef CONFIG_TCP_MD5SIG
static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
			       __be32 daddr, __be32 saddr, const struct tcphdr *th);
#endif

struct inet_hashinfo tcp_hashinfo;
EXPORT_SYMBOL(tcp_hashinfo);
static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
{
	return secure_tcp_sequence_number(ip_hdr(skb)->daddr, ip_hdr(skb)->saddr,
					  tcp_hdr(skb)->dest, tcp_hdr(skb)->source);
}
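/* A rough sketch of what secure_tcp_sequence_number() computes for the helper
 * above (the real implementation lives in net/core/secure_seq.c):
 *
 *	isn = hash(saddr, daddr, sport, dport, per-boot secret) + clock
 *
 * i.e. an RFC 6528-style initial sequence number: a keyed hash of the 4-tuple
 * plus a slowly advancing clock component, so successive connections on the
 * same 4-tuple get non-overlapping sequence spaces.  This is only an
 * illustration of the shape of the computation, not the exact formula.
 */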
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
{
	const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
	struct tcp_sock *tp = tcp_sk(sk);

	/* With PAWS, it is safe from the viewpoint
	   of data integrity. Even without PAWS it is safe provided sequence
	   spaces do not overlap i.e. at data rates <= 80Mbit/sec.

	   Actually, the idea is close to VJ's one, only timestamp cache is
	   held not per host, but per port pair and TW bucket is used as state
	   holder.

	   If TW bucket has been already destroyed we fall back to VJ's scheme
	   and use initial timestamp retrieved from peer table.
	 */
	if (tcptw->tw_ts_recent_stamp &&
	    (twp == NULL || (sysctl_tcp_tw_reuse &&
			     get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
		tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
		if (tp->write_seq == 0)
			tp->write_seq = 1;
		tp->rx_opt.ts_recent	   = tcptw->tw_ts_recent;
		tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
		sock_hold(sktw);
		return 1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(tcp_twsk_unique);
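/* A worked example of the tw_snd_nxt + 65535 + 2 offset above, as a rough
 * sketch of the rationale: if the old connection left TIME-WAIT with
 * tw_snd_nxt == 1000000, the reused socket starts at write_seq == 1065537,
 * i.e. more than one maximum unscaled window (65535) beyond anything the old
 * incarnation sent.  Stray old segments therefore cannot land inside the new
 * connection's valid sequence window; the PAWS timestamps described in the
 * comment above are what make the reuse safe in the general case.
 */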
/* This will initiate an outgoing connection. */
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
{
	struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
	struct inet_sock *inet = inet_sk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	__be16 orig_sport, orig_dport;
	__be32 daddr, nexthop;
	struct flowi4 *fl4;
	struct rtable *rt;
	int err;
	struct ip_options_rcu *inet_opt;

	if (addr_len < sizeof(struct sockaddr_in))
		return -EINVAL;

	if (usin->sin_family != AF_INET)
		return -EAFNOSUPPORT;

	nexthop = daddr = usin->sin_addr.s_addr;
	inet_opt = rcu_dereference_protected(inet->inet_opt,
					     sock_owned_by_user(sk));
	if (inet_opt && inet_opt->opt.srr) {
		if (!daddr)
			return -EINVAL;
		nexthop = inet_opt->opt.faddr;
	}

	orig_sport = inet->inet_sport;
	orig_dport = usin->sin_port;
	fl4 = &inet->cork.fl.u.ip4;
	rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
			      RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
			      IPPROTO_TCP,
			      orig_sport, orig_dport, sk, true);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		if (err == -ENETUNREACH)
			IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
		return err;
	}

	if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
		ip_rt_put(rt);
		return -ENETUNREACH;
	}

	if (!inet_opt || !inet_opt->opt.srr)
		daddr = fl4->daddr;

	if (!inet->inet_saddr)
		inet->inet_saddr = fl4->saddr;
	inet->inet_rcv_saddr = inet->inet_saddr;

	if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
		/* Reset inherited state */
		tp->rx_opt.ts_recent = 0;
		tp->rx_opt.ts_recent_stamp = 0;
		if (likely(!tp->repair))
			tp->write_seq = 0;
	}

	if (tcp_death_row.sysctl_tw_recycle &&
	    !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
		tcp_fetch_timewait_stamp(sk, &rt->dst);

	inet->inet_dport = usin->sin_port;
	inet->inet_daddr = daddr;

	inet_csk(sk)->icsk_ext_hdr_len = 0;
	if (inet_opt)
		inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;

	tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;

	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and not releasing socket
	 * lock select source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
	tcp_set_state(sk, TCP_SYN_SENT);
	err = inet_hash_connect(&tcp_death_row, sk);
	if (err)
		goto failure;

	rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
			       inet->inet_sport, inet->inet_dport, sk);
	if (IS_ERR(rt)) {
		err = PTR_ERR(rt);
		rt = NULL;
		goto failure;
	}
	/* OK, now commit destination to socket. */
	sk->sk_gso_type = SKB_GSO_TCPV4;
	sk_setup_caps(sk, &rt->dst);

	if (!tp->write_seq && likely(!tp->repair))
		tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
							   inet->inet_daddr,
							   inet->inet_sport,
							   usin->sin_port);

	inet->inet_id = tp->write_seq ^ jiffies;

	err = tcp_connect(sk);

	rt = NULL;
	if (err)
		goto failure;

	return 0;

failure:
	/*
	 * This unhashes the socket and releases the local port,
	 * if necessary.
	 */
	tcp_set_state(sk, TCP_CLOSE);
	ip_rt_put(rt);
	sk->sk_route_caps = 0;
	inet->inet_dport = 0;
	return err;
}
EXPORT_SYMBOL(tcp_v4_connect);
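/* For reference, a minimal (hypothetical) user-space caller that reaches the
 * function above through connect(2) on an AF_INET stream socket; not part of
 * the kernel build, values are illustrative:
 *
 *	#include <arpa/inet.h>
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct sockaddr_in dst = { .sin_family = AF_INET,
 *					   .sin_port   = htons(80) };
 *		int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *		inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);
 *		connect(fd, (struct sockaddr *)&dst, sizeof(dst));
 *		close(fd);
 *		return 0;
 *	}
 */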
/*
 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
 * It can be called through tcp_release_cb() if socket was owned by user
 * at the time tcp_v4_err() was called to handle ICMP message.
 */
static void tcp_v4_mtu_reduced(struct sock *sk)
{
	struct dst_entry *dst;
	struct inet_sock *inet = inet_sk(sk);
	u32 mtu = tcp_sk(sk)->mtu_info;

	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always <576bytes so they should go through
	 * unfragmented).
	 */
	if (sk->sk_state == TCP_LISTEN)
		return;

	dst = inet_csk_update_pmtu(sk, mtu);
	if (!dst)
		return;

	/* Something is about to be wrong... Remember soft error
	 * for the case, if this connection will not able to recover.
	 */
	if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
		sk->sk_err_soft = EMSGSIZE;

	mtu = dst_mtu(dst);

	if (inet->pmtudisc != IP_PMTUDISC_DONT &&
	    inet_csk(sk)->icsk_pmtu_cookie > mtu) {
		tcp_sync_mss(sk, mtu);

		/* Resend the TCP packet because it's
		 * clear that the old packet has been
		 * dropped. This is the new "fast" path mtu
		 * discovery.
		 */
		tcp_simple_retransmit(sk);
	} /* else let the usual retransmit timer handle it */
}
static void do_redirect(struct sk_buff *skb, struct sock *sk)
{
	struct dst_entry *dst = __sk_dst_check(sk, 0);
	if (dst)
		dst->ops->redirect(dst, sk, skb);
}
/*
 * This routine is called by the ICMP module when it gets some
 * sort of error condition.  If err < 0 then the socket should
 * be closed and the error returned to the user.  If err > 0
 * it's just the icmp type << 8 | icmp code.  After adjustment
 * header points to the first 8 bytes of the tcp header.  We need
 * to find the appropriate port.
 *
 * The locking strategy used here is very "optimistic". When
 * someone else accesses the socket the ICMP is just dropped
 * and for some paths there is no check at all.
 * A more general error queue to queue errors for later handling
 * is probably better.
 */
333 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
335 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
336 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
337 struct inet_connection_sock *icsk;
339 struct inet_sock *inet;
340 const int type = icmp_hdr(icmp_skb)->type;
341 const int code = icmp_hdr(icmp_skb)->code;
344 struct request_sock *req;
348 struct net *net = dev_net(icmp_skb->dev);
350 if (icmp_skb->len < (iph->ihl << 2) + 8) {
351 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
355 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
356 iph->saddr, th->source, inet_iif(icmp_skb));
358 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
361 if (sk->sk_state == TCP_TIME_WAIT) {
362 inet_twsk_put(inet_twsk(sk));
367 /* If too many ICMPs get dropped on busy
368 * servers this needs to be solved differently.
369 * We do take care of PMTU discovery (RFC1191) special case :
370 * we can receive locally generated ICMP messages while socket is held.
372 if (sock_owned_by_user(sk) &&
373 type != ICMP_DEST_UNREACH &&
374 code != ICMP_FRAG_NEEDED)
375 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
377 if (sk->sk_state == TCP_CLOSE)
380 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
381 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
387 req = tp->fastopen_rsk;
388 seq = ntohl(th->seq);
389 if (sk->sk_state != TCP_LISTEN &&
390 !between(seq, tp->snd_una, tp->snd_nxt) &&
391 (req == NULL || seq != tcp_rsk(req)->snt_isn)) {
392 /* For a Fast Open socket, allow seq to be snt_isn. */
393 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
399 do_redirect(icmp_skb, sk);
401 case ICMP_SOURCE_QUENCH:
402 /* Just silently ignore these. */
404 case ICMP_PARAMETERPROB:
407 case ICMP_DEST_UNREACH:
408 if (code > NR_ICMP_UNREACH)
411 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
413 if (!sock_owned_by_user(sk)) {
414 tcp_v4_mtu_reduced(sk);
416 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
422 err = icmp_err_convert[code].errno;
423 /* check if icmp_skb allows revert of backoff
424 * (see draft-zimmermann-tcp-lcd) */
425 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
427 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
431 /* XXX (TFO) - revisit the following logic for TFO */
433 if (sock_owned_by_user(sk))
436 icsk->icsk_backoff--;
437 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
438 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
441 skb = tcp_write_queue_head(sk);
444 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
445 tcp_time_stamp - TCP_SKB_CB(skb)->when);
448 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
449 remaining, TCP_RTO_MAX);
451 /* RTO revert clocked out retransmission.
452 * Will retransmit now */
453 tcp_retransmit_timer(sk);
457 case ICMP_TIME_EXCEEDED:
464 /* XXX (TFO) - if it's a TFO socket and has been accepted, rather
465 * than following the TCP_SYN_RECV case and closing the socket,
466 * we ignore the ICMP error and keep trying like a fully established
467 * socket. Is this the right thing to do?
469 if (req && req->sk == NULL)
472 switch (sk->sk_state) {
473 struct request_sock *req, **prev;
475 if (sock_owned_by_user(sk))
478 req = inet_csk_search_req(sk, &prev, th->dest,
479 iph->daddr, iph->saddr);
483 /* ICMPs are not backlogged, hence we cannot get
484 an established socket here.
488 if (seq != tcp_rsk(req)->snt_isn) {
489 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
494 * Still in SYN_RECV, just remove it silently.
495 * There is no good way to pass the error to the newly
496 * created socket, and POSIX does not want network
497 * errors returned from accept().
499 inet_csk_reqsk_queue_drop(sk, req, prev);
503 case TCP_SYN_RECV: /* Cannot happen.
504 It can f.e. if SYNs crossed,
507 if (!sock_owned_by_user(sk)) {
510 sk->sk_error_report(sk);
514 sk->sk_err_soft = err;
	/* If we've already connected we will keep trying
	 * until we time out, or the user gives up.
	 *
	 * rfc1122 4.2.3.9 allows us to consider as hard errors
	 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
	 * but it is obsoleted by pmtu discovery).
	 *
	 * Note that in the modern internet, where routing is unreliable
	 * and broken firewalls sit in every dark corner sending random
	 * errors ordered by their masters, even these two messages finally lose
	 * their original sense (even Linux sends invalid PORT_UNREACHs).
	 *
	 * Now we are in compliance with RFCs.
	 */
536 if (!sock_owned_by_user(sk) && inet->recverr) {
538 sk->sk_error_report(sk);
539 } else { /* Only an error on timeout */
540 sk->sk_err_soft = err;
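/* How the errors recorded above surface to user space (a sketch, not part of
 * this file): a hard error stored in sk->sk_err is reported on the next
 * socket call; for a non-blocking connect() it is typically fetched with:
 *
 *	int err = 0;
 *	socklen_t len = sizeof(err);
 *	getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
 *	// err is now e.g. ECONNREFUSED or EHOSTUNREACH
 *
 * sk_err_soft, by contrast, is only reported if the connection eventually
 * times out, matching the "Only an error on timeout" branch above.
 */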
static void __tcp_v4_send_check(struct sk_buff *skb,
				__be32 saddr, __be32 daddr)
{
	struct tcphdr *th = tcp_hdr(skb);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
		skb->csum_start = skb_transport_header(skb) - skb->head;
		skb->csum_offset = offsetof(struct tcphdr, check);
	} else {
		th->check = tcp_v4_check(skb->len, saddr, daddr,
					 csum_partial(th, th->doff << 2,
						      skb->csum));
	}
}

/* This routine computes an IPv4 TCP checksum. */
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
{
	const struct inet_sock *inet = inet_sk(sk);

	__tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
}
EXPORT_SYMBOL(tcp_v4_send_check);
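/* For reference, the IPv4 pseudo-header that tcp_v4_check() folds into the
 * checksum (and that struct tcp4_pseudohdr, used by the MD5 code below,
 * mirrors) is, per RFC 793:
 *
 *	struct {
 *		__be32	saddr;		// source IPv4 address
 *		__be32	daddr;		// destination IPv4 address
 *		__u8	zero;		// always 0
 *		__u8	protocol;	// IPPROTO_TCP (6)
 *		__be16	len;		// TCP header + payload length
 *	};
 *
 * In the CHECKSUM_PARTIAL branch above only this pseudo-header is summed;
 * the device (or skb_checksum_help()) finishes the rest starting at
 * csum_start/csum_offset.
 */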
int tcp_v4_gso_send_check(struct sk_buff *skb)
{
	const struct iphdr *iph;
	struct tcphdr *th;

	if (!pskb_may_pull(skb, sizeof(*th)))
		return -EINVAL;

	iph = ip_hdr(skb);
	th = tcp_hdr(skb);
	th->check = 0;
	skb->ip_summed = CHECKSUM_PARTIAL;
	__tcp_v4_send_check(skb, iph->saddr, iph->daddr);
	return 0;
}
/*
 *	This routine will send an RST to the other tcp.
 *
 *	Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
 *		      for reset.
 *	Answer: if a packet caused RST, it is not for a socket
 *		existing in our system, if it is matched to a socket,
 *		it is just duplicate segment or bug in other side's TCP.
 *	So we build the reply based only on the parameters
 *	that arrived with the segment.
 *	Exception: precedence violation. We do not implement it in any case.
 */
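/* The seq/ack choice in tcp_v4_send_reset() below follows RFC 793 reset
 * generation; roughly:
 *
 *	if (incoming segment has ACK set)
 *		rst.seq = incoming.ack_seq;		// no ACK bit on the RST
 *	else
 *		rst.ack_seq = incoming.seq + SYN + FIN + payload_len;
 *							// RST carries the ACK bit
 *
 * e.g. a stray SYN with seq 5000 and no payload gets an RST with ack_seq 5001
 * and the ACK bit set.
 */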
604 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
606 const struct tcphdr *th = tcp_hdr(skb);
609 #ifdef CONFIG_TCP_MD5SIG
610 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
613 struct ip_reply_arg arg;
614 #ifdef CONFIG_TCP_MD5SIG
615 struct tcp_md5sig_key *key;
616 const __u8 *hash_location = NULL;
617 unsigned char newhash[16];
619 struct sock *sk1 = NULL;
623 /* Never send a reset in response to a reset. */
627 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
630 /* Swap the send and the receive. */
631 memset(&rep, 0, sizeof(rep));
632 rep.th.dest = th->source;
633 rep.th.source = th->dest;
634 rep.th.doff = sizeof(struct tcphdr) / 4;
638 rep.th.seq = th->ack_seq;
641 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
642 skb->len - (th->doff << 2));
645 memset(&arg, 0, sizeof(arg));
646 arg.iov[0].iov_base = (unsigned char *)&rep;
647 arg.iov[0].iov_len = sizeof(rep.th);
649 #ifdef CONFIG_TCP_MD5SIG
650 hash_location = tcp_parse_md5sig_option(th);
651 if (!sk && hash_location) {
		/*
		 * active side is lost. Try to find listening socket through
		 * source port, and then find md5 key through listening socket.
		 * We are not losing security here:
		 * the incoming packet is checked with the md5 hash of the found key;
		 * no RST is generated if the md5 hash doesn't match.
		 */
659 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
660 &tcp_hashinfo, ip_hdr(skb)->daddr,
661 ntohs(th->source), inet_iif(skb));
662 /* don't send rst if it can't find key */
666 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
667 &ip_hdr(skb)->saddr, AF_INET);
671 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
672 if (genhash || memcmp(hash_location, newhash, 16) != 0)
675 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
681 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
683 (TCPOPT_MD5SIG << 8) |
685 /* Update length and the length the header thinks exists */
686 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
687 rep.th.doff = arg.iov[0].iov_len / 4;
689 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
690 key, ip_hdr(skb)->saddr,
691 ip_hdr(skb)->daddr, &rep.th);
694 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
695 ip_hdr(skb)->saddr, /* XXX */
696 arg.iov[0].iov_len, IPPROTO_TCP, 0);
697 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
698 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
699 /* When socket is gone, all binding information is lost.
700 * routing might fail in this case. No choice here, if we choose to force
701 * input interface, we will misroute in case of asymmetric route.
704 arg.bound_dev_if = sk->sk_bound_dev_if;
706 net = dev_net(skb_dst(skb)->dev);
707 arg.tos = ip_hdr(skb)->tos;
708 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
709 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
711 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
712 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
714 #ifdef CONFIG_TCP_MD5SIG
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside socket context, is ugly, certainly. What can I do?
 */
727 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
728 u32 win, u32 ts, int oif,
729 struct tcp_md5sig_key *key,
730 int reply_flags, u8 tos)
732 const struct tcphdr *th = tcp_hdr(skb);
735 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
736 #ifdef CONFIG_TCP_MD5SIG
737 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
741 struct ip_reply_arg arg;
742 struct net *net = dev_net(skb_dst(skb)->dev);
744 memset(&rep.th, 0, sizeof(struct tcphdr));
745 memset(&arg, 0, sizeof(arg));
747 arg.iov[0].iov_base = (unsigned char *)&rep;
748 arg.iov[0].iov_len = sizeof(rep.th);
750 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
751 (TCPOPT_TIMESTAMP << 8) |
753 rep.opt[1] = htonl(tcp_time_stamp);
754 rep.opt[2] = htonl(ts);
755 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
758 /* Swap the send and the receive. */
759 rep.th.dest = th->source;
760 rep.th.source = th->dest;
761 rep.th.doff = arg.iov[0].iov_len / 4;
762 rep.th.seq = htonl(seq);
763 rep.th.ack_seq = htonl(ack);
765 rep.th.window = htons(win);
767 #ifdef CONFIG_TCP_MD5SIG
769 int offset = (ts) ? 3 : 0;
771 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
773 (TCPOPT_MD5SIG << 8) |
775 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
776 rep.th.doff = arg.iov[0].iov_len/4;
778 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
779 key, ip_hdr(skb)->saddr,
780 ip_hdr(skb)->daddr, &rep.th);
783 arg.flags = reply_flags;
784 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
785 ip_hdr(skb)->saddr, /* XXX */
786 arg.iov[0].iov_len, IPPROTO_TCP, 0);
787 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
789 arg.bound_dev_if = oif;
791 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
792 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
794 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
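/* Byte layout of the option words built above, for reference: with
 * TCPOPT_NOP == 1, TCPOPT_TIMESTAMP == 8 and TCPOLEN_TIMESTAMP == 10,
 * rep.opt[0] encodes the bytes 01 01 08 0a, i.e. two NOP pads followed by a
 * 10-byte timestamp option whose TSval and TSecr live in opt[1] and opt[2].
 * The MD5 block, when present, is prefixed the same way: 01 01 13 12
 * (TCPOPT_MD5SIG == 19, TCPOLEN_MD5SIG == 18), followed by the 16-byte
 * digest.  Each addition bumps rep.th.doff by the aligned option length / 4.
 */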
797 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
799 struct inet_timewait_sock *tw = inet_twsk(sk);
800 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
802 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
803 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
806 tcp_twsk_md5_key(tcptw),
807 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
814 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
815 struct request_sock *req)
817 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
818 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
820 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
821 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
822 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
825 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
827 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
/*
 *	Send a SYN-ACK after having received a SYN.
 *	This still operates on a request_sock only, not on a big
 *	socket.
 */
836 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
837 struct request_sock *req,
838 struct request_values *rvp,
842 const struct inet_request_sock *ireq = inet_rsk(req);
845 struct sk_buff * skb;
847 /* First, grab a route. */
848 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
851 skb = tcp_make_synack(sk, dst, req, rvp, NULL);
854 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
856 skb_set_queue_mapping(skb, queue_mapping);
857 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
860 err = net_xmit_eval(err);
861 if (!tcp_rsk(req)->snt_synack && !err)
862 tcp_rsk(req)->snt_synack = tcp_time_stamp;
868 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
869 struct request_values *rvp)
871 int res = tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
874 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
879 * IPv4 request_sock destructor.
881 static void tcp_v4_reqsk_destructor(struct request_sock *req)
883 kfree(inet_rsk(req)->opt);
887 * Return true if a syncookie should be sent
889 bool tcp_syn_flood_action(struct sock *sk,
890 const struct sk_buff *skb,
893 const char *msg = "Dropping request";
894 bool want_cookie = false;
895 struct listen_sock *lopt;
899 #ifdef CONFIG_SYN_COOKIES
900 if (sysctl_tcp_syncookies) {
901 msg = "Sending cookies";
903 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
906 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
908 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
909 if (!lopt->synflood_warned) {
910 lopt->synflood_warned = 1;
911 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
912 proto, ntohs(tcp_hdr(skb)->dest), msg);
916 EXPORT_SYMBOL(tcp_syn_flood_action);
919 * Save and compile IPv4 options into the request_sock if needed.
921 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
923 const struct ip_options *opt = &(IPCB(skb)->opt);
924 struct ip_options_rcu *dopt = NULL;
926 if (opt && opt->optlen) {
927 int opt_size = sizeof(*dopt) + opt->optlen;
929 dopt = kmalloc(opt_size, GFP_ATOMIC);
931 if (ip_options_echo(&dopt->opt, skb)) {
940 #ifdef CONFIG_TCP_MD5SIG
942 * RFC2385 MD5 checksumming requires a mapping of
943 * IP address->MD5 Key.
944 * We need to maintain these in the sk structure.
947 /* Find the Key structure for an address. */
948 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
949 const union tcp_md5_addr *addr,
952 struct tcp_sock *tp = tcp_sk(sk);
953 struct tcp_md5sig_key *key;
954 struct hlist_node *pos;
955 unsigned int size = sizeof(struct in_addr);
956 struct tcp_md5sig_info *md5sig;
958 /* caller either holds rcu_read_lock() or socket lock */
959 md5sig = rcu_dereference_check(tp->md5sig_info,
960 sock_owned_by_user(sk) ||
961 lockdep_is_held(&sk->sk_lock.slock));
964 #if IS_ENABLED(CONFIG_IPV6)
965 if (family == AF_INET6)
966 size = sizeof(struct in6_addr);
968 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
969 if (key->family != family)
971 if (!memcmp(&key->addr, addr, size))
976 EXPORT_SYMBOL(tcp_md5_do_lookup);
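/* A sketch of how these per-peer keys are installed from user space (the
 * setsockopt path is handled further down by tcp_v4_parse_md5_keys());
 * values are purely illustrative:
 *
 *	struct tcp_md5sig md5 = { .tcpm_keylen = 6 };
 *	struct sockaddr_in *peer = (struct sockaddr_in *)&md5.tcpm_addr;
 *
 *	peer->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.2", &peer->sin_addr);
 *	memcpy(md5.tcpm_key, "secret", 6);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *
 * Passing tcpm_keylen == 0 deletes the key, which is the tcp_md5_do_del()
 * path below.
 */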
978 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
979 struct sock *addr_sk)
981 union tcp_md5_addr *addr;
983 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
984 return tcp_md5_do_lookup(sk, addr, AF_INET);
986 EXPORT_SYMBOL(tcp_v4_md5_lookup);
988 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
989 struct request_sock *req)
991 union tcp_md5_addr *addr;
993 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
994 return tcp_md5_do_lookup(sk, addr, AF_INET);
997 /* This can be called on a newly created socket, from other files */
998 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
999 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
1001 /* Add Key to the list */
1002 struct tcp_md5sig_key *key;
1003 struct tcp_sock *tp = tcp_sk(sk);
1004 struct tcp_md5sig_info *md5sig;
1006 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1008 /* Pre-existing entry - just update that one. */
1009 memcpy(key->key, newkey, newkeylen);
1010 key->keylen = newkeylen;
1014 md5sig = rcu_dereference_protected(tp->md5sig_info,
1015 sock_owned_by_user(sk));
1017 md5sig = kmalloc(sizeof(*md5sig), gfp);
1021 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1022 INIT_HLIST_HEAD(&md5sig->head);
1023 rcu_assign_pointer(tp->md5sig_info, md5sig);
1026 key = sock_kmalloc(sk, sizeof(*key), gfp);
1029 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1030 sock_kfree_s(sk, key, sizeof(*key));
1034 memcpy(key->key, newkey, newkeylen);
1035 key->keylen = newkeylen;
1036 key->family = family;
1037 memcpy(&key->addr, addr,
1038 (family == AF_INET6) ? sizeof(struct in6_addr) :
1039 sizeof(struct in_addr));
1040 hlist_add_head_rcu(&key->node, &md5sig->head);
1043 EXPORT_SYMBOL(tcp_md5_do_add);
1045 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1047 struct tcp_sock *tp = tcp_sk(sk);
1048 struct tcp_md5sig_key *key;
1049 struct tcp_md5sig_info *md5sig;
1051 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1054 hlist_del_rcu(&key->node);
1055 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1056 kfree_rcu(key, rcu);
1057 md5sig = rcu_dereference_protected(tp->md5sig_info,
1058 sock_owned_by_user(sk));
1059 if (hlist_empty(&md5sig->head))
1060 tcp_free_md5sig_pool();
1063 EXPORT_SYMBOL(tcp_md5_do_del);
1065 static void tcp_clear_md5_list(struct sock *sk)
1067 struct tcp_sock *tp = tcp_sk(sk);
1068 struct tcp_md5sig_key *key;
1069 struct hlist_node *pos, *n;
1070 struct tcp_md5sig_info *md5sig;
1072 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1074 if (!hlist_empty(&md5sig->head))
1075 tcp_free_md5sig_pool();
1076 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1077 hlist_del_rcu(&key->node);
1078 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1079 kfree_rcu(key, rcu);
1083 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1086 struct tcp_md5sig cmd;
1087 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1089 if (optlen < sizeof(cmd))
1092 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1095 if (sin->sin_family != AF_INET)
1098 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1099 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1102 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1105 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1106 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1110 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1111 __be32 daddr, __be32 saddr, int nbytes)
1113 struct tcp4_pseudohdr *bp;
1114 struct scatterlist sg;
	bp = &hp->md5_blk.ip4;
	/* 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
	bp->saddr = saddr;
	bp->daddr = daddr;
	bp->pad = 0;
	bp->protocol = IPPROTO_TCP;
	bp->len = cpu_to_be16(nbytes);
1129 sg_init_one(&sg, bp, sizeof(*bp));
1130 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1133 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1134 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1136 struct tcp_md5sig_pool *hp;
1137 struct hash_desc *desc;
1139 hp = tcp_get_md5sig_pool();
1141 goto clear_hash_noput;
1142 desc = &hp->md5_desc;
1144 if (crypto_hash_init(desc))
1146 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1148 if (tcp_md5_hash_header(hp, th))
1150 if (tcp_md5_hash_key(hp, key))
1152 if (crypto_hash_final(desc, md5_hash))
1155 tcp_put_md5sig_pool();
1159 tcp_put_md5sig_pool();
1161 memset(md5_hash, 0, 16);
1165 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1166 const struct sock *sk, const struct request_sock *req,
1167 const struct sk_buff *skb)
1169 struct tcp_md5sig_pool *hp;
1170 struct hash_desc *desc;
1171 const struct tcphdr *th = tcp_hdr(skb);
1172 __be32 saddr, daddr;
1175 saddr = inet_sk(sk)->inet_saddr;
1176 daddr = inet_sk(sk)->inet_daddr;
1178 saddr = inet_rsk(req)->loc_addr;
1179 daddr = inet_rsk(req)->rmt_addr;
1181 const struct iphdr *iph = ip_hdr(skb);
1186 hp = tcp_get_md5sig_pool();
1188 goto clear_hash_noput;
1189 desc = &hp->md5_desc;
1191 if (crypto_hash_init(desc))
1194 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1196 if (tcp_md5_hash_header(hp, th))
1198 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1200 if (tcp_md5_hash_key(hp, key))
1202 if (crypto_hash_final(desc, md5_hash))
1205 tcp_put_md5sig_pool();
1209 tcp_put_md5sig_pool();
1211 memset(md5_hash, 0, 16);
1214 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
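/* The two hash routines above feed MD5 in the order RFC 2385 prescribes:
 * 1. the TCP pseudo-header (tcp_v4_md5_hash_pseudoheader),
 * 2. the fixed TCP header with its checksum field zeroed (tcp_md5_hash_header),
 * 3. the TCP segment data, if any (tcp_md5_hash_skb_data),
 * 4. the connection's key itself (tcp_md5_hash_key).
 * tcp_v4_md5_hash_hdr() is the header-only variant used for the RSTs and ACKs
 * this file generates without an attached data skb.
 */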
1216 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
	/*
	 * This gets called for each TCP segment that arrives
	 * so we want to be efficient.
	 * We have 3 drop cases:
	 * o No MD5 hash and one expected.
	 * o MD5 hash and we're not expecting one.
	 * o MD5 hash and it's wrong.
	 */
1226 const __u8 *hash_location = NULL;
1227 struct tcp_md5sig_key *hash_expected;
1228 const struct iphdr *iph = ip_hdr(skb);
1229 const struct tcphdr *th = tcp_hdr(skb);
1231 unsigned char newhash[16];
1233 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1235 hash_location = tcp_parse_md5sig_option(th);
1237 /* We've parsed the options - do we have a hash? */
1238 if (!hash_expected && !hash_location)
1241 if (hash_expected && !hash_location) {
1242 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1246 if (!hash_expected && hash_location) {
1247 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1251 /* Okay, so this is hash_expected and hash_location -
1252 * so we need to calculate the checksum.
1254 genhash = tcp_v4_md5_hash_skb(newhash,
1258 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1259 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1260 &iph->saddr, ntohs(th->source),
1261 &iph->daddr, ntohs(th->dest),
1262 genhash ? " tcp_v4_calc_md5_hash failed"
1271 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1273 .obj_size = sizeof(struct tcp_request_sock),
1274 .rtx_syn_ack = tcp_v4_rtx_synack,
1275 .send_ack = tcp_v4_reqsk_send_ack,
1276 .destructor = tcp_v4_reqsk_destructor,
1277 .send_reset = tcp_v4_send_reset,
1278 .syn_ack_timeout = tcp_syn_ack_timeout,
1281 #ifdef CONFIG_TCP_MD5SIG
1282 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1283 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1284 .calc_md5_hash = tcp_v4_md5_hash_skb,
1288 static bool tcp_fastopen_check(struct sock *sk, struct sk_buff *skb,
1289 struct request_sock *req,
1290 struct tcp_fastopen_cookie *foc,
1291 struct tcp_fastopen_cookie *valid_foc)
1293 bool skip_cookie = false;
1294 struct fastopen_queue *fastopenq;
1296 if (likely(!fastopen_cookie_present(foc))) {
1297 /* See include/net/tcp.h for the meaning of these knobs */
1298 if ((sysctl_tcp_fastopen & TFO_SERVER_ALWAYS) ||
1299 ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_REQD) &&
1300 (TCP_SKB_CB(skb)->end_seq != TCP_SKB_CB(skb)->seq + 1)))
1301 skip_cookie = true; /* no cookie to validate */
1305 fastopenq = inet_csk(sk)->icsk_accept_queue.fastopenq;
1306 /* A FO option is present; bump the counter. */
1307 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPFASTOPENPASSIVE);
	/* Make sure the listener has enabled fastopen, and we don't
	 * exceed the max # of pending TFO requests allowed before trying
	 * to validate the cookie, in order to avoid burning CPU cycles
	 * unnecessarily.
	 *
	 * XXX (TFO) - The implication of checking the max_qlen before
	 * processing a cookie request is that clients can't differentiate
	 * between qlen overflow causing Fast Open to be disabled
	 * temporarily vs a server not supporting Fast Open at all.
	 */
1319 if ((sysctl_tcp_fastopen & TFO_SERVER_ENABLE) == 0 ||
1320 fastopenq == NULL || fastopenq->max_qlen == 0)
1323 if (fastopenq->qlen >= fastopenq->max_qlen) {
1324 struct request_sock *req1;
1325 spin_lock(&fastopenq->lock);
1326 req1 = fastopenq->rskq_rst_head;
1327 if ((req1 == NULL) || time_after(req1->expires, jiffies)) {
1328 spin_unlock(&fastopenq->lock);
1329 NET_INC_STATS_BH(sock_net(sk),
1330 LINUX_MIB_TCPFASTOPENLISTENOVERFLOW);
1331 /* Avoid bumping LINUX_MIB_TCPFASTOPENPASSIVEFAIL*/
1335 fastopenq->rskq_rst_head = req1->dl_next;
1337 spin_unlock(&fastopenq->lock);
1341 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1344 if (foc->len == TCP_FASTOPEN_COOKIE_SIZE) {
1345 if ((sysctl_tcp_fastopen & TFO_SERVER_COOKIE_NOT_CHKED) == 0) {
1346 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1347 if ((valid_foc->len != TCP_FASTOPEN_COOKIE_SIZE) ||
1348 memcmp(&foc->val[0], &valid_foc->val[0],
1349 TCP_FASTOPEN_COOKIE_SIZE) != 0)
1351 valid_foc->len = -1;
1353 /* Acknowledge the data received from the peer. */
1354 tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1356 } else if (foc->len == 0) { /* Client requesting a cookie */
1357 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1358 NET_INC_STATS_BH(sock_net(sk),
1359 LINUX_MIB_TCPFASTOPENCOOKIEREQD);
1361 /* Client sent a cookie with wrong size. Treat it
1362 * the same as invalid and return a valid one.
1364 tcp_fastopen_cookie_gen(ip_hdr(skb)->saddr, valid_foc);
1369 static int tcp_v4_conn_req_fastopen(struct sock *sk,
1370 struct sk_buff *skb,
1371 struct sk_buff *skb_synack,
1372 struct request_sock *req,
1373 struct request_values *rvp)
1375 struct tcp_sock *tp = tcp_sk(sk);
1376 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
1377 const struct inet_request_sock *ireq = inet_rsk(req);
1381 req->num_retrans = 0;
1382 req->num_timeout = 0;
1385 child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
1386 if (child == NULL) {
1387 NET_INC_STATS_BH(sock_net(sk),
1388 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1389 kfree_skb(skb_synack);
1392 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1393 ireq->rmt_addr, ireq->opt);
1394 err = net_xmit_eval(err);
1396 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1397 /* XXX (TFO) - is it ok to ignore error and continue? */
1399 spin_lock(&queue->fastopenq->lock);
1400 queue->fastopenq->qlen++;
1401 spin_unlock(&queue->fastopenq->lock);
1403 /* Initialize the child socket. Have to fix some values to take
1404 * into account the child is a Fast Open socket and is created
1405 * only out of the bits carried in the SYN packet.
1409 tp->fastopen_rsk = req;
	/* Do a hold on the listener sk so that if the listener is being
	 * closed, the child that has been accepted can live on and still
	 * access listen_lock.
	 */
1415 tcp_rsk(req)->listener = sk;
1417 /* RFC1323: The window in SYN & SYN/ACK segments is never
1418 * scaled. So correct it appropriately.
1420 tp->snd_wnd = ntohs(tcp_hdr(skb)->window);
1422 /* Activate the retrans timer so that SYNACK can be retransmitted.
1423 * The request socket is not added to the SYN table of the parent
1424 * because it's been added to the accept queue directly.
1426 inet_csk_reset_xmit_timer(child, ICSK_TIME_RETRANS,
1427 TCP_TIMEOUT_INIT, TCP_RTO_MAX);
1429 /* Add the child socket directly into the accept queue */
1430 inet_csk_reqsk_queue_add(sk, req, child);
1432 /* Now finish processing the fastopen child socket. */
1433 inet_csk(child)->icsk_af_ops->rebuild_header(child);
1434 tcp_init_congestion_control(child);
1435 tcp_mtup_init(child);
1436 tcp_init_buffer_space(child);
1437 tcp_init_metrics(child);
1439 /* Queue the data carried in the SYN packet. We need to first
1440 * bump skb's refcnt because the caller will attempt to free it.
1442 * XXX (TFO) - we honor a zero-payload TFO request for now.
1443 * (Any reason not to?)
1445 if (TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq + 1) {
1446 /* Don't queue the skb if there is no payload in SYN.
1447 * XXX (TFO) - How about SYN+FIN?
1449 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1453 __skb_pull(skb, tcp_hdr(skb)->doff * 4);
1454 skb_set_owner_r(skb, child);
1455 __skb_queue_tail(&child->sk_receive_queue, skb);
1456 tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
1457 tp->syn_data_acked = 1;
1459 sk->sk_data_ready(sk, 0);
1460 bh_unlock_sock(child);
1462 WARN_ON(req->sk == NULL);
1466 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1468 struct tcp_extend_values tmp_ext;
1469 struct tcp_options_received tmp_opt;
1470 const u8 *hash_location;
1471 struct request_sock *req;
1472 struct inet_request_sock *ireq;
1473 struct tcp_sock *tp = tcp_sk(sk);
1474 struct dst_entry *dst = NULL;
1475 __be32 saddr = ip_hdr(skb)->saddr;
1476 __be32 daddr = ip_hdr(skb)->daddr;
1477 __u32 isn = TCP_SKB_CB(skb)->when;
1478 bool want_cookie = false;
1480 struct tcp_fastopen_cookie foc = { .len = -1 };
1481 struct tcp_fastopen_cookie valid_foc = { .len = -1 };
1482 struct sk_buff *skb_synack;
1485 /* Never answer to SYNs send to broadcast or multicast */
1486 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1489 /* TW buckets are converted to open requests without
1490 * limitations, they conserve resources and peer is
1491 * evidently real one.
1493 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1494 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1499 /* Accept backlog is full. If we have already queued enough
1500 * of warm entries in syn queue, drop request. It is better than
1501 * clogging syn queue with openreqs with exponentially increasing
1504 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1507 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1511 #ifdef CONFIG_TCP_MD5SIG
1512 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1515 tcp_clear_options(&tmp_opt);
1516 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1517 tmp_opt.user_mss = tp->rx_opt.user_mss;
1518 tcp_parse_options(skb, &tmp_opt, &hash_location, 0,
1519 want_cookie ? NULL : &foc);
1521 if (tmp_opt.cookie_plus > 0 &&
1522 tmp_opt.saw_tstamp &&
1523 !tp->rx_opt.cookie_out_never &&
1524 (sysctl_tcp_cookie_size > 0 ||
1525 (tp->cookie_values != NULL &&
1526 tp->cookie_values->cookie_desired > 0))) {
1528 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1529 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1531 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1532 goto drop_and_release;
1534 /* Secret recipe starts with IP addresses */
1535 *mess++ ^= (__force u32)daddr;
1536 *mess++ ^= (__force u32)saddr;
1538 /* plus variable length Initiator Cookie */
1541 *c++ ^= *hash_location++;
1543 want_cookie = false; /* not our kind of cookie */
1544 tmp_ext.cookie_out_never = 0; /* false */
1545 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1546 } else if (!tp->rx_opt.cookie_in_always) {
1547 /* redundant indications, but ensure initialization. */
1548 tmp_ext.cookie_out_never = 1; /* true */
1549 tmp_ext.cookie_plus = 0;
1551 goto drop_and_release;
1553 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1555 if (want_cookie && !tmp_opt.saw_tstamp)
1556 tcp_clear_options(&tmp_opt);
1558 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1559 tcp_openreq_init(req, &tmp_opt, skb);
1561 ireq = inet_rsk(req);
1562 ireq->loc_addr = daddr;
1563 ireq->rmt_addr = saddr;
1564 ireq->no_srccheck = inet_sk(sk)->transparent;
1565 ireq->opt = tcp_v4_save_options(skb);
1567 if (security_inet_conn_request(sk, skb, req))
1570 if (!want_cookie || tmp_opt.tstamp_ok)
1571 TCP_ECN_create_request(req, skb, sock_net(sk));
1574 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1575 req->cookie_ts = tmp_opt.tstamp_ok;
1577 /* VJ's idea. We save last timestamp seen
1578 * from the destination in peer table, when entering
1579 * state TIME-WAIT, and check against it before
1580 * accepting new connection request.
1582 * If "isn" is not zero, this request hit alive
1583 * timewait bucket, so that all the necessary checks
1584 * are made in the function processing timewait state.
1586 if (tmp_opt.saw_tstamp &&
1587 tcp_death_row.sysctl_tw_recycle &&
1588 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1589 fl4.daddr == saddr) {
1590 if (!tcp_peer_is_proven(req, dst, true)) {
1591 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1592 goto drop_and_release;
1595 /* Kill the following clause, if you dislike this way. */
1596 else if (!sysctl_tcp_syncookies &&
1597 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1598 (sysctl_max_syn_backlog >> 2)) &&
1599 !tcp_peer_is_proven(req, dst, false)) {
1600 /* Without syncookies last quarter of
1601 * backlog is filled with destinations,
1602 * proven to be alive.
1603 * It means that we continue to communicate
1604 * to destinations, already remembered
1605 * to the moment of synflood.
1607 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1608 &saddr, ntohs(tcp_hdr(skb)->source));
1609 goto drop_and_release;
1612 isn = tcp_v4_init_sequence(skb);
1614 tcp_rsk(req)->snt_isn = isn;
1617 dst = inet_csk_route_req(sk, &fl4, req);
1621 do_fastopen = tcp_fastopen_check(sk, skb, req, &foc, &valid_foc);
1623 /* We don't call tcp_v4_send_synack() directly because we need
1624 * to make sure a child socket can be created successfully before
1625 * sending back synack!
1627 * XXX (TFO) - Ideally one would simply call tcp_v4_send_synack()
1628 * (or better yet, call tcp_send_synack() in the child context
1629 * directly, but will have to fix bunch of other code first)
1630 * after syn_recv_sock() except one will need to first fix the
1631 * latter to remove its dependency on the current implementation
1632 * of tcp_v4_send_synack()->tcp_select_initial_window().
1634 skb_synack = tcp_make_synack(sk, dst, req,
1635 (struct request_values *)&tmp_ext,
1636 fastopen_cookie_present(&valid_foc) ? &valid_foc : NULL);
1639 __tcp_v4_send_check(skb_synack, ireq->loc_addr, ireq->rmt_addr);
1640 skb_set_queue_mapping(skb_synack, skb_get_queue_mapping(skb));
1644 if (likely(!do_fastopen)) {
1646 err = ip_build_and_send_pkt(skb_synack, sk, ireq->loc_addr,
1647 ireq->rmt_addr, ireq->opt);
1648 err = net_xmit_eval(err);
1649 if (err || want_cookie)
1652 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1653 tcp_rsk(req)->listener = NULL;
1654 /* Add the request_sock to the SYN table */
1655 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1656 if (fastopen_cookie_present(&foc) && foc.len != 0)
1657 NET_INC_STATS_BH(sock_net(sk),
1658 LINUX_MIB_TCPFASTOPENPASSIVEFAIL);
1659 } else if (tcp_v4_conn_req_fastopen(sk, skb, skb_synack, req,
1660 (struct request_values *)&tmp_ext))
1672 EXPORT_SYMBOL(tcp_v4_conn_request);
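/* How the Fast Open paths above are exercised from user space (a sketch;
 * values are illustrative).  Server side, the listener opts in with the
 * TCP_FASTOPEN socket option, whose value becomes fastopenq->max_qlen:
 *
 *	int qlen = 16;
 *	setsockopt(listen_fd, IPPROTO_TCP, TCP_FASTOPEN, &qlen, sizeof(qlen));
 *
 * Client side, data can be carried on the SYN with:
 *
 *	sendto(fd, buf, len, MSG_FASTOPEN,
 *	       (struct sockaddr *)&dst, sizeof(dst));
 *
 * and the TFO_CLIENT_ENABLE/TFO_SERVER_ENABLE flag bits consulted above live
 * in /proc/sys/net/ipv4/tcp_fastopen.
 */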
1676 * The three way handshake has completed - we got a valid synack -
1677 * now create the new socket.
1679 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1680 struct request_sock *req,
1681 struct dst_entry *dst)
1683 struct inet_request_sock *ireq;
1684 struct inet_sock *newinet;
1685 struct tcp_sock *newtp;
1687 #ifdef CONFIG_TCP_MD5SIG
1688 struct tcp_md5sig_key *key;
1690 struct ip_options_rcu *inet_opt;
1692 if (sk_acceptq_is_full(sk))
1695 newsk = tcp_create_openreq_child(sk, req, skb);
1699 newsk->sk_gso_type = SKB_GSO_TCPV4;
1700 inet_sk_rx_dst_set(newsk, skb);
1702 newtp = tcp_sk(newsk);
1703 newinet = inet_sk(newsk);
1704 ireq = inet_rsk(req);
1705 newinet->inet_daddr = ireq->rmt_addr;
1706 newinet->inet_rcv_saddr = ireq->loc_addr;
1707 newinet->inet_saddr = ireq->loc_addr;
1708 inet_opt = ireq->opt;
1709 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1711 newinet->mc_index = inet_iif(skb);
1712 newinet->mc_ttl = ip_hdr(skb)->ttl;
1713 newinet->rcv_tos = ip_hdr(skb)->tos;
1714 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1716 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1717 newinet->inet_id = newtp->write_seq ^ jiffies;
1720 dst = inet_csk_route_child_sock(sk, newsk, req);
1724 /* syncookie case : see end of cookie_v4_check() */
1726 sk_setup_caps(newsk, dst);
1728 tcp_mtup_init(newsk);
1729 tcp_sync_mss(newsk, dst_mtu(dst));
1730 newtp->advmss = dst_metric_advmss(dst);
1731 if (tcp_sk(sk)->rx_opt.user_mss &&
1732 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1733 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1735 tcp_initialize_rcv_mss(newsk);
1736 tcp_synack_rtt_meas(newsk, req);
1737 newtp->total_retrans = req->num_retrans;
1739 #ifdef CONFIG_TCP_MD5SIG
1740 /* Copy over the MD5 key from the original socket */
1741 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1745 * We're using one, so create a matching key
1746 * on the newsk structure. If we fail to get
1747 * memory, then we end up not copying the key
1750 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1751 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1752 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1756 if (__inet_inherit_port(sk, newsk) < 0)
1758 __inet_hash_nolisten(newsk, NULL);
1763 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1767 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1770 inet_csk_prepare_forced_close(newsk);
1774 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1776 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1778 struct tcphdr *th = tcp_hdr(skb);
1779 const struct iphdr *iph = ip_hdr(skb);
1781 struct request_sock **prev;
1782 /* Find possible connection requests. */
1783 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1784 iph->saddr, iph->daddr);
1786 return tcp_check_req(sk, skb, req, prev, false);
1788 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1789 th->source, iph->daddr, th->dest, inet_iif(skb));
1792 if (nsk->sk_state != TCP_TIME_WAIT) {
1796 inet_twsk_put(inet_twsk(nsk));
1800 #ifdef CONFIG_SYN_COOKIES
1802 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1807 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1809 const struct iphdr *iph = ip_hdr(skb);
1811 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1812 if (!tcp_v4_check(skb->len, iph->saddr,
1813 iph->daddr, skb->csum)) {
1814 skb->ip_summed = CHECKSUM_UNNECESSARY;
1819 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1820 skb->len, IPPROTO_TCP, 0);
1822 if (skb->len <= 76) {
1823 return __skb_checksum_complete(skb);
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1837 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1840 #ifdef CONFIG_TCP_MD5SIG
1842 * We really want to reject the packet as early as possible
1844 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1845 * o There is an MD5 option and we're not expecting one
1847 if (tcp_v4_inbound_md5_hash(sk, skb))
1851 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1852 struct dst_entry *dst = sk->sk_rx_dst;
1854 sock_rps_save_rxhash(sk, skb);
1856 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1857 dst->ops->check(dst, 0) == NULL) {
1859 sk->sk_rx_dst = NULL;
1862 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1869 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1872 if (sk->sk_state == TCP_LISTEN) {
1873 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1878 sock_rps_save_rxhash(nsk, skb);
1879 if (tcp_child_process(sk, nsk, skb)) {
1886 sock_rps_save_rxhash(sk, skb);
1888 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1895 tcp_v4_send_reset(rsk, skb);
1898 /* Be careful here. If this function gets more complicated and
1899 * gcc suffers from register pressure on the x86, sk (in %ebx)
1900 * might be destroyed here. This current version compiles correctly,
1901 * but you have been warned.
1906 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1909 EXPORT_SYMBOL(tcp_v4_do_rcv);
1911 void tcp_v4_early_demux(struct sk_buff *skb)
1913 const struct iphdr *iph;
1914 const struct tcphdr *th;
1917 if (skb->pkt_type != PACKET_HOST)
1920 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1926 if (th->doff < sizeof(struct tcphdr) / 4)
1929 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1930 iph->saddr, th->source,
1931 iph->daddr, ntohs(th->dest),
1935 skb->destructor = sock_edemux;
1936 if (sk->sk_state != TCP_TIME_WAIT) {
1937 struct dst_entry *dst = sk->sk_rx_dst;
1940 dst = dst_check(dst, 0);
1942 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1943 skb_dst_set_noref(skb, dst);
1952 int tcp_v4_rcv(struct sk_buff *skb)
1954 const struct iphdr *iph;
1955 const struct tcphdr *th;
1958 struct net *net = dev_net(skb->dev);
1960 if (skb->pkt_type != PACKET_HOST)
1963 /* Count it even if it's bad */
1964 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1966 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1971 if (th->doff < sizeof(struct tcphdr) / 4)
1973 if (!pskb_may_pull(skb, th->doff * 4))
1976 /* An explanation is required here, I think.
1977 * Packet length and doff are validated by header prediction,
1978 * provided case of th->doff==0 is eliminated.
1979 * So, we defer the checks. */
1980 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1985 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1986 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1987 skb->len - th->doff * 4);
1988 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1989 TCP_SKB_CB(skb)->when = 0;
1990 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1991 TCP_SKB_CB(skb)->sacked = 0;
1993 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1998 if (sk->sk_state == TCP_TIME_WAIT)
2001 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
2002 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
2003 goto discard_and_relse;
2006 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
2007 goto discard_and_relse;
2010 if (sk_filter(sk, skb))
2011 goto discard_and_relse;
2015 bh_lock_sock_nested(sk);
2017 if (!sock_owned_by_user(sk)) {
2018 #ifdef CONFIG_NET_DMA
2019 struct tcp_sock *tp = tcp_sk(sk);
2020 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
2021 tp->ucopy.dma_chan = net_dma_find_channel();
2022 if (tp->ucopy.dma_chan)
2023 ret = tcp_v4_do_rcv(sk, skb);
2027 if (!tcp_prequeue(sk, skb))
2028 ret = tcp_v4_do_rcv(sk, skb);
2030 } else if (unlikely(sk_add_backlog(sk, skb,
2031 sk->sk_rcvbuf + sk->sk_sndbuf))) {
2033 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
2034 goto discard_and_relse;
2043 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
2046 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2048 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2050 tcp_v4_send_reset(NULL, skb);
2054 /* Discard frame. */
2063 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
2064 inet_twsk_put(inet_twsk(sk));
2068 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
2069 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
2070 inet_twsk_put(inet_twsk(sk));
2073 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
2075 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
2077 iph->daddr, th->dest,
2080 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
2081 inet_twsk_put(inet_twsk(sk));
2085 /* Fall through to ACK */
2088 tcp_v4_timewait_ack(sk, skb);
2092 case TCP_TW_SUCCESS:;
2097 static struct timewait_sock_ops tcp_timewait_sock_ops = {
2098 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
2099 .twsk_unique = tcp_twsk_unique,
2100 .twsk_destructor= tcp_twsk_destructor,
2103 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
2105 struct dst_entry *dst = skb_dst(skb);
2108 sk->sk_rx_dst = dst;
2109 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
2111 EXPORT_SYMBOL(inet_sk_rx_dst_set);
2113 const struct inet_connection_sock_af_ops ipv4_specific = {
2114 .queue_xmit = ip_queue_xmit,
2115 .send_check = tcp_v4_send_check,
2116 .rebuild_header = inet_sk_rebuild_header,
2117 .sk_rx_dst_set = inet_sk_rx_dst_set,
2118 .conn_request = tcp_v4_conn_request,
2119 .syn_recv_sock = tcp_v4_syn_recv_sock,
2120 .net_header_len = sizeof(struct iphdr),
2121 .setsockopt = ip_setsockopt,
2122 .getsockopt = ip_getsockopt,
2123 .addr2sockaddr = inet_csk_addr2sockaddr,
2124 .sockaddr_len = sizeof(struct sockaddr_in),
2125 .bind_conflict = inet_csk_bind_conflict,
2126 #ifdef CONFIG_COMPAT
2127 .compat_setsockopt = compat_ip_setsockopt,
2128 .compat_getsockopt = compat_ip_getsockopt,
2131 EXPORT_SYMBOL(ipv4_specific);
2133 #ifdef CONFIG_TCP_MD5SIG
2134 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
2135 .md5_lookup = tcp_v4_md5_lookup,
2136 .calc_md5_hash = tcp_v4_md5_hash_skb,
2137 .md5_parse = tcp_v4_parse_md5_keys,
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
2144 static int tcp_v4_init_sock(struct sock *sk)
2146 struct inet_connection_sock *icsk = inet_csk(sk);
2150 icsk->icsk_af_ops = &ipv4_specific;
2152 #ifdef CONFIG_TCP_MD5SIG
2153 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
2159 void tcp_v4_destroy_sock(struct sock *sk)
2161 struct tcp_sock *tp = tcp_sk(sk);
2163 tcp_clear_xmit_timers(sk);
2165 tcp_cleanup_congestion_control(sk);
	/* Clean up the write buffer. */
2168 tcp_write_queue_purge(sk);
2170 /* Cleans up our, hopefully empty, out_of_order_queue. */
2171 __skb_queue_purge(&tp->out_of_order_queue);
2173 #ifdef CONFIG_TCP_MD5SIG
2174 /* Clean up the MD5 key list, if any */
2175 if (tp->md5sig_info) {
2176 tcp_clear_md5_list(sk);
2177 kfree_rcu(tp->md5sig_info, rcu);
2178 tp->md5sig_info = NULL;
2182 #ifdef CONFIG_NET_DMA
2183 /* Cleans up our sk_async_wait_queue */
2184 __skb_queue_purge(&sk->sk_async_wait_queue);
2187 /* Clean prequeue, it must be empty really */
2188 __skb_queue_purge(&tp->ucopy.prequeue);
2190 /* Clean up a referenced TCP bind bucket. */
2191 if (inet_csk(sk)->icsk_bind_hash)
2194 /* TCP Cookie Transactions */
2195 if (tp->cookie_values != NULL) {
2196 kref_put(&tp->cookie_values->kref,
2197 tcp_cookie_values_release);
2198 tp->cookie_values = NULL;
2200 BUG_ON(tp->fastopen_rsk != NULL);
2202 /* If socket is aborted during connect operation */
2203 tcp_free_fastopen_req(tp);
2205 sk_sockets_allocated_dec(sk);
2206 sock_release_memcg(sk);
2208 EXPORT_SYMBOL(tcp_v4_destroy_sock);
2210 #ifdef CONFIG_PROC_FS
2211 /* Proc filesystem TCP sock list dumping. */
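/* The iterators below back /proc/net/tcp (and its IPv6 sibling).  Each line
 * of that file describes one socket, roughly of the form:
 *
 *	sl  local_address rem_address   st tx_queue rx_queue ...
 *	 0: 0100007F:0016 00000000:0000 0A 00000000:00000000 ...
 *
 * with addresses and ports printed in hex and "st" holding the TCP state
 * (0A == TCP_LISTEN in this illustration).  The exact columns are produced
 * by get_tcp4_sock() later in this file; the listing above is only a rough
 * sketch, not the authoritative format.
 */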
2213 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
2215 return hlist_nulls_empty(head) ? NULL :
2216 list_entry(head->first, struct inet_timewait_sock, tw_node);
2219 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
2221 return !is_a_nulls(tw->tw_node.next) ?
2222 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
 * Get next listener socket following cur.  If cur is NULL, get the first socket
 * starting from the bucket given in st->bucket; when st->bucket is zero the
 * very first socket in the hash table is returned.
2230 static void *listening_get_next(struct seq_file *seq, void *cur)
2232 struct inet_connection_sock *icsk;
2233 struct hlist_nulls_node *node;
2234 struct sock *sk = cur;
2235 struct inet_listen_hashbucket *ilb;
2236 struct tcp_iter_state *st = seq->private;
2237 struct net *net = seq_file_net(seq);
2240 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2241 spin_lock_bh(&ilb->lock);
2242 sk = sk_nulls_head(&ilb->head);
2246 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2250 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2251 struct request_sock *req = cur;
2253 icsk = inet_csk(st->syn_wait_sk);
2257 if (req->rsk_ops->family == st->family) {
2263 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2266 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2268 sk = sk_nulls_next(st->syn_wait_sk);
2269 st->state = TCP_SEQ_STATE_LISTENING;
2270 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2272 icsk = inet_csk(sk);
2273 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2274 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2276 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2277 sk = sk_nulls_next(sk);
2280 sk_nulls_for_each_from(sk, node) {
2281 if (!net_eq(sock_net(sk), net))
2283 if (sk->sk_family == st->family) {
2287 icsk = inet_csk(sk);
2288 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2289 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2291 st->uid = sock_i_uid(sk);
2292 st->syn_wait_sk = sk;
2293 st->state = TCP_SEQ_STATE_OPENREQ;
2297 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2299 spin_unlock_bh(&ilb->lock);
2301 if (++st->bucket < INET_LHTABLE_SIZE) {
2302 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2303 spin_lock_bh(&ilb->lock);
2304 sk = sk_nulls_head(&ilb->head);
static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	st->offset = 0;
	rc = listening_get_next(seq, NULL);

	while (rc && *pos) {
		rc = listening_get_next(seq, rc);
		--*pos;
	}
	return rc;
}

static inline bool empty_bucket(struct tcp_iter_state *st)
{
	return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
		hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
}

/*
 * Get first established socket starting from bucket given in st->bucket.
 * If st->bucket is zero, the very first socket in the hash is returned.
 */
static void *established_get_first(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);
	void *rc = NULL;

	st->offset = 0;
	for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
		struct sock *sk;
		struct hlist_nulls_node *node;
		struct inet_timewait_sock *tw;
		spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);

		/* Lockless fast path for the common case of empty buckets */
		if (empty_bucket(st))
			continue;

		spin_lock_bh(lock);
		sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
			if (sk->sk_family != st->family ||
			    !net_eq(sock_net(sk), net)) {
				continue;
			}
			rc = sk;
			goto out;
		}
		st->state = TCP_SEQ_STATE_TIME_WAIT;
		inet_twsk_for_each(tw, node,
				   &tcp_hashinfo.ehash[st->bucket].twchain) {
			if (tw->tw_family != st->family ||
			    !net_eq(twsk_net(tw), net)) {
				continue;
			}
			rc = tw;
			goto out;
		}
		spin_unlock_bh(lock);
		st->state = TCP_SEQ_STATE_ESTABLISHED;
	}
out:
	return rc;
}
static void *established_get_next(struct seq_file *seq, void *cur)
{
	struct sock *sk = cur;
	struct inet_timewait_sock *tw;
	struct hlist_nulls_node *node;
	struct tcp_iter_state *st = seq->private;
	struct net *net = seq_file_net(seq);

	++st->num;
	++st->offset;

	if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
		tw = tw_next((struct inet_timewait_sock *)cur);
get_tw:
		while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
			tw = tw_next(tw);
		}
		if (tw) {
			cur = tw;
			goto out;
		}
		spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		st->state = TCP_SEQ_STATE_ESTABLISHED;

		/* Look for next non empty bucket */
		st->offset = 0;
		while (++st->bucket <= tcp_hashinfo.ehash_mask &&
				empty_bucket(st))
			;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			return NULL;

		spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
	} else
		sk = sk_nulls_next(sk);

	sk_nulls_for_each_from(sk, node) {
		if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
			goto found;
	}

	st->state = TCP_SEQ_STATE_TIME_WAIT;
	tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
	goto get_tw;
found:
	cur = sk;
out:
	return cur;
}

static void *established_get_idx(struct seq_file *seq, loff_t pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	st->bucket = 0;
	rc = established_get_first(seq);

	while (rc && pos) {
		rc = established_get_next(seq, rc);
		--pos;
	}
	return rc;
}

static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
{
	void *rc;
	struct tcp_iter_state *st = seq->private;

	st->state = TCP_SEQ_STATE_LISTENING;
	rc = listening_get_idx(seq, &pos);

	if (!rc) {
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		rc = established_get_idx(seq, pos);
	}

	return rc;
}
static void *tcp_seek_last_pos(struct seq_file *seq)
{
	struct tcp_iter_state *st = seq->private;
	int offset = st->offset;
	int orig_num = st->num;
	void *rc = NULL;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		if (st->bucket >= INET_LHTABLE_SIZE)
			break;
		st->state = TCP_SEQ_STATE_LISTENING;
		rc = listening_get_next(seq, NULL);
		while (offset-- && rc)
			rc = listening_get_next(seq, rc);
		if (rc)
			break;
		st->bucket = 0;
		/* Fallthrough */
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		st->state = TCP_SEQ_STATE_ESTABLISHED;
		if (st->bucket > tcp_hashinfo.ehash_mask)
			break;
		rc = established_get_first(seq);
		while (offset-- && rc)
			rc = established_get_next(seq, rc);
	}

	st->num = orig_num;

	return rc;
}
static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc;

	if (*pos && *pos == st->last_pos) {
		rc = tcp_seek_last_pos(seq);
		if (rc)
			goto out;
	}

	st->state = TCP_SEQ_STATE_LISTENING;
	st->num = 0;
	st->bucket = 0;
	st->offset = 0;
	rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;

out:
	st->last_pos = *pos;
	return rc;
}

static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct tcp_iter_state *st = seq->private;
	void *rc = NULL;

	if (v == SEQ_START_TOKEN) {
		rc = tcp_get_idx(seq, 0);
		goto out;
	}

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
	case TCP_SEQ_STATE_LISTENING:
		rc = listening_get_next(seq, v);
		if (!rc) {
			st->state = TCP_SEQ_STATE_ESTABLISHED;
			st->bucket = 0;
			st->offset = 0;
			rc = established_get_first(seq);
		}
		break;
	case TCP_SEQ_STATE_ESTABLISHED:
	case TCP_SEQ_STATE_TIME_WAIT:
		rc = established_get_next(seq, v);
		break;
	}
out:
	++*pos;
	st->last_pos = *pos;
	return rc;
}
static void tcp_seq_stop(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_OPENREQ:
		if (v) {
			struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
			read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
		}
	case TCP_SEQ_STATE_LISTENING:
		if (v != SEQ_START_TOKEN)
			spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
	case TCP_SEQ_STATE_ESTABLISHED:
		if (v)
			spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
		break;
	}
}
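
/*
 * Editorial sketch, not part of the original file: a minimal seq_file user
 * showing the same start/next/stop/show contract that tcp_seq_start(),
 * tcp_seq_next(), tcp_seq_stop() and tcp4_seq_show() implement for the
 * TCP socket tables above.  All demo_* names are hypothetical and the
 * block is guarded out so it is never compiled.
 */
#if 0	/* illustrative example only */
#include <linux/kernel.h>
#include <linux/seq_file.h>

static const char *demo_items[] = { "alpha", "beta", "gamma" };

static void *demo_start(struct seq_file *seq, loff_t *pos)
{
	/* return the record at *pos, or NULL when the walk is done */
	return (*pos < ARRAY_SIZE(demo_items)) ? (void *)&demo_items[*pos] : NULL;
}

static void *demo_next(struct seq_file *seq, void *v, loff_t *pos)
{
	/* advance the position and hand back the next record */
	++*pos;
	return (*pos < ARRAY_SIZE(demo_items)) ? (void *)&demo_items[*pos] : NULL;
}

static void demo_stop(struct seq_file *seq, void *v)
{
	/* nothing to unlock in this toy example; the TCP code drops
	 * hash-bucket locks here instead */
}

static int demo_show(struct seq_file *seq, void *v)
{
	seq_printf(seq, "%s\n", *(const char **)v);
	return 0;
}

static const struct seq_operations demo_seq_ops = {
	.start = demo_start,
	.next  = demo_next,
	.stop  = demo_stop,
	.show  = demo_show,
};
#endif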
int tcp_seq_open(struct inode *inode, struct file *file)
{
	struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
	struct tcp_iter_state *s;
	int err;

	err = seq_open_net(inode, file, &afinfo->seq_ops,
			   sizeof(struct tcp_iter_state));
	if (err < 0)
		return err;

	s = ((struct seq_file *)file->private_data)->private;
	s->family	= afinfo->family;
	s->last_pos	= 0;
	return 0;
}
EXPORT_SYMBOL(tcp_seq_open);

int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	int rc = 0;
	struct proc_dir_entry *p;

	afinfo->seq_ops.start	= tcp_seq_start;
	afinfo->seq_ops.next	= tcp_seq_next;
	afinfo->seq_ops.stop	= tcp_seq_stop;

	p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
			     afinfo->seq_fops, afinfo);
	if (!p)
		rc = -ENOMEM;
	return rc;
}
EXPORT_SYMBOL(tcp_proc_register);

void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
{
	proc_net_remove(net, afinfo->name);
}
EXPORT_SYMBOL(tcp_proc_unregister);
static void get_openreq4(const struct sock *sk, const struct request_sock *req,
			 struct seq_file *f, int i, kuid_t uid, int *len)
{
	const struct inet_request_sock *ireq = inet_rsk(req);
	long delta = req->expires - jiffies;

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
		i,
		ireq->loc_addr,
		ntohs(inet_sk(sk)->inet_sport),
		ireq->rmt_addr,
		ntohs(ireq->rmt_port),
		TCP_SYN_RECV,
		0, 0, /* could print option size, but that is af dependent. */
		1,    /* timers active (only the expire timer) */
		jiffies_delta_to_clock_t(delta),
		req->num_timeout,
		from_kuid_munged(seq_user_ns(f), uid),
		0,  /* non standard timer */
		0, /* open_requests have no inode */
		atomic_read(&sk->sk_refcnt),
		req,
		len);
}
static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
{
	int timer_active;
	unsigned long timer_expires;
	const struct tcp_sock *tp = tcp_sk(sk);
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct inet_sock *inet = inet_sk(sk);
	struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
	__be32 dest = inet->inet_daddr;
	__be32 src = inet->inet_rcv_saddr;
	__u16 destp = ntohs(inet->inet_dport);
	__u16 srcp = ntohs(inet->inet_sport);
	int rx_queue;

	if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
		timer_active	= 1;
		timer_expires	= icsk->icsk_timeout;
	} else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
		timer_active	= 4;
		timer_expires	= icsk->icsk_timeout;
	} else if (timer_pending(&sk->sk_timer)) {
		timer_active	= 2;
		timer_expires	= sk->sk_timer.expires;
	} else {
		timer_active	= 0;
		timer_expires	= jiffies;
	}

	if (sk->sk_state == TCP_LISTEN)
		rx_queue = sk->sk_ack_backlog;
	else
		/* Because we don't lock the socket,
		 * we might find a transient negative value.
		 */
		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
			"%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
		i, src, srcp, dest, destp, sk->sk_state,
		tp->write_seq - tp->snd_una,
		rx_queue,
		timer_active,
		jiffies_delta_to_clock_t(timer_expires - jiffies),
		icsk->icsk_retransmits,
		from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
		icsk->icsk_probes_out,
		sock_i_ino(sk),
		atomic_read(&sk->sk_refcnt), sk,
		jiffies_to_clock_t(icsk->icsk_rto),
		jiffies_to_clock_t(icsk->icsk_ack.ato),
		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
		tp->snd_cwnd,
		sk->sk_state == TCP_LISTEN ?
		    (fastopenq ? fastopenq->max_qlen : 0) :
		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh),
		len);
}
static void get_timewait4_sock(const struct inet_timewait_sock *tw,
			       struct seq_file *f, int i, int *len)
{
	__be32 dest, src;
	__u16 destp, srcp;
	long delta = tw->tw_ttd - jiffies;

	dest  = tw->tw_daddr;
	src   = tw->tw_rcv_saddr;
	destp = ntohs(tw->tw_dport);
	srcp  = ntohs(tw->tw_sport);

	seq_printf(f, "%4d: %08X:%04X %08X:%04X"
		" %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
		i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
		3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
		atomic_read(&tw->tw_refcnt), tw, len);
}
#define TMPSZ 150

static int tcp4_seq_show(struct seq_file *seq, void *v)
{
	struct tcp_iter_state *st;
	int len;

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq, "%-*s\n", TMPSZ - 1,
			   "  sl  local_address rem_address   st tx_queue "
			   "rx_queue tr tm->when retrnsmt   uid  timeout "
			   "inode");
		goto out;
	}
	st = seq->private;

	switch (st->state) {
	case TCP_SEQ_STATE_LISTENING:
	case TCP_SEQ_STATE_ESTABLISHED:
		get_tcp4_sock(v, seq, st->num, &len);
		break;
	case TCP_SEQ_STATE_OPENREQ:
		get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
		break;
	case TCP_SEQ_STATE_TIME_WAIT:
		get_timewait4_sock(v, seq, st->num, &len);
		break;
	}
	seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
out:
	return 0;
}
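
/*
 * Editorial sketch, not part of the original file: a userspace reader for the
 * /proc/net/tcp format emitted by get_tcp4_sock() above.  The address columns
 * are the raw __be32 values printed in hex, the ports are already converted
 * with ntohs(); this toy parser only looks at the first few columns.  Guarded
 * out so it is never compiled into the kernel.
 */
#if 0	/* userspace example only */
#include <stdio.h>
#include <arpa/inet.h>

int main(void)
{
	FILE *fp = fopen("/proc/net/tcp", "r");
	char line[512];

	if (!fp)
		return 1;
	fgets(line, sizeof(line), fp);		/* skip the header line */
	while (fgets(line, sizeof(line), fp)) {
		unsigned int laddr, raddr, lport, rport, state;

		if (sscanf(line, " %*d: %8X:%4X %8X:%4X %2X",
			   &laddr, &lport, &raddr, &rport, &state) == 5) {
			struct in_addr a = { .s_addr = laddr };

			printf("%s:%u state %u\n", inet_ntoa(a), lport, state);
		}
	}
	fclose(fp);
	return 0;
}
#endif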
static const struct file_operations tcp_afinfo_seq_fops = {
	.owner   = THIS_MODULE,
	.open    = tcp_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_net
};

static struct tcp_seq_afinfo tcp4_seq_afinfo = {
	.name		= "tcp",
	.family		= AF_INET,
	.seq_fops	= &tcp_afinfo_seq_fops,
	.seq_ops	= {
		.show		= tcp4_seq_show,
	},
};
static int __net_init tcp4_proc_init_net(struct net *net)
{
	return tcp_proc_register(net, &tcp4_seq_afinfo);
}

static void __net_exit tcp4_proc_exit_net(struct net *net)
{
	tcp_proc_unregister(net, &tcp4_seq_afinfo);
}

static struct pernet_operations tcp4_net_ops = {
	.init = tcp4_proc_init_net,
	.exit = tcp4_proc_exit_net,
};

int __init tcp4_proc_init(void)
{
	return register_pernet_subsys(&tcp4_net_ops);
}

void tcp4_proc_exit(void)
{
	unregister_pernet_subsys(&tcp4_net_ops);
}
#endif /* CONFIG_PROC_FS */
struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
{
	const struct iphdr *iph = skb_gro_network_header(skb);
	__wsum wsum;
	__sum16 sum;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
				  skb->csum)) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			break;
		}
flush:
		NAPI_GRO_CB(skb)->flush = 1;
		return NULL;

	case CHECKSUM_NONE:
		wsum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
					  skb_gro_len(skb), IPPROTO_TCP, 0);
		sum = csum_fold(skb_checksum(skb,
					     skb_gro_offset(skb),
					     skb_gro_len(skb),
					     wsum));
		if (sum)
			goto flush;

		skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}

	return tcp_gro_receive(head, skb);
}

int tcp4_gro_complete(struct sk_buff *skb)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct tcphdr *th = tcp_hdr(skb);

	th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
				  iph->saddr, iph->daddr, 0);
	skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;

	return tcp_gro_complete(skb);
}
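
/*
 * Editorial sketch, not part of the original file: the 16-bit one's-complement
 * "fold" that csum_fold() performs on the 32-bit partial sum built by
 * csum_tcpudp_nofold()/skb_checksum() in the CHECKSUM_NONE path above.
 * Plain C for exposition only; guarded out so it is never compiled.
 */
#if 0	/* illustrative example only */
#include <stdint.h>

static uint16_t example_csum_fold(uint32_t partial)
{
	/* add the two 16-bit halves, then fold any carry back in */
	partial = (partial & 0xffff) + (partial >> 16);
	partial += partial >> 16;
	/* one's complement of the result; a final value of 0 from the
	 * verification path means "checksum is correct" */
	return (uint16_t)~partial;
}
#endif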
struct proto tcp_prot = {
	.name			= "TCP",
	.owner			= THIS_MODULE,
	.close			= tcp_close,
	.connect		= tcp_v4_connect,
	.disconnect		= tcp_disconnect,
	.accept			= inet_csk_accept,
	.ioctl			= tcp_ioctl,
	.init			= tcp_v4_init_sock,
	.destroy		= tcp_v4_destroy_sock,
	.shutdown		= tcp_shutdown,
	.setsockopt		= tcp_setsockopt,
	.getsockopt		= tcp_getsockopt,
	.recvmsg		= tcp_recvmsg,
	.sendmsg		= tcp_sendmsg,
	.sendpage		= tcp_sendpage,
	.backlog_rcv		= tcp_v4_do_rcv,
	.release_cb		= tcp_release_cb,
	.mtu_reduced		= tcp_v4_mtu_reduced,
	.hash			= inet_hash,
	.unhash			= inet_unhash,
	.get_port		= inet_csk_get_port,
	.enter_memory_pressure	= tcp_enter_memory_pressure,
	.sockets_allocated	= &tcp_sockets_allocated,
	.orphan_count		= &tcp_orphan_count,
	.memory_allocated	= &tcp_memory_allocated,
	.memory_pressure	= &tcp_memory_pressure,
	.sysctl_wmem		= sysctl_tcp_wmem,
	.sysctl_rmem		= sysctl_tcp_rmem,
	.max_header		= MAX_TCP_HEADER,
	.obj_size		= sizeof(struct tcp_sock),
	.slab_flags		= SLAB_DESTROY_BY_RCU,
	.twsk_prot		= &tcp_timewait_sock_ops,
	.rsk_prot		= &tcp_request_sock_ops,
	.h.hashinfo		= &tcp_hashinfo,
	.no_autobind		= true,
#ifdef CONFIG_COMPAT
	.compat_setsockopt	= compat_tcp_setsockopt,
	.compat_getsockopt	= compat_tcp_getsockopt,
#endif
#ifdef CONFIG_MEMCG_KMEM
	.init_cgroup		= tcp_init_cgroup,
	.destroy_cgroup		= tcp_destroy_cgroup,
	.proto_cgroup		= tcp_proto_cgroup,
#endif
};
EXPORT_SYMBOL(tcp_prot);
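
/*
 * Editorial note, not part of the original file: tcp_prot is registered from
 * the AF_INET init path in net/ipv4/af_inet.c, roughly as sketched below.
 * The second argument asks proto_register() to allocate the slab cache
 * described by .obj_size and .slab_flags above.  Sketch only, never compiled.
 */
#if 0	/* rough sketch of the call site in af_inet.c */
	int rc = proto_register(&tcp_prot, 1);

	if (rc)
		return rc;	/* caller aborts AF_INET initialisation */
#endif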
static int __net_init tcp_sk_init(struct net *net)
{
	net->ipv4.sysctl_tcp_ecn = 2;
	return 0;
}

static void __net_exit tcp_sk_exit(struct net *net)
{
}

static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
{
	inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
}

static struct pernet_operations __net_initdata tcp_sk_ops = {
	.init	    = tcp_sk_init,
	.exit	    = tcp_sk_exit,
	.exit_batch = tcp_sk_exit_batch,
};

void __init tcp_v4_init(void)
{
	inet_hashinfo_init(&tcp_hashinfo);
	if (register_pernet_subsys(&tcp_sk_ops))
		panic("Failed to create the TCP control socket.\n");
}