2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol (TCP).
8 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
34 * ACK bit.
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
45 * coma.
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
50 * to a single port at the same time.
53 #define pr_fmt(fmt) "TCP: " fmt
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
66 #include <net/net_namespace.h>
68 #include <net/inet_hashtables.h>
70 #include <net/transp_v6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
78 #include <net/busy_poll.h>
80 #include <linux/inet.h>
81 #include <linux/ipv6.h>
82 #include <linux/stddef.h>
83 #include <linux/proc_fs.h>
84 #include <linux/seq_file.h>
86 #include <linux/crypto.h>
87 #include <linux/scatterlist.h>
89 int sysctl_tcp_tw_reuse __read_mostly;
90 int sysctl_tcp_low_latency __read_mostly;
91 EXPORT_SYMBOL(sysctl_tcp_low_latency);
94 #ifdef CONFIG_TCP_MD5SIG
95 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
96 __be32 daddr, __be32 saddr, const struct tcphdr *th);
99 struct inet_hashinfo tcp_hashinfo;
100 EXPORT_SYMBOL(tcp_hashinfo);
102 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
104 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
105 ip_hdr(skb)->saddr,
106 tcp_hdr(skb)->dest,
107 tcp_hdr(skb)->source);
110 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
112 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
113 struct tcp_sock *tp = tcp_sk(sk);
115 /* With PAWS, it is safe from the viewpoint
116 of data integrity. Even without PAWS it is safe provided sequence
117 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
119 Actually, the idea is close to VJ's one, only timestamp cache is
120 held not per host, but per port pair and TW bucket is used as state
121 holder.
123 If TW bucket has been already destroyed we fall back to VJ's scheme
124 and use initial timestamp retrieved from peer table.
126 if (tcptw->tw_ts_recent_stamp &&
127 (twp == NULL || (sysctl_tcp_tw_reuse &&
128 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
129 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
130 if (tp->write_seq == 0)
131 tp->write_seq = 1;
132 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
133 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
140 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
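/* Illustrative sketch (not kernel code): the reuse test applied above,
 * restated as a standalone predicate. Assumes second-granularity timestamps,
 * as with get_seconds(); all names below are invented for the sketch.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

struct tw_sketch {
	uint32_t ts_recent_stamp;	/* seconds; last timestamp seen from the peer */
};

static bool tw_may_reuse_sketch(const struct tw_sketch *tw, uint32_t now_sec,
				bool tw_reuse_enabled, bool need_unique_check)
{
	/* Recycle the TIME-WAIT port pair only if a peer timestamp was
	 * recorded, and either the caller does not require the strict
	 * uniqueness check (twp == NULL above) or tcp_tw_reuse is on and
	 * more than one second has elapsed since the last segment.
	 */
	return tw->ts_recent_stamp &&
	       (!need_unique_check ||
		(tw_reuse_enabled && now_sec - tw->ts_recent_stamp > 1));
}
#endif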
142 /* This will initiate an outgoing connection. */
143 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
145 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
146 struct inet_sock *inet = inet_sk(sk);
147 struct tcp_sock *tp = tcp_sk(sk);
148 __be16 orig_sport, orig_dport;
149 __be32 daddr, nexthop;
153 struct ip_options_rcu *inet_opt;
155 if (addr_len < sizeof(struct sockaddr_in))
156 return -EINVAL;
158 if (usin->sin_family != AF_INET)
159 return -EAFNOSUPPORT;
161 nexthop = daddr = usin->sin_addr.s_addr;
162 inet_opt = rcu_dereference_protected(inet->inet_opt,
163 sock_owned_by_user(sk));
164 if (inet_opt && inet_opt->opt.srr) {
165 if (!daddr)
166 return -EINVAL;
167 nexthop = inet_opt->opt.faddr;
168 }
170 orig_sport = inet->inet_sport;
171 orig_dport = usin->sin_port;
172 fl4 = &inet->cork.fl.u.ip4;
173 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
174 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
175 IPPROTO_TCP,
176 orig_sport, orig_dport, sk);
179 if (err == -ENETUNREACH)
180 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
184 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
189 if (!inet_opt || !inet_opt->opt.srr)
192 if (!inet->inet_saddr)
193 inet->inet_saddr = fl4->saddr;
194 inet->inet_rcv_saddr = inet->inet_saddr;
196 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
197 /* Reset inherited state */
198 tp->rx_opt.ts_recent = 0;
199 tp->rx_opt.ts_recent_stamp = 0;
200 if (likely(!tp->repair))
204 if (tcp_death_row.sysctl_tw_recycle &&
205 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
206 tcp_fetch_timewait_stamp(sk, &rt->dst);
208 inet->inet_dport = usin->sin_port;
209 inet->inet_daddr = daddr;
211 inet_csk(sk)->icsk_ext_hdr_len = 0;
213 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
215 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
217 /* Socket identity is still unknown (sport may be zero).
218 * However, we set the state to SYN-SENT and, without releasing the
219 * socket lock, select a source port, enter ourselves into the hash
220 * tables and complete initialization after this.
222 tcp_set_state(sk, TCP_SYN_SENT);
223 err = inet_hash_connect(&tcp_death_row, sk);
227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 inet->inet_sport, inet->inet_dport, sk);
234 /* OK, now commit destination to socket. */
235 sk->sk_gso_type = SKB_GSO_TCPV4;
236 sk_setup_caps(sk, &rt->dst);
238 if (!tp->write_seq && likely(!tp->repair))
239 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240 inet->inet_daddr,
241 inet->inet_sport,
242 usin->sin_port);
244 inet->inet_id = tp->write_seq ^ jiffies;
246 err = tcp_connect(sk);
256 * This unhashes the socket and releases the local port,
257 * if necessary.
258 */
259 tcp_set_state(sk, TCP_CLOSE);
261 sk->sk_route_caps = 0;
262 inet->inet_dport = 0;
265 EXPORT_SYMBOL(tcp_v4_connect);
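/* Illustrative sketch (not part of this file): the userspace call that ends
 * up in tcp_v4_connect() above - a blocking connect() on an AF_INET stream
 * socket. Address and port are placeholders.
 */
#if 0
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int connect_example(void)
{
	struct sockaddr_in dst;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;

	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(80);			/* placeholder port */
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* placeholder address */

	/* This is what drives the SYN-SENT transition in tcp_v4_connect(). */
	if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) < 0) {
		perror("connect");
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}
#endif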
268 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269 * It can be called through tcp_release_cb() if socket was owned by user
270 * at the time tcp_v4_err() was called to handle ICMP message.
272 static void tcp_v4_mtu_reduced(struct sock *sk)
274 struct dst_entry *dst;
275 struct inet_sock *inet = inet_sk(sk);
276 u32 mtu = tcp_sk(sk)->mtu_info;
278 dst = inet_csk_update_pmtu(sk, mtu);
282 /* Something is about to go wrong... Remember the soft error
283 * for the case where this connection will not be able to recover.
284 */
285 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286 sk->sk_err_soft = EMSGSIZE;
290 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291 ip_sk_accept_pmtu(sk) &&
292 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293 tcp_sync_mss(sk, mtu);
295 /* Resend the TCP packet because it's
296 * clear that the old packet has been
297 * dropped. This is the new "fast" path mtu
298 * discovery.
299 */
300 tcp_simple_retransmit(sk);
301 } /* else let the usual retransmit timer handle it */
304 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 struct dst_entry *dst = __sk_dst_check(sk, 0);
309 dst->ops->redirect(dst, sk, skb);
313 * This routine is called by the ICMP module when it gets some
314 * sort of error condition. If err < 0 then the socket should
315 * be closed and the error returned to the user. If err > 0
316 * it's just the icmp type << 8 | icmp code. After adjustment
317 * header points to the first 8 bytes of the tcp header. We need
318 * to find the appropriate port.
320 * The locking strategy used here is very "optimistic". When
321 * someone else accesses the socket the ICMP is just dropped
322 * and for some paths there is no check at all.
323 * A more general error queue to queue errors for later handling
324 * is probably better.
325 */
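/* Illustrative sketch of the positive-error encoding described above:
 * the ICMP type sits in the high byte and the code in the low byte.
 * Standalone helpers invented for the sketch, not kernel interfaces.
 */
#if 0
static inline int icmp_err_pack_sketch(int type, int code)
{
	return (type << 8) | code;
}

static inline void icmp_err_unpack_sketch(int err, int *type, int *code)
{
	*type = err >> 8;
	*code = err & 0xff;
}
#endif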
328 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
330 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
331 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
332 struct inet_connection_sock *icsk;
334 struct inet_sock *inet;
335 const int type = icmp_hdr(icmp_skb)->type;
336 const int code = icmp_hdr(icmp_skb)->code;
339 struct request_sock *fastopen;
343 struct net *net = dev_net(icmp_skb->dev);
345 if (icmp_skb->len < (iph->ihl << 2) + 8) {
346 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
350 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
351 iph->saddr, th->source, inet_iif(icmp_skb));
353 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
356 if (sk->sk_state == TCP_TIME_WAIT) {
357 inet_twsk_put(inet_twsk(sk));
362 /* If too many ICMPs get dropped on busy
363 * servers this needs to be solved differently.
364 * We do take care of PMTU discovery (RFC1191) special case :
365 * we can receive locally generated ICMP messages while socket is held.
367 if (sock_owned_by_user(sk)) {
368 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
369 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
371 if (sk->sk_state == TCP_CLOSE)
374 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
375 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
381 seq = ntohl(th->seq);
382 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child()) */
383 fastopen = tp->fastopen_rsk;
384 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
385 if (sk->sk_state != TCP_LISTEN &&
386 !between(seq, snd_una, tp->snd_nxt)) {
387 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
393 do_redirect(icmp_skb, sk);
395 case ICMP_SOURCE_QUENCH:
396 /* Just silently ignore these. */
398 case ICMP_PARAMETERPROB:
401 case ICMP_DEST_UNREACH:
402 if (code > NR_ICMP_UNREACH)
405 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
406 /* We are not interested in TCP_LISTEN and open_requests
407 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
408 * they should go through unfragmented).
409 */
410 if (sk->sk_state == TCP_LISTEN)
414 if (!sock_owned_by_user(sk)) {
415 tcp_v4_mtu_reduced(sk);
417 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
423 err = icmp_err_convert[code].errno;
424 /* check if icmp_skb allows revert of backoff
425 * (see draft-zimmermann-tcp-lcd) */
426 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
428 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
429 !icsk->icsk_backoff || fastopen)
432 if (sock_owned_by_user(sk))
435 icsk->icsk_backoff--;
436 inet_csk(sk)->icsk_rto = (tp->srtt_us ? __tcp_set_rto(tp) :
437 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
440 skb = tcp_write_queue_head(sk);
443 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
444 tcp_time_stamp - TCP_SKB_CB(skb)->when);
447 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
448 remaining, TCP_RTO_MAX);
450 /* RTO revert clocked out retransmission.
451 * Will retransmit now */
452 tcp_retransmit_timer(sk);
456 case ICMP_TIME_EXCEEDED:
463 switch (sk->sk_state) {
464 struct request_sock *req, **prev;
466 if (sock_owned_by_user(sk))
469 req = inet_csk_search_req(sk, &prev, th->dest,
470 iph->daddr, iph->saddr);
474 /* ICMPs are not backlogged, hence we cannot get
475 an established socket here.
476 */
478 WARN_ON(req->sk);
479 if (seq != tcp_rsk(req)->snt_isn) {
480 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
485 * Still in SYN_RECV, just remove it silently.
486 * There is no good way to pass the error to the newly
487 * created socket, and POSIX does not want network
488 * errors returned from accept().
490 inet_csk_reqsk_queue_drop(sk, req, prev);
491 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
496 /* Only in fast or simultaneous open. If a fast open socket is
497 * already accepted it is treated as a connected one below.
498 */
499 if (fastopen && fastopen->sk == NULL)
502 if (!sock_owned_by_user(sk)) {
505 sk->sk_error_report(sk);
509 sk->sk_err_soft = err;
514 /* If we've already connected we will keep trying
515 * until we time out, or the user gives up.
517 * rfc1122 4.2.3.9 allows to consider as hard errors
518 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
519 * but it is obsoleted by pmtu discovery).
521 * Note that in the modern internet, where routing is unreliable
522 * and broken firewalls sit in every dark corner sending random
523 * errors ordered by their masters, even these two messages finally
524 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
526 * Now we are in compliance with RFCs.
531 if (!sock_owned_by_user(sk) && inet->recverr) {
533 sk->sk_error_report(sk);
534 } else { /* Only an error on timeout */
535 sk->sk_err_soft = err;
543 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
545 struct tcphdr *th = tcp_hdr(skb);
547 if (skb->ip_summed == CHECKSUM_PARTIAL) {
548 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
549 skb->csum_start = skb_transport_header(skb) - skb->head;
550 skb->csum_offset = offsetof(struct tcphdr, check);
552 th->check = tcp_v4_check(skb->len, saddr, daddr,
559 /* This routine computes an IPv4 TCP checksum. */
560 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
562 const struct inet_sock *inet = inet_sk(sk);
564 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
566 EXPORT_SYMBOL(tcp_v4_send_check);
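/* Illustrative sketch (not the kernel implementation): the checksum that
 * tcp_v4_check() folds for the non-offloaded branch above - a 16-bit
 * one's-complement sum over the IPv4 pseudo-header followed by the TCP
 * header and payload, with the checksum field treated as zero.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

static uint32_t csum16_sketch(const uint8_t *p, size_t len, uint32_t sum)
{
	size_t i;

	for (i = 0; i + 1 < len; i += 2)
		sum += ((uint32_t)p[i] << 8) | p[i + 1];
	if (len & 1)
		sum += (uint32_t)p[len - 1] << 8;
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return sum;
}

static uint16_t tcp4_csum_sketch(const uint8_t saddr[4], const uint8_t daddr[4],
				 const uint8_t *segment, size_t len)
{
	uint8_t ph[12];
	uint32_t sum;

	/* Pseudo-header: source, destination, zero pad, protocol, TCP length. */
	memcpy(ph, saddr, 4);
	memcpy(ph + 4, daddr, 4);
	ph[8] = 0;
	ph[9] = 6;			/* IPPROTO_TCP */
	ph[10] = (uint8_t)(len >> 8);
	ph[11] = (uint8_t)len;

	sum = csum16_sketch(ph, sizeof(ph), 0);
	sum = csum16_sketch(segment, len, sum);	/* th->check must be 0 here */
	return (uint16_t)~sum;			/* value stored big-endian on the wire */
}
#endif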
569 * This routine will send an RST to the other tcp.
571 * Someone asks: why I NEVER use socket parameters (TOS, TTL etc.)
573 * Answer: if a packet caused RST, it is not for a socket
574 * existing in our system, if it is matched to a socket,
575 * it is just duplicate segment or bug in other side's TCP.
576 * So we build the reply based only on the parameters
577 * that arrived with the segment.
578 * Exception: precedence violation. We do not implement it in any case.
581 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
583 const struct tcphdr *th = tcp_hdr(skb);
586 #ifdef CONFIG_TCP_MD5SIG
587 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
590 struct ip_reply_arg arg;
591 #ifdef CONFIG_TCP_MD5SIG
592 struct tcp_md5sig_key *key;
593 const __u8 *hash_location = NULL;
594 unsigned char newhash[16];
596 struct sock *sk1 = NULL;
600 /* Never send a reset in response to a reset. */
604 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
607 /* Swap the send and the receive. */
608 memset(&rep, 0, sizeof(rep));
609 rep.th.dest = th->source;
610 rep.th.source = th->dest;
611 rep.th.doff = sizeof(struct tcphdr) / 4;
614 if (th->ack) {
615 rep.th.seq = th->ack_seq;
616 } else {
617 rep.th.ack = 1;
618 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
619 skb->len - (th->doff << 2));
622 memset(&arg, 0, sizeof(arg));
623 arg.iov[0].iov_base = (unsigned char *)&rep;
624 arg.iov[0].iov_len = sizeof(rep.th);
626 #ifdef CONFIG_TCP_MD5SIG
627 hash_location = tcp_parse_md5sig_option(th);
628 if (!sk && hash_location) {
630 * active side is lost. Try to find listening socket through
631 * source port, and then find md5 key through listening socket.
632 * We do not loosen security here:
633 * the incoming packet is checked against the MD5 hash of the key we find,
634 * and no RST is generated if the MD5 hash doesn't match.
636 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
637 &tcp_hashinfo, ip_hdr(skb)->saddr,
638 th->source, ip_hdr(skb)->daddr,
639 ntohs(th->source), inet_iif(skb));
640 /* don't send rst if it can't find key */
644 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
645 &ip_hdr(skb)->saddr, AF_INET);
649 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
650 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
659 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
660 (TCPOPT_NOP << 16) |
661 (TCPOPT_MD5SIG << 8) |
662 TCPOLEN_MD5SIG);
663 /* Update length and the length the header thinks exists */
664 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
665 rep.th.doff = arg.iov[0].iov_len / 4;
667 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
668 key, ip_hdr(skb)->saddr,
669 ip_hdr(skb)->daddr, &rep.th);
672 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
673 ip_hdr(skb)->saddr, /* XXX */
674 arg.iov[0].iov_len, IPPROTO_TCP, 0);
675 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
676 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
677 /* When socket is gone, all binding information is lost.
678 * routing might fail in this case. No choice here, if we choose to force
679 * input interface, we will misroute in case of asymmetric route.
682 arg.bound_dev_if = sk->sk_bound_dev_if;
684 net = dev_net(skb_dst(skb)->dev);
685 arg.tos = ip_hdr(skb)->tos;
686 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
687 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
689 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
690 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
692 #ifdef CONFIG_TCP_MD5SIG
701 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
702 outside socket context, is certainly ugly. What can I do?
703 */
705 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
706 u32 win, u32 tsval, u32 tsecr, int oif,
707 struct tcp_md5sig_key *key,
708 int reply_flags, u8 tos)
710 const struct tcphdr *th = tcp_hdr(skb);
713 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
714 #ifdef CONFIG_TCP_MD5SIG
715 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 struct ip_reply_arg arg;
720 struct net *net = dev_net(skb_dst(skb)->dev);
722 memset(&rep.th, 0, sizeof(struct tcphdr));
723 memset(&arg, 0, sizeof(arg));
725 arg.iov[0].iov_base = (unsigned char *)&rep;
726 arg.iov[0].iov_len = sizeof(rep.th);
728 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
729 (TCPOPT_TIMESTAMP << 8) |
730 TCPOLEN_TIMESTAMP);
731 rep.opt[1] = htonl(tsval);
732 rep.opt[2] = htonl(tsecr);
733 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
736 /* Swap the send and the receive. */
737 rep.th.dest = th->source;
738 rep.th.source = th->dest;
739 rep.th.doff = arg.iov[0].iov_len / 4;
740 rep.th.seq = htonl(seq);
741 rep.th.ack_seq = htonl(ack);
743 rep.th.window = htons(win);
745 #ifdef CONFIG_TCP_MD5SIG
747 int offset = (tsecr) ? 3 : 0;
749 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
750 (TCPOPT_NOP << 16) |
751 (TCPOPT_MD5SIG << 8) |
752 TCPOLEN_MD5SIG);
753 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
754 rep.th.doff = arg.iov[0].iov_len/4;
756 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
757 key, ip_hdr(skb)->saddr,
758 ip_hdr(skb)->daddr, &rep.th);
761 arg.flags = reply_flags;
762 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
763 ip_hdr(skb)->saddr, /* XXX */
764 arg.iov[0].iov_len, IPPROTO_TCP, 0);
765 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
767 arg.bound_dev_if = oif;
769 ip_send_unicast_reply(net, skb, ip_hdr(skb)->saddr,
770 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
772 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
775 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
777 struct inet_timewait_sock *tw = inet_twsk(sk);
778 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
780 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
781 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
782 tcp_time_stamp + tcptw->tw_ts_offset,
785 tcp_twsk_md5_key(tcptw),
786 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
793 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
794 struct request_sock *req)
796 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
797 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
799 tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
800 tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
801 tcp_rsk(req)->rcv_nxt, req->rcv_wnd,
805 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
807 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
812 * Send a SYN-ACK after having received a SYN.
813 * This still operates on a request_sock only, not on a big
814 * socket.
815 */
816 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
817 struct request_sock *req,
819 struct tcp_fastopen_cookie *foc)
821 const struct inet_request_sock *ireq = inet_rsk(req);
826 /* First, grab a route. */
827 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
830 skb = tcp_make_synack(sk, dst, req, foc);
833 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
835 skb_set_queue_mapping(skb, queue_mapping);
836 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
839 err = net_xmit_eval(err);
840 if (!tcp_rsk(req)->snt_synack && !err)
841 tcp_rsk(req)->snt_synack = tcp_time_stamp;
847 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
849 int res = tcp_v4_send_synack(sk, NULL, req, 0, NULL);
852 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
853 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
859 * IPv4 request_sock destructor.
861 static void tcp_v4_reqsk_destructor(struct request_sock *req)
863 kfree(inet_rsk(req)->opt);
867 * Return true if a syncookie should be sent
869 bool tcp_syn_flood_action(struct sock *sk,
870 const struct sk_buff *skb,
871 const char *proto)
872 {
873 const char *msg = "Dropping request";
874 bool want_cookie = false;
875 struct listen_sock *lopt;
877 #ifdef CONFIG_SYN_COOKIES
878 if (sysctl_tcp_syncookies) {
879 msg = "Sending cookies";
881 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
884 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
886 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
887 if (!lopt->synflood_warned && sysctl_tcp_syncookies != 2) {
888 lopt->synflood_warned = 1;
889 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
890 proto, ntohs(tcp_hdr(skb)->dest), msg);
894 EXPORT_SYMBOL(tcp_syn_flood_action);
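/* Illustrative sketch of the decision above, restated standalone: with
 * tcp_syncookies == 2 cookies are sent unconditionally, with 1 only under
 * SYN-queue pressure, and with 0 the request is simply dropped.
 */
#if 0
enum syncookie_mode_sketch {
	COOKIES_OFF = 0,
	COOKIES_ON_PRESSURE = 1,
	COOKIES_ALWAYS = 2,
};

static int want_syncookie_sketch(enum syncookie_mode_sketch mode, int syn_queue_full)
{
	if (mode == COOKIES_ALWAYS)
		return 1;
	return mode == COOKIES_ON_PRESSURE && syn_queue_full;
}
#endif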
897 * Save and compile IPv4 options into the request_sock if needed.
899 static struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
901 const struct ip_options *opt = &(IPCB(skb)->opt);
902 struct ip_options_rcu *dopt = NULL;
904 if (opt && opt->optlen) {
905 int opt_size = sizeof(*dopt) + opt->optlen;
907 dopt = kmalloc(opt_size, GFP_ATOMIC);
908 if (dopt) {
909 if (ip_options_echo(&dopt->opt, skb)) {
910 kfree(dopt);
911 dopt = NULL;
912 }
913 }
918 #ifdef CONFIG_TCP_MD5SIG
920 * RFC2385 MD5 checksumming requires a mapping of
921 * IP address->MD5 Key.
922 * We need to maintain these in the sk structure.
925 /* Find the Key structure for an address. */
926 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
927 const union tcp_md5_addr *addr,
930 struct tcp_sock *tp = tcp_sk(sk);
931 struct tcp_md5sig_key *key;
932 unsigned int size = sizeof(struct in_addr);
933 struct tcp_md5sig_info *md5sig;
935 /* caller either holds rcu_read_lock() or socket lock */
936 md5sig = rcu_dereference_check(tp->md5sig_info,
937 sock_owned_by_user(sk) ||
938 lockdep_is_held(&sk->sk_lock.slock));
941 #if IS_ENABLED(CONFIG_IPV6)
942 if (family == AF_INET6)
943 size = sizeof(struct in6_addr);
945 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
946 if (key->family != family)
948 if (!memcmp(&key->addr, addr, size))
953 EXPORT_SYMBOL(tcp_md5_do_lookup);
955 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
956 struct sock *addr_sk)
958 union tcp_md5_addr *addr;
960 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
961 return tcp_md5_do_lookup(sk, addr, AF_INET);
963 EXPORT_SYMBOL(tcp_v4_md5_lookup);
965 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
966 struct request_sock *req)
968 union tcp_md5_addr *addr;
970 addr = (union tcp_md5_addr *)&inet_rsk(req)->ir_rmt_addr;
971 return tcp_md5_do_lookup(sk, addr, AF_INET);
974 /* This can be called on a newly created socket, from other files */
975 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
976 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
978 /* Add Key to the list */
979 struct tcp_md5sig_key *key;
980 struct tcp_sock *tp = tcp_sk(sk);
981 struct tcp_md5sig_info *md5sig;
983 key = tcp_md5_do_lookup(sk, addr, family);
985 /* Pre-existing entry - just update that one. */
986 memcpy(key->key, newkey, newkeylen);
987 key->keylen = newkeylen;
991 md5sig = rcu_dereference_protected(tp->md5sig_info,
992 sock_owned_by_user(sk));
994 md5sig = kmalloc(sizeof(*md5sig), gfp);
998 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
999 INIT_HLIST_HEAD(&md5sig->head);
1000 rcu_assign_pointer(tp->md5sig_info, md5sig);
1003 key = sock_kmalloc(sk, sizeof(*key), gfp);
1006 if (!tcp_alloc_md5sig_pool()) {
1007 sock_kfree_s(sk, key, sizeof(*key));
1011 memcpy(key->key, newkey, newkeylen);
1012 key->keylen = newkeylen;
1013 key->family = family;
1014 memcpy(&key->addr, addr,
1015 (family == AF_INET6) ? sizeof(struct in6_addr) :
1016 sizeof(struct in_addr));
1017 hlist_add_head_rcu(&key->node, &md5sig->head);
1020 EXPORT_SYMBOL(tcp_md5_do_add);
1022 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1024 struct tcp_md5sig_key *key;
1026 key = tcp_md5_do_lookup(sk, addr, family);
1029 hlist_del_rcu(&key->node);
1030 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1031 kfree_rcu(key, rcu);
1034 EXPORT_SYMBOL(tcp_md5_do_del);
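/* Illustrative sketch (userspace, assuming the UAPI exposed by <linux/tcp.h>):
 * installing an RFC 2385 key with the TCP_MD5SIG socket option, which is
 * serviced by tcp_v4_parse_md5_keys() below and stored via tcp_md5_do_add()
 * above. Peer address and key are placeholders.
 */
#if 0
#include <arpa/inet.h>
#include <linux/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_md5_key_example(int fd, const char *peer_ip,
			       const void *key, int keylen)
{
	struct tcp_md5sig md5;
	struct sockaddr_in *sin = (struct sockaddr_in *)&md5.tcpm_addr;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;

	memset(&md5, 0, sizeof(md5));
	sin->sin_family = AF_INET;
	inet_pton(AF_INET, peer_ip, &sin->sin_addr);
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}
#endif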
1036 static void tcp_clear_md5_list(struct sock *sk)
1038 struct tcp_sock *tp = tcp_sk(sk);
1039 struct tcp_md5sig_key *key;
1040 struct hlist_node *n;
1041 struct tcp_md5sig_info *md5sig;
1043 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1045 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
1046 hlist_del_rcu(&key->node);
1047 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1048 kfree_rcu(key, rcu);
1052 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1055 struct tcp_md5sig cmd;
1056 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1058 if (optlen < sizeof(cmd))
1061 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1064 if (sin->sin_family != AF_INET)
1067 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1068 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1071 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1074 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1075 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1079 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1080 __be32 daddr, __be32 saddr, int nbytes)
1082 struct tcp4_pseudohdr *bp;
1083 struct scatterlist sg;
1085 bp = &hp->md5_blk.ip4;
1087 /*
1088 * 1. the TCP pseudo-header (in the order: source IP address,
1089 * destination IP address, zero-padded protocol number, and
1090 * segment length)
1091 */
1092 bp->saddr = saddr;
1093 bp->daddr = daddr;
1094 bp->pad = 0;
1095 bp->protocol = IPPROTO_TCP;
1096 bp->len = cpu_to_be16(nbytes);
1098 sg_init_one(&sg, bp, sizeof(*bp));
1099 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1102 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1103 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1105 struct tcp_md5sig_pool *hp;
1106 struct hash_desc *desc;
1108 hp = tcp_get_md5sig_pool();
1110 goto clear_hash_noput;
1111 desc = &hp->md5_desc;
1113 if (crypto_hash_init(desc))
1115 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1117 if (tcp_md5_hash_header(hp, th))
1119 if (tcp_md5_hash_key(hp, key))
1121 if (crypto_hash_final(desc, md5_hash))
1124 tcp_put_md5sig_pool();
1128 tcp_put_md5sig_pool();
1130 memset(md5_hash, 0, 16);
1134 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1135 const struct sock *sk, const struct request_sock *req,
1136 const struct sk_buff *skb)
1138 struct tcp_md5sig_pool *hp;
1139 struct hash_desc *desc;
1140 const struct tcphdr *th = tcp_hdr(skb);
1141 __be32 saddr, daddr;
1144 saddr = inet_sk(sk)->inet_saddr;
1145 daddr = inet_sk(sk)->inet_daddr;
1147 saddr = inet_rsk(req)->ir_loc_addr;
1148 daddr = inet_rsk(req)->ir_rmt_addr;
1150 const struct iphdr *iph = ip_hdr(skb);
1155 hp = tcp_get_md5sig_pool();
1157 goto clear_hash_noput;
1158 desc = &hp->md5_desc;
1160 if (crypto_hash_init(desc))
1163 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1165 if (tcp_md5_hash_header(hp, th))
1167 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1169 if (tcp_md5_hash_key(hp, key))
1171 if (crypto_hash_final(desc, md5_hash))
1174 tcp_put_md5sig_pool();
1178 tcp_put_md5sig_pool();
1180 memset(md5_hash, 0, 16);
1183 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
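/* Illustrative sketch of the digest input assembled above, per RFC 2385:
 * IPv4 pseudo-header, then the fixed 20-byte TCP header with the checksum
 * zeroed (options are excluded), then the payload, then the key. The
 * md5_sketch_*() incremental interface is hypothetical, standing in for the
 * crypto_hash_*() calls; it is not a real library API.
 */
#if 0
#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct md5_sketch_ctx;
void md5_sketch_init(struct md5_sketch_ctx *ctx);
void md5_sketch_update(struct md5_sketch_ctx *ctx, const void *data, size_t len);
void md5_sketch_final(struct md5_sketch_ctx *ctx, uint8_t digest[16]);

struct tcp4_pseudohdr_sketch {
	uint8_t saddr[4];
	uint8_t daddr[4];
	uint8_t pad;		/* always zero */
	uint8_t protocol;	/* IPPROTO_TCP == 6 */
	uint8_t len[2];		/* TCP header + payload length, big endian */
};

static void tcp4_md5_digest_sketch(struct md5_sketch_ctx *ctx,
				   const uint8_t saddr[4], const uint8_t daddr[4],
				   const uint8_t *tcph, size_t hdrlen,
				   const uint8_t *payload, size_t paylen,
				   const uint8_t *key, size_t keylen,
				   uint8_t digest[16])
{
	struct tcp4_pseudohdr_sketch ph;
	uint8_t hdr[20];
	size_t seglen = hdrlen + paylen;	/* hdrlen includes TCP options */

	memcpy(ph.saddr, saddr, 4);
	memcpy(ph.daddr, daddr, 4);
	ph.pad = 0;
	ph.protocol = 6;
	ph.len[0] = (uint8_t)(seglen >> 8);
	ph.len[1] = (uint8_t)seglen;

	memcpy(hdr, tcph, sizeof(hdr));
	hdr[16] = 0;				/* th->check lives at offset 16 */
	hdr[17] = 0;

	md5_sketch_init(ctx);
	md5_sketch_update(ctx, &ph, sizeof(ph));	/* 1. pseudo-header */
	md5_sketch_update(ctx, hdr, sizeof(hdr));	/* 2. header, no options */
	md5_sketch_update(ctx, payload, paylen);	/* 3. segment data */
	md5_sketch_update(ctx, key, keylen);		/* 4. the key */
	md5_sketch_final(ctx, digest);
}
#endif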
1185 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1188 * This gets called for each TCP segment that arrives
1189 * so we want to be efficient.
1190 * We have 3 drop cases:
1191 * o No MD5 hash and one expected.
1192 * o MD5 hash and we're not expecting one.
1193 * o MD5 hash and it's wrong.
1195 const __u8 *hash_location = NULL;
1196 struct tcp_md5sig_key *hash_expected;
1197 const struct iphdr *iph = ip_hdr(skb);
1198 const struct tcphdr *th = tcp_hdr(skb);
1200 unsigned char newhash[16];
1202 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1204 hash_location = tcp_parse_md5sig_option(th);
1206 /* We've parsed the options - do we have a hash? */
1207 if (!hash_expected && !hash_location)
1210 if (hash_expected && !hash_location) {
1211 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1215 if (!hash_expected && hash_location) {
1216 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1220 /* Okay, so this is hash_expected and hash_location -
1221 * so we need to calculate the checksum.
1223 genhash = tcp_v4_md5_hash_skb(newhash,
1227 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1228 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1229 &iph->saddr, ntohs(th->source),
1230 &iph->daddr, ntohs(th->dest),
1231 genhash ? " tcp_v4_calc_md5_hash failed"
1240 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1242 .obj_size = sizeof(struct tcp_request_sock),
1243 .rtx_syn_ack = tcp_v4_rtx_synack,
1244 .send_ack = tcp_v4_reqsk_send_ack,
1245 .destructor = tcp_v4_reqsk_destructor,
1246 .send_reset = tcp_v4_send_reset,
1247 .syn_ack_timeout = tcp_syn_ack_timeout,
1250 #ifdef CONFIG_TCP_MD5SIG
1251 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1252 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1253 .calc_md5_hash = tcp_v4_md5_hash_skb,
1257 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1259 struct tcp_options_received tmp_opt;
1260 struct request_sock *req;
1261 struct inet_request_sock *ireq;
1262 struct tcp_sock *tp = tcp_sk(sk);
1263 struct dst_entry *dst = NULL;
1264 __be32 saddr = ip_hdr(skb)->saddr;
1265 __be32 daddr = ip_hdr(skb)->daddr;
1266 __u32 isn = TCP_SKB_CB(skb)->when;
1267 bool want_cookie = false, fastopen;
1269 struct tcp_fastopen_cookie foc = { .len = -1 };
1272 /* Never answer to SYNs send to broadcast or multicast */
1273 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1276 /* TW buckets are converted to open requests without
1277 * limitations, they conserve resources and the peer is
1278 * evidently a real one.
1279 */
1280 if ((sysctl_tcp_syncookies == 2 ||
1281 inet_csk_reqsk_queue_is_full(sk)) && !isn) {
1282 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
1287 /* Accept backlog is full. If we have already queued enough
1288 * of warm entries in syn queue, drop request. It is better than
1289 * clogging syn queue with openreqs with exponentially increasing
1290 * timeout.
1291 */
1292 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
1293 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1297 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1301 #ifdef CONFIG_TCP_MD5SIG
1302 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1305 tcp_clear_options(&tmp_opt);
1306 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1307 tmp_opt.user_mss = tp->rx_opt.user_mss;
1308 tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
1310 if (want_cookie && !tmp_opt.saw_tstamp)
1311 tcp_clear_options(&tmp_opt);
1313 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1314 tcp_openreq_init(req, &tmp_opt, skb, sk);
1316 ireq = inet_rsk(req);
1317 ireq->ir_loc_addr = daddr;
1318 ireq->ir_rmt_addr = saddr;
1319 ireq->no_srccheck = inet_sk(sk)->transparent;
1320 ireq->opt = tcp_v4_save_options(skb);
1322 if (security_inet_conn_request(sk, skb, req))
1325 if (!want_cookie || tmp_opt.tstamp_ok)
1326 TCP_ECN_create_request(req, skb, sock_net(sk));
1329 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1330 req->cookie_ts = tmp_opt.tstamp_ok;
1332 /* VJ's idea. We save last timestamp seen
1333 * from the destination in peer table, when entering
1334 * state TIME-WAIT, and check against it before
1335 * accepting new connection request.
1337 * If "isn" is not zero, this request hit alive
1338 * timewait bucket, so that all the necessary checks
1339 * are made in the function processing timewait state.
1340 */
1341 if (tmp_opt.saw_tstamp &&
1342 tcp_death_row.sysctl_tw_recycle &&
1343 (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
1344 fl4.daddr == saddr) {
1345 if (!tcp_peer_is_proven(req, dst, true)) {
1346 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1347 goto drop_and_release;
1350 /* Kill the following clause, if you dislike this way. */
1351 else if (!sysctl_tcp_syncookies &&
1352 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1353 (sysctl_max_syn_backlog >> 2)) &&
1354 !tcp_peer_is_proven(req, dst, false)) {
1355 /* Without syncookies last quarter of
1356 * backlog is filled with destinations,
1357 * proven to be alive.
1358 * It means that we continue to communicate
1359 * to destinations, already remembered
1360 * to the moment of synflood.
1362 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1363 &saddr, ntohs(tcp_hdr(skb)->source));
1364 goto drop_and_release;
1367 isn = tcp_v4_init_sequence(skb);
1369 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
1372 tcp_rsk(req)->snt_isn = isn;
1373 tcp_openreq_init_rwin(req, sk, dst);
1374 fastopen = !want_cookie &&
1375 tcp_try_fastopen(sk, skb, req, &foc, dst);
1376 err = tcp_v4_send_synack(sk, dst, req,
1377 skb_get_queue_mapping(skb), &foc);
1379 if (err || want_cookie)
1382 tcp_rsk(req)->listener = NULL;
1383 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1393 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1396 EXPORT_SYMBOL(tcp_v4_conn_request);
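/* Illustrative sketch (not part of this file): the userspace passive open
 * whose incoming SYNs land in tcp_v4_conn_request() above and whose completed
 * handshakes become sockets in tcp_v4_syn_recv_sock() below. The port is a
 * placeholder.
 */
#if 0
#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static int listen_example(void)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	if (fd < 0)
		return -1;
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_addr.s_addr = htonl(INADDR_ANY);
	addr.sin_port = htons(8080);	/* placeholder port */

	/* The listen() backlog bounds the accept queue checked above. */
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(fd, 128) < 0) {
		close(fd);
		return -1;
	}
	/* accept() drains sockets created by tcp_v4_syn_recv_sock(). */
	return accept(fd, NULL, NULL);
}
#endif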
1400 * The three way handshake has completed - we got a valid synack -
1401 * now create the new socket.
1403 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1404 struct request_sock *req,
1405 struct dst_entry *dst)
1407 struct inet_request_sock *ireq;
1408 struct inet_sock *newinet;
1409 struct tcp_sock *newtp;
1411 #ifdef CONFIG_TCP_MD5SIG
1412 struct tcp_md5sig_key *key;
1414 struct ip_options_rcu *inet_opt;
1416 if (sk_acceptq_is_full(sk))
1419 newsk = tcp_create_openreq_child(sk, req, skb);
1423 newsk->sk_gso_type = SKB_GSO_TCPV4;
1424 inet_sk_rx_dst_set(newsk, skb);
1426 newtp = tcp_sk(newsk);
1427 newinet = inet_sk(newsk);
1428 ireq = inet_rsk(req);
1429 newinet->inet_daddr = ireq->ir_rmt_addr;
1430 newinet->inet_rcv_saddr = ireq->ir_loc_addr;
1431 newinet->inet_saddr = ireq->ir_loc_addr;
1432 inet_opt = ireq->opt;
1433 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1435 newinet->mc_index = inet_iif(skb);
1436 newinet->mc_ttl = ip_hdr(skb)->ttl;
1437 newinet->rcv_tos = ip_hdr(skb)->tos;
1438 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1440 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1441 newinet->inet_id = newtp->write_seq ^ jiffies;
1444 dst = inet_csk_route_child_sock(sk, newsk, req);
1448 /* syncookie case : see end of cookie_v4_check() */
1450 sk_setup_caps(newsk, dst);
1452 tcp_sync_mss(newsk, dst_mtu(dst));
1453 newtp->advmss = dst_metric_advmss(dst);
1454 if (tcp_sk(sk)->rx_opt.user_mss &&
1455 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1456 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1458 tcp_initialize_rcv_mss(newsk);
1460 #ifdef CONFIG_TCP_MD5SIG
1461 /* Copy over the MD5 key from the original socket */
1462 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1466 * We're using one, so create a matching key
1467 * on the newsk structure. If we fail to get
1468 * memory, then we end up not copying the key
1469 * across. Shucks.
1470 */
1471 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1472 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1473 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1477 if (__inet_inherit_port(sk, newsk) < 0)
1479 __inet_hash_nolisten(newsk, NULL);
1484 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1488 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1491 inet_csk_prepare_forced_close(newsk);
1495 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1497 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1499 struct tcphdr *th = tcp_hdr(skb);
1500 const struct iphdr *iph = ip_hdr(skb);
1502 struct request_sock **prev;
1503 /* Find possible connection requests. */
1504 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1505 iph->saddr, iph->daddr);
1507 return tcp_check_req(sk, skb, req, prev, false);
1509 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1510 th->source, iph->daddr, th->dest, inet_iif(skb));
1513 if (nsk->sk_state != TCP_TIME_WAIT) {
1517 inet_twsk_put(inet_twsk(nsk));
1521 #ifdef CONFIG_SYN_COOKIES
1523 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
1528 /* The socket must have its spinlock held when we get
1529 * here, unless it is a TCP_LISTEN socket.
1530 *
1531 * We have a potential double-lock case here, so even when
1532 * doing backlog processing we use the BH locking scheme.
1533 * This is because we cannot sleep with the original spinlock
1534 * held.
1535 */
1536 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1539 #ifdef CONFIG_TCP_MD5SIG
1540 /*
1541 * We really want to reject the packet as early as possible
1542 * if:
1543 * o We're expecting an MD5'd packet and this is no MD5 tcp option
1544 * o There is an MD5 option and we're not expecting one
1546 if (tcp_v4_inbound_md5_hash(sk, skb))
1550 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1551 struct dst_entry *dst = sk->sk_rx_dst;
1553 sock_rps_save_rxhash(sk, skb);
1555 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1556 dst->ops->check(dst, 0) == NULL) {
1558 sk->sk_rx_dst = NULL;
1561 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1565 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1568 if (sk->sk_state == TCP_LISTEN) {
1569 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1574 sock_rps_save_rxhash(nsk, skb);
1575 if (tcp_child_process(sk, nsk, skb)) {
1582 sock_rps_save_rxhash(sk, skb);
1584 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1591 tcp_v4_send_reset(rsk, skb);
1594 /* Be careful here. If this function gets more complicated and
1595 * gcc suffers from register pressure on the x86, sk (in %ebx)
1596 * might be destroyed here. This current version compiles correctly,
1597 * but you have been warned.
1602 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1603 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1606 EXPORT_SYMBOL(tcp_v4_do_rcv);
1608 void tcp_v4_early_demux(struct sk_buff *skb)
1610 const struct iphdr *iph;
1611 const struct tcphdr *th;
1614 if (skb->pkt_type != PACKET_HOST)
1617 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1623 if (th->doff < sizeof(struct tcphdr) / 4)
1626 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1627 iph->saddr, th->source,
1628 iph->daddr, ntohs(th->dest),
1632 skb->destructor = sock_edemux;
1633 if (sk->sk_state != TCP_TIME_WAIT) {
1634 struct dst_entry *dst = sk->sk_rx_dst;
1637 dst = dst_check(dst, 0);
1639 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1640 skb_dst_set_noref(skb, dst);
1645 /* Packet is added to VJ-style prequeue for processing in process
1646 * context, if a reader task is waiting. Apparently, this exciting
1647 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1648 * failed somewhere. Latency? Burstiness? Well, at least now we will
1649 * see, why it failed. 8)8) --ANK
1652 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1654 struct tcp_sock *tp = tcp_sk(sk);
1656 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1659 if (skb->len <= tcp_hdrlen(skb) &&
1660 skb_queue_len(&tp->ucopy.prequeue) == 0)
1664 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1665 tp->ucopy.memory += skb->truesize;
1666 if (tp->ucopy.memory > sk->sk_rcvbuf) {
1667 struct sk_buff *skb1;
1669 BUG_ON(sock_owned_by_user(sk));
1671 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1672 sk_backlog_rcv(sk, skb1);
1673 NET_INC_STATS_BH(sock_net(sk),
1674 LINUX_MIB_TCPPREQUEUEDROPPED);
1677 tp->ucopy.memory = 0;
1678 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1679 wake_up_interruptible_sync_poll(sk_sleep(sk),
1680 POLLIN | POLLRDNORM | POLLRDBAND);
1681 if (!inet_csk_ack_scheduled(sk))
1682 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1683 (3 * tcp_rto_min(sk)) / 4,
1688 EXPORT_SYMBOL(tcp_prequeue);
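/* Illustrative sketch of the admission test above as a standalone predicate:
 * segments are prequeued only when low-latency mode is off and a reader is
 * blocked in recvmsg(); a pure ACK is queued only behind data that is already
 * sitting in the prequeue. Names are invented for the sketch.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

static bool prequeue_admits_sketch(bool low_latency, bool reader_waiting,
				   size_t payload_len, size_t prequeue_len)
{
	if (low_latency || !reader_waiting)
		return false;
	/* With an empty prequeue, a segment without payload is processed
	 * directly instead of being queued.
	 */
	if (payload_len == 0 && prequeue_len == 0)
		return false;
	return true;
}
#endif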
1694 int tcp_v4_rcv(struct sk_buff *skb)
1696 const struct iphdr *iph;
1697 const struct tcphdr *th;
1700 struct net *net = dev_net(skb->dev);
1702 if (skb->pkt_type != PACKET_HOST)
1705 /* Count it even if it's bad */
1706 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1708 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1713 if (th->doff < sizeof(struct tcphdr) / 4)
1715 if (!pskb_may_pull(skb, th->doff * 4))
1718 /* An explanation is required here, I think.
1719 * Packet length and doff are validated by header prediction,
1720 * provided case of th->doff==0 is eliminated.
1721 * So, we defer the checks. */
1723 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1728 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1729 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1730 skb->len - th->doff * 4);
1731 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1732 TCP_SKB_CB(skb)->when = 0;
1733 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1734 TCP_SKB_CB(skb)->sacked = 0;
1736 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1741 if (sk->sk_state == TCP_TIME_WAIT)
1744 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1745 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1746 goto discard_and_relse;
1749 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1750 goto discard_and_relse;
1753 if (sk_filter(sk, skb))
1754 goto discard_and_relse;
1756 sk_mark_napi_id(sk, skb);
1759 bh_lock_sock_nested(sk);
1761 if (!sock_owned_by_user(sk)) {
1762 #ifdef CONFIG_NET_DMA
1763 struct tcp_sock *tp = tcp_sk(sk);
1764 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1765 tp->ucopy.dma_chan = net_dma_find_channel();
1766 if (tp->ucopy.dma_chan)
1767 ret = tcp_v4_do_rcv(sk, skb);
1771 if (!tcp_prequeue(sk, skb))
1772 ret = tcp_v4_do_rcv(sk, skb);
1774 } else if (unlikely(sk_add_backlog(sk, skb,
1775 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1777 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1778 goto discard_and_relse;
1787 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1790 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1792 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1794 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1796 tcp_v4_send_reset(NULL, skb);
1800 /* Discard frame. */
1809 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1810 inet_twsk_put(inet_twsk(sk));
1814 if (skb->len < (th->doff << 2)) {
1815 inet_twsk_put(inet_twsk(sk));
1818 if (tcp_checksum_complete(skb)) {
1819 inet_twsk_put(inet_twsk(sk));
1822 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1824 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1826 iph->saddr, th->source,
1827 iph->daddr, th->dest,
1830 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1831 inet_twsk_put(inet_twsk(sk));
1835 /* Fall through to ACK */
1838 tcp_v4_timewait_ack(sk, skb);
1842 case TCP_TW_SUCCESS:;
1847 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1848 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1849 .twsk_unique = tcp_twsk_unique,
1850 .twsk_destructor= tcp_twsk_destructor,
1853 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1855 struct dst_entry *dst = skb_dst(skb);
1858 sk->sk_rx_dst = dst;
1859 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1861 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1863 const struct inet_connection_sock_af_ops ipv4_specific = {
1864 .queue_xmit = ip_queue_xmit,
1865 .send_check = tcp_v4_send_check,
1866 .rebuild_header = inet_sk_rebuild_header,
1867 .sk_rx_dst_set = inet_sk_rx_dst_set,
1868 .conn_request = tcp_v4_conn_request,
1869 .syn_recv_sock = tcp_v4_syn_recv_sock,
1870 .net_header_len = sizeof(struct iphdr),
1871 .setsockopt = ip_setsockopt,
1872 .getsockopt = ip_getsockopt,
1873 .addr2sockaddr = inet_csk_addr2sockaddr,
1874 .sockaddr_len = sizeof(struct sockaddr_in),
1875 .bind_conflict = inet_csk_bind_conflict,
1876 #ifdef CONFIG_COMPAT
1877 .compat_setsockopt = compat_ip_setsockopt,
1878 .compat_getsockopt = compat_ip_getsockopt,
1881 EXPORT_SYMBOL(ipv4_specific);
1883 #ifdef CONFIG_TCP_MD5SIG
1884 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1885 .md5_lookup = tcp_v4_md5_lookup,
1886 .calc_md5_hash = tcp_v4_md5_hash_skb,
1887 .md5_parse = tcp_v4_parse_md5_keys,
1891 /* NOTE: A lot of things set to zero explicitly by call to
1892 * sk_alloc() so need not be done here.
1894 static int tcp_v4_init_sock(struct sock *sk)
1896 struct inet_connection_sock *icsk = inet_csk(sk);
1900 icsk->icsk_af_ops = &ipv4_specific;
1902 #ifdef CONFIG_TCP_MD5SIG
1903 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1909 void tcp_v4_destroy_sock(struct sock *sk)
1911 struct tcp_sock *tp = tcp_sk(sk);
1913 tcp_clear_xmit_timers(sk);
1915 tcp_cleanup_congestion_control(sk);
1917 /* Clean up the write buffer. */
1918 tcp_write_queue_purge(sk);
1920 /* Cleans up our, hopefully empty, out_of_order_queue. */
1921 __skb_queue_purge(&tp->out_of_order_queue);
1923 #ifdef CONFIG_TCP_MD5SIG
1924 /* Clean up the MD5 key list, if any */
1925 if (tp->md5sig_info) {
1926 tcp_clear_md5_list(sk);
1927 kfree_rcu(tp->md5sig_info, rcu);
1928 tp->md5sig_info = NULL;
1932 #ifdef CONFIG_NET_DMA
1933 /* Cleans up our sk_async_wait_queue */
1934 __skb_queue_purge(&sk->sk_async_wait_queue);
1937 /* Clean prequeue, it must be empty really */
1938 __skb_queue_purge(&tp->ucopy.prequeue);
1940 /* Clean up a referenced TCP bind bucket. */
1941 if (inet_csk(sk)->icsk_bind_hash)
1944 BUG_ON(tp->fastopen_rsk != NULL);
1946 /* If socket is aborted during connect operation */
1947 tcp_free_fastopen_req(tp);
1949 sk_sockets_allocated_dec(sk);
1950 sock_release_memcg(sk);
1952 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1954 #ifdef CONFIG_PROC_FS
1955 /* Proc filesystem TCP sock list dumping. */
1958 * Get the next listener socket following cur. If cur is NULL, get the first socket
1959 * starting from bucket given in st->bucket; when st->bucket is zero the
1960 * very first socket in the hash table is returned.
1962 static void *listening_get_next(struct seq_file *seq, void *cur)
1964 struct inet_connection_sock *icsk;
1965 struct hlist_nulls_node *node;
1966 struct sock *sk = cur;
1967 struct inet_listen_hashbucket *ilb;
1968 struct tcp_iter_state *st = seq->private;
1969 struct net *net = seq_file_net(seq);
1972 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1973 spin_lock_bh(&ilb->lock);
1974 sk = sk_nulls_head(&ilb->head);
1978 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1982 if (st->state == TCP_SEQ_STATE_OPENREQ) {
1983 struct request_sock *req = cur;
1985 icsk = inet_csk(st->syn_wait_sk);
1989 if (req->rsk_ops->family == st->family) {
1995 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
1998 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2000 sk = sk_nulls_next(st->syn_wait_sk);
2001 st->state = TCP_SEQ_STATE_LISTENING;
2002 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2004 icsk = inet_csk(sk);
2005 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2006 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2008 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2009 sk = sk_nulls_next(sk);
2012 sk_nulls_for_each_from(sk, node) {
2013 if (!net_eq(sock_net(sk), net))
2015 if (sk->sk_family == st->family) {
2019 icsk = inet_csk(sk);
2020 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2021 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2023 st->uid = sock_i_uid(sk);
2024 st->syn_wait_sk = sk;
2025 st->state = TCP_SEQ_STATE_OPENREQ;
2029 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2031 spin_unlock_bh(&ilb->lock);
2033 if (++st->bucket < INET_LHTABLE_SIZE) {
2034 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2035 spin_lock_bh(&ilb->lock);
2036 sk = sk_nulls_head(&ilb->head);
2044 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2046 struct tcp_iter_state *st = seq->private;
2051 rc = listening_get_next(seq, NULL);
2053 while (rc && *pos) {
2054 rc = listening_get_next(seq, rc);
2060 static inline bool empty_bucket(const struct tcp_iter_state *st)
2062 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
2066 * Get first established socket starting from bucket given in st->bucket.
2067 * If st->bucket is zero, the very first socket in the hash is returned.
2069 static void *established_get_first(struct seq_file *seq)
2071 struct tcp_iter_state *st = seq->private;
2072 struct net *net = seq_file_net(seq);
2076 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2078 struct hlist_nulls_node *node;
2079 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2081 /* Lockless fast path for the common case of empty buckets */
2082 if (empty_bucket(st))
2086 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2087 if (sk->sk_family != st->family ||
2088 !net_eq(sock_net(sk), net)) {
2094 spin_unlock_bh(lock);
2100 static void *established_get_next(struct seq_file *seq, void *cur)
2102 struct sock *sk = cur;
2103 struct hlist_nulls_node *node;
2104 struct tcp_iter_state *st = seq->private;
2105 struct net *net = seq_file_net(seq);
2110 sk = sk_nulls_next(sk);
2112 sk_nulls_for_each_from(sk, node) {
2113 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2117 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2119 return established_get_first(seq);
2122 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2124 struct tcp_iter_state *st = seq->private;
2128 rc = established_get_first(seq);
2131 rc = established_get_next(seq, rc);
2137 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2140 struct tcp_iter_state *st = seq->private;
2142 st->state = TCP_SEQ_STATE_LISTENING;
2143 rc = listening_get_idx(seq, &pos);
2146 st->state = TCP_SEQ_STATE_ESTABLISHED;
2147 rc = established_get_idx(seq, pos);
2153 static void *tcp_seek_last_pos(struct seq_file *seq)
2155 struct tcp_iter_state *st = seq->private;
2156 int offset = st->offset;
2157 int orig_num = st->num;
2160 switch (st->state) {
2161 case TCP_SEQ_STATE_OPENREQ:
2162 case TCP_SEQ_STATE_LISTENING:
2163 if (st->bucket >= INET_LHTABLE_SIZE)
2165 st->state = TCP_SEQ_STATE_LISTENING;
2166 rc = listening_get_next(seq, NULL);
2167 while (offset-- && rc)
2168 rc = listening_get_next(seq, rc);
2172 st->state = TCP_SEQ_STATE_ESTABLISHED;
2174 case TCP_SEQ_STATE_ESTABLISHED:
2175 if (st->bucket > tcp_hashinfo.ehash_mask)
2177 rc = established_get_first(seq);
2178 while (offset-- && rc)
2179 rc = established_get_next(seq, rc);
2187 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2189 struct tcp_iter_state *st = seq->private;
2192 if (*pos && *pos == st->last_pos) {
2193 rc = tcp_seek_last_pos(seq);
2198 st->state = TCP_SEQ_STATE_LISTENING;
2202 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2205 st->last_pos = *pos;
2209 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2211 struct tcp_iter_state *st = seq->private;
2214 if (v == SEQ_START_TOKEN) {
2215 rc = tcp_get_idx(seq, 0);
2219 switch (st->state) {
2220 case TCP_SEQ_STATE_OPENREQ:
2221 case TCP_SEQ_STATE_LISTENING:
2222 rc = listening_get_next(seq, v);
2224 st->state = TCP_SEQ_STATE_ESTABLISHED;
2227 rc = established_get_first(seq);
2230 case TCP_SEQ_STATE_ESTABLISHED:
2231 rc = established_get_next(seq, v);
2236 st->last_pos = *pos;
2240 static void tcp_seq_stop(struct seq_file *seq, void *v)
2242 struct tcp_iter_state *st = seq->private;
2244 switch (st->state) {
2245 case TCP_SEQ_STATE_OPENREQ:
2247 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2248 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2250 case TCP_SEQ_STATE_LISTENING:
2251 if (v != SEQ_START_TOKEN)
2252 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2254 case TCP_SEQ_STATE_ESTABLISHED:
2256 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2261 int tcp_seq_open(struct inode *inode, struct file *file)
2263 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2264 struct tcp_iter_state *s;
2267 err = seq_open_net(inode, file, &afinfo->seq_ops,
2268 sizeof(struct tcp_iter_state));
2272 s = ((struct seq_file *)file->private_data)->private;
2273 s->family = afinfo->family;
2277 EXPORT_SYMBOL(tcp_seq_open);
2279 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2282 struct proc_dir_entry *p;
2284 afinfo->seq_ops.start = tcp_seq_start;
2285 afinfo->seq_ops.next = tcp_seq_next;
2286 afinfo->seq_ops.stop = tcp_seq_stop;
2288 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2289 afinfo->seq_fops, afinfo);
2294 EXPORT_SYMBOL(tcp_proc_register);
2296 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2298 remove_proc_entry(afinfo->name, net->proc_net);
2300 EXPORT_SYMBOL(tcp_proc_unregister);
2302 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2303 struct seq_file *f, int i, kuid_t uid)
2305 const struct inet_request_sock *ireq = inet_rsk(req);
2306 long delta = req->expires - jiffies;
2308 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2309 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2312 ntohs(inet_sk(sk)->inet_sport),
2314 ntohs(ireq->ir_rmt_port),
2316 0, 0, /* could print option size, but that is af dependent. */
2317 1, /* timers active (only the expire timer) */
2318 jiffies_delta_to_clock_t(delta),
2320 from_kuid_munged(seq_user_ns(f), uid),
2321 0, /* non standard timer */
2322 0, /* open_requests have no inode */
2323 atomic_read(&sk->sk_refcnt),
2327 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2330 unsigned long timer_expires;
2331 const struct tcp_sock *tp = tcp_sk(sk);
2332 const struct inet_connection_sock *icsk = inet_csk(sk);
2333 const struct inet_sock *inet = inet_sk(sk);
2334 struct fastopen_queue *fastopenq = icsk->icsk_accept_queue.fastopenq;
2335 __be32 dest = inet->inet_daddr;
2336 __be32 src = inet->inet_rcv_saddr;
2337 __u16 destp = ntohs(inet->inet_dport);
2338 __u16 srcp = ntohs(inet->inet_sport);
2341 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2342 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2343 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2345 timer_expires = icsk->icsk_timeout;
2346 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2348 timer_expires = icsk->icsk_timeout;
2349 } else if (timer_pending(&sk->sk_timer)) {
2351 timer_expires = sk->sk_timer.expires;
2354 timer_expires = jiffies;
2357 if (sk->sk_state == TCP_LISTEN)
2358 rx_queue = sk->sk_ack_backlog;
2359 else
2360 /*
2361 * because we don't lock the socket, we might find a transient negative value
2362 */
2363 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2365 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2366 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2367 i, src, srcp, dest, destp, sk->sk_state,
2368 tp->write_seq - tp->snd_una,
2371 jiffies_delta_to_clock_t(timer_expires - jiffies),
2372 icsk->icsk_retransmits,
2373 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2374 icsk->icsk_probes_out,
2376 atomic_read(&sk->sk_refcnt), sk,
2377 jiffies_to_clock_t(icsk->icsk_rto),
2378 jiffies_to_clock_t(icsk->icsk_ack.ato),
2379 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2381 sk->sk_state == TCP_LISTEN ?
2382 (fastopenq ? fastopenq->max_qlen : 0) :
2383 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2386 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2387 struct seq_file *f, int i)
2391 s32 delta = tw->tw_ttd - inet_tw_time_stamp();
2393 dest = tw->tw_daddr;
2394 src = tw->tw_rcv_saddr;
2395 destp = ntohs(tw->tw_dport);
2396 srcp = ntohs(tw->tw_sport);
2398 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2399 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2400 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2401 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2402 atomic_read(&tw->tw_refcnt), tw);
2407 static int tcp4_seq_show(struct seq_file *seq, void *v)
2409 struct tcp_iter_state *st;
2410 struct sock *sk = v;
2412 seq_setwidth(seq, TMPSZ - 1);
2413 if (v == SEQ_START_TOKEN) {
2414 seq_puts(seq, " sl local_address rem_address st tx_queue "
2415 "rx_queue tr tm->when retrnsmt uid timeout "
2421 switch (st->state) {
2422 case TCP_SEQ_STATE_LISTENING:
2423 case TCP_SEQ_STATE_ESTABLISHED:
2424 if (sk->sk_state == TCP_TIME_WAIT)
2425 get_timewait4_sock(v, seq, st->num);
2427 get_tcp4_sock(v, seq, st->num);
2429 case TCP_SEQ_STATE_OPENREQ:
2430 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid);
2438 static const struct file_operations tcp_afinfo_seq_fops = {
2439 .owner = THIS_MODULE,
2440 .open = tcp_seq_open,
2442 .llseek = seq_lseek,
2443 .release = seq_release_net
2446 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2449 .seq_fops = &tcp_afinfo_seq_fops,
2451 .show = tcp4_seq_show,
2455 static int __net_init tcp4_proc_init_net(struct net *net)
2457 return tcp_proc_register(net, &tcp4_seq_afinfo);
2460 static void __net_exit tcp4_proc_exit_net(struct net *net)
2462 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2465 static struct pernet_operations tcp4_net_ops = {
2466 .init = tcp4_proc_init_net,
2467 .exit = tcp4_proc_exit_net,
2470 int __init tcp4_proc_init(void)
2472 return register_pernet_subsys(&tcp4_net_ops);
2475 void tcp4_proc_exit(void)
2477 unregister_pernet_subsys(&tcp4_net_ops);
2479 #endif /* CONFIG_PROC_FS */
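/* Illustrative sketch (userspace): reading the /proc/net/tcp table emitted by
 * tcp4_seq_show() above. Only the slot, addresses, ports and state columns
 * are parsed here.
 */
#if 0
#include <stdio.h>

static void dump_proc_net_tcp(void)
{
	char line[512];
	FILE *f = fopen("/proc/net/tcp", "r");

	if (!f)
		return;
	/* Skip the header line printed for SEQ_START_TOKEN. */
	if (!fgets(line, sizeof(line), f)) {
		fclose(f);
		return;
	}
	while (fgets(line, sizeof(line), f)) {
		unsigned int sl, local_ip, local_port, rem_ip, rem_port, state;

		if (sscanf(line, " %u: %x:%x %x:%x %x",
			   &sl, &local_ip, &local_port,
			   &rem_ip, &rem_port, &state) == 6)
			printf("slot %u local %08x:%04x state %02x\n",
			       sl, local_ip, local_port, state);
	}
	fclose(f);
}
#endif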
2481 struct proto tcp_prot = {
2483 .owner = THIS_MODULE,
2485 .connect = tcp_v4_connect,
2486 .disconnect = tcp_disconnect,
2487 .accept = inet_csk_accept,
2489 .init = tcp_v4_init_sock,
2490 .destroy = tcp_v4_destroy_sock,
2491 .shutdown = tcp_shutdown,
2492 .setsockopt = tcp_setsockopt,
2493 .getsockopt = tcp_getsockopt,
2494 .recvmsg = tcp_recvmsg,
2495 .sendmsg = tcp_sendmsg,
2496 .sendpage = tcp_sendpage,
2497 .backlog_rcv = tcp_v4_do_rcv,
2498 .release_cb = tcp_release_cb,
2499 .mtu_reduced = tcp_v4_mtu_reduced,
2501 .unhash = inet_unhash,
2502 .get_port = inet_csk_get_port,
2503 .enter_memory_pressure = tcp_enter_memory_pressure,
2504 .stream_memory_free = tcp_stream_memory_free,
2505 .sockets_allocated = &tcp_sockets_allocated,
2506 .orphan_count = &tcp_orphan_count,
2507 .memory_allocated = &tcp_memory_allocated,
2508 .memory_pressure = &tcp_memory_pressure,
2509 .sysctl_mem = sysctl_tcp_mem,
2510 .sysctl_wmem = sysctl_tcp_wmem,
2511 .sysctl_rmem = sysctl_tcp_rmem,
2512 .max_header = MAX_TCP_HEADER,
2513 .obj_size = sizeof(struct tcp_sock),
2514 .slab_flags = SLAB_DESTROY_BY_RCU,
2515 .twsk_prot = &tcp_timewait_sock_ops,
2516 .rsk_prot = &tcp_request_sock_ops,
2517 .h.hashinfo = &tcp_hashinfo,
2518 .no_autobind = true,
2519 #ifdef CONFIG_COMPAT
2520 .compat_setsockopt = compat_tcp_setsockopt,
2521 .compat_getsockopt = compat_tcp_getsockopt,
2523 #ifdef CONFIG_MEMCG_KMEM
2524 .init_cgroup = tcp_init_cgroup,
2525 .destroy_cgroup = tcp_destroy_cgroup,
2526 .proto_cgroup = tcp_proto_cgroup,
2529 EXPORT_SYMBOL(tcp_prot);
2531 static int __net_init tcp_sk_init(struct net *net)
2533 net->ipv4.sysctl_tcp_ecn = 2;
2537 static void __net_exit tcp_sk_exit(struct net *net)
2541 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2543 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2546 static struct pernet_operations __net_initdata tcp_sk_ops = {
2547 .init = tcp_sk_init,
2548 .exit = tcp_sk_exit,
2549 .exit_batch = tcp_sk_exit_batch,
2552 void __init tcp_v4_init(void)
2554 inet_hashinfo_init(&tcp_hashinfo);
2555 if (register_pernet_subsys(&tcp_sk_ops))
2556 panic("Failed to create the TCP control socket.\n");