2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
 * Implementation of the Transmission Control Protocol (TCP).
8 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
 * a single port at the same time.
53 #define pr_fmt(fmt) "TCP: " fmt
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
66 #include <net/net_namespace.h>
68 #include <net/inet_hashtables.h>
70 #include <net/transp_v6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
75 #include <net/netdma.h>
76 #include <net/secure_seq.h>
77 #include <net/tcp_memcontrol.h>
79 #include <linux/inet.h>
80 #include <linux/ipv6.h>
81 #include <linux/stddef.h>
82 #include <linux/proc_fs.h>
83 #include <linux/seq_file.h>
85 #include <linux/crypto.h>
86 #include <linux/scatterlist.h>
88 int sysctl_tcp_tw_reuse __read_mostly;
89 int sysctl_tcp_low_latency __read_mostly;
90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
93 #ifdef CONFIG_TCP_MD5SIG
94 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
95 __be32 daddr, __be32 saddr, const struct tcphdr *th);
98 struct inet_hashinfo tcp_hashinfo;
99 EXPORT_SYMBOL(tcp_hashinfo);
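/* Generate the initial sequence number for a connection from the addresses
 * and ports carried in the SYN (see secure_tcp_sequence_number() below).
 */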
101 static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
103 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
106 tcp_hdr(skb)->source);
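/* Decide whether a TIME-WAIT socket may be reused for a new outgoing
 * connection to the same address/port pair; if so, seed the new socket's
 * sequence number and timestamp state from the TIME-WAIT bucket.
 */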
109 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
111 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
112 struct tcp_sock *tp = tcp_sk(sk);
	/* With PAWS, it is safe from the viewpoint
	 * of data integrity. Even without PAWS it is safe provided sequence
	 * spaces do not overlap, i.e. at data rates <= 80 Mbit/sec.
	 *
	 * Actually, the idea is close to VJ's one, only the timestamp cache is
	 * held not per host, but per port pair, and the TW bucket is used as the
	 * state holder.
	 *
	 * If the TW bucket has already been destroyed we fall back to VJ's scheme
	 * and use the initial timestamp retrieved from the peer table.
	 */
125 if (tcptw->tw_ts_recent_stamp &&
126 (twp == NULL || (sysctl_tcp_tw_reuse &&
127 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
128 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
129 if (tp->write_seq == 0)
131 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
132 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
139 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
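/* Used on TCP_REPAIR-mode sockets: initialise the connection state as if the
 * handshake had completed, without actually sending a SYN.
 */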
141 static int tcp_repair_connect(struct sock *sk)
143 tcp_connect_init(sk);
144 tcp_finish_connect(sk, NULL);
149 /* This will initiate an outgoing connection. */
150 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
152 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
153 struct inet_sock *inet = inet_sk(sk);
154 struct tcp_sock *tp = tcp_sk(sk);
155 __be16 orig_sport, orig_dport;
156 __be32 daddr, nexthop;
160 struct ip_options_rcu *inet_opt;
162 if (addr_len < sizeof(struct sockaddr_in))
165 if (usin->sin_family != AF_INET)
166 return -EAFNOSUPPORT;
168 nexthop = daddr = usin->sin_addr.s_addr;
169 inet_opt = rcu_dereference_protected(inet->inet_opt,
170 sock_owned_by_user(sk));
171 if (inet_opt && inet_opt->opt.srr) {
174 nexthop = inet_opt->opt.faddr;
177 orig_sport = inet->inet_sport;
178 orig_dport = usin->sin_port;
179 fl4 = &inet->cork.fl.u.ip4;
180 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
181 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
183 orig_sport, orig_dport, sk, true);
186 if (err == -ENETUNREACH)
187 IP_INC_STATS_BH(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
191 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
196 if (!inet_opt || !inet_opt->opt.srr)
199 if (!inet->inet_saddr)
200 inet->inet_saddr = fl4->saddr;
201 inet->inet_rcv_saddr = inet->inet_saddr;
203 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
204 /* Reset inherited state */
205 tp->rx_opt.ts_recent = 0;
206 tp->rx_opt.ts_recent_stamp = 0;
207 if (likely(!tp->repair))
211 if (tcp_death_row.sysctl_tw_recycle &&
212 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
213 tcp_fetch_timewait_stamp(sk, &rt->dst);
215 inet->inet_dport = usin->sin_port;
216 inet->inet_daddr = daddr;
218 inet_csk(sk)->icsk_ext_hdr_len = 0;
220 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
222 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
	/* Socket identity is still unknown (sport may be zero).
	 * However we set state to SYN-SENT and, without releasing the socket
	 * lock, select a source port, enter ourselves into the hash tables and
	 * complete initialization after this.
	 */
229 tcp_set_state(sk, TCP_SYN_SENT);
230 err = inet_hash_connect(&tcp_death_row, sk);
234 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
235 inet->inet_sport, inet->inet_dport, sk);
241 /* OK, now commit destination to socket. */
242 sk->sk_gso_type = SKB_GSO_TCPV4;
243 sk_setup_caps(sk, &rt->dst);
245 if (!tp->write_seq && likely(!tp->repair))
246 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
251 inet->inet_id = tp->write_seq ^ jiffies;
253 if (likely(!tp->repair))
254 err = tcp_connect(sk);
256 err = tcp_repair_connect(sk);
266 * This unhashes the socket and releases the local port,
269 tcp_set_state(sk, TCP_CLOSE);
271 sk->sk_route_caps = 0;
272 inet->inet_dport = 0;
275 EXPORT_SYMBOL(tcp_v4_connect);
278 * This routine does path mtu discovery as defined in RFC1191.
280 static void do_pmtu_discovery(struct sock *sk, const struct iphdr *iph, u32 mtu)
282 struct dst_entry *dst;
283 struct inet_sock *inet = inet_sk(sk);
	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs
	 * sent out by Linux are always < 576 bytes so they should go through
	 * unfragmented).
	 */
289 if (sk->sk_state == TCP_LISTEN)
	/* We don't check in the dst entry if pmtu discovery is forbidden
	 * on this route. We just assume that no packet-too-big packets
	 * are sent back when pmtu discovery is not active.
	 * There is a small race when the user changes this flag in the
	 * route, but I think that's acceptable.
	 */
298 if ((dst = __sk_dst_check(sk, 0)) == NULL)
301 dst->ops->update_pmtu(dst, mtu);
	/* Something is about to go wrong... Remember the soft error
	 * in case this connection is not able to recover.
	 */
306 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
307 sk->sk_err_soft = EMSGSIZE;
311 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
312 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
313 tcp_sync_mss(sk, mtu);
315 /* Resend the TCP packet because it's
316 * clear that the old packet has been
317 * dropped. This is the new "fast" path mtu
320 tcp_simple_retransmit(sk);
321 } /* else let the usual retransmit timer handle it */
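/* Handle an ICMP redirect by asking the cached route to update itself. */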
324 static void do_redirect(struct sk_buff *skb, struct sock *sk)
326 struct dst_entry *dst = __sk_dst_check(sk, 0);
328 if (dst && dst->ops->redirect)
329 dst->ops->redirect(dst, skb);
333 * This routine is called by the ICMP module when it gets some
334 * sort of error condition. If err < 0 then the socket should
335 * be closed and the error returned to the user. If err > 0
 * it's just the icmp type << 8 | icmp code. After adjustment,
 * the header points to the first 8 bytes of the tcp header. We need
338 * to find the appropriate port.
340 * The locking strategy used here is very "optimistic". When
341 * someone else accesses the socket the ICMP is just dropped
342 * and for some paths there is no check at all.
343 * A more general error queue to queue errors for later handling
344 * is probably better.
348 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
350 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
351 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
352 struct inet_connection_sock *icsk;
354 struct inet_sock *inet;
355 const int type = icmp_hdr(icmp_skb)->type;
356 const int code = icmp_hdr(icmp_skb)->code;
362 struct net *net = dev_net(icmp_skb->dev);
364 if (icmp_skb->len < (iph->ihl << 2) + 8) {
365 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
369 sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
370 iph->saddr, th->source, inet_iif(icmp_skb));
372 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
375 if (sk->sk_state == TCP_TIME_WAIT) {
376 inet_twsk_put(inet_twsk(sk));
381 /* If too many ICMPs get dropped on busy
382 * servers this needs to be solved differently.
384 if (sock_owned_by_user(sk))
385 NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
387 if (sk->sk_state == TCP_CLOSE)
390 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
391 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
397 seq = ntohl(th->seq);
398 if (sk->sk_state != TCP_LISTEN &&
399 !between(seq, tp->snd_una, tp->snd_nxt)) {
400 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
406 do_redirect(icmp_skb, sk);
408 case ICMP_SOURCE_QUENCH:
409 /* Just silently ignore these. */
411 case ICMP_PARAMETERPROB:
414 case ICMP_DEST_UNREACH:
415 if (code > NR_ICMP_UNREACH)
418 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
419 if (!sock_owned_by_user(sk))
420 do_pmtu_discovery(sk, iph, info);
424 err = icmp_err_convert[code].errno;
425 /* check if icmp_skb allows revert of backoff
426 * (see draft-zimmermann-tcp-lcd) */
427 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
429 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
433 if (sock_owned_by_user(sk))
436 icsk->icsk_backoff--;
437 inet_csk(sk)->icsk_rto = (tp->srtt ? __tcp_set_rto(tp) :
438 TCP_TIMEOUT_INIT) << icsk->icsk_backoff;
441 skb = tcp_write_queue_head(sk);
444 remaining = icsk->icsk_rto - min(icsk->icsk_rto,
445 tcp_time_stamp - TCP_SKB_CB(skb)->when);
448 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
449 remaining, TCP_RTO_MAX);
451 /* RTO revert clocked out retransmission.
452 * Will retransmit now */
453 tcp_retransmit_timer(sk);
457 case ICMP_TIME_EXCEEDED:
464 switch (sk->sk_state) {
465 struct request_sock *req, **prev;
467 if (sock_owned_by_user(sk))
470 req = inet_csk_search_req(sk, &prev, th->dest,
471 iph->daddr, iph->saddr);
475 /* ICMPs are not backlogged, hence we cannot get
476 an established socket here.
480 if (seq != tcp_rsk(req)->snt_isn) {
481 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
486 * Still in SYN_RECV, just remove it silently.
487 * There is no good way to pass the error to the newly
488 * created socket, and POSIX does not want network
489 * errors returned from accept().
491 inet_csk_reqsk_queue_drop(sk, req, prev);
	case TCP_SYN_RECV: /* Cannot happen. It can, for example, if SYNs crossed. */
498 if (!sock_owned_by_user(sk)) {
501 sk->sk_error_report(sk);
505 sk->sk_err_soft = err;
		/* If we've already connected we will keep trying
		 * until we time out, or the user gives up.
		 *
		 * rfc1122 4.2.3.9 allows us to consider as hard errors
		 * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
		 * but it is obsoleted by pmtu discovery).
		 *
		 * Note that in the modern internet, where routing is unreliable
		 * and broken firewalls sit in every dark corner sending random
		 * errors ordered by their masters, even these two messages finally
		 * lose their original sense (even Linux sends invalid PORT_UNREACHs).
		 *
		 * Now we are in compliance with RFCs.
		 */
527 if (!sock_owned_by_user(sk) && inet->recverr) {
529 sk->sk_error_report(sk);
530 } else { /* Only an error on timeout */
531 sk->sk_err_soft = err;
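/* Fill in the TCP checksum of an outgoing segment: leave a partial checksum
 * for hardware offload when available, otherwise compute it in software over
 * the pseudo-header and payload.
 */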
539 static void __tcp_v4_send_check(struct sk_buff *skb,
540 __be32 saddr, __be32 daddr)
542 struct tcphdr *th = tcp_hdr(skb);
544 if (skb->ip_summed == CHECKSUM_PARTIAL) {
545 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
546 skb->csum_start = skb_transport_header(skb) - skb->head;
547 skb->csum_offset = offsetof(struct tcphdr, check);
549 th->check = tcp_v4_check(skb->len, saddr, daddr,
556 /* This routine computes an IPv4 TCP checksum. */
557 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 const struct inet_sock *inet = inet_sk(sk);
561 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 EXPORT_SYMBOL(tcp_v4_send_check);
565 int tcp_v4_gso_send_check(struct sk_buff *skb)
567 const struct iphdr *iph;
570 if (!pskb_may_pull(skb, sizeof(*th)))
577 skb->ip_summed = CHECKSUM_PARTIAL;
578 __tcp_v4_send_check(skb, iph->saddr, iph->daddr);
/*
 * This routine will send an RST to the other tcp.
 *
 * Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
 * for the reset?
 * Answer: if a packet caused the RST, it is not for a socket
 * existing in our system; if it is matched to a socket,
 * it is just a duplicate segment or a bug in the other side's TCP.
 * So we build the reply based only on the parameters
 * that arrived with the segment.
 * Exception: precedence violation. We do not implement it in any case.
 */
595 static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
597 const struct tcphdr *th = tcp_hdr(skb);
600 #ifdef CONFIG_TCP_MD5SIG
601 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
604 struct ip_reply_arg arg;
605 #ifdef CONFIG_TCP_MD5SIG
606 struct tcp_md5sig_key *key;
607 const __u8 *hash_location = NULL;
608 unsigned char newhash[16];
610 struct sock *sk1 = NULL;
614 /* Never send a reset in response to a reset. */
618 if (skb_rtable(skb)->rt_type != RTN_LOCAL)
621 /* Swap the send and the receive. */
622 memset(&rep, 0, sizeof(rep));
623 rep.th.dest = th->source;
624 rep.th.source = th->dest;
625 rep.th.doff = sizeof(struct tcphdr) / 4;
629 rep.th.seq = th->ack_seq;
632 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
633 skb->len - (th->doff << 2));
636 memset(&arg, 0, sizeof(arg));
637 arg.iov[0].iov_base = (unsigned char *)&rep;
638 arg.iov[0].iov_len = sizeof(rep.th);
640 #ifdef CONFIG_TCP_MD5SIG
641 hash_location = tcp_parse_md5sig_option(th);
642 if (!sk && hash_location) {
		/*
		 * The active side is lost. Try to find the listening socket through
		 * the source port, and then find the md5 key through the listening
		 * socket. We do not loosen security here:
		 * the incoming packet is checked against the md5 hash of the key we
		 * find; no RST is generated if the md5 hash doesn't match.
		 */
650 sk1 = __inet_lookup_listener(dev_net(skb_dst(skb)->dev),
651 &tcp_hashinfo, ip_hdr(skb)->daddr,
652 ntohs(th->source), inet_iif(skb));
		/* don't send an RST if we can't find a key */
657 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
658 &ip_hdr(skb)->saddr, AF_INET);
662 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, NULL, skb);
663 if (genhash || memcmp(hash_location, newhash, 16) != 0)
666 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
672 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
674 (TCPOPT_MD5SIG << 8) |
676 /* Update length and the length the header thinks exists */
677 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
678 rep.th.doff = arg.iov[0].iov_len / 4;
680 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
681 key, ip_hdr(skb)->saddr,
682 ip_hdr(skb)->daddr, &rep.th);
685 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
686 ip_hdr(skb)->saddr, /* XXX */
687 arg.iov[0].iov_len, IPPROTO_TCP, 0);
688 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
689 arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
	/* When the socket is gone, all binding information is lost.
	 * Routing might fail in this case. Use iif for oif to
	 * make sure we can deliver the reply.
	 */
694 arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb);
696 net = dev_net(skb_dst(skb)->dev);
697 arg.tos = ip_hdr(skb)->tos;
698 ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
699 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
701 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
702 TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
704 #ifdef CONFIG_TCP_MD5SIG
/* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
 * outside of socket context, is certainly ugly. What can I do?
 */
717 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
718 u32 win, u32 ts, int oif,
719 struct tcp_md5sig_key *key,
720 int reply_flags, u8 tos)
722 const struct tcphdr *th = tcp_hdr(skb);
725 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
726 #ifdef CONFIG_TCP_MD5SIG
727 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
731 struct ip_reply_arg arg;
732 struct net *net = dev_net(skb_dst(skb)->dev);
734 memset(&rep.th, 0, sizeof(struct tcphdr));
735 memset(&arg, 0, sizeof(arg));
737 arg.iov[0].iov_base = (unsigned char *)&rep;
738 arg.iov[0].iov_len = sizeof(rep.th);
740 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
741 (TCPOPT_TIMESTAMP << 8) |
743 rep.opt[1] = htonl(tcp_time_stamp);
744 rep.opt[2] = htonl(ts);
745 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
748 /* Swap the send and the receive. */
749 rep.th.dest = th->source;
750 rep.th.source = th->dest;
751 rep.th.doff = arg.iov[0].iov_len / 4;
752 rep.th.seq = htonl(seq);
753 rep.th.ack_seq = htonl(ack);
755 rep.th.window = htons(win);
757 #ifdef CONFIG_TCP_MD5SIG
759 int offset = (ts) ? 3 : 0;
761 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
763 (TCPOPT_MD5SIG << 8) |
765 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
766 rep.th.doff = arg.iov[0].iov_len/4;
768 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
769 key, ip_hdr(skb)->saddr,
770 ip_hdr(skb)->daddr, &rep.th);
773 arg.flags = reply_flags;
774 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
775 ip_hdr(skb)->saddr, /* XXX */
776 arg.iov[0].iov_len, IPPROTO_TCP, 0);
777 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
779 arg.bound_dev_if = oif;
781 ip_send_unicast_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr,
782 ip_hdr(skb)->daddr, &arg, arg.iov[0].iov_len);
784 TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
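/* Send an ACK on behalf of a TIME-WAIT socket, echoing the window and
 * timestamp state remembered in the timewait bucket.
 */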
787 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
789 struct inet_timewait_sock *tw = inet_twsk(sk);
790 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
792 tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
793 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
796 tcp_twsk_md5_key(tcptw),
797 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
804 static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
805 struct request_sock *req)
807 tcp_v4_send_ack(skb, tcp_rsk(req)->snt_isn + 1,
808 tcp_rsk(req)->rcv_isn + 1, req->rcv_wnd,
811 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
813 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
/*
 * Send a SYN-ACK after having received a SYN.
 * This still operates on a request_sock only, not on a big
 * socket.
 */
822 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
823 struct request_sock *req,
824 struct request_values *rvp,
828 const struct inet_request_sock *ireq = inet_rsk(req);
831 struct sk_buff * skb;
833 /* First, grab a route. */
834 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req, nocache)) == NULL)
837 skb = tcp_make_synack(sk, dst, req, rvp);
840 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
842 skb_set_queue_mapping(skb, queue_mapping);
843 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
846 err = net_xmit_eval(err);
852 static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
853 struct request_values *rvp)
855 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
856 return tcp_v4_send_synack(sk, NULL, req, rvp, 0, false);
860 * IPv4 request_sock destructor.
862 static void tcp_v4_reqsk_destructor(struct request_sock *req)
864 kfree(inet_rsk(req)->opt);
868 * Return true if a syncookie should be sent
870 bool tcp_syn_flood_action(struct sock *sk,
871 const struct sk_buff *skb,
874 const char *msg = "Dropping request";
875 bool want_cookie = false;
876 struct listen_sock *lopt;
880 #ifdef CONFIG_SYN_COOKIES
881 if (sysctl_tcp_syncookies) {
882 msg = "Sending cookies";
884 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDOCOOKIES);
887 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPREQQFULLDROP);
889 lopt = inet_csk(sk)->icsk_accept_queue.listen_opt;
890 if (!lopt->synflood_warned) {
891 lopt->synflood_warned = 1;
892 pr_info("%s: Possible SYN flooding on port %d. %s. Check SNMP counters.\n",
893 proto, ntohs(tcp_hdr(skb)->dest), msg);
897 EXPORT_SYMBOL(tcp_syn_flood_action);
900 * Save and compile IPv4 options into the request_sock if needed.
902 static struct ip_options_rcu *tcp_v4_save_options(struct sock *sk,
905 const struct ip_options *opt = &(IPCB(skb)->opt);
906 struct ip_options_rcu *dopt = NULL;
908 if (opt && opt->optlen) {
909 int opt_size = sizeof(*dopt) + opt->optlen;
911 dopt = kmalloc(opt_size, GFP_ATOMIC);
913 if (ip_options_echo(&dopt->opt, skb)) {
922 #ifdef CONFIG_TCP_MD5SIG
924 * RFC2385 MD5 checksumming requires a mapping of
925 * IP address->MD5 Key.
926 * We need to maintain these in the sk structure.
929 /* Find the Key structure for an address. */
930 struct tcp_md5sig_key *tcp_md5_do_lookup(struct sock *sk,
931 const union tcp_md5_addr *addr,
934 struct tcp_sock *tp = tcp_sk(sk);
935 struct tcp_md5sig_key *key;
936 struct hlist_node *pos;
937 unsigned int size = sizeof(struct in_addr);
938 struct tcp_md5sig_info *md5sig;
940 /* caller either holds rcu_read_lock() or socket lock */
941 md5sig = rcu_dereference_check(tp->md5sig_info,
942 sock_owned_by_user(sk) ||
943 lockdep_is_held(&sk->sk_lock.slock));
946 #if IS_ENABLED(CONFIG_IPV6)
947 if (family == AF_INET6)
948 size = sizeof(struct in6_addr);
950 hlist_for_each_entry_rcu(key, pos, &md5sig->head, node) {
951 if (key->family != family)
953 if (!memcmp(&key->addr, addr, size))
958 EXPORT_SYMBOL(tcp_md5_do_lookup);
960 struct tcp_md5sig_key *tcp_v4_md5_lookup(struct sock *sk,
961 struct sock *addr_sk)
963 union tcp_md5_addr *addr;
965 addr = (union tcp_md5_addr *)&inet_sk(addr_sk)->inet_daddr;
966 return tcp_md5_do_lookup(sk, addr, AF_INET);
968 EXPORT_SYMBOL(tcp_v4_md5_lookup);
970 static struct tcp_md5sig_key *tcp_v4_reqsk_md5_lookup(struct sock *sk,
971 struct request_sock *req)
973 union tcp_md5_addr *addr;
975 addr = (union tcp_md5_addr *)&inet_rsk(req)->rmt_addr;
976 return tcp_md5_do_lookup(sk, addr, AF_INET);
979 /* This can be called on a newly created socket, from other files */
980 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
981 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
983 /* Add Key to the list */
984 struct tcp_md5sig_key *key;
985 struct tcp_sock *tp = tcp_sk(sk);
986 struct tcp_md5sig_info *md5sig;
988 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
990 /* Pre-existing entry - just update that one. */
991 memcpy(key->key, newkey, newkeylen);
992 key->keylen = newkeylen;
996 md5sig = rcu_dereference_protected(tp->md5sig_info,
997 sock_owned_by_user(sk));
999 md5sig = kmalloc(sizeof(*md5sig), gfp);
1003 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
1004 INIT_HLIST_HEAD(&md5sig->head);
1005 rcu_assign_pointer(tp->md5sig_info, md5sig);
1008 key = sock_kmalloc(sk, sizeof(*key), gfp);
1011 if (hlist_empty(&md5sig->head) && !tcp_alloc_md5sig_pool(sk)) {
1012 sock_kfree_s(sk, key, sizeof(*key));
1016 memcpy(key->key, newkey, newkeylen);
1017 key->keylen = newkeylen;
1018 key->family = family;
1019 memcpy(&key->addr, addr,
1020 (family == AF_INET6) ? sizeof(struct in6_addr) :
1021 sizeof(struct in_addr));
1022 hlist_add_head_rcu(&key->node, &md5sig->head);
1025 EXPORT_SYMBOL(tcp_md5_do_add);
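/* Remove the MD5 key associated with the given peer address, if any, and
 * release the shared MD5 pool when the last key goes away.
 */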
1027 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
1029 struct tcp_sock *tp = tcp_sk(sk);
1030 struct tcp_md5sig_key *key;
1031 struct tcp_md5sig_info *md5sig;
1033 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&addr, AF_INET);
1036 hlist_del_rcu(&key->node);
1037 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1038 kfree_rcu(key, rcu);
1039 md5sig = rcu_dereference_protected(tp->md5sig_info,
1040 sock_owned_by_user(sk));
1041 if (hlist_empty(&md5sig->head))
1042 tcp_free_md5sig_pool();
1045 EXPORT_SYMBOL(tcp_md5_do_del);
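/* Userspace configures these keys with the TCP_MD5SIG socket option, which is
 * parsed below by tcp_v4_parse_md5_keys(). A minimal sketch of a caller
 * (the peer address 192.0.2.1 and the key "secret" are illustrative only):
 *
 *	struct tcp_md5sig md5sig;
 *	struct sockaddr_in *sin = (struct sockaddr_in *)&md5sig.tcpm_addr;
 *
 *	memset(&md5sig, 0, sizeof(md5sig));
 *	sin->sin_family = AF_INET;
 *	inet_pton(AF_INET, "192.0.2.1", &sin->sin_addr);
 *	md5sig.tcpm_keylen = strlen("secret");
 *	memcpy(md5sig.tcpm_key, "secret", md5sig.tcpm_keylen);
 *	setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5sig, sizeof(md5sig));
 */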
1047 void tcp_clear_md5_list(struct sock *sk)
1049 struct tcp_sock *tp = tcp_sk(sk);
1050 struct tcp_md5sig_key *key;
1051 struct hlist_node *pos, *n;
1052 struct tcp_md5sig_info *md5sig;
1054 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
1056 if (!hlist_empty(&md5sig->head))
1057 tcp_free_md5sig_pool();
1058 hlist_for_each_entry_safe(key, pos, n, &md5sig->head, node) {
1059 hlist_del_rcu(&key->node);
1060 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1061 kfree_rcu(key, rcu);
1065 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1068 struct tcp_md5sig cmd;
1069 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1071 if (optlen < sizeof(cmd))
1074 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1077 if (sin->sin_family != AF_INET)
1080 if (!cmd.tcpm_key || !cmd.tcpm_keylen)
1081 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1084 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1087 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1088 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1092 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1093 __be32 daddr, __be32 saddr, int nbytes)
1095 struct tcp4_pseudohdr *bp;
1096 struct scatterlist sg;
1098 bp = &hp->md5_blk.ip4;
	/*
	 * 1. the TCP pseudo-header (in the order: source IP address,
	 * destination IP address, zero-padded protocol number, and
	 * segment length)
	 */
1108 bp->protocol = IPPROTO_TCP;
1109 bp->len = cpu_to_be16(nbytes);
1111 sg_init_one(&sg, bp, sizeof(*bp));
1112 return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1115 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1116 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1118 struct tcp_md5sig_pool *hp;
1119 struct hash_desc *desc;
1121 hp = tcp_get_md5sig_pool();
1123 goto clear_hash_noput;
1124 desc = &hp->md5_desc;
1126 if (crypto_hash_init(desc))
1128 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1130 if (tcp_md5_hash_header(hp, th))
1132 if (tcp_md5_hash_key(hp, key))
1134 if (crypto_hash_final(desc, md5_hash))
1137 tcp_put_md5sig_pool();
1141 tcp_put_md5sig_pool();
1143 memset(md5_hash, 0, 16);
1147 int tcp_v4_md5_hash_skb(char *md5_hash, struct tcp_md5sig_key *key,
1148 const struct sock *sk, const struct request_sock *req,
1149 const struct sk_buff *skb)
1151 struct tcp_md5sig_pool *hp;
1152 struct hash_desc *desc;
1153 const struct tcphdr *th = tcp_hdr(skb);
1154 __be32 saddr, daddr;
1157 saddr = inet_sk(sk)->inet_saddr;
1158 daddr = inet_sk(sk)->inet_daddr;
1160 saddr = inet_rsk(req)->loc_addr;
1161 daddr = inet_rsk(req)->rmt_addr;
1163 const struct iphdr *iph = ip_hdr(skb);
1168 hp = tcp_get_md5sig_pool();
1170 goto clear_hash_noput;
1171 desc = &hp->md5_desc;
1173 if (crypto_hash_init(desc))
1176 if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1178 if (tcp_md5_hash_header(hp, th))
1180 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1182 if (tcp_md5_hash_key(hp, key))
1184 if (crypto_hash_final(desc, md5_hash))
1187 tcp_put_md5sig_pool();
1191 tcp_put_md5sig_pool();
1193 memset(md5_hash, 0, 16);
1196 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1198 static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
1201 * This gets called for each TCP segment that arrives
1202 * so we want to be efficient.
1203 * We have 3 drop cases:
1204 * o No MD5 hash and one expected.
1205 * o MD5 hash and we're not expecting one.
 * o MD5 hash and it's wrong.
1208 const __u8 *hash_location = NULL;
1209 struct tcp_md5sig_key *hash_expected;
1210 const struct iphdr *iph = ip_hdr(skb);
1211 const struct tcphdr *th = tcp_hdr(skb);
1213 unsigned char newhash[16];
1215 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1217 hash_location = tcp_parse_md5sig_option(th);
1219 /* We've parsed the options - do we have a hash? */
1220 if (!hash_expected && !hash_location)
1223 if (hash_expected && !hash_location) {
1224 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1228 if (!hash_expected && hash_location) {
1229 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1233 /* Okay, so this is hash_expected and hash_location -
1234 * so we need to calculate the checksum.
1236 genhash = tcp_v4_md5_hash_skb(newhash,
1240 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1241 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1242 &iph->saddr, ntohs(th->source),
1243 &iph->daddr, ntohs(th->dest),
1244 genhash ? " tcp_v4_calc_md5_hash failed"
1253 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1255 .obj_size = sizeof(struct tcp_request_sock),
1256 .rtx_syn_ack = tcp_v4_rtx_synack,
1257 .send_ack = tcp_v4_reqsk_send_ack,
1258 .destructor = tcp_v4_reqsk_destructor,
1259 .send_reset = tcp_v4_send_reset,
1260 .syn_ack_timeout = tcp_syn_ack_timeout,
1263 #ifdef CONFIG_TCP_MD5SIG
1264 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1265 .md5_lookup = tcp_v4_reqsk_md5_lookup,
1266 .calc_md5_hash = tcp_v4_md5_hash_skb,
1270 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1272 struct tcp_extend_values tmp_ext;
1273 struct tcp_options_received tmp_opt;
1274 const u8 *hash_location;
1275 struct request_sock *req;
1276 struct inet_request_sock *ireq;
1277 struct tcp_sock *tp = tcp_sk(sk);
1278 struct dst_entry *dst = NULL;
1279 __be32 saddr = ip_hdr(skb)->saddr;
1280 __be32 daddr = ip_hdr(skb)->daddr;
1281 __u32 isn = TCP_SKB_CB(skb)->when;
1282 bool want_cookie = false;
	/* Never answer SYNs sent to broadcast or multicast */
1285 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
	/* TW buckets are converted to open requests without
	 * limitation; they conserve resources and the peer is
	 * evidently a real one.
	 */
1292 if (inet_csk_reqsk_queue_is_full(sk) && !isn) {
1293 want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
	/* The accept backlog is full. If we have already queued enough
	 * warm entries in the syn queue, drop this request. It is better than
	 * clogging the syn queue with openreqs with exponentially increasing
	 * timeout.
	 */
1303 if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
1306 req = inet_reqsk_alloc(&tcp_request_sock_ops);
1310 #ifdef CONFIG_TCP_MD5SIG
1311 tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
1314 tcp_clear_options(&tmp_opt);
1315 tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
1316 tmp_opt.user_mss = tp->rx_opt.user_mss;
1317 tcp_parse_options(skb, &tmp_opt, &hash_location, 0);
1319 if (tmp_opt.cookie_plus > 0 &&
1320 tmp_opt.saw_tstamp &&
1321 !tp->rx_opt.cookie_out_never &&
1322 (sysctl_tcp_cookie_size > 0 ||
1323 (tp->cookie_values != NULL &&
1324 tp->cookie_values->cookie_desired > 0))) {
1326 u32 *mess = &tmp_ext.cookie_bakery[COOKIE_DIGEST_WORDS];
1327 int l = tmp_opt.cookie_plus - TCPOLEN_COOKIE_BASE;
1329 if (tcp_cookie_generator(&tmp_ext.cookie_bakery[0]) != 0)
1330 goto drop_and_release;
1332 /* Secret recipe starts with IP addresses */
1333 *mess++ ^= (__force u32)daddr;
1334 *mess++ ^= (__force u32)saddr;
1336 /* plus variable length Initiator Cookie */
1339 *c++ ^= *hash_location++;
1341 want_cookie = false; /* not our kind of cookie */
1342 tmp_ext.cookie_out_never = 0; /* false */
1343 tmp_ext.cookie_plus = tmp_opt.cookie_plus;
1344 } else if (!tp->rx_opt.cookie_in_always) {
1345 /* redundant indications, but ensure initialization. */
1346 tmp_ext.cookie_out_never = 1; /* true */
1347 tmp_ext.cookie_plus = 0;
1349 goto drop_and_release;
1351 tmp_ext.cookie_in_always = tp->rx_opt.cookie_in_always;
1353 if (want_cookie && !tmp_opt.saw_tstamp)
1354 tcp_clear_options(&tmp_opt);
1356 tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
1357 tcp_openreq_init(req, &tmp_opt, skb);
1359 ireq = inet_rsk(req);
1360 ireq->loc_addr = daddr;
1361 ireq->rmt_addr = saddr;
1362 ireq->no_srccheck = inet_sk(sk)->transparent;
1363 ireq->opt = tcp_v4_save_options(sk, skb);
1365 if (security_inet_conn_request(sk, skb, req))
1368 if (!want_cookie || tmp_opt.tstamp_ok)
1369 TCP_ECN_create_request(req, skb);
1372 isn = cookie_v4_init_sequence(sk, skb, &req->mss);
1373 req->cookie_ts = tmp_opt.tstamp_ok;
		/* VJ's idea. We save the last timestamp seen
		 * from the destination in the peer table when entering
		 * TIME-WAIT state, and check against it before
		 * accepting a new connection request.
		 *
		 * If "isn" is not zero, this request hit an alive
		 * timewait bucket, so all the necessary checks
		 * are made in the function processing the timewait state.
		 */
1386 if (tmp_opt.saw_tstamp &&
1387 tcp_death_row.sysctl_tw_recycle &&
1388 (dst = inet_csk_route_req(sk, &fl4, req, want_cookie)) != NULL &&
1389 fl4.daddr == saddr) {
1390 if (!tcp_peer_is_proven(req, dst, true)) {
1391 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
1392 goto drop_and_release;
1395 /* Kill the following clause, if you dislike this way. */
1396 else if (!sysctl_tcp_syncookies &&
1397 (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
1398 (sysctl_max_syn_backlog >> 2)) &&
1399 !tcp_peer_is_proven(req, dst, false)) {
			/* Without syncookies, the last quarter of the
			 * backlog is filled only with destinations
			 * proven to be alive.
			 * It means that we continue to communicate
			 * with destinations already remembered
			 * at the moment of the synflood.
			 */
1407 LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
1408 &saddr, ntohs(tcp_hdr(skb)->source));
1409 goto drop_and_release;
1412 isn = tcp_v4_init_sequence(skb);
1414 tcp_rsk(req)->snt_isn = isn;
1415 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1417 if (tcp_v4_send_synack(sk, dst, req,
1418 (struct request_values *)&tmp_ext,
1419 skb_get_queue_mapping(skb),
1424 inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
1434 EXPORT_SYMBOL(tcp_v4_conn_request);
1438 * The three way handshake has completed - we got a valid synack -
1439 * now create the new socket.
1441 struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
1442 struct request_sock *req,
1443 struct dst_entry *dst)
1445 struct inet_request_sock *ireq;
1446 struct inet_sock *newinet;
1447 struct tcp_sock *newtp;
1449 #ifdef CONFIG_TCP_MD5SIG
1450 struct tcp_md5sig_key *key;
1452 struct ip_options_rcu *inet_opt;
1454 if (sk_acceptq_is_full(sk))
1457 newsk = tcp_create_openreq_child(sk, req, skb);
1461 newsk->sk_gso_type = SKB_GSO_TCPV4;
1463 newtp = tcp_sk(newsk);
1464 newinet = inet_sk(newsk);
1465 ireq = inet_rsk(req);
1466 newinet->inet_daddr = ireq->rmt_addr;
1467 newinet->inet_rcv_saddr = ireq->loc_addr;
1468 newinet->inet_saddr = ireq->loc_addr;
1469 inet_opt = ireq->opt;
1470 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1472 newinet->mc_index = inet_iif(skb);
1473 newinet->mc_ttl = ip_hdr(skb)->ttl;
1474 newinet->rcv_tos = ip_hdr(skb)->tos;
1475 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1477 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1478 newinet->inet_id = newtp->write_seq ^ jiffies;
1481 dst = inet_csk_route_child_sock(sk, newsk, req);
1485 /* syncookie case : see end of cookie_v4_check() */
1487 sk_setup_caps(newsk, dst);
1489 tcp_mtup_init(newsk);
1490 tcp_sync_mss(newsk, dst_mtu(dst));
1491 newtp->advmss = dst_metric_advmss(dst);
1492 if (tcp_sk(sk)->rx_opt.user_mss &&
1493 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1494 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1496 tcp_initialize_rcv_mss(newsk);
1497 if (tcp_rsk(req)->snt_synack)
1498 tcp_valid_rtt_meas(newsk,
1499 tcp_time_stamp - tcp_rsk(req)->snt_synack);
1500 newtp->total_retrans = req->retrans;
1502 #ifdef CONFIG_TCP_MD5SIG
1503 /* Copy over the MD5 key from the original socket */
1504 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
		/*
		 * We're using one, so create a matching key
		 * on the newsk structure. If we fail to get
		 * memory, then we end up not copying the key
		 * across.
		 */
1513 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1514 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1515 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1519 if (__inet_inherit_port(sk, newsk) < 0)
1521 __inet_hash_nolisten(newsk, NULL);
1526 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1530 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1533 tcp_clear_xmit_timers(newsk);
1534 tcp_cleanup_congestion_control(newsk);
1535 bh_unlock_sock(newsk);
1539 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
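/* For a segment arriving on a listening socket, look for a matching pending
 * connection request or an already established child socket; fall back to
 * syncookie validation when enabled.
 */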
1541 static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1543 struct tcphdr *th = tcp_hdr(skb);
1544 const struct iphdr *iph = ip_hdr(skb);
1546 struct request_sock **prev;
1547 /* Find possible connection requests. */
1548 struct request_sock *req = inet_csk_search_req(sk, &prev, th->source,
1549 iph->saddr, iph->daddr);
1551 return tcp_check_req(sk, skb, req, prev);
1553 nsk = inet_lookup_established(sock_net(sk), &tcp_hashinfo, iph->saddr,
1554 th->source, iph->daddr, th->dest, inet_iif(skb));
1557 if (nsk->sk_state != TCP_TIME_WAIT) {
1561 inet_twsk_put(inet_twsk(nsk));
1565 #ifdef CONFIG_SYN_COOKIES
1567 sk = cookie_v4_check(sk, skb, &(IPCB(skb)->opt));
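/* Verify the TCP checksum of an incoming segment, or set things up so that
 * it can be checked later (small packets are verified immediately).
 */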
1572 static __sum16 tcp_v4_checksum_init(struct sk_buff *skb)
1574 const struct iphdr *iph = ip_hdr(skb);
1576 if (skb->ip_summed == CHECKSUM_COMPLETE) {
1577 if (!tcp_v4_check(skb->len, iph->saddr,
1578 iph->daddr, skb->csum)) {
1579 skb->ip_summed = CHECKSUM_UNNECESSARY;
1584 skb->csum = csum_tcpudp_nofold(iph->saddr, iph->daddr,
1585 skb->len, IPPROTO_TCP, 0);
1587 if (skb->len <= 76) {
1588 return __skb_checksum_complete(skb);
/* The socket must have its spinlock held when we get
 * here.
 *
 * We have a potential double-lock case here, so even when
 * doing backlog processing we use the BH locking scheme.
 * This is because we cannot sleep with the original spinlock
 * held.
 */
1602 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1605 #ifdef CONFIG_TCP_MD5SIG
	/*
	 * We really want to reject the packet as early as possible
	 * if:
	 *  o We're expecting an MD5'd packet and there is no MD5 tcp option
	 *  o There is an MD5 option and we're not expecting one
	 */
1612 if (tcp_v4_inbound_md5_hash(sk, skb))
1616 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1617 sock_rps_save_rxhash(sk, skb);
1618 if (tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len)) {
1625 if (skb->len < tcp_hdrlen(skb) || tcp_checksum_complete(skb))
1628 if (sk->sk_state == TCP_LISTEN) {
1629 struct sock *nsk = tcp_v4_hnd_req(sk, skb);
1634 sock_rps_save_rxhash(nsk, skb);
1635 if (tcp_child_process(sk, nsk, skb)) {
1642 sock_rps_save_rxhash(sk, skb);
1644 if (tcp_rcv_state_process(sk, skb, tcp_hdr(skb), skb->len)) {
1651 tcp_v4_send_reset(rsk, skb);
1654 /* Be careful here. If this function gets more complicated and
1655 * gcc suffers from register pressure on the x86, sk (in %ebx)
1656 * might be destroyed here. This current version compiles correctly,
1657 * but you have been warned.
1662 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1665 EXPORT_SYMBOL(tcp_v4_do_rcv);
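/* Early demux: look up an established socket for this segment before the
 * routing decision, so that a cached per-socket dst can be reused.
 */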
1667 void tcp_v4_early_demux(struct sk_buff *skb)
1669 struct net *net = dev_net(skb->dev);
1670 const struct iphdr *iph;
1671 const struct tcphdr *th;
1672 struct net_device *dev;
1675 if (skb->pkt_type != PACKET_HOST)
1678 if (!pskb_may_pull(skb, ip_hdrlen(skb) + sizeof(struct tcphdr)))
1682 th = (struct tcphdr *) ((char *)iph + ip_hdrlen(skb));
1684 if (th->doff < sizeof(struct tcphdr) / 4)
1687 if (!pskb_may_pull(skb, ip_hdrlen(skb) + th->doff * 4))
1691 sk = __inet_lookup_established(net, &tcp_hashinfo,
1692 iph->saddr, th->source,
1693 iph->daddr, ntohs(th->dest),
1697 skb->destructor = sock_edemux;
1698 if (sk->sk_state != TCP_TIME_WAIT) {
1699 struct dst_entry *dst = sk->sk_rx_dst;
1701 dst = dst_check(dst, 0);
1703 struct rtable *rt = (struct rtable *) dst;
1705 if (rt->rt_iif == dev->ifindex)
1706 skb_dst_set_noref(skb, dst);
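/* Main receive routine for TCP segments handed up from the IP layer. */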
1716 int tcp_v4_rcv(struct sk_buff *skb)
1718 const struct iphdr *iph;
1719 const struct tcphdr *th;
1722 struct net *net = dev_net(skb->dev);
1724 if (skb->pkt_type != PACKET_HOST)
1727 /* Count it even if it's bad */
1728 TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1730 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1735 if (th->doff < sizeof(struct tcphdr) / 4)
1737 if (!pskb_may_pull(skb, th->doff * 4))
	/* An explanation is required here, I think.
	 * Packet length and doff are validated by header prediction,
	 * provided the case of th->doff == 0 is eliminated.
	 * So, we defer the checks. */
1744 if (!skb_csum_unnecessary(skb) && tcp_v4_checksum_init(skb))
1749 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1750 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1751 skb->len - th->doff * 4);
1752 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1753 TCP_SKB_CB(skb)->when = 0;
1754 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1755 TCP_SKB_CB(skb)->sacked = 0;
1757 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1762 if (sk->sk_state == TCP_TIME_WAIT)
1765 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1766 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1767 goto discard_and_relse;
1770 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1771 goto discard_and_relse;
1774 if (sk_filter(sk, skb))
1775 goto discard_and_relse;
1779 bh_lock_sock_nested(sk);
1781 if (!sock_owned_by_user(sk)) {
1782 #ifdef CONFIG_NET_DMA
1783 struct tcp_sock *tp = tcp_sk(sk);
1784 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1785 tp->ucopy.dma_chan = net_dma_find_channel();
1786 if (tp->ucopy.dma_chan)
1787 ret = tcp_v4_do_rcv(sk, skb);
1791 if (!tcp_prequeue(sk, skb))
1792 ret = tcp_v4_do_rcv(sk, skb);
1794 } else if (unlikely(sk_add_backlog(sk, skb,
1795 sk->sk_rcvbuf + sk->sk_sndbuf))) {
1797 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1798 goto discard_and_relse;
1807 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1810 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1812 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1814 tcp_v4_send_reset(NULL, skb);
1818 /* Discard frame. */
1827 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1828 inet_twsk_put(inet_twsk(sk));
1832 if (skb->len < (th->doff << 2) || tcp_checksum_complete(skb)) {
1833 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1834 inet_twsk_put(inet_twsk(sk));
1837 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1839 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1841 iph->daddr, th->dest,
1844 inet_twsk_deschedule(inet_twsk(sk), &tcp_death_row);
1845 inet_twsk_put(inet_twsk(sk));
1849 /* Fall through to ACK */
1852 tcp_v4_timewait_ack(sk, skb);
1856 case TCP_TW_SUCCESS:;
1861 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1862 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1863 .twsk_unique = tcp_twsk_unique,
1864 .twsk_destructor= tcp_twsk_destructor,
1867 const struct inet_connection_sock_af_ops ipv4_specific = {
1868 .queue_xmit = ip_queue_xmit,
1869 .send_check = tcp_v4_send_check,
1870 .rebuild_header = inet_sk_rebuild_header,
1871 .conn_request = tcp_v4_conn_request,
1872 .syn_recv_sock = tcp_v4_syn_recv_sock,
1873 .net_header_len = sizeof(struct iphdr),
1874 .setsockopt = ip_setsockopt,
1875 .getsockopt = ip_getsockopt,
1876 .addr2sockaddr = inet_csk_addr2sockaddr,
1877 .sockaddr_len = sizeof(struct sockaddr_in),
1878 .bind_conflict = inet_csk_bind_conflict,
1879 #ifdef CONFIG_COMPAT
1880 .compat_setsockopt = compat_ip_setsockopt,
1881 .compat_getsockopt = compat_ip_getsockopt,
1884 EXPORT_SYMBOL(ipv4_specific);
1886 #ifdef CONFIG_TCP_MD5SIG
1887 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1888 .md5_lookup = tcp_v4_md5_lookup,
1889 .calc_md5_hash = tcp_v4_md5_hash_skb,
1890 .md5_parse = tcp_v4_parse_md5_keys,
/* NOTE: A lot of things are set to zero explicitly by the call to
 * sk_alloc(), so they need not be done here.
 */
1897 static int tcp_v4_init_sock(struct sock *sk)
1899 struct inet_connection_sock *icsk = inet_csk(sk);
1903 icsk->icsk_af_ops = &ipv4_specific;
1905 #ifdef CONFIG_TCP_MD5SIG
1906 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1912 void tcp_v4_destroy_sock(struct sock *sk)
1914 struct tcp_sock *tp = tcp_sk(sk);
1916 tcp_clear_xmit_timers(sk);
1918 tcp_cleanup_congestion_control(sk);
	/* Clean up the write buffer. */
1921 tcp_write_queue_purge(sk);
1923 /* Cleans up our, hopefully empty, out_of_order_queue. */
1924 __skb_queue_purge(&tp->out_of_order_queue);
1926 #ifdef CONFIG_TCP_MD5SIG
1927 /* Clean up the MD5 key list, if any */
1928 if (tp->md5sig_info) {
1929 tcp_clear_md5_list(sk);
1930 kfree_rcu(tp->md5sig_info, rcu);
1931 tp->md5sig_info = NULL;
1935 #ifdef CONFIG_NET_DMA
1936 /* Cleans up our sk_async_wait_queue */
1937 __skb_queue_purge(&sk->sk_async_wait_queue);
1940 /* Clean prequeue, it must be empty really */
1941 __skb_queue_purge(&tp->ucopy.prequeue);
1943 /* Clean up a referenced TCP bind bucket. */
1944 if (inet_csk(sk)->icsk_bind_hash)
1948 * If sendmsg cached page exists, toss it.
1950 if (sk->sk_sndmsg_page) {
1951 __free_page(sk->sk_sndmsg_page);
1952 sk->sk_sndmsg_page = NULL;
1955 /* TCP Cookie Transactions */
1956 if (tp->cookie_values != NULL) {
1957 kref_put(&tp->cookie_values->kref,
1958 tcp_cookie_values_release);
1959 tp->cookie_values = NULL;
1962 sk_sockets_allocated_dec(sk);
1963 sock_release_memcg(sk);
1965 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1967 #ifdef CONFIG_PROC_FS
1968 /* Proc filesystem TCP sock list dumping. */
1970 static inline struct inet_timewait_sock *tw_head(struct hlist_nulls_head *head)
1972 return hlist_nulls_empty(head) ? NULL :
1973 list_entry(head->first, struct inet_timewait_sock, tw_node);
1976 static inline struct inet_timewait_sock *tw_next(struct inet_timewait_sock *tw)
1978 return !is_a_nulls(tw->tw_node.next) ?
1979 hlist_nulls_entry(tw->tw_node.next, typeof(*tw), tw_node) : NULL;
/*
 * Get the next listener socket following cur. If cur is NULL, get the
 * first socket starting from the bucket given in st->bucket; when st->bucket
 * is zero the very first socket in the hash table is returned.
 */
1987 static void *listening_get_next(struct seq_file *seq, void *cur)
1989 struct inet_connection_sock *icsk;
1990 struct hlist_nulls_node *node;
1991 struct sock *sk = cur;
1992 struct inet_listen_hashbucket *ilb;
1993 struct tcp_iter_state *st = seq->private;
1994 struct net *net = seq_file_net(seq);
1997 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1998 spin_lock_bh(&ilb->lock);
1999 sk = sk_nulls_head(&ilb->head);
2003 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2007 if (st->state == TCP_SEQ_STATE_OPENREQ) {
2008 struct request_sock *req = cur;
2010 icsk = inet_csk(st->syn_wait_sk);
2014 if (req->rsk_ops->family == st->family) {
2020 if (++st->sbucket >= icsk->icsk_accept_queue.listen_opt->nr_table_entries)
2023 req = icsk->icsk_accept_queue.listen_opt->syn_table[st->sbucket];
2025 sk = sk_nulls_next(st->syn_wait_sk);
2026 st->state = TCP_SEQ_STATE_LISTENING;
2027 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2029 icsk = inet_csk(sk);
2030 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2031 if (reqsk_queue_len(&icsk->icsk_accept_queue))
2033 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2034 sk = sk_nulls_next(sk);
2037 sk_nulls_for_each_from(sk, node) {
2038 if (!net_eq(sock_net(sk), net))
2040 if (sk->sk_family == st->family) {
2044 icsk = inet_csk(sk);
2045 read_lock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2046 if (reqsk_queue_len(&icsk->icsk_accept_queue)) {
2048 st->uid = sock_i_uid(sk);
2049 st->syn_wait_sk = sk;
2050 st->state = TCP_SEQ_STATE_OPENREQ;
2054 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2056 spin_unlock_bh(&ilb->lock);
2058 if (++st->bucket < INET_LHTABLE_SIZE) {
2059 ilb = &tcp_hashinfo.listening_hash[st->bucket];
2060 spin_lock_bh(&ilb->lock);
2061 sk = sk_nulls_head(&ilb->head);
2069 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
2071 struct tcp_iter_state *st = seq->private;
2076 rc = listening_get_next(seq, NULL);
2078 while (rc && *pos) {
2079 rc = listening_get_next(seq, rc);
2085 static inline bool empty_bucket(struct tcp_iter_state *st)
2087 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain) &&
2088 hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].twchain);
2092 * Get first established socket starting from bucket given in st->bucket.
2093 * If st->bucket is zero, the very first socket in the hash is returned.
2095 static void *established_get_first(struct seq_file *seq)
2097 struct tcp_iter_state *st = seq->private;
2098 struct net *net = seq_file_net(seq);
2102 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
2104 struct hlist_nulls_node *node;
2105 struct inet_timewait_sock *tw;
2106 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
2108 /* Lockless fast path for the common case of empty buckets */
2109 if (empty_bucket(st))
2113 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
2114 if (sk->sk_family != st->family ||
2115 !net_eq(sock_net(sk), net)) {
2121 st->state = TCP_SEQ_STATE_TIME_WAIT;
2122 inet_twsk_for_each(tw, node,
2123 &tcp_hashinfo.ehash[st->bucket].twchain) {
2124 if (tw->tw_family != st->family ||
2125 !net_eq(twsk_net(tw), net)) {
2131 spin_unlock_bh(lock);
2132 st->state = TCP_SEQ_STATE_ESTABLISHED;
2138 static void *established_get_next(struct seq_file *seq, void *cur)
2140 struct sock *sk = cur;
2141 struct inet_timewait_sock *tw;
2142 struct hlist_nulls_node *node;
2143 struct tcp_iter_state *st = seq->private;
2144 struct net *net = seq_file_net(seq);
2149 if (st->state == TCP_SEQ_STATE_TIME_WAIT) {
2153 while (tw && (tw->tw_family != st->family || !net_eq(twsk_net(tw), net))) {
2160 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2161 st->state = TCP_SEQ_STATE_ESTABLISHED;
		/* Look for the next non-empty bucket */
2165 while (++st->bucket <= tcp_hashinfo.ehash_mask &&
2168 if (st->bucket > tcp_hashinfo.ehash_mask)
2171 spin_lock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2172 sk = sk_nulls_head(&tcp_hashinfo.ehash[st->bucket].chain);
2174 sk = sk_nulls_next(sk);
2176 sk_nulls_for_each_from(sk, node) {
2177 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
2181 st->state = TCP_SEQ_STATE_TIME_WAIT;
2182 tw = tw_head(&tcp_hashinfo.ehash[st->bucket].twchain);
2190 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2192 struct tcp_iter_state *st = seq->private;
2196 rc = established_get_first(seq);
2199 rc = established_get_next(seq, rc);
2205 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2208 struct tcp_iter_state *st = seq->private;
2210 st->state = TCP_SEQ_STATE_LISTENING;
2211 rc = listening_get_idx(seq, &pos);
2214 st->state = TCP_SEQ_STATE_ESTABLISHED;
2215 rc = established_get_idx(seq, pos);
2221 static void *tcp_seek_last_pos(struct seq_file *seq)
2223 struct tcp_iter_state *st = seq->private;
2224 int offset = st->offset;
2225 int orig_num = st->num;
2228 switch (st->state) {
2229 case TCP_SEQ_STATE_OPENREQ:
2230 case TCP_SEQ_STATE_LISTENING:
2231 if (st->bucket >= INET_LHTABLE_SIZE)
2233 st->state = TCP_SEQ_STATE_LISTENING;
2234 rc = listening_get_next(seq, NULL);
2235 while (offset-- && rc)
2236 rc = listening_get_next(seq, rc);
2241 case TCP_SEQ_STATE_ESTABLISHED:
2242 case TCP_SEQ_STATE_TIME_WAIT:
2243 st->state = TCP_SEQ_STATE_ESTABLISHED;
2244 if (st->bucket > tcp_hashinfo.ehash_mask)
2246 rc = established_get_first(seq);
2247 while (offset-- && rc)
2248 rc = established_get_next(seq, rc);
2256 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2258 struct tcp_iter_state *st = seq->private;
2261 if (*pos && *pos == st->last_pos) {
2262 rc = tcp_seek_last_pos(seq);
2267 st->state = TCP_SEQ_STATE_LISTENING;
2271 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2274 st->last_pos = *pos;
2278 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2280 struct tcp_iter_state *st = seq->private;
2283 if (v == SEQ_START_TOKEN) {
2284 rc = tcp_get_idx(seq, 0);
2288 switch (st->state) {
2289 case TCP_SEQ_STATE_OPENREQ:
2290 case TCP_SEQ_STATE_LISTENING:
2291 rc = listening_get_next(seq, v);
2293 st->state = TCP_SEQ_STATE_ESTABLISHED;
2296 rc = established_get_first(seq);
2299 case TCP_SEQ_STATE_ESTABLISHED:
2300 case TCP_SEQ_STATE_TIME_WAIT:
2301 rc = established_get_next(seq, v);
2306 st->last_pos = *pos;
2310 static void tcp_seq_stop(struct seq_file *seq, void *v)
2312 struct tcp_iter_state *st = seq->private;
2314 switch (st->state) {
2315 case TCP_SEQ_STATE_OPENREQ:
2317 struct inet_connection_sock *icsk = inet_csk(st->syn_wait_sk);
2318 read_unlock_bh(&icsk->icsk_accept_queue.syn_wait_lock);
2320 case TCP_SEQ_STATE_LISTENING:
2321 if (v != SEQ_START_TOKEN)
2322 spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2324 case TCP_SEQ_STATE_TIME_WAIT:
2325 case TCP_SEQ_STATE_ESTABLISHED:
2327 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2332 int tcp_seq_open(struct inode *inode, struct file *file)
2334 struct tcp_seq_afinfo *afinfo = PDE(inode)->data;
2335 struct tcp_iter_state *s;
2338 err = seq_open_net(inode, file, &afinfo->seq_ops,
2339 sizeof(struct tcp_iter_state));
2343 s = ((struct seq_file *)file->private_data)->private;
2344 s->family = afinfo->family;
2348 EXPORT_SYMBOL(tcp_seq_open);
2350 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2353 struct proc_dir_entry *p;
2355 afinfo->seq_ops.start = tcp_seq_start;
2356 afinfo->seq_ops.next = tcp_seq_next;
2357 afinfo->seq_ops.stop = tcp_seq_stop;
2359 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2360 afinfo->seq_fops, afinfo);
2365 EXPORT_SYMBOL(tcp_proc_register);
2367 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2369 proc_net_remove(net, afinfo->name);
2371 EXPORT_SYMBOL(tcp_proc_unregister);
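/* The helpers below format one /proc/net/tcp line for, respectively, an open
 * request (SYN_RECV), a full socket, and a TIME-WAIT socket.
 */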
2373 static void get_openreq4(const struct sock *sk, const struct request_sock *req,
2374 struct seq_file *f, int i, int uid, int *len)
2376 const struct inet_request_sock *ireq = inet_rsk(req);
2377 int ttd = req->expires - jiffies;
2379 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2380 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %u %d %pK%n",
2383 ntohs(inet_sk(sk)->inet_sport),
2385 ntohs(ireq->rmt_port),
2387 0, 0, /* could print option size, but that is af dependent. */
2388 1, /* timers active (only the expire timer) */
2389 jiffies_to_clock_t(ttd),
2392 0, /* non standard timer */
2393 0, /* open_requests have no inode */
2394 atomic_read(&sk->sk_refcnt),
2399 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i, int *len)
2402 unsigned long timer_expires;
2403 const struct tcp_sock *tp = tcp_sk(sk);
2404 const struct inet_connection_sock *icsk = inet_csk(sk);
2405 const struct inet_sock *inet = inet_sk(sk);
2406 __be32 dest = inet->inet_daddr;
2407 __be32 src = inet->inet_rcv_saddr;
2408 __u16 destp = ntohs(inet->inet_dport);
2409 __u16 srcp = ntohs(inet->inet_sport);
2412 if (icsk->icsk_pending == ICSK_TIME_RETRANS) {
2414 timer_expires = icsk->icsk_timeout;
2415 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2417 timer_expires = icsk->icsk_timeout;
2418 } else if (timer_pending(&sk->sk_timer)) {
2420 timer_expires = sk->sk_timer.expires;
2423 timer_expires = jiffies;
2426 if (sk->sk_state == TCP_LISTEN)
2427 rx_queue = sk->sk_ack_backlog;
 * because we don't lock the socket, we might find a transient negative value
2432 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2434 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2435 "%08X %5d %8d %lu %d %pK %lu %lu %u %u %d%n",
2436 i, src, srcp, dest, destp, sk->sk_state,
2437 tp->write_seq - tp->snd_una,
2440 jiffies_to_clock_t(timer_expires - jiffies),
2441 icsk->icsk_retransmits,
2443 icsk->icsk_probes_out,
2445 atomic_read(&sk->sk_refcnt), sk,
2446 jiffies_to_clock_t(icsk->icsk_rto),
2447 jiffies_to_clock_t(icsk->icsk_ack.ato),
2448 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2450 tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh,
2454 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2455 struct seq_file *f, int i, int *len)
2459 int ttd = tw->tw_ttd - jiffies;
2464 dest = tw->tw_daddr;
2465 src = tw->tw_rcv_saddr;
2466 destp = ntohs(tw->tw_dport);
2467 srcp = ntohs(tw->tw_sport);
2469 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2470 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK%n",
2471 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2472 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
2473 atomic_read(&tw->tw_refcnt), tw, len);
2478 static int tcp4_seq_show(struct seq_file *seq, void *v)
2480 struct tcp_iter_state *st;
2483 if (v == SEQ_START_TOKEN) {
2484 seq_printf(seq, "%-*s\n", TMPSZ - 1,
2485 " sl local_address rem_address st tx_queue "
2486 "rx_queue tr tm->when retrnsmt uid timeout "
2492 switch (st->state) {
2493 case TCP_SEQ_STATE_LISTENING:
2494 case TCP_SEQ_STATE_ESTABLISHED:
2495 get_tcp4_sock(v, seq, st->num, &len);
2497 case TCP_SEQ_STATE_OPENREQ:
2498 get_openreq4(st->syn_wait_sk, v, seq, st->num, st->uid, &len);
2500 case TCP_SEQ_STATE_TIME_WAIT:
2501 get_timewait4_sock(v, seq, st->num, &len);
2504 seq_printf(seq, "%*s\n", TMPSZ - 1 - len, "");
2509 static const struct file_operations tcp_afinfo_seq_fops = {
2510 .owner = THIS_MODULE,
2511 .open = tcp_seq_open,
2513 .llseek = seq_lseek,
2514 .release = seq_release_net
2517 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2520 .seq_fops = &tcp_afinfo_seq_fops,
2522 .show = tcp4_seq_show,
2526 static int __net_init tcp4_proc_init_net(struct net *net)
2528 return tcp_proc_register(net, &tcp4_seq_afinfo);
2531 static void __net_exit tcp4_proc_exit_net(struct net *net)
2533 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2536 static struct pernet_operations tcp4_net_ops = {
2537 .init = tcp4_proc_init_net,
2538 .exit = tcp4_proc_exit_net,
2541 int __init tcp4_proc_init(void)
2543 return register_pernet_subsys(&tcp4_net_ops);
2546 void tcp4_proc_exit(void)
2548 unregister_pernet_subsys(&tcp4_net_ops);
2550 #endif /* CONFIG_PROC_FS */
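/* GRO receive/complete hooks: validate the checksum when possible before
 * handing the segment to the generic TCP GRO code.
 */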
2552 struct sk_buff **tcp4_gro_receive(struct sk_buff **head, struct sk_buff *skb)
2554 const struct iphdr *iph = skb_gro_network_header(skb);
2556 switch (skb->ip_summed) {
2557 case CHECKSUM_COMPLETE:
2558 if (!tcp_v4_check(skb_gro_len(skb), iph->saddr, iph->daddr,
2560 skb->ip_summed = CHECKSUM_UNNECESSARY;
2566 NAPI_GRO_CB(skb)->flush = 1;
2570 return tcp_gro_receive(head, skb);
2573 int tcp4_gro_complete(struct sk_buff *skb)
2575 const struct iphdr *iph = ip_hdr(skb);
2576 struct tcphdr *th = tcp_hdr(skb);
2578 th->check = ~tcp_v4_check(skb->len - skb_transport_offset(skb),
2579 iph->saddr, iph->daddr, 0);
2580 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
2582 return tcp_gro_complete(skb);
2585 struct proto tcp_prot = {
2587 .owner = THIS_MODULE,
2589 .connect = tcp_v4_connect,
2590 .disconnect = tcp_disconnect,
2591 .accept = inet_csk_accept,
2593 .init = tcp_v4_init_sock,
2594 .destroy = tcp_v4_destroy_sock,
2595 .shutdown = tcp_shutdown,
2596 .setsockopt = tcp_setsockopt,
2597 .getsockopt = tcp_getsockopt,
2598 .recvmsg = tcp_recvmsg,
2599 .sendmsg = tcp_sendmsg,
2600 .sendpage = tcp_sendpage,
2601 .backlog_rcv = tcp_v4_do_rcv,
2602 .release_cb = tcp_release_cb,
2604 .unhash = inet_unhash,
2605 .get_port = inet_csk_get_port,
2606 .enter_memory_pressure = tcp_enter_memory_pressure,
2607 .sockets_allocated = &tcp_sockets_allocated,
2608 .orphan_count = &tcp_orphan_count,
2609 .memory_allocated = &tcp_memory_allocated,
2610 .memory_pressure = &tcp_memory_pressure,
2611 .sysctl_wmem = sysctl_tcp_wmem,
2612 .sysctl_rmem = sysctl_tcp_rmem,
2613 .max_header = MAX_TCP_HEADER,
2614 .obj_size = sizeof(struct tcp_sock),
2615 .slab_flags = SLAB_DESTROY_BY_RCU,
2616 .twsk_prot = &tcp_timewait_sock_ops,
2617 .rsk_prot = &tcp_request_sock_ops,
2618 .h.hashinfo = &tcp_hashinfo,
2619 .no_autobind = true,
2620 #ifdef CONFIG_COMPAT
2621 .compat_setsockopt = compat_tcp_setsockopt,
2622 .compat_getsockopt = compat_tcp_getsockopt,
2624 #ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
2625 .init_cgroup = tcp_init_cgroup,
2626 .destroy_cgroup = tcp_destroy_cgroup,
2627 .proto_cgroup = tcp_proto_cgroup,
2630 EXPORT_SYMBOL(tcp_prot);
2632 static int __net_init tcp_sk_init(struct net *net)
2634 return inet_ctl_sock_create(&net->ipv4.tcp_sock,
2635 PF_INET, SOCK_RAW, IPPROTO_TCP, net);
2638 static void __net_exit tcp_sk_exit(struct net *net)
2640 inet_ctl_sock_destroy(net->ipv4.tcp_sock);
2643 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2645 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2648 static struct pernet_operations __net_initdata tcp_sk_ops = {
2649 .init = tcp_sk_init,
2650 .exit = tcp_sk_exit,
2651 .exit_batch = tcp_sk_exit_batch,
2654 void __init tcp_v4_init(void)
2656 inet_hashinfo_init(&tcp_hashinfo);
2657 if (register_pernet_subsys(&tcp_sk_ops))
2658 panic("Failed to create the TCP control socket.\n");