2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * IPv4 specific functions
13 * linux/ipv4/tcp_input.c
14 * linux/ipv4/tcp_output.c
16 * See tcp.c for author information
18 * This program is free software; you can redistribute it and/or
19 * modify it under the terms of the GNU General Public License
20 * as published by the Free Software Foundation; either version
21 * 2 of the License, or (at your option) any later version.
26 * David S. Miller : New socket lookup architecture.
27 * This code is dedicated to John Dyson.
28 * David S. Miller : Change semantics of established hash,
29 * half is devoted to TIME_WAIT sockets
30 * and the rest go in the other half.
31 * Andi Kleen : Add support for syncookies and fixed
32 * some bugs: ip options weren't passed to
33 * the TCP layer, missed a check for an
35 * Andi Kleen : Implemented fast path mtu discovery.
36 * Fixed many serious bugs in the
37 * request_sock handling and moved
38 * most of it into the af independent code.
39 * Added tail drop and some other bugfixes.
40 * Added new listen semantics.
41 * Mike McLagan : Routing by source
42 * Juan Jose Ciarlante: ip_dynaddr bits
43 * Andi Kleen: various fixes.
44 * Vitaly E. Lavrov : Transparent proxy revived after year
46 * Andi Kleen : Fix new listen.
47 * Andi Kleen : Fix accept error reporting.
48 * YOSHIFUJI Hideaki @USAGI and: Support IPV6_V6ONLY socket option, which
49 * Alexey Kuznetsov allows both IPv4 and IPv6 sockets to bind
50 * a single port at the same time.
53 #define pr_fmt(fmt) "TCP: " fmt
55 #include <linux/bottom_half.h>
56 #include <linux/types.h>
57 #include <linux/fcntl.h>
58 #include <linux/module.h>
59 #include <linux/random.h>
60 #include <linux/cache.h>
61 #include <linux/jhash.h>
62 #include <linux/init.h>
63 #include <linux/times.h>
64 #include <linux/slab.h>
66 #include <net/net_namespace.h>
68 #include <net/inet_hashtables.h>
70 #include <net/transp_v6.h>
72 #include <net/inet_common.h>
73 #include <net/timewait_sock.h>
75 #include <net/secure_seq.h>
76 #include <net/busy_poll.h>
78 #include <linux/inet.h>
79 #include <linux/ipv6.h>
80 #include <linux/stddef.h>
81 #include <linux/proc_fs.h>
82 #include <linux/seq_file.h>
84 #include <crypto/hash.h>
85 #include <linux/scatterlist.h>
87 int sysctl_tcp_tw_reuse __read_mostly;
88 int sysctl_tcp_low_latency __read_mostly;
90 #ifdef CONFIG_TCP_MD5SIG
91 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
92 __be32 daddr, __be32 saddr, const struct tcphdr *th);
95 struct inet_hashinfo tcp_hashinfo;
96 EXPORT_SYMBOL(tcp_hashinfo);
98 static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
100 return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103 tcp_hdr(skb)->source);
106 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
108 const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
109 struct tcp_sock *tp = tcp_sk(sk);
111 /* With PAWS, it is safe from the viewpoint
112 of data integrity. Even without PAWS it is safe provided sequence
113 spaces do not overlap i.e. at data rates <= 80Mbit/sec.
115 Actually, the idea is close to VJ's: the timestamp cache is
116 held not per host, but per port pair, and the TW bucket is used as state
119 If the TW bucket has already been destroyed we fall back to VJ's scheme
120 and use the initial timestamp retrieved from the peer table.
122 if (tcptw->tw_ts_recent_stamp &&
123 (!twp || (sysctl_tcp_tw_reuse &&
124 get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
125 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
126 if (tp->write_seq == 0)
128 tp->rx_opt.ts_recent = tcptw->tw_ts_recent;
129 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
136 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
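/* The sysctl_tcp_tw_reuse knob consulted above is exposed to user space as
 * /proc/sys/net/ipv4/tcp_tw_reuse.  A minimal user-space sketch of enabling
 * it (assumes root privileges; error handling trimmed to the essentials):
 *
 *   #include <stdio.h>
 *
 *   static int enable_tw_reuse(void)
 *   {
 *           // Allow reuse of TIME-WAIT sockets for new outgoing connections.
 *           FILE *f = fopen("/proc/sys/net/ipv4/tcp_tw_reuse", "w");
 *
 *           if (!f)
 *                   return -1;
 *           fputs("1", f);
 *           fclose(f);
 *           return 0;
 *   }
 */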
138 /* This will initiate an outgoing connection. */
139 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
141 struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
142 struct inet_sock *inet = inet_sk(sk);
143 struct tcp_sock *tp = tcp_sk(sk);
144 __be16 orig_sport, orig_dport;
145 __be32 daddr, nexthop;
149 struct ip_options_rcu *inet_opt;
151 if (addr_len < sizeof(struct sockaddr_in))
154 if (usin->sin_family != AF_INET)
155 return -EAFNOSUPPORT;
157 nexthop = daddr = usin->sin_addr.s_addr;
158 inet_opt = rcu_dereference_protected(inet->inet_opt,
159 lockdep_sock_is_held(sk));
160 if (inet_opt && inet_opt->opt.srr) {
163 nexthop = inet_opt->opt.faddr;
166 orig_sport = inet->inet_sport;
167 orig_dport = usin->sin_port;
168 fl4 = &inet->cork.fl.u.ip4;
169 rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
170 RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
172 orig_sport, orig_dport, sk);
175 if (err == -ENETUNREACH)
176 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
180 if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
185 if (!inet_opt || !inet_opt->opt.srr)
188 if (!inet->inet_saddr)
189 inet->inet_saddr = fl4->saddr;
190 sk_rcv_saddr_set(sk, inet->inet_saddr);
192 if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
193 /* Reset inherited state */
194 tp->rx_opt.ts_recent = 0;
195 tp->rx_opt.ts_recent_stamp = 0;
196 if (likely(!tp->repair))
200 if (tcp_death_row.sysctl_tw_recycle &&
201 !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
202 tcp_fetch_timewait_stamp(sk, &rt->dst);
204 inet->inet_dport = usin->sin_port;
205 sk_daddr_set(sk, daddr);
207 inet_csk(sk)->icsk_ext_hdr_len = 0;
209 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
211 tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
213 /* Socket identity is still unknown (sport may be zero).
214 * However, we set the state to SYN-SENT and, without releasing the socket
215 * lock, select a source port, enter ourselves into the hash tables and
216 * complete initialization after this.
218 tcp_set_state(sk, TCP_SYN_SENT);
219 err = inet_hash_connect(&tcp_death_row, sk);
225 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
226 inet->inet_sport, inet->inet_dport, sk);
232 /* OK, now commit destination to socket. */
233 sk->sk_gso_type = SKB_GSO_TCPV4;
234 sk_setup_caps(sk, &rt->dst);
236 if (!tp->write_seq && likely(!tp->repair))
237 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
242 inet->inet_id = tp->write_seq ^ jiffies;
244 err = tcp_connect(sk);
254 * This unhashes the socket and releases the local port,
257 tcp_set_state(sk, TCP_CLOSE);
259 sk->sk_route_caps = 0;
260 inet->inet_dport = 0;
263 EXPORT_SYMBOL(tcp_v4_connect);
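/* tcp_v4_connect() above is the AF_INET back end of the connect(2) system
 * call on a SOCK_STREAM socket.  A minimal user-space sketch of the path that
 * lands here (the address 192.0.2.10 and port 80 are placeholders, not
 * anything this file defines):
 *
 *   #include <arpa/inet.h>
 *   #include <stdio.h>
 *   #include <sys/socket.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           struct sockaddr_in dst = { .sin_family = AF_INET,
 *                                      .sin_port = htons(80) };
 *           int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *           inet_pton(AF_INET, "192.0.2.10", &dst.sin_addr);
 *           // Sends the SYN built for us by the code above and waits for
 *           // the handshake to complete (or fail).
 *           if (connect(fd, (struct sockaddr *)&dst, sizeof(dst)) == 0)
 *                   puts("connected");
 *           close(fd);
 *           return 0;
 *   }
 */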
266 * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
267 * It can be called through tcp_release_cb() if socket was owned by user
268 * at the time tcp_v4_err() was called to handle ICMP message.
270 void tcp_v4_mtu_reduced(struct sock *sk)
272 struct dst_entry *dst;
273 struct inet_sock *inet = inet_sk(sk);
274 u32 mtu = tcp_sk(sk)->mtu_info;
276 dst = inet_csk_update_pmtu(sk, mtu);
280 /* Something is about to go wrong... Remember the soft error
281 * in case this connection is not able to recover.
283 if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
284 sk->sk_err_soft = EMSGSIZE;
288 if (inet->pmtudisc != IP_PMTUDISC_DONT &&
289 ip_sk_accept_pmtu(sk) &&
290 inet_csk(sk)->icsk_pmtu_cookie > mtu) {
291 tcp_sync_mss(sk, mtu);
293 /* Resend the TCP packet because it's
294 * clear that the old packet has been
295 * dropped. This is the new "fast" path mtu
298 tcp_simple_retransmit(sk);
299 } /* else let the usual retransmit timer handle it */
301 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
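/* The path-MTU state updated above can be observed and influenced from user
 * space through the IP_MTU_DISCOVER and IP_MTU socket options.  A sketch,
 * assuming fd is an already-connected TCP socket (error handling trimmed):
 *
 *   #include <netinet/in.h>
 *   #include <stdio.h>
 *   #include <sys/socket.h>
 *
 *   static void show_path_mtu(int fd)
 *   {
 *           int pmtu = IP_PMTUDISC_DO;      // always set DF, rely on PMTUD
 *           int mtu = 0;
 *           socklen_t len = sizeof(mtu);
 *
 *           setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &pmtu, sizeof(pmtu));
 *           // IP_MTU is only meaningful on a connected socket.
 *           if (getsockopt(fd, IPPROTO_IP, IP_MTU, &mtu, &len) == 0)
 *                   printf("current path MTU: %d\n", mtu);
 *   }
 */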
303 static void do_redirect(struct sk_buff *skb, struct sock *sk)
305 struct dst_entry *dst = __sk_dst_check(sk, 0);
308 dst->ops->redirect(dst, sk, skb);
312 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
313 void tcp_req_err(struct sock *sk, u32 seq, bool abort)
315 struct request_sock *req = inet_reqsk(sk);
316 struct net *net = sock_net(sk);
318 /* ICMPs are not backlogged, hence we cannot get
319 * an established socket here.
321 if (seq != tcp_rsk(req)->snt_isn) {
322 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
325 * Still in SYN_RECV, just remove it silently.
326 * There is no good way to pass the error to the newly
327 * created socket, and POSIX does not want network
328 * errors returned from accept().
330 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
331 tcp_listendrop(req->rsk_listener);
335 EXPORT_SYMBOL(tcp_req_err);
338 * This routine is called by the ICMP module when it gets some
339 * sort of error condition. If err < 0 then the socket should
340 * be closed and the error returned to the user. If err > 0
341 * it's just the icmp type << 8 | icmp code. After adjustment
342 * header points to the first 8 bytes of the tcp header. We need
343 * to find the appropriate port.
345 * The locking strategy used here is very "optimistic". When
346 * someone else accesses the socket the ICMP is just dropped
347 * and for some paths there is no check at all.
348 * A more general error queue to queue errors for later handling
349 * is probably better.
353 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
355 const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
356 struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
357 struct inet_connection_sock *icsk;
359 struct inet_sock *inet;
360 const int type = icmp_hdr(icmp_skb)->type;
361 const int code = icmp_hdr(icmp_skb)->code;
364 struct request_sock *fastopen;
368 struct net *net = dev_net(icmp_skb->dev);
370 sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
371 th->dest, iph->saddr, ntohs(th->source),
374 __ICMP_INC_STATS(net, ICMP_MIB_INERRORS);
377 if (sk->sk_state == TCP_TIME_WAIT) {
378 inet_twsk_put(inet_twsk(sk));
381 seq = ntohl(th->seq);
382 if (sk->sk_state == TCP_NEW_SYN_RECV)
383 return tcp_req_err(sk, seq,
384 type == ICMP_PARAMETERPROB ||
385 type == ICMP_TIME_EXCEEDED ||
386 (type == ICMP_DEST_UNREACH &&
387 (code == ICMP_NET_UNREACH ||
388 code == ICMP_HOST_UNREACH)));
391 /* If too many ICMPs get dropped on busy
392 * servers this needs to be solved differently.
393 * We do take care of the PMTU discovery (RFC 1191) special case:
394 * we can receive locally generated ICMP messages while the socket is held.
396 if (sock_owned_by_user(sk)) {
397 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
398 __NET_INC_STATS(net, LINUX_MIB_LOCKDROPPEDICMPS);
400 if (sk->sk_state == TCP_CLOSE)
403 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
404 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
410 /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
411 fastopen = tp->fastopen_rsk;
412 snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
413 if (sk->sk_state != TCP_LISTEN &&
414 !between(seq, snd_una, tp->snd_nxt)) {
415 __NET_INC_STATS(net, LINUX_MIB_OUTOFWINDOWICMPS);
421 do_redirect(icmp_skb, sk);
423 case ICMP_SOURCE_QUENCH:
424 /* Just silently ignore these. */
426 case ICMP_PARAMETERPROB:
429 case ICMP_DEST_UNREACH:
430 if (code > NR_ICMP_UNREACH)
433 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
434 /* We are not interested in TCP_LISTEN and open_requests
435 * (SYN-ACKs sent out by Linux are always < 576 bytes, so
436 * they should go through unfragmented).
438 if (sk->sk_state == TCP_LISTEN)
442 if (!sock_owned_by_user(sk)) {
443 tcp_v4_mtu_reduced(sk);
445 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
451 err = icmp_err_convert[code].errno;
452 /* check if icmp_skb allows revert of backoff
453 * (see draft-zimmermann-tcp-lcd) */
454 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
456 if (seq != tp->snd_una || !icsk->icsk_retransmits ||
457 !icsk->icsk_backoff || fastopen)
460 if (sock_owned_by_user(sk))
463 icsk->icsk_backoff--;
464 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
466 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
468 skb = tcp_write_queue_head(sk);
471 remaining = icsk->icsk_rto -
473 tcp_time_stamp - tcp_skb_timestamp(skb));
476 inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
477 remaining, TCP_RTO_MAX);
479 /* The RTO revert clocked out the retransmission.
480 * Will retransmit now. */
481 tcp_retransmit_timer(sk);
485 case ICMP_TIME_EXCEEDED:
492 switch (sk->sk_state) {
495 /* Only in fast or simultaneous open. If a fast open socket is
496 * already accepted, it is treated as a connected one below.
498 if (fastopen && !fastopen->sk)
501 if (!sock_owned_by_user(sk)) {
504 sk->sk_error_report(sk);
508 sk->sk_err_soft = err;
513 /* If we've already connected we will keep trying
514 * until we time out, or the user gives up.
516 * RFC 1122 4.2.3.9 allows only PROTO_UNREACH and PORT_UNREACH
517 * to be treated as hard errors (well, FRAG_FAILED too,
518 * but it is obsoleted by PMTU discovery).
520 * Note that in the modern internet, where routing is unreliable
521 * and broken firewalls sit in every dark corner, sending random
522 * errors ordered by their masters, even these two messages have finally
523 * lost their original sense (even Linux sends invalid PORT_UNREACHs).
525 * Now we are in compliance with RFCs.
530 if (!sock_owned_by_user(sk) && inet->recverr) {
532 sk->sk_error_report(sk);
533 } else { /* Only an error on timeout */
534 sk->sk_err_soft = err;
542 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
544 struct tcphdr *th = tcp_hdr(skb);
546 if (skb->ip_summed == CHECKSUM_PARTIAL) {
547 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
548 skb->csum_start = skb_transport_header(skb) - skb->head;
549 skb->csum_offset = offsetof(struct tcphdr, check);
551 th->check = tcp_v4_check(skb->len, saddr, daddr,
558 /* This routine computes an IPv4 TCP checksum. */
559 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
561 const struct inet_sock *inet = inet_sk(sk);
563 __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
565 EXPORT_SYMBOL(tcp_v4_send_check);
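/* For reference, a user-space sketch of the pseudo-header checksum that
 * tcp_v4_check()/csum_tcpudp_nofold() produce above.  This is the plain
 * RFC 1071 ones'-complement sum, not the kernel's optimized helpers:
 * saddr/daddr are in network byte order, tcp points at the TCP header with
 * its checksum field already zeroed, and the result is stored with htons().
 *
 *   #include <netinet/in.h>
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   static uint32_t csum_add(uint32_t sum, const void *data, size_t len)
 *   {
 *           const uint8_t *p = data;
 *
 *           while (len > 1) {               // sum big-endian 16-bit words
 *                   sum += ((uint32_t)p[0] << 8) | p[1];
 *                   p += 2;
 *                   len -= 2;
 *           }
 *           if (len)                        // odd trailing byte, zero-padded
 *                   sum += (uint32_t)p[0] << 8;
 *           return sum;
 *   }
 *
 *   static uint16_t tcp4_checksum(uint32_t saddr, uint32_t daddr,
 *                                 const void *tcp, uint16_t len)
 *   {
 *           uint8_t ph[12];                 // RFC 793 pseudo-header
 *           uint32_t sum = 0;
 *
 *           memcpy(&ph[0], &saddr, 4);
 *           memcpy(&ph[4], &daddr, 4);
 *           ph[8] = 0;
 *           ph[9] = IPPROTO_TCP;
 *           ph[10] = len >> 8;
 *           ph[11] = len & 0xff;
 *
 *           sum = csum_add(sum, ph, sizeof(ph));
 *           sum = csum_add(sum, tcp, len);
 *           while (sum >> 16)               // fold the end-around carries
 *                   sum = (sum & 0xffff) + (sum >> 16);
 *           return (uint16_t)~sum;          // store as htons(result)
 *   }
 */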
568 * This routine will send an RST to the other tcp.
570 * Someone asks: why do we NEVER use socket parameters (TOS, TTL etc.)?
572 * Answer: if a packet caused an RST, it is not for a socket
573 * existing in our system; if it is matched to a socket,
574 * it is just a duplicate segment or a bug in the other side's TCP.
575 * So we build the reply based only on the parameters that
576 * arrived with the segment.
577 * Exception: precedence violation. We do not implement it in any case.
580 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
582 const struct tcphdr *th = tcp_hdr(skb);
585 #ifdef CONFIG_TCP_MD5SIG
586 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
589 struct ip_reply_arg arg;
590 #ifdef CONFIG_TCP_MD5SIG
591 struct tcp_md5sig_key *key = NULL;
592 const __u8 *hash_location = NULL;
593 unsigned char newhash[16];
595 struct sock *sk1 = NULL;
599 /* Never send a reset in response to a reset. */
603 /* If sk is not NULL, it means we did a successful lookup and the incoming
604 * route had to be correct. The prequeue might have dropped our dst.
606 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
609 /* Swap the send and the receive. */
610 memset(&rep, 0, sizeof(rep));
611 rep.th.dest = th->source;
612 rep.th.source = th->dest;
613 rep.th.doff = sizeof(struct tcphdr) / 4;
617 rep.th.seq = th->ack_seq;
620 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
621 skb->len - (th->doff << 2));
624 memset(&arg, 0, sizeof(arg));
625 arg.iov[0].iov_base = (unsigned char *)&rep;
626 arg.iov[0].iov_len = sizeof(rep.th);
628 net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
629 #ifdef CONFIG_TCP_MD5SIG
631 hash_location = tcp_parse_md5sig_option(th);
632 if (sk && sk_fullsock(sk)) {
633 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
634 &ip_hdr(skb)->saddr, AF_INET);
635 } else if (hash_location) {
637 * active side is lost. Try to find the listening socket through the
638 * source port, and then find the md5 key through the listening socket.
639 * We do not loosen security here:
640 * the incoming packet is checked against the md5 hash of the found key,
641 * and no RST is generated if the md5 hash doesn't match.
643 sk1 = __inet_lookup_listener(net, &tcp_hashinfo, NULL, 0,
645 th->source, ip_hdr(skb)->daddr,
646 ntohs(th->source), inet_iif(skb));
647 /* don't send rst if it can't find key */
651 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
652 &ip_hdr(skb)->saddr, AF_INET);
657 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
658 if (genhash || memcmp(hash_location, newhash, 16) != 0)
664 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
666 (TCPOPT_MD5SIG << 8) |
668 /* Update length and the length the header thinks exists */
669 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
670 rep.th.doff = arg.iov[0].iov_len / 4;
672 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
673 key, ip_hdr(skb)->saddr,
674 ip_hdr(skb)->daddr, &rep.th);
677 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
678 ip_hdr(skb)->saddr, /* XXX */
679 arg.iov[0].iov_len, IPPROTO_TCP, 0);
680 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
681 arg.flags = (sk && inet_sk_transparent(sk)) ? IP_REPLY_ARG_NOSRCCHECK : 0;
683 /* When the socket is gone, all binding information is lost, and
684 * routing might fail in this case. No choice here: if we choose to force
685 * the input interface, we will misroute in the case of an asymmetric route.
688 arg.bound_dev_if = sk->sk_bound_dev_if;
690 BUILD_BUG_ON(offsetof(struct sock, sk_bound_dev_if) !=
691 offsetof(struct inet_timewait_sock, tw_bound_dev_if));
693 arg.tos = ip_hdr(skb)->tos;
694 arg.uid = sock_net_uid(net, sk && sk_fullsock(sk) ? sk : NULL);
696 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
697 skb, &TCP_SKB_CB(skb)->header.h4.opt,
698 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
699 &arg, arg.iov[0].iov_len);
701 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
702 __TCP_INC_STATS(net, TCP_MIB_OUTRSTS);
705 #ifdef CONFIG_TCP_MD5SIG
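/* An easy way to observe the RST path above from user space is to connect to
 * a local port with no listener: the SYN finds no socket in tcp_v4_rcv(),
 * tcp_v4_send_reset() answers with an RST, and connect(2) fails with
 * ECONNREFUSED.  A sketch (port 9 is assumed to have no listener):
 *
 *   #include <arpa/inet.h>
 *   #include <errno.h>
 *   #include <stdio.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           struct sockaddr_in peer = { .sin_family = AF_INET,
 *                                       .sin_port = htons(9) };
 *           int fd = socket(AF_INET, SOCK_STREAM, 0);
 *
 *           inet_pton(AF_INET, "127.0.0.1", &peer.sin_addr);
 *           if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0)
 *                   printf("connect: %s\n", strerror(errno));
 *           close(fd);
 *           return 0;
 *   }
 */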
711 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
712 outside of socket context, is certainly ugly. What can I do?
715 static void tcp_v4_send_ack(const struct sock *sk,
716 struct sk_buff *skb, u32 seq, u32 ack,
717 u32 win, u32 tsval, u32 tsecr, int oif,
718 struct tcp_md5sig_key *key,
719 int reply_flags, u8 tos)
721 const struct tcphdr *th = tcp_hdr(skb);
724 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
725 #ifdef CONFIG_TCP_MD5SIG
726 + (TCPOLEN_MD5SIG_ALIGNED >> 2)
730 struct net *net = sock_net(sk);
731 struct ip_reply_arg arg;
733 memset(&rep.th, 0, sizeof(struct tcphdr));
734 memset(&arg, 0, sizeof(arg));
736 arg.iov[0].iov_base = (unsigned char *)&rep;
737 arg.iov[0].iov_len = sizeof(rep.th);
739 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
740 (TCPOPT_TIMESTAMP << 8) |
742 rep.opt[1] = htonl(tsval);
743 rep.opt[2] = htonl(tsecr);
744 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
747 /* Swap the send and the receive. */
748 rep.th.dest = th->source;
749 rep.th.source = th->dest;
750 rep.th.doff = arg.iov[0].iov_len / 4;
751 rep.th.seq = htonl(seq);
752 rep.th.ack_seq = htonl(ack);
754 rep.th.window = htons(win);
756 #ifdef CONFIG_TCP_MD5SIG
758 int offset = (tsecr) ? 3 : 0;
760 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
762 (TCPOPT_MD5SIG << 8) |
764 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
765 rep.th.doff = arg.iov[0].iov_len/4;
767 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
768 key, ip_hdr(skb)->saddr,
769 ip_hdr(skb)->daddr, &rep.th);
772 arg.flags = reply_flags;
773 arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
774 ip_hdr(skb)->saddr, /* XXX */
775 arg.iov[0].iov_len, IPPROTO_TCP, 0);
776 arg.csumoffset = offsetof(struct tcphdr, check) / 2;
778 arg.bound_dev_if = oif;
780 arg.uid = sock_net_uid(net, sk_fullsock(sk) ? sk : NULL);
782 ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
783 skb, &TCP_SKB_CB(skb)->header.h4.opt,
784 ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
785 &arg, arg.iov[0].iov_len);
787 __TCP_INC_STATS(net, TCP_MIB_OUTSEGS);
791 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
793 struct inet_timewait_sock *tw = inet_twsk(sk);
794 struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
796 tcp_v4_send_ack(sk, skb,
797 tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
798 tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
799 tcp_time_stamp + tcptw->tw_ts_offset,
802 tcp_twsk_md5_key(tcptw),
803 tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
810 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
811 struct request_sock *req)
813 /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
814 * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
816 u32 seq = (sk->sk_state == TCP_LISTEN) ? tcp_rsk(req)->snt_isn + 1 :
820 * The window field (SEG.WND) of every outgoing segment, with the
821 * exception of <SYN> segments, MUST be right-shifted by
822 * Rcv.Wind.Shift bits:
824 tcp_v4_send_ack(sk, skb, seq,
825 tcp_rsk(req)->rcv_nxt,
826 req->rsk_rcv_wnd >> inet_rsk(req)->rcv_wscale,
830 tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
832 inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
837 * Send a SYN-ACK after having received a SYN.
838 * This still operates on a request_sock only, not on a big
841 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
843 struct request_sock *req,
844 struct tcp_fastopen_cookie *foc,
845 enum tcp_synack_type synack_type)
847 const struct inet_request_sock *ireq = inet_rsk(req);
852 /* First, grab a route. */
853 if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
856 skb = tcp_make_synack(sk, dst, req, foc, synack_type);
859 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
861 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
864 err = net_xmit_eval(err);
871 * IPv4 request_sock destructor.
873 static void tcp_v4_reqsk_destructor(struct request_sock *req)
875 kfree(inet_rsk(req)->opt);
878 #ifdef CONFIG_TCP_MD5SIG
880 * RFC2385 MD5 checksumming requires a mapping of
881 * IP address->MD5 Key.
882 * We need to maintain these in the sk structure.
885 /* Find the Key structure for an address. */
886 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
887 const union tcp_md5_addr *addr,
890 const struct tcp_sock *tp = tcp_sk(sk);
891 struct tcp_md5sig_key *key;
892 unsigned int size = sizeof(struct in_addr);
893 const struct tcp_md5sig_info *md5sig;
895 /* caller either holds rcu_read_lock() or socket lock */
896 md5sig = rcu_dereference_check(tp->md5sig_info,
897 lockdep_sock_is_held(sk));
900 #if IS_ENABLED(CONFIG_IPV6)
901 if (family == AF_INET6)
902 size = sizeof(struct in6_addr);
904 hlist_for_each_entry_rcu(key, &md5sig->head, node) {
905 if (key->family != family)
907 if (!memcmp(&key->addr, addr, size))
912 EXPORT_SYMBOL(tcp_md5_do_lookup);
914 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
915 const struct sock *addr_sk)
917 const union tcp_md5_addr *addr;
919 addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
920 return tcp_md5_do_lookup(sk, addr, AF_INET);
922 EXPORT_SYMBOL(tcp_v4_md5_lookup);
924 /* This can be called on a newly created socket, from other files */
925 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
926 int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
928 /* Add Key to the list */
929 struct tcp_md5sig_key *key;
930 struct tcp_sock *tp = tcp_sk(sk);
931 struct tcp_md5sig_info *md5sig;
933 key = tcp_md5_do_lookup(sk, addr, family);
935 /* Pre-existing entry - just update that one. */
936 memcpy(key->key, newkey, newkeylen);
937 key->keylen = newkeylen;
941 md5sig = rcu_dereference_protected(tp->md5sig_info,
942 lockdep_sock_is_held(sk));
944 md5sig = kmalloc(sizeof(*md5sig), gfp);
948 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
949 INIT_HLIST_HEAD(&md5sig->head);
950 rcu_assign_pointer(tp->md5sig_info, md5sig);
953 key = sock_kmalloc(sk, sizeof(*key), gfp);
956 if (!tcp_alloc_md5sig_pool()) {
957 sock_kfree_s(sk, key, sizeof(*key));
961 memcpy(key->key, newkey, newkeylen);
962 key->keylen = newkeylen;
963 key->family = family;
964 memcpy(&key->addr, addr,
965 (family == AF_INET6) ? sizeof(struct in6_addr) :
966 sizeof(struct in_addr));
967 hlist_add_head_rcu(&key->node, &md5sig->head);
970 EXPORT_SYMBOL(tcp_md5_do_add);
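/* From user space, keys reach tcp_md5_do_add()/tcp_md5_do_del() through the
 * TCP_MD5SIG socket option parsed by tcp_v4_parse_md5_keys() below.  A sketch,
 * assuming a libc that exposes struct tcp_md5sig and TCP_MD5SIG in
 * <netinet/tcp.h> (otherwise include <linux/tcp.h>):
 *
 *   #include <netinet/in.h>
 *   #include <netinet/tcp.h>
 *   #include <string.h>
 *   #include <sys/socket.h>
 *
 *   static int set_tcp_md5(int fd, const struct sockaddr_in *peer,
 *                          const void *key, int keylen)
 *   {
 *           struct tcp_md5sig md5;
 *
 *           memset(&md5, 0, sizeof(md5));
 *           memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
 *           md5.tcpm_keylen = keylen;       // keylen <= TCP_MD5SIG_MAXKEYLEN
 *           memcpy(md5.tcpm_key, key, keylen);
 *           return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
 *   }
 */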
972 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
974 struct tcp_md5sig_key *key;
976 key = tcp_md5_do_lookup(sk, addr, family);
979 hlist_del_rcu(&key->node);
980 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
984 EXPORT_SYMBOL(tcp_md5_do_del);
986 static void tcp_clear_md5_list(struct sock *sk)
988 struct tcp_sock *tp = tcp_sk(sk);
989 struct tcp_md5sig_key *key;
990 struct hlist_node *n;
991 struct tcp_md5sig_info *md5sig;
993 md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
995 hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
996 hlist_del_rcu(&key->node);
997 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
1002 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
1005 struct tcp_md5sig cmd;
1006 struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
1008 if (optlen < sizeof(cmd))
1011 if (copy_from_user(&cmd, optval, sizeof(cmd)))
1014 if (sin->sin_family != AF_INET)
1017 if (!cmd.tcpm_keylen)
1018 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1021 if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1024 return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1025 AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1029 static int tcp_v4_md5_hash_headers(struct tcp_md5sig_pool *hp,
1030 __be32 daddr, __be32 saddr,
1031 const struct tcphdr *th, int nbytes)
1033 struct tcp4_pseudohdr *bp;
1034 struct scatterlist sg;
1041 bp->protocol = IPPROTO_TCP;
1042 bp->len = cpu_to_be16(nbytes);
1044 _th = (struct tcphdr *)(bp + 1);
1045 memcpy(_th, th, sizeof(*th));
1048 sg_init_one(&sg, bp, sizeof(*bp) + sizeof(*th));
1049 ahash_request_set_crypt(hp->md5_req, &sg, NULL,
1050 sizeof(*bp) + sizeof(*th));
1051 return crypto_ahash_update(hp->md5_req);
1054 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1055 __be32 daddr, __be32 saddr, const struct tcphdr *th)
1057 struct tcp_md5sig_pool *hp;
1058 struct ahash_request *req;
1060 hp = tcp_get_md5sig_pool();
1062 goto clear_hash_noput;
1065 if (crypto_ahash_init(req))
1067 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, th->doff << 2))
1069 if (tcp_md5_hash_key(hp, key))
1071 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1072 if (crypto_ahash_final(req))
1075 tcp_put_md5sig_pool();
1079 tcp_put_md5sig_pool();
1081 memset(md5_hash, 0, 16);
1085 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1086 const struct sock *sk,
1087 const struct sk_buff *skb)
1089 struct tcp_md5sig_pool *hp;
1090 struct ahash_request *req;
1091 const struct tcphdr *th = tcp_hdr(skb);
1092 __be32 saddr, daddr;
1094 if (sk) { /* valid for establish/request sockets */
1095 saddr = sk->sk_rcv_saddr;
1096 daddr = sk->sk_daddr;
1098 const struct iphdr *iph = ip_hdr(skb);
1103 hp = tcp_get_md5sig_pool();
1105 goto clear_hash_noput;
1108 if (crypto_ahash_init(req))
1111 if (tcp_v4_md5_hash_headers(hp, daddr, saddr, th, skb->len))
1113 if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1115 if (tcp_md5_hash_key(hp, key))
1117 ahash_request_set_crypt(req, NULL, md5_hash, 0);
1118 if (crypto_ahash_final(req))
1121 tcp_put_md5sig_pool();
1125 tcp_put_md5sig_pool();
1127 memset(md5_hash, 0, 16);
1130 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
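/* For illustration, a user-space equivalent of the digest computed above: per
 * RFC 2385 the hash covers the TCP pseudo-header, the TCP header with its
 * checksum zeroed (options excluded), the payload, and finally the key.  This
 * sketch assumes a 20-byte header (no options) and uses OpenSSL's legacy
 * MD5_* API in place of the kernel crypto calls:
 *
 *   #include <netinet/in.h>
 *   #include <openssl/md5.h>
 *   #include <stdint.h>
 *   #include <string.h>
 *
 *   struct tcp4_pseudo {                    // mirrors struct tcp4_pseudohdr
 *           uint32_t saddr, daddr;          // network byte order
 *           uint8_t  pad, protocol;
 *           uint16_t len;                   // whole TCP length, network order
 *   } __attribute__((packed));
 *
 *   static void tcp4_md5_digest(uint8_t digest[16],
 *                               uint32_t saddr, uint32_t daddr,
 *                               const uint8_t *tcph,      // 20-byte header
 *                               const uint8_t *data, size_t datalen,
 *                               const uint8_t *key, size_t keylen)
 *   {
 *           struct tcp4_pseudo ph = {
 *                   .saddr = saddr, .daddr = daddr, .pad = 0,
 *                   .protocol = IPPROTO_TCP,
 *                   .len = htons((uint16_t)(20 + datalen)),
 *           };
 *           uint8_t th[20];
 *           MD5_CTX ctx;
 *
 *           memcpy(th, tcph, sizeof(th));
 *           th[16] = th[17] = 0;            // zero the checksum field
 *
 *           MD5_Init(&ctx);
 *           MD5_Update(&ctx, &ph, sizeof(ph));
 *           MD5_Update(&ctx, th, sizeof(th));
 *           MD5_Update(&ctx, data, datalen);
 *           MD5_Update(&ctx, key, keylen);
 *           MD5_Final(digest, &ctx);
 *   }
 */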
1134 /* Called with rcu_read_lock() */
1135 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1136 const struct sk_buff *skb)
1138 #ifdef CONFIG_TCP_MD5SIG
1140 * This gets called for each TCP segment that arrives
1141 * so we want to be efficient.
1142 * We have 3 drop cases:
1143 * o No MD5 hash and one expected.
1144 * o MD5 hash and we're not expecting one.
1145 * o MD5 hash and it's wrong.
1147 const __u8 *hash_location = NULL;
1148 struct tcp_md5sig_key *hash_expected;
1149 const struct iphdr *iph = ip_hdr(skb);
1150 const struct tcphdr *th = tcp_hdr(skb);
1152 unsigned char newhash[16];
1154 hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1156 hash_location = tcp_parse_md5sig_option(th);
1158 /* We've parsed the options - do we have a hash? */
1159 if (!hash_expected && !hash_location)
1162 if (hash_expected && !hash_location) {
1163 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1167 if (!hash_expected && hash_location) {
1168 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1172 /* Okay, so this is hash_expected and hash_location -
1173 * so we need to calculate the checksum.
1175 genhash = tcp_v4_md5_hash_skb(newhash,
1179 if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1180 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMD5FAILURE);
1181 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1182 &iph->saddr, ntohs(th->source),
1183 &iph->daddr, ntohs(th->dest),
1184 genhash ? " tcp_v4_calc_md5_hash failed"
1193 static void tcp_v4_init_req(struct request_sock *req,
1194 const struct sock *sk_listener,
1195 struct sk_buff *skb)
1197 struct inet_request_sock *ireq = inet_rsk(req);
1199 sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1200 sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1201 ireq->opt = tcp_v4_save_options(skb);
1204 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1206 const struct request_sock *req,
1209 struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1212 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1221 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1223 .obj_size = sizeof(struct tcp_request_sock),
1224 .rtx_syn_ack = tcp_rtx_synack,
1225 .send_ack = tcp_v4_reqsk_send_ack,
1226 .destructor = tcp_v4_reqsk_destructor,
1227 .send_reset = tcp_v4_send_reset,
1228 .syn_ack_timeout = tcp_syn_ack_timeout,
1231 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1232 .mss_clamp = TCP_MSS_DEFAULT,
1233 #ifdef CONFIG_TCP_MD5SIG
1234 .req_md5_lookup = tcp_v4_md5_lookup,
1235 .calc_md5_hash = tcp_v4_md5_hash_skb,
1237 .init_req = tcp_v4_init_req,
1238 #ifdef CONFIG_SYN_COOKIES
1239 .cookie_init_seq = cookie_v4_init_sequence,
1241 .route_req = tcp_v4_route_req,
1242 .init_seq = tcp_v4_init_sequence,
1243 .send_synack = tcp_v4_send_synack,
1246 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1248 /* Never answer SYNs sent to broadcast or multicast */
1249 if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1252 return tcp_conn_request(&tcp_request_sock_ops,
1253 &tcp_request_sock_ipv4_ops, sk, skb);
1259 EXPORT_SYMBOL(tcp_v4_conn_request);
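/* tcp_v4_conn_request() is exercised by any passive open: once a socket is in
 * LISTEN state, incoming SYNs land here, and the eventual child socket is
 * built by tcp_v4_syn_recv_sock() below.  A minimal user-space sketch
 * (port 8080 is a placeholder; error handling trimmed):
 *
 *   #include <netinet/in.h>
 *   #include <sys/socket.h>
 *   #include <unistd.h>
 *
 *   int main(void)
 *   {
 *           struct sockaddr_in addr = { .sin_family = AF_INET,
 *                                       .sin_port = htons(8080),
 *                                       .sin_addr.s_addr = htonl(INADDR_ANY) };
 *           int lfd = socket(AF_INET, SOCK_STREAM, 0), cfd;
 *
 *           bind(lfd, (struct sockaddr *)&addr, sizeof(addr));
 *           listen(lfd, 128);       // SYNs now reach tcp_v4_conn_request()
 *           cfd = accept(lfd, NULL, NULL);
 *           if (cfd >= 0)
 *                   close(cfd);
 *           close(lfd);
 *           return 0;
 *   }
 */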
1263 * The three way handshake has completed - we got a valid synack -
1264 * now create the new socket.
1266 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1267 struct request_sock *req,
1268 struct dst_entry *dst,
1269 struct request_sock *req_unhash,
1272 struct inet_request_sock *ireq;
1273 struct inet_sock *newinet;
1274 struct tcp_sock *newtp;
1276 #ifdef CONFIG_TCP_MD5SIG
1277 struct tcp_md5sig_key *key;
1279 struct ip_options_rcu *inet_opt;
1281 if (sk_acceptq_is_full(sk))
1284 newsk = tcp_create_openreq_child(sk, req, skb);
1288 newsk->sk_gso_type = SKB_GSO_TCPV4;
1289 inet_sk_rx_dst_set(newsk, skb);
1291 newtp = tcp_sk(newsk);
1292 newinet = inet_sk(newsk);
1293 ireq = inet_rsk(req);
1294 sk_daddr_set(newsk, ireq->ir_rmt_addr);
1295 sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1296 newsk->sk_bound_dev_if = ireq->ir_iif;
1297 newinet->inet_saddr = ireq->ir_loc_addr;
1298 inet_opt = ireq->opt;
1299 rcu_assign_pointer(newinet->inet_opt, inet_opt);
1301 newinet->mc_index = inet_iif(skb);
1302 newinet->mc_ttl = ip_hdr(skb)->ttl;
1303 newinet->rcv_tos = ip_hdr(skb)->tos;
1304 inet_csk(newsk)->icsk_ext_hdr_len = 0;
1306 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1307 newinet->inet_id = newtp->write_seq ^ jiffies;
1310 dst = inet_csk_route_child_sock(sk, newsk, req);
1314 /* syncookie case : see end of cookie_v4_check() */
1316 sk_setup_caps(newsk, dst);
1318 tcp_ca_openreq_child(newsk, dst);
1320 tcp_sync_mss(newsk, dst_mtu(dst));
1321 newtp->advmss = dst_metric_advmss(dst);
1322 if (tcp_sk(sk)->rx_opt.user_mss &&
1323 tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1324 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1326 tcp_initialize_rcv_mss(newsk);
1328 #ifdef CONFIG_TCP_MD5SIG
1329 /* Copy over the MD5 key from the original socket */
1330 key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1334 * We're using one, so create a matching key
1335 * on the newsk structure. If we fail to get
1336 * memory, then we end up not copying the key
1339 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1340 AF_INET, key->key, key->keylen, GFP_ATOMIC);
1341 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1345 if (__inet_inherit_port(sk, newsk) < 0)
1347 *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1349 tcp_move_syn(newtp, req);
1354 NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1361 inet_csk_prepare_forced_close(newsk);
1365 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1367 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1369 #ifdef CONFIG_SYN_COOKIES
1370 const struct tcphdr *th = tcp_hdr(skb);
1373 sk = cookie_v4_check(sk, skb);
1378 /* The socket must have its spinlock held when we get
1379 * here, unless it is a TCP_LISTEN socket.
1381 * We have a potential double-lock case here, so even when
1382 * doing backlog processing we use the BH locking scheme.
1383 * This is because we cannot sleep with the original spinlock
1386 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1390 if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1391 struct dst_entry *dst = sk->sk_rx_dst;
1393 sock_rps_save_rxhash(sk, skb);
1394 sk_mark_napi_id(sk, skb);
1396 if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1397 !dst->ops->check(dst, 0)) {
1399 sk->sk_rx_dst = NULL;
1402 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1406 if (tcp_checksum_complete(skb))
1409 if (sk->sk_state == TCP_LISTEN) {
1410 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1415 sock_rps_save_rxhash(nsk, skb);
1416 sk_mark_napi_id(nsk, skb);
1417 if (tcp_child_process(sk, nsk, skb)) {
1424 sock_rps_save_rxhash(sk, skb);
1426 if (tcp_rcv_state_process(sk, skb)) {
1433 tcp_v4_send_reset(rsk, skb);
1436 /* Be careful here. If this function gets more complicated and
1437 * gcc suffers from register pressure on the x86, sk (in %ebx)
1438 * might be destroyed here. This current version compiles correctly,
1439 * but you have been warned.
1444 TCP_INC_STATS(sock_net(sk), TCP_MIB_CSUMERRORS);
1445 TCP_INC_STATS(sock_net(sk), TCP_MIB_INERRS);
1448 EXPORT_SYMBOL(tcp_v4_do_rcv);
1450 void tcp_v4_early_demux(struct sk_buff *skb)
1452 const struct iphdr *iph;
1453 const struct tcphdr *th;
1456 if (skb->pkt_type != PACKET_HOST)
1459 if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1465 if (th->doff < sizeof(struct tcphdr) / 4)
1468 sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1469 iph->saddr, th->source,
1470 iph->daddr, ntohs(th->dest),
1474 skb->destructor = sock_edemux;
1475 if (sk_fullsock(sk)) {
1476 struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1479 dst = dst_check(dst, 0);
1481 inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1482 skb_dst_set_noref(skb, dst);
1487 /* The packet is added to the VJ-style prequeue for processing in process
1488 * context, if a reader task is waiting. Apparently, this exciting
1489 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1490 * failed somewhere. Latency? Burstiness? Well, at least now we will
1491 * see why it failed. 8)8) --ANK
1494 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1496 struct tcp_sock *tp = tcp_sk(sk);
1498 if (sysctl_tcp_low_latency || !tp->ucopy.task)
1501 if (skb->len <= tcp_hdrlen(skb) &&
1502 skb_queue_len(&tp->ucopy.prequeue) == 0)
1505 /* Before escaping the RCU-protected region, we need to take care of the skb
1506 * dst. Prequeue is only enabled for established sockets.
1507 * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1508 * Instead of doing a full sk_rx_dst validity check here, let's perform
1509 * an optimistic check.
1511 if (likely(sk->sk_rx_dst))
1514 skb_dst_force_safe(skb);
1516 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1517 tp->ucopy.memory += skb->truesize;
1518 if (skb_queue_len(&tp->ucopy.prequeue) >= 32 ||
1519 tp->ucopy.memory + atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
1520 struct sk_buff *skb1;
1522 BUG_ON(sock_owned_by_user(sk));
1523 __NET_ADD_STATS(sock_net(sk), LINUX_MIB_TCPPREQUEUEDROPPED,
1524 skb_queue_len(&tp->ucopy.prequeue));
1526 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1527 sk_backlog_rcv(sk, skb1);
1529 tp->ucopy.memory = 0;
1530 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1531 wake_up_interruptible_sync_poll(sk_sleep(sk),
1532 POLLIN | POLLRDNORM | POLLRDBAND);
1533 if (!inet_csk_ack_scheduled(sk))
1534 inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1535 (3 * tcp_rto_min(sk)) / 4,
1540 EXPORT_SYMBOL(tcp_prequeue);
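/* Because of the sysctl_tcp_low_latency test above, the prequeue can be
 * bypassed system-wide via /proc/sys/net/ipv4/tcp_low_latency.  A small
 * user-space sketch that reports the current setting:
 *
 *   #include <stdio.h>
 *
 *   static void show_low_latency(void)
 *   {
 *           int val = 0;
 *           FILE *f = fopen("/proc/sys/net/ipv4/tcp_low_latency", "r");
 *
 *           if (f && fscanf(f, "%d", &val) == 1)
 *                   printf("tcp_low_latency = %d (prequeue %s)\n",
 *                          val, val ? "bypassed" : "in use");
 *           if (f)
 *                   fclose(f);
 *   }
 */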
1542 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb)
1544 u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
1546 /* Only the socket owner can try to collapse/prune rx queues
1547 * to reduce memory overhead, so add a little headroom here.
1548 * Few socket backlogs are likely to be non-empty concurrently.
1552 /* In case all data was pulled from skb frags (in __pskb_pull_tail()),
1553 * we can fix skb->truesize to its real value to avoid future drops.
1554 * This is valid because skb is not yet charged to the socket.
1555 * It has been noticed that pure SACK packets were sometimes dropped
1556 * (if cooked by drivers without the copybreak feature).
1559 skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
1561 if (unlikely(sk_add_backlog(sk, skb, limit))) {
1563 __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPBACKLOGDROP);
1568 EXPORT_SYMBOL(tcp_add_backlog);
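/* The backlog limit above is derived from sk_rcvbuf + sk_sndbuf (plus a
 * little headroom), so it can be estimated from user space with the ordinary
 * SO_RCVBUF/SO_SNDBUF socket options.  A sketch, assuming fd is a TCP socket:
 *
 *   #include <stdio.h>
 *   #include <sys/socket.h>
 *
 *   static void show_backlog_budget(int fd)
 *   {
 *           int rcv = 0, snd = 0;
 *           socklen_t len = sizeof(int);
 *
 *           getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &rcv, &len);
 *           len = sizeof(int);
 *           getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &snd, &len);
 *           printf("approximate backlog limit: %d bytes\n", rcv + snd);
 *   }
 */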
1574 int tcp_v4_rcv(struct sk_buff *skb)
1576 struct net *net = dev_net(skb->dev);
1577 const struct iphdr *iph;
1578 const struct tcphdr *th;
1583 if (skb->pkt_type != PACKET_HOST)
1586 /* Count it even if it's bad */
1587 __TCP_INC_STATS(net, TCP_MIB_INSEGS);
1589 if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1592 th = (const struct tcphdr *)skb->data;
1594 if (unlikely(th->doff < sizeof(struct tcphdr) / 4))
1596 if (!pskb_may_pull(skb, th->doff * 4))
1599 /* An explanation is required here, I think.
1600 * Packet length and doff are validated by header prediction,
1601 * provided the case of th->doff == 0 is eliminated.
1602 * So, we defer the checks. */
1604 if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1607 th = (const struct tcphdr *)skb->data;
1609 /* This is tricky: we move the IPCB to its correct location inside TCP_SKB_CB().
1610 * barrier() makes sure the compiler won't play fool^Waliasing games.
1612 memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1613 sizeof(struct inet_skb_parm));
1616 TCP_SKB_CB(skb)->seq = ntohl(th->seq);
1617 TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1618 skb->len - th->doff * 4);
1619 TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1620 TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1621 TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1622 TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1623 TCP_SKB_CB(skb)->sacked = 0;
1626 sk = __inet_lookup_skb(&tcp_hashinfo, skb, __tcp_hdrlen(th), th->source,
1627 th->dest, &refcounted);
1632 if (sk->sk_state == TCP_TIME_WAIT)
1635 if (sk->sk_state == TCP_NEW_SYN_RECV) {
1636 struct request_sock *req = inet_reqsk(sk);
1639 sk = req->rsk_listener;
1640 if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
1641 sk_drops_add(sk, skb);
1645 if (unlikely(sk->sk_state != TCP_LISTEN)) {
1646 inet_csk_reqsk_queue_drop_and_put(sk, req);
1649 /* We own a reference on the listener, increase it again
1650 * as we might lose it too soon.
1654 nsk = tcp_check_req(sk, skb, req, false);
1657 goto discard_and_relse;
1661 } else if (tcp_child_process(sk, nsk, skb)) {
1662 tcp_v4_send_reset(nsk, skb);
1663 goto discard_and_relse;
1669 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1670 __NET_INC_STATS(net, LINUX_MIB_TCPMINTTLDROP);
1671 goto discard_and_relse;
1674 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1675 goto discard_and_relse;
1677 if (tcp_v4_inbound_md5_hash(sk, skb))
1678 goto discard_and_relse;
1682 if (sk_filter(sk, skb))
1683 goto discard_and_relse;
1687 if (sk->sk_state == TCP_LISTEN) {
1688 ret = tcp_v4_do_rcv(sk, skb);
1689 goto put_and_return;
1692 sk_incoming_cpu_update(sk);
1694 bh_lock_sock_nested(sk);
1695 tcp_segs_in(tcp_sk(sk), skb);
1697 if (!sock_owned_by_user(sk)) {
1698 if (!tcp_prequeue(sk, skb))
1699 ret = tcp_v4_do_rcv(sk, skb);
1700 } else if (tcp_add_backlog(sk, skb)) {
1701 goto discard_and_relse;
1712 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1715 if (tcp_checksum_complete(skb)) {
1717 __TCP_INC_STATS(net, TCP_MIB_CSUMERRORS);
1719 __TCP_INC_STATS(net, TCP_MIB_INERRS);
1721 tcp_v4_send_reset(NULL, skb);
1725 /* Discard frame. */
1730 sk_drops_add(sk, skb);
1736 if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1737 inet_twsk_put(inet_twsk(sk));
1741 if (tcp_checksum_complete(skb)) {
1742 inet_twsk_put(inet_twsk(sk));
1745 switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1747 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1750 iph->saddr, th->source,
1751 iph->daddr, th->dest,
1754 inet_twsk_deschedule_put(inet_twsk(sk));
1759 /* Fall through to ACK */
1762 tcp_v4_timewait_ack(sk, skb);
1765 tcp_v4_send_reset(sk, skb);
1766 inet_twsk_deschedule_put(inet_twsk(sk));
1768 case TCP_TW_SUCCESS:;
1773 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1774 .twsk_obj_size = sizeof(struct tcp_timewait_sock),
1775 .twsk_unique = tcp_twsk_unique,
1776 .twsk_destructor= tcp_twsk_destructor,
1779 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1781 struct dst_entry *dst = skb_dst(skb);
1783 if (dst && dst_hold_safe(dst)) {
1784 sk->sk_rx_dst = dst;
1785 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1788 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1790 const struct inet_connection_sock_af_ops ipv4_specific = {
1791 .queue_xmit = ip_queue_xmit,
1792 .send_check = tcp_v4_send_check,
1793 .rebuild_header = inet_sk_rebuild_header,
1794 .sk_rx_dst_set = inet_sk_rx_dst_set,
1795 .conn_request = tcp_v4_conn_request,
1796 .syn_recv_sock = tcp_v4_syn_recv_sock,
1797 .net_header_len = sizeof(struct iphdr),
1798 .setsockopt = ip_setsockopt,
1799 .getsockopt = ip_getsockopt,
1800 .addr2sockaddr = inet_csk_addr2sockaddr,
1801 .sockaddr_len = sizeof(struct sockaddr_in),
1802 .bind_conflict = inet_csk_bind_conflict,
1803 #ifdef CONFIG_COMPAT
1804 .compat_setsockopt = compat_ip_setsockopt,
1805 .compat_getsockopt = compat_ip_getsockopt,
1807 .mtu_reduced = tcp_v4_mtu_reduced,
1809 EXPORT_SYMBOL(ipv4_specific);
1811 #ifdef CONFIG_TCP_MD5SIG
1812 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1813 .md5_lookup = tcp_v4_md5_lookup,
1814 .calc_md5_hash = tcp_v4_md5_hash_skb,
1815 .md5_parse = tcp_v4_parse_md5_keys,
1819 /* NOTE: A lot of things are set to zero explicitly by the call to
1820 * sk_alloc(), so they need not be done here.
1822 static int tcp_v4_init_sock(struct sock *sk)
1824 struct inet_connection_sock *icsk = inet_csk(sk);
1828 icsk->icsk_af_ops = &ipv4_specific;
1830 #ifdef CONFIG_TCP_MD5SIG
1831 tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1837 void tcp_v4_destroy_sock(struct sock *sk)
1839 struct tcp_sock *tp = tcp_sk(sk);
1841 tcp_clear_xmit_timers(sk);
1843 tcp_cleanup_congestion_control(sk);
1845 /* Clean up the write buffer. */
1846 tcp_write_queue_purge(sk);
1848 /* Cleans up our, hopefully empty, out_of_order_queue. */
1849 skb_rbtree_purge(&tp->out_of_order_queue);
1851 #ifdef CONFIG_TCP_MD5SIG
1852 /* Clean up the MD5 key list, if any */
1853 if (tp->md5sig_info) {
1854 tcp_clear_md5_list(sk);
1855 kfree_rcu(tp->md5sig_info, rcu);
1856 tp->md5sig_info = NULL;
1860 /* Clean the prequeue; it really must be empty */
1861 __skb_queue_purge(&tp->ucopy.prequeue);
1863 /* Clean up a referenced TCP bind bucket. */
1864 if (inet_csk(sk)->icsk_bind_hash)
1867 BUG_ON(tp->fastopen_rsk);
1869 /* If socket is aborted during connect operation */
1870 tcp_free_fastopen_req(tp);
1871 tcp_saved_syn_free(tp);
1874 sk_sockets_allocated_dec(sk);
1877 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1879 #ifdef CONFIG_PROC_FS
1880 /* Proc filesystem TCP sock list dumping. */
1883 * Get the next listener socket following cur. If cur is NULL, get the first socket
1884 * starting from bucket given in st->bucket; when st->bucket is zero the
1885 * very first socket in the hash table is returned.
1887 static void *listening_get_next(struct seq_file *seq, void *cur)
1889 struct tcp_iter_state *st = seq->private;
1890 struct net *net = seq_file_net(seq);
1891 struct inet_listen_hashbucket *ilb;
1892 struct sock *sk = cur;
1896 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1897 spin_lock(&ilb->lock);
1898 sk = sk_head(&ilb->head);
1902 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1908 sk_for_each_from(sk) {
1909 if (!net_eq(sock_net(sk), net))
1911 if (sk->sk_family == st->family)
1914 spin_unlock(&ilb->lock);
1916 if (++st->bucket < INET_LHTABLE_SIZE)
1921 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1923 struct tcp_iter_state *st = seq->private;
1928 rc = listening_get_next(seq, NULL);
1930 while (rc && *pos) {
1931 rc = listening_get_next(seq, rc);
1937 static inline bool empty_bucket(const struct tcp_iter_state *st)
1939 return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1943 * Get first established socket starting from bucket given in st->bucket.
1944 * If st->bucket is zero, the very first socket in the hash is returned.
1946 static void *established_get_first(struct seq_file *seq)
1948 struct tcp_iter_state *st = seq->private;
1949 struct net *net = seq_file_net(seq);
1953 for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1955 struct hlist_nulls_node *node;
1956 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1958 /* Lockless fast path for the common case of empty buckets */
1959 if (empty_bucket(st))
1963 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1964 if (sk->sk_family != st->family ||
1965 !net_eq(sock_net(sk), net)) {
1971 spin_unlock_bh(lock);
1977 static void *established_get_next(struct seq_file *seq, void *cur)
1979 struct sock *sk = cur;
1980 struct hlist_nulls_node *node;
1981 struct tcp_iter_state *st = seq->private;
1982 struct net *net = seq_file_net(seq);
1987 sk = sk_nulls_next(sk);
1989 sk_nulls_for_each_from(sk, node) {
1990 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1994 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1996 return established_get_first(seq);
1999 static void *established_get_idx(struct seq_file *seq, loff_t pos)
2001 struct tcp_iter_state *st = seq->private;
2005 rc = established_get_first(seq);
2008 rc = established_get_next(seq, rc);
2014 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
2017 struct tcp_iter_state *st = seq->private;
2019 st->state = TCP_SEQ_STATE_LISTENING;
2020 rc = listening_get_idx(seq, &pos);
2023 st->state = TCP_SEQ_STATE_ESTABLISHED;
2024 rc = established_get_idx(seq, pos);
2030 static void *tcp_seek_last_pos(struct seq_file *seq)
2032 struct tcp_iter_state *st = seq->private;
2033 int offset = st->offset;
2034 int orig_num = st->num;
2037 switch (st->state) {
2038 case TCP_SEQ_STATE_LISTENING:
2039 if (st->bucket >= INET_LHTABLE_SIZE)
2041 st->state = TCP_SEQ_STATE_LISTENING;
2042 rc = listening_get_next(seq, NULL);
2043 while (offset-- && rc)
2044 rc = listening_get_next(seq, rc);
2048 st->state = TCP_SEQ_STATE_ESTABLISHED;
2050 case TCP_SEQ_STATE_ESTABLISHED:
2051 if (st->bucket > tcp_hashinfo.ehash_mask)
2053 rc = established_get_first(seq);
2054 while (offset-- && rc)
2055 rc = established_get_next(seq, rc);
2063 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2065 struct tcp_iter_state *st = seq->private;
2068 if (*pos && *pos == st->last_pos) {
2069 rc = tcp_seek_last_pos(seq);
2074 st->state = TCP_SEQ_STATE_LISTENING;
2078 rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2081 st->last_pos = *pos;
2085 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2087 struct tcp_iter_state *st = seq->private;
2090 if (v == SEQ_START_TOKEN) {
2091 rc = tcp_get_idx(seq, 0);
2095 switch (st->state) {
2096 case TCP_SEQ_STATE_LISTENING:
2097 rc = listening_get_next(seq, v);
2099 st->state = TCP_SEQ_STATE_ESTABLISHED;
2102 rc = established_get_first(seq);
2105 case TCP_SEQ_STATE_ESTABLISHED:
2106 rc = established_get_next(seq, v);
2111 st->last_pos = *pos;
2115 static void tcp_seq_stop(struct seq_file *seq, void *v)
2117 struct tcp_iter_state *st = seq->private;
2119 switch (st->state) {
2120 case TCP_SEQ_STATE_LISTENING:
2121 if (v != SEQ_START_TOKEN)
2122 spin_unlock(&tcp_hashinfo.listening_hash[st->bucket].lock);
2124 case TCP_SEQ_STATE_ESTABLISHED:
2126 spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2131 int tcp_seq_open(struct inode *inode, struct file *file)
2133 struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2134 struct tcp_iter_state *s;
2137 err = seq_open_net(inode, file, &afinfo->seq_ops,
2138 sizeof(struct tcp_iter_state));
2142 s = ((struct seq_file *)file->private_data)->private;
2143 s->family = afinfo->family;
2147 EXPORT_SYMBOL(tcp_seq_open);
2149 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2152 struct proc_dir_entry *p;
2154 afinfo->seq_ops.start = tcp_seq_start;
2155 afinfo->seq_ops.next = tcp_seq_next;
2156 afinfo->seq_ops.stop = tcp_seq_stop;
2158 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2159 afinfo->seq_fops, afinfo);
2164 EXPORT_SYMBOL(tcp_proc_register);
2166 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2168 remove_proc_entry(afinfo->name, net->proc_net);
2170 EXPORT_SYMBOL(tcp_proc_unregister);
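/* The get_openreq4()/get_tcp4_sock()/get_timewait4_sock() helpers below
 * format the rows of /proc/net/tcp.  Reading that file from user space is
 * enough to see their output; a minimal sketch:
 *
 *   #include <stdio.h>
 *
 *   int main(void)
 *   {
 *           char line[512];
 *           FILE *f = fopen("/proc/net/tcp", "r");
 *
 *           if (!f)
 *                   return 1;
 *           while (fgets(line, sizeof(line), f))
 *                   fputs(line, stdout);    // header row, then one row per socket
 *           fclose(f);
 *           return 0;
 *   }
 */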
2172 static void get_openreq4(const struct request_sock *req,
2173 struct seq_file *f, int i)
2175 const struct inet_request_sock *ireq = inet_rsk(req);
2176 long delta = req->rsk_timer.expires - jiffies;
2178 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2179 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2184 ntohs(ireq->ir_rmt_port),
2186 0, 0, /* could print option size, but that is af dependent. */
2187 1, /* timers active (only the expire timer) */
2188 jiffies_delta_to_clock_t(delta),
2190 from_kuid_munged(seq_user_ns(f),
2191 sock_i_uid(req->rsk_listener)),
2192 0, /* non standard timer */
2193 0, /* open_requests have no inode */
2198 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2201 unsigned long timer_expires;
2202 const struct tcp_sock *tp = tcp_sk(sk);
2203 const struct inet_connection_sock *icsk = inet_csk(sk);
2204 const struct inet_sock *inet = inet_sk(sk);
2205 const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2206 __be32 dest = inet->inet_daddr;
2207 __be32 src = inet->inet_rcv_saddr;
2208 __u16 destp = ntohs(inet->inet_dport);
2209 __u16 srcp = ntohs(inet->inet_sport);
2213 if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2214 icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2215 icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2217 timer_expires = icsk->icsk_timeout;
2218 } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2220 timer_expires = icsk->icsk_timeout;
2221 } else if (timer_pending(&sk->sk_timer)) {
2223 timer_expires = sk->sk_timer.expires;
2226 timer_expires = jiffies;
2229 state = sk_state_load(sk);
2230 if (state == TCP_LISTEN)
2231 rx_queue = sk->sk_ack_backlog;
2233 /* Because we don't lock the socket,
2234 * we might find a transient negative value.
2236 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2238 seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2239 "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2240 i, src, srcp, dest, destp, state,
2241 tp->write_seq - tp->snd_una,
2244 jiffies_delta_to_clock_t(timer_expires - jiffies),
2245 icsk->icsk_retransmits,
2246 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2247 icsk->icsk_probes_out,
2249 atomic_read(&sk->sk_refcnt), sk,
2250 jiffies_to_clock_t(icsk->icsk_rto),
2251 jiffies_to_clock_t(icsk->icsk_ack.ato),
2252 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2254 state == TCP_LISTEN ?
2255 fastopenq->max_qlen :
2256 (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2259 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2260 struct seq_file *f, int i)
2262 long delta = tw->tw_timer.expires - jiffies;
2266 dest = tw->tw_daddr;
2267 src = tw->tw_rcv_saddr;
2268 destp = ntohs(tw->tw_dport);
2269 srcp = ntohs(tw->tw_sport);
2271 seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2272 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2273 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2274 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2275 atomic_read(&tw->tw_refcnt), tw);
2280 static int tcp4_seq_show(struct seq_file *seq, void *v)
2282 struct tcp_iter_state *st;
2283 struct sock *sk = v;
2285 seq_setwidth(seq, TMPSZ - 1);
2286 if (v == SEQ_START_TOKEN) {
2287 seq_puts(seq, " sl local_address rem_address st tx_queue "
2288 "rx_queue tr tm->when retrnsmt uid timeout "
2294 if (sk->sk_state == TCP_TIME_WAIT)
2295 get_timewait4_sock(v, seq, st->num);
2296 else if (sk->sk_state == TCP_NEW_SYN_RECV)
2297 get_openreq4(v, seq, st->num);
2299 get_tcp4_sock(v, seq, st->num);
2305 static const struct file_operations tcp_afinfo_seq_fops = {
2306 .owner = THIS_MODULE,
2307 .open = tcp_seq_open,
2309 .llseek = seq_lseek,
2310 .release = seq_release_net
2313 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2316 .seq_fops = &tcp_afinfo_seq_fops,
2318 .show = tcp4_seq_show,
2322 static int __net_init tcp4_proc_init_net(struct net *net)
2324 return tcp_proc_register(net, &tcp4_seq_afinfo);
2327 static void __net_exit tcp4_proc_exit_net(struct net *net)
2329 tcp_proc_unregister(net, &tcp4_seq_afinfo);
2332 static struct pernet_operations tcp4_net_ops = {
2333 .init = tcp4_proc_init_net,
2334 .exit = tcp4_proc_exit_net,
2337 int __init tcp4_proc_init(void)
2339 return register_pernet_subsys(&tcp4_net_ops);
2342 void tcp4_proc_exit(void)
2344 unregister_pernet_subsys(&tcp4_net_ops);
2346 #endif /* CONFIG_PROC_FS */
2348 struct proto tcp_prot = {
2350 .owner = THIS_MODULE,
2352 .connect = tcp_v4_connect,
2353 .disconnect = tcp_disconnect,
2354 .accept = inet_csk_accept,
2356 .init = tcp_v4_init_sock,
2357 .destroy = tcp_v4_destroy_sock,
2358 .shutdown = tcp_shutdown,
2359 .setsockopt = tcp_setsockopt,
2360 .getsockopt = tcp_getsockopt,
2361 .recvmsg = tcp_recvmsg,
2362 .sendmsg = tcp_sendmsg,
2363 .sendpage = tcp_sendpage,
2364 .backlog_rcv = tcp_v4_do_rcv,
2365 .release_cb = tcp_release_cb,
2367 .unhash = inet_unhash,
2368 .get_port = inet_csk_get_port,
2369 .enter_memory_pressure = tcp_enter_memory_pressure,
2370 .stream_memory_free = tcp_stream_memory_free,
2371 .sockets_allocated = &tcp_sockets_allocated,
2372 .orphan_count = &tcp_orphan_count,
2373 .memory_allocated = &tcp_memory_allocated,
2374 .memory_pressure = &tcp_memory_pressure,
2375 .sysctl_mem = sysctl_tcp_mem,
2376 .sysctl_wmem = sysctl_tcp_wmem,
2377 .sysctl_rmem = sysctl_tcp_rmem,
2378 .max_header = MAX_TCP_HEADER,
2379 .obj_size = sizeof(struct tcp_sock),
2380 .slab_flags = SLAB_DESTROY_BY_RCU,
2381 .twsk_prot = &tcp_timewait_sock_ops,
2382 .rsk_prot = &tcp_request_sock_ops,
2383 .h.hashinfo = &tcp_hashinfo,
2384 .no_autobind = true,
2385 #ifdef CONFIG_COMPAT
2386 .compat_setsockopt = compat_tcp_setsockopt,
2387 .compat_getsockopt = compat_tcp_getsockopt,
2389 .diag_destroy = tcp_abort,
2391 EXPORT_SYMBOL(tcp_prot);
2393 static void __net_exit tcp_sk_exit(struct net *net)
2397 for_each_possible_cpu(cpu)
2398 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2399 free_percpu(net->ipv4.tcp_sk);
2402 static int __net_init tcp_sk_init(struct net *net)
2406 net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2407 if (!net->ipv4.tcp_sk)
2410 for_each_possible_cpu(cpu) {
2413 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2417 sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
2418 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2421 net->ipv4.sysctl_tcp_ecn = 2;
2422 net->ipv4.sysctl_tcp_ecn_fallback = 1;
2424 net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2425 net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2426 net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2428 net->ipv4.sysctl_tcp_keepalive_time = TCP_KEEPALIVE_TIME;
2429 net->ipv4.sysctl_tcp_keepalive_probes = TCP_KEEPALIVE_PROBES;
2430 net->ipv4.sysctl_tcp_keepalive_intvl = TCP_KEEPALIVE_INTVL;
2432 net->ipv4.sysctl_tcp_syn_retries = TCP_SYN_RETRIES;
2433 net->ipv4.sysctl_tcp_synack_retries = TCP_SYNACK_RETRIES;
2434 net->ipv4.sysctl_tcp_syncookies = 1;
2435 net->ipv4.sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
2436 net->ipv4.sysctl_tcp_retries1 = TCP_RETR1;
2437 net->ipv4.sysctl_tcp_retries2 = TCP_RETR2;
2438 net->ipv4.sysctl_tcp_orphan_retries = 0;
2439 net->ipv4.sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
2440 net->ipv4.sysctl_tcp_notsent_lowat = UINT_MAX;
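/* The per-namespace defaults initialised above surface under
 * /proc/sys/net/ipv4/ (tcp_syn_retries, tcp_fin_timeout and friends).  A small
 * user-space sketch for reading one of them; with the defaults above,
 * tcp_fin_timeout typically reads back as 60 seconds:
 *
 *   #include <stdio.h>
 *
 *   static int read_sysctl_int(const char *path)
 *   {
 *           int val = -1;
 *           FILE *f = fopen(path, "r");
 *
 *           if (f) {
 *                   if (fscanf(f, "%d", &val) != 1)
 *                           val = -1;
 *                   fclose(f);
 *           }
 *           return val;
 *   }
 *
 *   // e.g. read_sysctl_int("/proc/sys/net/ipv4/tcp_fin_timeout")
 */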
2449 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2451 inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2454 static struct pernet_operations __net_initdata tcp_sk_ops = {
2455 .init = tcp_sk_init,
2456 .exit = tcp_sk_exit,
2457 .exit_batch = tcp_sk_exit_batch,
2460 void __init tcp_v4_init(void)
2462 inet_hashinfo_init(&tcp_hashinfo);
2463 if (register_pernet_subsys(&tcp_sk_ops))
2464 panic("Failed to create the TCP control socket.\n");