// SPDX-License-Identifier: GPL-2.0-only
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 */

#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>
#include <net/rstreason.h>

/* Check whether [seq, end_seq) overlaps the receive window [s_win, e_win).
 * Degenerate (zero-length) segments are in window only if they sit exactly
 * on one of its edges.
 */
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}
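
/* Illustrative behaviour (editorial, not from the original source): with
 * s_win = 1000 and e_win = 2000, segment [900, 1100) is accepted because
 * it overlaps the window; a bare ACK with seq == end_seq == 1000 hits the
 * seq == s_win shortcut; and [2000, 2000) is accepted only by the final
 * equality test. after()/before() compare u32 values wrap-safely, e.g.
 *
 *	u32 seq = 0xfffffff0, end_seq = 0x10;
 *	bool ok = tcp_in_window(seq, end_seq, 0xfffffff8, 0x100);
 *	// ok == true: end_seq is "after" s_win despite the wraparound
 */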

static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
				  const struct sk_buff *skb, int mib_idx)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

	if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
				  &tcptw->tw_last_oow_ack_time)) {
		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by caller.
		 */
		return TCP_TW_ACK;
	}

	/* We are rate-limiting, so just release the tw sock and drop skb. */
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
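
/* Editorial note (not in the original): tcp_oow_rate_limited() above
 * implements the rate limit controlled by the net.ipv4.tcp_invalid_ratelimit
 * sysctl; tw_last_oow_ack_time records when this timewait bucket last
 * answered an out-of-window segment with a challenge ACK.
 */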

static void twsk_rcv_nxt_update(struct tcp_timewait_sock *tcptw, u32 seq)
{
#ifdef CONFIG_TCP_AO
	struct tcp_ao_info *ao;

	ao = rcu_dereference(tcptw->ao_info);
	if (unlikely(ao && seq < tcptw->tw_rcv_nxt))
		WRITE_ONCE(ao->rcv_sne, ao->rcv_sne + 1);
#endif
	tcptw->tw_rcv_nxt = seq;
}
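
/* Illustrative sketch (editorial, RFC 5925 semantics assumed): TCP-AO
 * authenticates segments with a 64-bit extended sequence number built from
 * a 32-bit sequence number extension (SNE) and the 32-bit header value,
 * conceptually:
 *
 *	static inline u64 tcp_ao_ext_seq(u32 sne, u32 seq)
 *	{
 *		return ((u64)sne << 32) | seq;
 *	}
 *
 * When rcv_nxt moves numerically backwards the 32-bit space has wrapped,
 * so rcv_sne must be bumped; that is why the test above is a plain
 * unsigned "seq < tcptw->tw_rcv_nxt" rather than the wrap-safe before().
 */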

/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which results in the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated
 *   from the RTO (see the note below).
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (f.e. based on PAWS), we could truncate TIME-WAIT to several RTOs.
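 *
 * Editorial note (not part of the original comment): the concrete choice
 * made in tcp_time_wait() below is 3.5 * RTO, computed with shifts:
 *
 *	const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 *
 * i.e. 4 * RTO - RTO / 2; the caller-supplied timeout is raised to at
 * least this value before the timewait timer is armed.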
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th, u32 *tw_isn)
{
	struct tcp_options_received tmp_opt;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;
	int ts_recent_stamp;

	tmp_opt.saw_tstamp = 0;
	ts_recent_stamp = READ_ONCE(tcptw->tw_ts_recent_stamp);
	if (th->doff > (sizeof(*th) >> 2) && ts_recent_stamp) {
		tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
			tmp_opt.ts_recent	= READ_ONCE(tcptw->tw_ts_recent);
			tmp_opt.ts_recent_stamp	= ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return tcp_timewait_check_oow_rate_limit(
				tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			return TCP_TW_RST;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
			return TCP_TW_RST;

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		twsk_rcv_nxt_update(tcptw, TCP_SKB_CB(skb)->end_seq);

		if (tmp_opt.saw_tstamp) {
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				   ktime_get_seconds());
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
		}

		inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 *	Now real TIME-WAIT state.
	 *
	 *	RFC 1122:
	 *	"When a connection is [...] on TIME-WAIT state [...]
	 *	[a TCP] MAY accept a new SYN from the remote TCP to
	 *	reopen the connection directly, if it:
	 *
	 *	(1)  assigns its initial sequence number for the new
	 *	connection to be larger than the largest sequence
	 *	number it used on the previous connection incarnation,
	 *	and
	 *
	 *	(2)  returns to TIME-WAIT state if the SYN turns out
	 *	to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (!READ_ONCE(twsk_net(tw)->ipv4.sysctl_tcp_rfc1337)) {
kill:
				inet_twsk_deschedule_put(tw);
				return TCP_TW_SUCCESS;
			}
		} else {
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
		}

		if (tmp_opt.saw_tstamp) {
			WRITE_ONCE(tcptw->tw_ts_recent,
				   tmp_opt.rcv_tsval);
			WRITE_ONCE(tcptw->tw_ts_recent_stamp,
				   ktime_get_seconds());
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works at rates <40Mbit/sec.
	   However, if paws works, it is reliable AND, even more,
	   we can even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive RST in reply to our
	   SYN-ACK), we must return the socket to time-wait state. It is
	   not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(READ_ONCE(tcptw->tw_ts_recent) - tmp_opt.rcv_tsval) < 0))) {
		/* Pick a fresh ISN beyond anything the old incarnation could
		 * still have in flight: past snd_nxt plus a full 64KB window
		 * (see RFC 1122 rule (1) quoted above).
		 */
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;

		if (isn == 0)
			isn++;
		*tw_isn = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		__NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

		return tcp_timewait_check_oow_rate_limit(
			tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);
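
/* Illustrative sketch (editorial, not part of this file): callers such as
 * tcp_v4_rcv()/tcp_v6_rcv() dispatch on the returned status roughly like
 *
 *	switch (tcp_timewait_state_process(tw, skb, th, &isn)) {
 *	case TCP_TW_SYN:	// acceptable reopen: look up a listener
 *				// and process the SYN with the new isn
 *	case TCP_TW_ACK:	// send an ACK from the timewait bucket
 *	case TCP_TW_RST:	// answer with a reset
 *	case TCP_TW_SUCCESS:	// nothing more to do, segment consumed
 *		...;
 *	}
 *
 * The exact handling lives in the IPv4/IPv6 receive paths.
 */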

static void tcp_time_wait_init(struct sock *sk, struct tcp_timewait_sock *tcptw)
{
#ifdef CONFIG_TCP_MD5SIG
	const struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_md5sig_key *key;

	/*
	 * The timewait bucket does not have the key DB from the
	 * sock structure. We just make a quick copy of the
	 * md5 key being used (if indeed we are using one)
	 * so the timewait ack generating code has the key.
	 */
	tcptw->tw_md5_key = NULL;
	if (!static_branch_unlikely(&tcp_md5_needed.key))
		return;

	key = tp->af_specific->md5_lookup(sk, sk);
	if (key) {
		tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
		if (!tcptw->tw_md5_key)
			return;
		if (!static_key_fast_inc_not_disabled(&tcp_md5_needed.key.key))
			goto out_free;
		tcp_md5_add_sigpool();
	}
	return;
out_free:
	kfree(tcptw->tw_md5_key);
	tcptw->tw_md5_key = NULL;
#endif
}

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);
	struct net *net = sock_net(sk);
	struct inet_timewait_sock *tw;

	tw = inet_twsk_alloc(sk, &net->ipv4.tcp_death_row, state);

	if (tw) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		/* 3.5 * RTO; see the TIME-WAIT timeout discussion above */
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);

		tw->tw_transparent	= inet_test_bit(TRANSPARENT, sk);
		tw->tw_mark		= sk->sk_mark;
		tw->tw_priority		= READ_ONCE(sk->sk_priority);
		tw->tw_rcv_wscale	= tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	= tp->rcv_nxt;
		tcptw->tw_snd_nxt	= tp->snd_nxt;
		tcptw->tw_rcv_wnd	= tcp_receive_window(tp);
		tcptw->tw_ts_recent	= tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
		tcptw->tw_ts_offset	= tp->tsoffset;
		tw->tw_usec_ts		= tp->tcp_usec_ts;
		tcptw->tw_last_oow_ack_time = 0;
		tcptw->tw_tx_delay	= tp->tcp_tx_delay;
		tw->tw_txhash		= sk->sk_txhash;
#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);

			tw->tw_v6_daddr = sk->sk_v6_daddr;
			tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
			tw->tw_ipv6only = sk->sk_ipv6only;
		}
#endif

		tcp_time_wait_init(sk, tcptw);
		tcp_ao_time_wait(tcptw, tp);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (state == TCP_TIME_WAIT)
			timeo = TCP_TIMEWAIT_LEN;

		/* Linkage updates.
		 * Note that access to tw after this point is illegal.
		 */
		inet_twsk_hashdance_schedule(tw, sk, net->ipv4.tcp_death_row.hashinfo, timeo);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up. We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS(net, LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}
EXPORT_SYMBOL(tcp_time_wait);
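
/* Editorial note (summarised from the close path, not part of this file):
 * the two users of this helper map to the two states named in the comment
 * above. Callers enter with state == TCP_TIME_WAIT, where the timeout is
 * forced to TCP_TIMEWAIT_LEN, or with state == TCP_FIN_WAIT2 and a timeout
 * derived from tp->linger2, giving the "dead fin-wait-2" variant whose
 * FIN-WAIT2 substate handling appears in tcp_timewait_state_process().
 */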

#ifdef CONFIG_TCP_MD5SIG
static void tcp_md5_twsk_free_rcu(struct rcu_head *head)
{
	struct tcp_md5sig_key *key;

	key = container_of(head, struct tcp_md5sig_key, rcu);
	kfree(key);
	static_branch_slow_dec_deferred(&tcp_md5_needed);
	tcp_md5_release_sigpool();
}
#endif /* CONFIG_TCP_MD5SIG */

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	if (static_branch_unlikely(&tcp_md5_needed.key)) {
		struct tcp_timewait_sock *twsk = tcp_twsk(sk);

		if (twsk->tw_md5_key)
			call_rcu(&twsk->tw_md5_key->rcu, tcp_md5_twsk_free_rcu);
	}
#endif
	tcp_ao_destroy_sock(sk, true);
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

void tcp_twsk_purge(struct list_head *net_exit_list)
{
	bool purged_once = false;
	struct net *net;

	list_for_each_entry(net, net_exit_list, exit_list) {
		if (net->ipv4.tcp_death_row.hashinfo->pernet) {
			/* Even if tw_refcount == 1, we must clean up kernel reqsk */
			inet_twsk_purge(net->ipv4.tcp_death_row.hashinfo);
		} else if (!purged_once) {
			inet_twsk_purge(&tcp_hashinfo);
			purged_once = true;
		}
	}
}

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
			   const struct sock *sk_listener,
			   const struct dst_entry *dst)
{
	struct inet_request_sock *ireq = inet_rsk(req);
	const struct tcp_sock *tp = tcp_sk(sk_listener);
	int full_space = tcp_full_space(sk_listener);
	u32 window_clamp;
	__u8 rcv_wscale;
	u32 rcv_wnd;
	int mss;

	mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
	window_clamp = READ_ONCE(tp->window_clamp);
	/* Set this up on the first call only */
	req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

	/* limit the window selection if the user enforces a smaller rx buffer */
	if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
	    (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
		req->rsk_window_clamp = full_space;

	rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
	if (rcv_wnd == 0)
		rcv_wnd = dst_metric(dst, RTAX_INITRWND);
	else if (full_space < rcv_wnd * mss)
		full_space = rcv_wnd * mss;

	/* tcp_full_space because it is guaranteed to be the first packet */
	tcp_select_initial_window(sk_listener, full_space,
		mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
		&req->rsk_rcv_wnd,
		&req->rsk_window_clamp,
		ireq->wscale_ok,
		&rcv_wscale,
		rcv_wnd);
	ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);
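
/* Worked example (editorial): with ireq->tstamp_ok set, the mss passed to
 * tcp_select_initial_window() above is reduced by TCPOLEN_TSTAMP_ALIGNED
 * (12 bytes), because every later segment will carry the timestamp option:
 *
 *	int mss = 1460;				// typical Ethernet clamp
 *	int eff = mss - TCPOLEN_TSTAMP_ALIGNED;	// 1448 payload bytes
 *
 * so the initial window is sized against the payload actually available
 * per segment rather than the nominal MSS.
 */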

static void tcp_ecn_openreq_child(struct tcp_sock *tp,
				  const struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
	bool ca_got_dst = false;

	if (ca_key != TCP_CA_UNSPEC) {
		const struct tcp_congestion_ops *ca;

		rcu_read_lock();
		ca = tcp_ca_find_key(ca_key);
		if (likely(ca && bpf_try_module_get(ca, ca->owner))) {
			icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
			icsk->icsk_ca_ops = ca;
			ca_got_dst = true;
		}
		rcu_read_unlock();
	}

	/* If no valid choice made yet, assign current system default ca. */
	if (!ca_got_dst &&
	    (!icsk->icsk_ca_setsockopt ||
	     !bpf_try_module_get(icsk->icsk_ca_ops, icsk->icsk_ca_ops->owner)))
		tcp_assign_congestion_control(sk);

	tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);
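
/* Usage note (editorial): the RTAX_CC_ALGO metric consulted above can be
 * set per route from userspace with iproute2, e.g.
 *
 *	ip route replace 192.0.2.0/24 dev eth0 congctl bbr
 *
 * Connections accepted over such a route start with that congestion
 * control module; icsk_ca_dst_locked records whether the route declared
 * the choice locked.
 */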

static void smc_check_reset_syn_req(const struct tcp_sock *oldtp,
				    struct request_sock *req,
				    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
	struct inet_request_sock *ireq;

	if (static_branch_unlikely(&tcp_have_smc)) {
		ireq = inet_rsk(req);
		if (oldtp->syn_smc && !ireq->smc_ok)
			newtp->syn_smc = 0;
	}
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
				      struct request_sock *req,
				      struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct tcp_request_sock *treq = tcp_rsk(req);
	struct inet_connection_sock *newicsk;
	const struct tcp_sock *oldtp;
	struct tcp_sock *newtp;
	u32 seq;

	if (!newsk)
		return NULL;

	newicsk = inet_csk(newsk);
	newtp = tcp_sk(newsk);
	oldtp = tcp_sk(sk);

	smc_check_reset_syn_req(oldtp, req, newtp);

	/* Now setup tcp_sock */
	newtp->pred_flags = 0;

	seq = treq->rcv_isn + 1;
	newtp->rcv_wup = seq;
	WRITE_ONCE(newtp->copied_seq, seq);
	WRITE_ONCE(newtp->rcv_nxt, seq);
	newtp->segs_in = 1;

	seq = treq->snt_isn + 1;
	newtp->snd_sml = newtp->snd_una = seq;
	WRITE_ONCE(newtp->snd_nxt, seq);
	newtp->snd_up = seq;

	INIT_LIST_HEAD(&newtp->tsq_node);
	INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

	tcp_init_wl(newtp, treq->rcv_isn);

	minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
	newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

	newtp->lsndtime = tcp_jiffies32;
	newsk->sk_txhash = READ_ONCE(treq->txhash);
	newtp->total_retrans = req->num_retrans;

	tcp_init_xmit_timers(newsk);
	WRITE_ONCE(newtp->write_seq, newtp->pushed_seq = treq->snt_isn + 1);

	if (sock_flag(newsk, SOCK_KEEPOPEN))
		inet_csk_reset_keepalive_timer(newsk,
					       keepalive_time_when(newtp));

	newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
	newtp->rx_opt.sack_ok = ireq->sack_ok;
	newtp->window_clamp = req->rsk_window_clamp;
	newtp->rcv_ssthresh = req->rsk_rcv_wnd;
	newtp->rcv_wnd = req->rsk_rcv_wnd;
	newtp->rx_opt.wscale_ok = ireq->wscale_ok;
	if (newtp->rx_opt.wscale_ok) {
		newtp->rx_opt.snd_wscale = ireq->snd_wscale;
		newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
	} else {
		newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
		newtp->window_clamp = min(newtp->window_clamp, 65535U);
	}
	newtp->snd_wnd = ntohs(tcp_hdr(skb)->window) << newtp->rx_opt.snd_wscale;
	newtp->max_window = newtp->snd_wnd;

	if (newtp->rx_opt.tstamp_ok) {
		newtp->tcp_usec_ts = treq->req_usec_ts;
		newtp->rx_opt.ts_recent = READ_ONCE(req->ts_recent);
		newtp->rx_opt.ts_recent_stamp = ktime_get_seconds();
		newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
	} else {
		newtp->tcp_usec_ts = 0;
		newtp->rx_opt.ts_recent_stamp = 0;
		newtp->tcp_header_len = sizeof(struct tcphdr);
	}
	if (req->num_timeout) {
		newtp->total_rto = req->num_timeout;
		newtp->undo_marker = treq->snt_isn;
		if (newtp->tcp_usec_ts) {
			newtp->retrans_stamp = treq->snt_synack;
			newtp->total_rto_time = (u32)(tcp_clock_us() -
						      newtp->retrans_stamp) / USEC_PER_MSEC;
		} else {
			newtp->retrans_stamp = div_u64(treq->snt_synack,
						       USEC_PER_SEC / TCP_TS_HZ);
			newtp->total_rto_time = tcp_clock_ms() -
						newtp->retrans_stamp;
		}
		newtp->total_rto_recoveries = 1;
	}
	newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
	newtp->md5sig_info = NULL;	/*XXX*/
#endif
#ifdef CONFIG_TCP_AO
	newtp->ao_info = NULL;

	if (tcp_rsk_used_ao(req)) {
		struct tcp_ao_key *ao_key;

		ao_key = treq->af_specific->ao_lookup(sk, req, tcp_rsk(req)->ao_keyid, -1);
		if (ao_key)
			newtp->tcp_header_len += tcp_ao_len_aligned(ao_key);
	}
#endif
	if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
		newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
	newtp->rx_opt.mss_clamp = req->mss;
	tcp_ecn_openreq_child(newtp, req);
	newtp->fastopen_req = NULL;
	RCU_INIT_POINTER(newtp->fastopen_rsk, NULL);

	newtp->bpf_chg_cc_inprogress = 0;
	tcp_bpf_clone(sk, newsk);

	__TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);

	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);
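
/* Worked example (editorial): snd_wnd above is read from the segment that
 * completes the handshake (normally the third ACK), so the negotiated
 * scale already applies. With snd_wscale == 7 and a raw header window of
 * 512:
 *
 *	u16 raw = 512;
 *	u32 snd_wnd = (u32)raw << 7;	// 65536 byte send window
 *
 * A SYN's window field itself is never scaled (RFC 7323), which is why
 * the shift is only valid once the option exchange has been agreed.
 */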

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 *
 * Note: If @fastopen is true, this can be called from process context.
 *       Otherwise, this is from BH context.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   bool fastopen, bool *req_stolen)
{
	struct tcp_options_received tmp_opt;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;
	bool own_req;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = READ_ONCE(req->ts_recent);
			if (tmp_opt.rcv_tsecr)
				tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = ktime_get_seconds() - reqsk_timeout(req, TCP_RTO_MAX) / HZ;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 *
		 * Reset timer after retransmitting SYNACK, similar to
		 * the idea of fast retransmit in recovery.
		 */
		if (!tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time) &&
		    !inet_rtx_syn_ack(sk, req)) {
			unsigned long expires = jiffies;

			expires += reqsk_timeout(req, TCP_RTO_MAX);
			if (!fastopen)
				mod_timer_pending(&req->rsk_timer, expires);
			else
				req->rsk_timer.expires = expires;
		}
		return NULL;
	}

	/* Further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however: it fails only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes. So
	   does sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK. Otherwise, we create an established connection. Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare. The probability is about
	   the same as us discovering intelligent life on another planet tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating protocol. All the checks must be made
	   before attempt to create socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would be not such a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq,
					  TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt,
					  tcp_rsk(req)->rcv_nxt +
					  tcp_synack_window(req))) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST) &&
		    !tcp_oow_rate_limited(sock_net(sk), skb,
					  LINUX_MIB_TCPACKSKIPPEDSYNRECV,
					  &tcp_rsk(req)->last_oow_ack_time))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	/* TODO: We probably should defer ts_recent change once
	 * we take ownership of @req.
	 */
	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		WRITE_ONCE(req->ts_recent, tmp_opt.rcv_tsval);

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set. If ACK not set, just silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->num_timeout < READ_ONCE(inet_csk(sk)->icsk_accept_queue.rskq_defer_accept) &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL,
							 req, &own_req);
	if (!child)
		goto listen_overflow;

	if (own_req && rsk_drop_req(req)) {
		reqsk_queue_removed(&inet_csk(req->rsk_listener)->icsk_accept_queue, req);
		inet_csk_reqsk_queue_drop_and_put(req->rsk_listener, req);
		return child;
	}

	sock_rps_save_rxhash(child, skb);
	tcp_synack_rtt_meas(child, req);
	*req_stolen = !own_req;
	return inet_csk_complete_hashdance(sk, child, req, own_req);

listen_overflow:
	if (sk != req->rsk_listener)
		__NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMIGRATEREQFAILURE);

	if (!READ_ONCE(sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow)) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb, SK_RST_REASON_INVALID_SYN);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk, skb);
	}
	if (!fastopen) {
		bool unlinked = inet_csk_reqsk_queue_drop(sk, req);

		if (unlinked)
			__NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
		*req_stolen = !unlinked;
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);
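
/* Illustrative summary (editorial): callers in the IPv4/IPv6 receive paths
 * interpret the return value roughly as
 *
 *	struct sock *nsk = tcp_check_req(sk, skb, req, false, &stolen);
 *
 *	if (!nsk)		// segment consumed or dropped
 *		...;
 *	else if (nsk == sk)	// hand the skb to the listener, which
 *		...;		// e.g. resets an unacceptable ACK
 *	else			// established child: tcp_child_process()
 *		...;
 *
 * matching the "reset will be sent by listening socket" comment above.
 */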

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

enum skb_drop_reason tcp_child_process(struct sock *parent, struct sock *child,
				       struct sk_buff *skb)
	__releases(&((child)->sk_lock.slock))
{
	enum skb_drop_reason reason = SKB_NOT_DROPPED_YET;
	int state = child->sk_state;

	/* record sk_napi_id and sk_rx_queue_mapping of child. */
	sk_mark_napi_id_set(child, skb);

	tcp_segs_in(tcp_sk(child), skb);
	if (!sock_owned_by_user(child)) {
		reason = tcp_rcv_state_process(child, skb);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent);
	} else {
		/* Alas, it is possible again, because we do lookup
		 * in main socket hash table and lock on listening
		 * socket does not protect us more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return reason;
}
EXPORT_SYMBOL(tcp_child_process);