/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <[email protected]>
 *              Mark Evans, <[email protected]>
 *              Corey Minyard <[email protected]>
 *              Florian La Roche, <[email protected]>
 *              Charles Hedrick, <[email protected]>
 *              Linus Torvalds, <[email protected]>
 *              Alan Cox, <[email protected]>
 *              Matthew Dillon, <[email protected]>
 *              Arnt Gulbrandsen, <[email protected]>
 *              Jorge Cwik, <[email protected]>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <linux/static_key.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
#include <net/busy_poll.h>

static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
        if (seq == s_win)
                return true;
        if (after(end_seq, s_win) && before(seq, e_win))
                return true;
        return seq == e_win && seq == end_seq;
}

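/* Illustrative note (added annotation, not in the original file): this is
 * the RFC 793 segment acceptability test against the receive window
 * [s_win, e_win).  With s_win = 100 and e_win = 200, for example:
 *   - an empty segment with seq == 100 is acceptable (first test),
 *   - a segment spanning 90..150 overlaps the window (second test),
 *   - an empty segment sitting exactly at e_win == 200 is accepted
 *     only by the final test (seq == e_win && seq == end_seq).
 */
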
static enum tcp_tw_status
tcp_timewait_check_oow_rate_limit(struct inet_timewait_sock *tw,
                                  const struct sk_buff *skb, int mib_idx)
{
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);

        if (!tcp_oow_rate_limited(twsk_net(tw), skb, mib_idx,
                                  &tcptw->tw_last_oow_ack_time)) {
                /* Send ACK. Note, we do not put the bucket,
                 * it will be released by caller.
                 */
                return TCP_TW_ACK;
        }

        /* We are rate-limiting, so just release the tw sock and drop skb. */
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}

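/* Annotation (not in the original source): tcp_oow_rate_limited() bounds
 * how often we answer out-of-window segments; the rate is controlled by
 * the tcp_invalid_ratelimit sysctl, which dampens potential ACK loops.
 */
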
/*
 * * The main purpose of the TIME-WAIT state is to close the connection
 *   gracefully when one of the ends sits in LAST-ACK or CLOSING,
 *   retransmitting its FIN (and, probably, a tail of data) because one
 *   or more of our ACKs were lost.
 * * What is the TIME-WAIT timeout? It is associated with the maximal
 *   packet lifetime in the internet, which leads to the wrong conclusion
 *   that it is set to catch "old duplicate segments" wandering out of
 *   their path.  That is not quite correct.  This timeout is calculated
 *   so that it exceeds the maximal retransmission timeout by enough to
 *   allow losing one (or more) segments sent by the peer and our ACKs.
 *   This time may be calculated from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   has finally closed, and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT with
 *   this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates (e.g. based
 *   on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on a FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket is
 * _not_ stateless.  It means that, strictly speaking, we must spinlock
 * it.  I do not want to!  Well, the probability of misbehaviour is
 * ridiculously low and, it seems, we could use some mb() tricks to
 * avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results.
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
                           const struct tcphdr *th)
{
        struct tcp_options_received tmp_opt;
        struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
        bool paws_reject = false;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
                tcp_parse_options(twsk_net(tw), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcptw->tw_ts_offset;
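                        /* Annotation (an assumption, not in the original
                         * comments): tw_ts_offset is the per-connection
                         * timestamp offset, so subtracting it converts the
                         * echoed value back to our local timestamp clock
                         * before the PAWS checks below.
                         */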
                        tmp_opt.ts_recent = tcptw->tw_ts_recent;
                        tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        if (tw->tw_substate == TCP_FIN_WAIT2) {
                /* Just repeat all the checks of tcp_rcv_state_process() */

                /* Out of window, send ACK */
                if (paws_reject ||
                    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                   tcptw->tw_rcv_nxt,
                                   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
                        return tcp_timewait_check_oow_rate_limit(
                                tw, skb, LINUX_MIB_TCPACKSKIPPEDFINWAIT2);

                if (th->rst)
                        goto kill;

                if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
                        return TCP_TW_RST;

                /* Dup ACK? */
                if (!th->ack ||
                    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
                    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
                        inet_twsk_put(tw);
                        return TCP_TW_SUCCESS;
                }

                /* New data or FIN. If new data arrive after half-duplex close,
                 * reset.
                 */
                if (!th->fin ||
                    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1)
                        return TCP_TW_RST;

                /* FIN arrived, enter true time-wait state. */
                tw->tw_substate = TCP_TIME_WAIT;
                tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent_stamp = get_seconds();
                        tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
                }

                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);
                return TCP_TW_ACK;
        }

        /*
         *      Now real TIME-WAIT state.
         *
         *      RFC 1122:
         *      "When a connection is [...] on TIME-WAIT state [...]
         *      [a TCP] MAY accept a new SYN from the remote TCP to
         *      reopen the connection directly, if it:
         *
         *      (1)  assigns its initial sequence number for the new
         *      connection to be larger than the largest sequence
         *      number it used on the previous connection incarnation,
         *      and
         *
         *      (2)  returns to TIME-WAIT state if the SYN turns out
         *      to be an old duplicate".
         */

        if (!paws_reject &&
            (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
             (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
                /* In window segment, it may be only reset or bare ack. */

                if (th->rst) {
                        /* This is TIME_WAIT assassination, in two flavors.
                         * Oh well... nobody has a sufficient solution to this
                         * protocol bug yet.
                         */
                        if (twsk_net(tw)->ipv4.sysctl_tcp_rfc1337 == 0) {
kill:
                                inet_twsk_deschedule_put(tw);
                                return TCP_TW_SUCCESS;
                        }
                }
                inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

                if (tmp_opt.saw_tstamp) {
                        tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
                        tcptw->tw_ts_recent_stamp = get_seconds();
                }

                inet_twsk_put(tw);
                return TCP_TW_SUCCESS;
        }

        /* Out of window segment.

           All the segments are ACKed immediately.

           The only exception is a new SYN. We accept it if it is not an
           old duplicate and we are not in danger of being killed by
           delayed old duplicates. The RFC check, that it carries a newer
           sequence number, works only at rates below ~40Mbit/sec.
           However, if PAWS works, it is reliable, and moreover we may
           even relax the silly seq space cutoff.

           RED-PEN: we violate the main RFC requirement: if this SYN later
           turns out to be an old duplicate (i.e. we receive an RST in
           reply to our SYN-ACK), we must return the socket to the
           time-wait state. That is not good, but not fatal yet.
         */

        if (th->syn && !th->rst && !th->ack && !paws_reject &&
            (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
             (tmp_opt.saw_tstamp &&
              (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
                u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
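                /* Annotation (not in the original): the new ISN is pushed
                 * past tw_snd_nxt by the maximum unscaled TCP window (65535)
                 * plus 2, so it lands above anything the old incarnation
                 * could plausibly still have in flight, matching RFC 1122
                 * condition (1) quoted above.
                 */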
                if (isn == 0)
                        isn++;
                TCP_SKB_CB(skb)->tcp_tw_isn = isn;
                return TCP_TW_SYN;
        }

        if (paws_reject)
                __NET_INC_STATS(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

        if (!th->rst) {
                /* In this case we must reset the TIMEWAIT timer.
                 *
                 * If it is an ACKless SYN it may be both an old duplicate
                 * and a new good SYN with a random sequence number <rcv_nxt.
                 * Do not reschedule in the last case.
                 */
                if (paws_reject || th->ack)
                        inet_twsk_reschedule(tw, TCP_TIMEWAIT_LEN);

                return tcp_timewait_check_oow_rate_limit(
                        tw, skb, LINUX_MIB_TCPACKSKIPPEDTIMEWAIT);
        }
        inet_twsk_put(tw);
        return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);
        const struct tcp_sock *tp = tcp_sk(sk);
        struct inet_timewait_sock *tw;
        struct inet_timewait_death_row *tcp_death_row = &sock_net(sk)->ipv4.tcp_death_row;

        tw = inet_twsk_alloc(sk, tcp_death_row, state);

        if (tw) {
                struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
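                /* Annotation (not in the original): (rto << 2) - (rto >> 1)
                 * is 4*RTO - RTO/2 == 3.5*RTO computed with shifts; e.g. an
                 * icsk_rto of 200ms gives a 700ms floor for timeo below.
                 */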
                struct inet_sock *inet = inet_sk(sk);

                tw->tw_transparent = inet->transparent;
                tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
                tcptw->tw_rcv_nxt = tp->rcv_nxt;
                tcptw->tw_snd_nxt = tp->snd_nxt;
                tcptw->tw_rcv_wnd = tcp_receive_window(tp);
                tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
                tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;
                tcptw->tw_ts_offset = tp->tsoffset;
                tcptw->tw_last_oow_ack_time = 0;

#if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
                        struct ipv6_pinfo *np = inet6_sk(sk);

                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
#endif

#ifdef CONFIG_TCP_MD5SIG
                /*
                 * The timewait bucket does not have the key DB from the
                 * sock structure. We just make a quick copy of the
                 * md5 key being used (if indeed we are using one)
                 * so the timewait ack generating code has the key.
                 */
                do {
                        struct tcp_md5sig_key *key;

                        tcptw->tw_md5_key = NULL;
                        key = tp->af_specific->md5_lookup(sk, sk);
                        if (key) {
                                tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
                                BUG_ON(tcptw->tw_md5_key && !tcp_alloc_md5sig_pool());
                        }
                } while (0);
#endif

                /* Get the TIME_WAIT timeout firing. */
                if (timeo < rto)
                        timeo = rto;

                tw->tw_timeout = TCP_TIMEWAIT_LEN;
                if (state == TCP_TIME_WAIT)
                        timeo = TCP_TIMEWAIT_LEN;

                /* tw_timer is pinned, so we need to make sure BH are disabled
                 * in the following section, otherwise the timer handler could
                 * run before we complete the initialization.
                 */
                local_bh_disable();
                inet_twsk_schedule(tw, timeo);
                /* Linkage updates. */
                __inet_twsk_hashdance(tw, sk, &tcp_hashinfo);
                inet_twsk_put(tw);
                local_bh_enable();
        } else {
                /* Sorry, if we're out of memory, just CLOSE this
                 * socket up.  We've got bigger problems than
                 * non-graceful socket closings.
                 */
                NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
        }

        tcp_update_metrics(sk);
        tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
        struct tcp_timewait_sock *twsk = tcp_twsk(sk);

        if (twsk->tw_md5_key)
                kfree_rcu(twsk->tw_md5_key, rcu);
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

/* Warning : This function is called without sk_listener being locked.
 * Be sure to read socket fields once, as their value could change under us.
 */
void tcp_openreq_init_rwin(struct request_sock *req,
                           const struct sock *sk_listener,
                           const struct dst_entry *dst)
{
        struct inet_request_sock *ireq = inet_rsk(req);
        const struct tcp_sock *tp = tcp_sk(sk_listener);
        int full_space = tcp_full_space(sk_listener);
        u32 window_clamp;
        __u8 rcv_wscale;
        u32 rcv_wnd;
        int mss;

        mss = tcp_mss_clamp(tp, dst_metric_advmss(dst));
        window_clamp = READ_ONCE(tp->window_clamp);
        /* Set this up on the first call only */
        req->rsk_window_clamp = window_clamp ? : dst_metric(dst, RTAX_WINDOW);

        /* limit the window selection if the user enforces a smaller rx buffer */
        if (sk_listener->sk_userlocks & SOCK_RCVBUF_LOCK &&
            (req->rsk_window_clamp > full_space || req->rsk_window_clamp == 0))
                req->rsk_window_clamp = full_space;

        rcv_wnd = tcp_rwnd_init_bpf((struct sock *)req);
        if (rcv_wnd == 0)
                rcv_wnd = dst_metric(dst, RTAX_INITRWND);
        else if (full_space < rcv_wnd * mss)
                full_space = rcv_wnd * mss;
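        /* Annotation (an assumption, not in the original comments): a BPF
         * sock_ops program may supply the initial receive window here; when
         * it does, full_space is grown so that the requested window survives
         * the clamping inside tcp_select_initial_window() below.
         */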

        /* tcp_full_space because it is guaranteed to be the first packet */
        tcp_select_initial_window(sk_listener, full_space,
                                  mss - (ireq->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0),
                                  &req->rsk_rcv_wnd,
                                  &req->rsk_window_clamp,
                                  ireq->wscale_ok,
                                  &rcv_wscale,
                                  rcv_wnd);
        ireq->rcv_wscale = rcv_wscale;
}
EXPORT_SYMBOL(tcp_openreq_init_rwin);

389 | static void tcp_ecn_openreq_child(struct tcp_sock *tp, |
390 | const struct request_sock *req) | |
bdf1ee5d IJ |
391 | { |
392 | tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0; | |
393 | } | |
394 | ||
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        u32 ca_key = dst_metric(dst, RTAX_CC_ALGO);
        bool ca_got_dst = false;

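        /* Annotation (not in the original): a route can pin a congestion
         * control algorithm through the RTAX_CC_ALGO metric (e.g. set with
         * "ip route ... congctl <name>"); TCP_CA_UNSPEC means the route
         * expresses no preference.
         */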
        if (ca_key != TCP_CA_UNSPEC) {
                const struct tcp_congestion_ops *ca;

                rcu_read_lock();
                ca = tcp_ca_find_key(ca_key);
                if (likely(ca && try_module_get(ca->owner))) {
                        icsk->icsk_ca_dst_locked = tcp_ca_dst_locked(dst);
                        icsk->icsk_ca_ops = ca;
                        ca_got_dst = true;
                }
                rcu_read_unlock();
        }

        /* If no valid choice made yet, assign current system default ca. */
        if (!ca_got_dst &&
            (!icsk->icsk_ca_setsockopt ||
             !try_module_get(icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);

        tcp_set_ca_state(sk, TCP_CA_Open);
}
EXPORT_SYMBOL_GPL(tcp_ca_openreq_child);

static void smc_check_reset_syn_req(struct tcp_sock *oldtp,
                                    struct request_sock *req,
                                    struct tcp_sock *newtp)
{
#if IS_ENABLED(CONFIG_SMC)
        struct inet_request_sock *ireq;

        if (static_branch_unlikely(&tcp_have_smc)) {
                ireq = inet_rsk(req);
                if (oldtp->syn_smc && !ireq->smc_ok)
                        newtp->syn_smc = 0;
        }
#endif
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid lots of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb)
{
        struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

        if (newsk) {
                const struct inet_request_sock *ireq = inet_rsk(req);
                struct tcp_request_sock *treq = tcp_rsk(req);
                struct inet_connection_sock *newicsk = inet_csk(newsk);
                struct tcp_sock *newtp = tcp_sk(newsk);
                struct tcp_sock *oldtp = tcp_sk(sk);

                smc_check_reset_syn_req(oldtp, req, newtp);

                /* Now setup tcp_sock */
                newtp->pred_flags = 0;

                newtp->rcv_wup = newtp->copied_seq =
                newtp->rcv_nxt = treq->rcv_isn + 1;
                newtp->segs_in = 1;

                newtp->snd_sml = newtp->snd_una =
                newtp->snd_nxt = newtp->snd_up = treq->snt_isn + 1;

                INIT_LIST_HEAD(&newtp->tsq_node);
                INIT_LIST_HEAD(&newtp->tsorted_sent_queue);

                tcp_init_wl(newtp, treq->rcv_isn);

                newtp->srtt_us = 0;
                newtp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
                minmax_reset(&newtp->rtt_min, tcp_jiffies32, ~0U);
                newicsk->icsk_rto = TCP_TIMEOUT_INIT;
                newicsk->icsk_ack.lrcvtime = tcp_jiffies32;

                newtp->packets_out = 0;
                newtp->retrans_out = 0;
                newtp->sacked_out = 0;
                newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
                newtp->tlp_high_seq = 0;
                newtp->lsndtime = tcp_jiffies32;
                newsk->sk_txhash = treq->txhash;
                newtp->last_oow_ack_time = 0;
                newtp->total_retrans = req->num_retrans;

                /* So many TCP implementations out there (incorrectly) count the
                 * initial SYN frame in their delayed-ACK and congestion control
                 * algorithms that we must have the following bandaid to talk
                 * efficiently to them. -DaveM
                 */
                newtp->snd_cwnd = TCP_INIT_CWND;
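                /* Annotation (not in the original): TCP_INIT_CWND is 10
                 * segments, the RFC 6928 initial window.
                 */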
                newtp->snd_cwnd_cnt = 0;

                /* There's a bubble in the pipe until at least the first ACK. */
                newtp->app_limited = ~0U;

                tcp_init_xmit_timers(newsk);
                newtp->write_seq = newtp->pushed_seq = treq->snt_isn + 1;

                newtp->rx_opt.saw_tstamp = 0;

                newtp->rx_opt.dsack = 0;
                newtp->rx_opt.num_sacks = 0;

                newtp->urg_data = 0;

                if (sock_flag(newsk, SOCK_KEEPOPEN))
                        inet_csk_reset_keepalive_timer(newsk,
                                                       keepalive_time_when(newtp));

                newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
                newtp->rx_opt.sack_ok = ireq->sack_ok;
                newtp->window_clamp = req->rsk_window_clamp;
                newtp->rcv_ssthresh = req->rsk_rcv_wnd;
                newtp->rcv_wnd = req->rsk_rcv_wnd;
                newtp->rx_opt.wscale_ok = ireq->wscale_ok;
                if (newtp->rx_opt.wscale_ok) {
                        newtp->rx_opt.snd_wscale = ireq->snd_wscale;
                        newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
                } else {
                        newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
                        newtp->window_clamp = min(newtp->window_clamp, 65535U);
                }
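                /* Annotation (not in the original): without the window scale
                 * option only the raw 16-bit window field is available, so a
                 * clamp above 65535 would be meaningless (RFC 7323).
                 */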
                newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
                                  newtp->rx_opt.snd_wscale);
                newtp->max_window = newtp->snd_wnd;

                if (newtp->rx_opt.tstamp_ok) {
                        newtp->rx_opt.ts_recent = req->ts_recent;
                        newtp->rx_opt.ts_recent_stamp = get_seconds();
                        newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
                } else {
                        newtp->rx_opt.ts_recent_stamp = 0;
                        newtp->tcp_header_len = sizeof(struct tcphdr);
                }
                newtp->tsoffset = treq->ts_off;
#ifdef CONFIG_TCP_MD5SIG
                newtp->md5sig_info = NULL;      /*XXX*/
                if (newtp->af_specific->md5_lookup(sk, newsk))
                        newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
                if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
                        newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
                newtp->rx_opt.mss_clamp = req->mss;
                tcp_ecn_openreq_child(newtp, req);
                newtp->fastopen_req = NULL;
                newtp->fastopen_rsk = NULL;
                newtp->syn_data_acked = 0;
                newtp->rack.mstamp = 0;
                newtp->rack.advanced = 0;
                newtp->rack.reo_wnd_steps = 1;
                newtp->rack.last_delivered = 0;
                newtp->rack.reo_wnd_persist = 0;
                newtp->rack.dsack_seen = 0;

                __TCP_INC_STATS(sock_net(sk), TCP_MIB_PASSIVEOPENS);
        }
        return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req,
                           bool fastopen)
{
        struct tcp_options_received tmp_opt;
        struct sock *child;
        const struct tcphdr *th = tcp_hdr(skb);
        __be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
        bool paws_reject = false;
        bool own_req;

        tmp_opt.saw_tstamp = 0;
        if (th->doff > (sizeof(struct tcphdr)>>2)) {
                tcp_parse_options(sock_net(sk), skb, &tmp_opt, 0, NULL);

                if (tmp_opt.saw_tstamp) {
                        tmp_opt.ts_recent = req->ts_recent;
                        if (tmp_opt.rcv_tsecr)
                                tmp_opt.rcv_tsecr -= tcp_rsk(req)->ts_off;
                        /* We do not store the true stamp, but it is not
                         * required; it can be estimated (approximately)
                         * from other data.
                         */
                        tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->num_timeout);
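                        /* Annotation (illustrative, not in the original):
                         * the stamp is backed off by the elapsed SYN-ACK
                         * retransmission time; e.g. with a 1s TCP_TIMEOUT_INIT
                         * and num_timeout == 2 the estimate sits 4 seconds in
                         * the past, old enough for the PAWS check below.
                         */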
                        paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
                }
        }

        /* Check for pure retransmitted SYN. */
        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
            flg == TCP_FLAG_SYN &&
            !paws_reject) {
                /*
                 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
                 * this case on figure 6 and figure 8, but the formal
                 * protocol description says NOTHING.
                 * To be more exact, it says that we should send an ACK,
                 * because this segment (at least, if it has no data)
                 * is out of window.
                 *
                 * CONCLUSION: RFC793 (even with RFC1122) DOES NOT
                 * describe the SYN-RECV state. All the description
                 * is wrong; we cannot believe it and should rely only
                 * on common sense and implementation experience.
                 *
                 * Enforce "SYN-ACK" according to figure 8, figure 6
                 * of RFC793, fixed by RFC1122.
                 *
                 * Note that even if there is new data in the SYN packet
                 * it will be thrown away too.
                 *
                 * Reset the timer after retransmitting the SYNACK, similar
                 * to the idea of fast retransmit in recovery.
                 */
                if (!tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time) &&
                    !inet_rtx_syn_ack(sk, req)) {
                        unsigned long expires = jiffies;

                        expires += min(TCP_TIMEOUT_INIT << req->num_timeout,
                                       TCP_RTO_MAX);
                        if (!fastopen)
                                mod_timer_pending(&req->rsk_timer, expires);
                        else
                                req->rsk_timer.expires = expires;
                }
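                /* Annotation (an assumption, not in the original comments):
                 * for Fast Open the request timer is not pending (the child
                 * socket drives retransmission), so only the expiry field is
                 * refreshed instead of re-arming via mod_timer_pending().
                 */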
                return NULL;
        }

        /* Further reproduces section "SEGMENT ARRIVES"
           for state SYN-RECEIVED of RFC793.
           It is broken, however: it fails only when SYNs are crossed.

           You would think that SYN crossing is impossible here, since
           we should have a SYN_SENT socket (from connect()) on our end,
           but this is not true if the crossed SYNs were sent to both
           ends by a malicious third party. We must defend against this,
           and to do that we first verify the ACK (as per RFC793, page
           36) and reset if it is invalid. Is this a true full defense?
           To convince ourselves, let us consider a way in which the ACK
           test can still pass in this 'malicious crossed SYNs' case.
           Malicious sender sends identical SYNs (and thus identical sequence
           numbers) to both A and B:

           A: gets SYN, seq=7
           B: gets SYN, seq=7

           By our good fortune, both A and B select the same initial
           send sequence number of seven :-)

           A: sends SYN|ACK, seq=7, ack_seq=8
           B: sends SYN|ACK, seq=7, ack_seq=8

           So we are now A eating this SYN|ACK, ACK test passes. So
           does sequence test, SYN is truncated, and thus we consider
           it a bare ACK.

           If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
           bare ACK. Otherwise, we create an established connection. Both
           ends (listening sockets) accept the new incoming connection and try
           to talk to each other. 8-)

           Note: This case is both harmless, and rare. The possibility is
           about the same as us discovering intelligent life on another
           planet tomorrow.

           But generally, we should (RFC lies!) accept an ACK
           from a SYNACK both here and in tcp_rcv_state_process().
           tcp_rcv_state_process() does not, hence, we do not too.

           Note that the case is absolutely generic:
           we cannot optimize anything here without
           violating protocol. All the checks must be made
           before attempt to create socket.
         */

        /* RFC793 page 36: "If the connection is in any non-synchronized state ...
         * and the incoming segment acknowledges something not yet
         * sent (the segment carries an unacceptable ACK) ...
         * a reset is sent."
         *
         * Invalid ACK: reset will be sent by listening socket.
         * Note that the ACK validity check for a Fast Open socket is done
         * elsewhere and is checked directly against the child socket rather
         * than req because user data may have been sent out.
         */
        if ((flg & TCP_FLAG_ACK) && !fastopen &&
            (TCP_SKB_CB(skb)->ack_seq !=
             tcp_rsk(req)->snt_isn + 1))
                return sk;

        /* Also, it would not be a bad idea to check rcv_tsecr, which
         * is essentially an ACK extension; too early or too late values
         * should cause a reset in unsynchronized states.
         */

        /* RFC793: "first check sequence number". */

        if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
                                          tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rsk_rcv_wnd)) {
                /* Out of window: send ACK and drop. */
                if (!(flg & TCP_FLAG_RST) &&
                    !tcp_oow_rate_limited(sock_net(sk), skb,
                                          LINUX_MIB_TCPACKSKIPPEDSYNRECV,
                                          &tcp_rsk(req)->last_oow_ack_time))
                        req->rsk_ops->send_ack(sk, skb, req);
                if (paws_reject)
                        __NET_INC_STATS(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
                return NULL;
        }

        /* In sequence, PAWS is OK. */

        if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
                req->ts_recent = tmp_opt.rcv_tsval;

        if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
                /* Truncate SYN, it is out of window starting
                 * at tcp_rsk(req)->rcv_isn + 1.
                 */
                flg &= ~TCP_FLAG_SYN;
        }

1da177e4 | 742 | |
2aaab9a0 AL |
743 | /* RFC793: "second check the RST bit" and |
744 | * "fourth, check the SYN bit" | |
745 | */ | |
746 | if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) { | |
90bbcc60 | 747 | __TCP_INC_STATS(sock_net(sk), TCP_MIB_ATTEMPTFAILS); |
2aaab9a0 AL |
748 | goto embryonic_reset; |
749 | } | |
1da177e4 | 750 | |
2aaab9a0 AL |
751 | /* ACK sequence verified above, just make sure ACK is |
752 | * set. If ACK not set, just silently drop the packet. | |
8336886f JC |
753 | * |
754 | * XXX (TFO) - if we ever allow "data after SYN", the | |
755 | * following check needs to be removed. | |
2aaab9a0 AL |
756 | */ |
757 | if (!(flg & TCP_FLAG_ACK)) | |
758 | return NULL; | |
ec0a1966 | 759 | |
8336886f JC |
760 | /* For Fast Open no more processing is needed (sk is the |
761 | * child socket). | |
762 | */ | |
763 | if (fastopen) | |
764 | return sk; | |
765 | ||
d1b99ba4 | 766 | /* While TCP_DEFER_ACCEPT is active, drop bare ACK. */ |
e6c022a4 | 767 | if (req->num_timeout < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept && |
2aaab9a0 AL |
768 | TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) { |
769 | inet_rsk(req)->acked = 1; | |
02a1d6e7 | 770 | __NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP); |
2aaab9a0 AL |
771 | return NULL; |
772 | } | |
773 | ||
774 | /* OK, ACK is valid, create big socket and | |
775 | * feed this segment to it. It will repeat all | |
776 | * the tests. THIS SEGMENT MUST MOVE SOCKET TO | |
777 | * ESTABLISHED STATE. If it will be dropped after | |
778 | * socket is created, wait for troubles. | |
779 | */ | |
5e0724d0 ED |
780 | child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL, |
781 | req, &own_req); | |
51456b29 | 782 | if (!child) |
2aaab9a0 | 783 | goto listen_overflow; |
1da177e4 | 784 | |
6bcfd7f8 | 785 | sock_rps_save_rxhash(child, skb); |
0f1c28ae | 786 | tcp_synack_rtt_meas(child, req); |
5e0724d0 | 787 | return inet_csk_complete_hashdance(sk, child, req, own_req); |
1da177e4 | 788 | |
listen_overflow:
        if (!sock_net(sk)->ipv4.sysctl_tcp_abort_on_overflow) {
                inet_rsk(req)->acked = 1;
                return NULL;
        }

embryonic_reset:
        if (!(flg & TCP_FLAG_RST)) {
                /* Received a bad SYN pkt - for TFO we try not to reset
                 * the local connection unless it's really necessary to
                 * avoid becoming vulnerable to an outside attack aiming at
                 * resetting legit local connections.
                 */
                req->rsk_ops->send_reset(sk, skb);
        } else if (fastopen) { /* received a valid RST pkt */
                reqsk_fastopen_remove(sk, req, true);
                tcp_reset(sk);
        }
        if (!fastopen) {
                inet_csk_reqsk_queue_drop(sk, req);
                __NET_INC_STATS(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
        }
        return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue segment on the new socket if the new socket is active,
 * otherwise we just shortcircuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where, after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */
int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb)
{
        int ret = 0;
        int state = child->sk_state;

        /* record NAPI ID of child */
        sk_mark_napi_id(child, skb);

        tcp_segs_in(tcp_sk(child), skb);
        if (!sock_owned_by_user(child)) {
                ret = tcp_rcv_state_process(child, skb);
                /* Wakeup parent, send SIGIO */
                if (state == TCP_SYN_RECV && child->sk_state != state)
                        parent->sk_data_ready(parent);
        } else {
                /* Alas, it is possible again, because we do a lookup
                 * in the main socket hash table and the lock on the
                 * listening socket does not protect us any more.
                 */
                __sk_add_backlog(child, skb);
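                /* Annotation (not in the original): the skb parked on the
                 * backlog is replayed in process context once the owner
                 * calls release_sock().
                 */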
        }

        bh_unlock_sock(child);
        sock_put(child);
        return ret;
}
EXPORT_SYMBOL(tcp_child_process);