/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Mark Evans, <[email protected]>
 *		Corey Minyard <[email protected]>
 *		Florian La Roche, <[email protected]>
 *		Charles Hedrick, <[email protected]>
 *		Linus Torvalds, <[email protected]>
 *		Alan Cox, <[email protected]>
 *		Matthew Dillon, <[email protected]>
 *		Arnt Gulbrandsen, <[email protected]>
 *		Jorge Cwik, <[email protected]>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>
int sysctl_tcp_syncookies __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
	/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};
EXPORT_SYMBOL_GPL(tcp_death_row);

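/* RFC 793 acceptance test: does any part of the segment [seq, end_seq)
 * overlap the receive window [s_win, e_win)?  A zero-length segment
 * (seq == end_seq) is acceptable only exactly at the window edges.
 * E.g. with s_win = 100 and e_win = 200: seq = 90, end_seq = 110 is
 * accepted (it overlaps), seq = end_seq = 100 is accepted, but
 * seq = end_seq = 90 is rejected.
 */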
static bool tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return true;
	if (after(end_seq, s_win) && before(seq, e_win))
		return true;
	return seq == e_win && seq == end_seq;
}

/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which leads to the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow losing one (or more)
 *   segments sent by the peer and our ACKs. This time may be calculated
 *   from the RTO.
 * * When a TIME-WAIT socket receives an RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of the RFCs.
 * When you compare it to the RFCs, please read section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misreading sequence numbers, states etc.  --ANK
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */
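/* The return value tells the caller what to do with the segment that hit
 * this TIME-WAIT bucket: TCP_TW_SUCCESS means it was fully handled here
 * (just drop it), TCP_TW_RST asks for a reset, TCP_TW_ACK asks for the
 * last ACK to be re-sent, and TCP_TW_SYN means the SYN is acceptable
 * for reopening the connection and should proceed to connection setup.
 */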
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	bool paws_reject = false;

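	/* A data offset above the bare 5-word header means TCP options are
	 * present.  They are parsed only when we also hold a cached peer
	 * timestamp, because the timestamp option is all this path looks at.
	 */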
	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrives after a half-duplex
		 * close, reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
		}

		if (tcp_death_row.sysctl_tw_recycle &&
		    tcptw->tw_ts_recent_stamp &&
		    tcp_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* An in-window segment; it may only be a reset or a bare ACK. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All such segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check - that it carries a
	   newer sequence number - works at rates < 40Mbit/sec.
	   However, if PAWS works, it is reliable AND, even more,
	   we may relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive an RST in reply to
	   the SYN-ACK), we must return the socket to time-wait state. It
	   is not good, but not fatal yet.
	 */

	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
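		/* Per rule (1) of the RFC 1122 excerpt above: pick an ISN
		 * that is above anything the old incarnation can have sent,
		 * i.e. its last sequence number plus the largest possible
		 * unscaled window (65535) plus a little slack.  The value is
		 * stashed in the skb control block so the SYN handling path
		 * can pick it up as the initial sequence number.
		 */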
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with a random sequence number < rcv_nxt.
		 * Do not reschedule in the latter case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
EXPORT_SYMBOL(tcp_timewait_state_process);

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	bool recycle_ok = false;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = tcp_remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
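		/* (RTO << 2) - (RTO >> 1) = 4*RTO - RTO/2 = 3.5*RTO,
		 * computed with shifts to avoid an integer multiply;
		 * long enough for the peer to retransmit its FIN.
		 */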
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
		struct inet_sock *inet = inet_sk(sk);

		tw->tw_transparent = inet->transparent;
		tw->tw_rcv_wscale = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt = tp->rcv_nxt;
		tcptw->tw_snd_nxt = tp->snd_nxt;
		tcptw->tw_rcv_wnd = tcp_receive_window(tp);
		tcptw->tw_ts_recent = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if IS_ENABLED(CONFIG_IPV6)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			tw6->tw_v6_daddr = np->daddr;
			tw6->tw_v6_rcv_saddr = np->rcv_saddr;
			tw->tw_tclass = np->tclass;
			tw->tw_ipv6only = np->ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			tcptw->tw_md5_key = NULL;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				tcptw->tw_md5_key = kmemdup(key, sizeof(*key), GFP_ATOMIC);
				if (tcptw->tw_md5_key && tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

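		/* With tw_recycle the bucket only has to outlive peer
		 * retransmissions (~3.5 RTOs, computed above); otherwise it
		 * is held for the full TCP_TIMEWAIT_LEN, which this tree
		 * defines as 60 seconds, a practical stand-in for the RFC's
		 * 2*MSL.
		 */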
		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPTIMEWAITOVERFLOW);
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);

	if (twsk->tw_md5_key) {
		tcp_free_md5sig_pool();
		kfree_rcu(twsk->tw_md5_key, rcu);
	}
#endif
}
EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

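/* Propagate the outcome of ECN negotiation from the request into the
 * child socket: ecn_ok was decided while the SYN was parsed, so the
 * child either starts with TCP_ECN_OK set or with ECN disabled.
 */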
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could save lots of memory writes here. tp of the listening
 * socket contains all the necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone_lock(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp = tcp_sk(newsk);
		struct tcp_sock *oldtp = tcp_sk(sk);
		struct tcp_cookie_values *oldcvp = oldtp->cookie_values;

		/* TCP Cookie Transactions require space for the cookie pair,
		 * as it differs for each connection.  There is no need to
		 * copy any s_data_payload stored at the original socket.
		 * Failure will prevent resuming the connection.
		 *
		 * Presumed copied, in order of appearance:
		 *	cookie_in_always, cookie_out_never
		 */
		if (oldcvp != NULL) {
			struct tcp_cookie_values *newcvp =
				kzalloc(sizeof(*newtp->cookie_values),
					GFP_ATOMIC);

			if (newcvp != NULL) {
				kref_init(&newcvp->kref);
				newcvp->cookie_desired =
						oldcvp->cookie_desired;
				newtp->cookie_values = newcvp;
			} else {
				/* Not Yet Implemented */
				newtp->cookie_values = NULL;
			}
		}

		/* Now setup tcp_sock */
		newtp->pred_flags = 0;

		newtp->rcv_wup = newtp->copied_seq =
		newtp->rcv_nxt = treq->rcv_isn + 1;

		newtp->snd_sml = newtp->snd_una =
		newtp->snd_nxt = newtp->snd_up =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		tcp_prequeue_init(newtp);
		INIT_LIST_HEAD(&newtp->tsq_node);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
		tcp_enable_early_retrans(newtp);

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = TCP_INIT_CWND;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		if (newicsk->icsk_ca_ops != &tcp_init_congestion_ops &&
		    !try_module_get(newicsk->icsk_ca_ops->owner))
			newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = newtp->pushed_seq =
			treq->snt_isn + 1 + tcp_s_data_size(oldtp);

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
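		/* If window scaling was not negotiated on the SYN, both
		 * scale factors stay zero and the window can never exceed
		 * the raw 16-bit header field, hence the 65535 clamp below.
		 */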
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MSS_DEFAULT + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);
		newtp->fastopen_rsk = NULL;

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}
EXPORT_SYMBOL(tcp_create_openreq_child);

/*
 * Process an incoming packet for SYN_RECV sockets represented as a
 * request_sock. Normally sk is the listener socket but for TFO it
 * points to the child socket.
 *
 * XXX (TFO) - The current impl contains a special check for ack
 * validation here and inside tcp_v4_reqsk_send_ack(). Can we do better?
 *
 * We don't need to initialize tmp_opt.sack_ok as we don't use the results
 */

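/* Return contract, as used by the IPv4/IPv6 receive paths: NULL means the
 * segment was consumed or dropped here; a new child socket means the
 * handshake completed; returning the original sk hands the segment back
 * to the caller (e.g. an unacceptable ACK, which the listener path
 * answers with a reset).
 */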
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev,
			   bool fastopen)
{
	struct tcp_options_received tmp_opt;
	const u8 *hash_location;
	struct sock *child;
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	bool paws_reject = false;

	BUG_ON(fastopen == (sk->sk_state == TCP_LISTEN));

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, &hash_location, 0, NULL);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required; it can be estimated (approximately)
			 * from other data.
			 */
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send an ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe the SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 *
		 * Note that even if there is new data in the SYN packet
		 * it will be thrown away too.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req, NULL);
		return NULL;
	}

	/* The following further reproduces section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however - it fails only when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party.  We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid.  Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

	   A: gets SYN, seq=7
	   B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

	   A: sends SYN|ACK, seq=7, ack_seq=8
	   B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, the ACK test passes.  So
	   does the sequence test, the SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: reset will be sent by the listening socket.
	 * Note that the ACK validity check for a Fast Open socket is done
	 * elsewhere and is checked directly against the child socket rather
	 * than req because user data may have been sent out.
	 */
	if ((flg & TCP_FLAG_ACK) && !fastopen &&
	    (TCP_SKB_CB(skb)->ack_seq !=
	     tcp_rsk(req)->snt_isn + 1 + tcp_s_data_size(tcp_sk(sk))))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension; too early or too late values
	 * should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_nxt, tcp_rsk(req)->rcv_nxt + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_nxt))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* The ACK sequence was verified above; just make sure ACK is
	 * set.  If ACK is not set, silently drop the packet.
	 *
	 * XXX (TFO) - if we ever allow "data after SYN", the
	 * following check needs to be removed.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* Got ACK for our SYNACK, so update the baseline for the SYNACK RTT sample. */
	if (tmp_opt.saw_tstamp && tmp_opt.rcv_tsecr)
		tcp_rsk(req)->snt_synack = tmp_opt.rcv_tsecr;
	else if (req->retrans) /* don't take RTT sample if retrans && ~TS */
		tcp_rsk(req)->snt_synack = 0;

	/* For Fast Open no more processing is needed (sk is the
	 * child socket).
	 */
	if (fastopen)
		return sk;

	/* While TCP_DEFER_ACCEPT is active, drop bare ACK. */
	if (req->retrans < inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPDEFERACCEPTDROP);
		return NULL;
	}

	/* OK, the ACK is valid, create the big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE THE SOCKET TO
	 * ESTABLISHED STATE. If it gets dropped after the
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	if (!(flg & TCP_FLAG_RST)) {
		/* Received a bad SYN pkt - for TFO we try not to reset
		 * the local connection unless it's really necessary to
		 * avoid becoming vulnerable to outside attack aiming at
		 * resetting legit local connections.
		 */
		req->rsk_ops->send_reset(sk, skb);
	} else if (fastopen) { /* received a valid RST pkt */
		reqsk_fastopen_remove(sk, req, true);
		tcp_reset(sk);
	}
	if (!fastopen) {
		inet_csk_reqsk_queue_drop(sk, req, prev);
		NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	}
	return NULL;
}
EXPORT_SYMBOL(tcp_check_req);

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 *
 * For the vast majority of cases child->sk_state will be TCP_SYN_RECV
 * when entering. But other states are possible due to a race condition
 * where after __inet_lookup_established() fails but before the listener
 * lock is obtained, other packets cause the same connection to
 * be created.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		__sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}
EXPORT_SYMBOL(tcp_child_process);