/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol(TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Mark Evans, <[email protected]>
 *		Corey Minyard <[email protected]>
 *		Florian La Roche, <[email protected]>
 *		Charles Hedrick, <[email protected]>
 *		Linus Torvalds, <[email protected]>
 *		Alan Cox, <[email protected]>
 *		Matthew Dillon, <[email protected]>
 *		Arnt Gulbrandsen, <[email protected]>
 *		Jorge Cwik, <[email protected]>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/workqueue.h>
#include <net/tcp.h>
#include <net/inet_common.h>
#include <net/xfrm.h>

#ifdef CONFIG_SYSCTL
#define SYNC_INIT 0 /* let the user enable it */
#else
#define SYNC_INIT 1
#endif

int sysctl_tcp_syncookies __read_mostly = SYNC_INIT;
EXPORT_SYMBOL(sysctl_tcp_syncookies);

int sysctl_tcp_abort_on_overflow __read_mostly;

struct inet_timewait_death_row tcp_death_row = {
	.sysctl_max_tw_buckets = NR_FILE * 2,
	.period		= TCP_TIMEWAIT_LEN / INET_TWDR_TWKILL_SLOTS,
	.death_lock	= __SPIN_LOCK_UNLOCKED(tcp_death_row.death_lock),
	.hashinfo	= &tcp_hashinfo,
	.tw_timer	= TIMER_INITIALIZER(inet_twdr_hangman, 0,
					    (unsigned long)&tcp_death_row),
	.twkill_work	= __WORK_INITIALIZER(tcp_death_row.twkill_work,
					     inet_twdr_twkill_work),
/* Short-time timewait calendar */

	.twcal_hand	= -1,
	.twcal_timer	= TIMER_INITIALIZER(inet_twdr_twcal_tick, 0,
					    (unsigned long)&tcp_death_row),
};

EXPORT_SYMBOL_GPL(tcp_death_row);

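/* RFC 793 acceptability test: the segment [seq, end_seq) is acceptable if it
 * starts exactly at the left edge of the receive window [s_win, e_win),
 * overlaps the window, or is a zero-length segment sitting exactly at the
 * right edge.
 */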
static __inline__ int tcp_in_window(u32 seq, u32 end_seq, u32 s_win, u32 e_win)
{
	if (seq == s_win)
		return 1;
	if (after(end_seq, s_win) && before(seq, e_win))
		return 1;
	return (seq == e_win && seq == end_seq);
}

/*
 * * Main purpose of TIME-WAIT state is to close the connection gracefully,
 *   when one of the ends sits in LAST-ACK or CLOSING retransmitting FIN
 *   (and, probably, a tail of data) and one or more of our ACKs are lost.
 * * What is the TIME-WAIT timeout? It is associated with maximal packet
 *   lifetime in the internet, which results in the wrong conclusion that
 *   it is set to catch "old duplicate segments" wandering out of their path.
 *   It is not quite correct. This timeout is calculated so that it exceeds
 *   the maximal retransmission timeout enough to allow the loss of one (or
 *   more) segments sent by the peer and of our ACKs. This time may be
 *   calculated from RTO.
 * * When a TIME-WAIT socket receives RST, it means that the other end
 *   finally closed and we are allowed to kill TIME-WAIT too.
 * * The second purpose of TIME-WAIT is catching old duplicate segments.
 *   Well, certainly it is pure paranoia, but if we load TIME-WAIT
 *   with this semantics, we MUST NOT kill the TIME-WAIT state with RSTs.
 * * If we invented some more clever way to catch duplicates
 *   (e.g. based on PAWS), we could truncate TIME-WAIT to several RTOs.
 *
 * The algorithm below is based on FORMAL INTERPRETATION of RFCs.
 * When you compare it to RFCs, please, read the section SEGMENT ARRIVES
 * from the very beginning.
 *
 * NOTE. With recycling (and later with fin-wait-2) the TW bucket
 * is _not_ stateless. It means that, strictly speaking, we must
 * spinlock it. I do not want to! Well, the probability of misbehaviour
 * is ridiculously low and, it seems, we could use some mb() tricks
 * to avoid misread sequence numbers, states etc.  --ANK
 */
enum tcp_tw_status
tcp_timewait_state_process(struct inet_timewait_sock *tw, struct sk_buff *skb,
			   const struct tcphdr *th)
{
	struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
	struct tcp_options_received tmp_opt;
	int paws_reject = 0;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(*th) >> 2) && tcptw->tw_ts_recent_stamp) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent	= tcptw->tw_ts_recent;
			tmp_opt.ts_recent_stamp	= tcptw->tw_ts_recent_stamp;
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}
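
	/* paws_reject is set when the segment's timestamp is older than the
	 * last one recorded from this peer (PAWS, RFC 1323); th->rst is
	 * passed down because PAWS is applied less strictly to resets.
	 */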

	if (tw->tw_substate == TCP_FIN_WAIT2) {
		/* Just repeat all the checks of tcp_rcv_state_process() */

		/* Out of window, send ACK */
		if (paws_reject ||
		    !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
				   tcptw->tw_rcv_nxt,
				   tcptw->tw_rcv_nxt + tcptw->tw_rcv_wnd))
			return TCP_TW_ACK;

		if (th->rst)
			goto kill;

		if (th->syn && !before(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt))
			goto kill_with_rst;

		/* Dup ACK? */
		if (!th->ack ||
		    !after(TCP_SKB_CB(skb)->end_seq, tcptw->tw_rcv_nxt) ||
		    TCP_SKB_CB(skb)->end_seq == TCP_SKB_CB(skb)->seq) {
			inet_twsk_put(tw);
			return TCP_TW_SUCCESS;
		}

		/* New data or FIN. If new data arrive after half-duplex close,
		 * reset.
		 */
		if (!th->fin ||
		    TCP_SKB_CB(skb)->end_seq != tcptw->tw_rcv_nxt + 1) {
kill_with_rst:
			inet_twsk_deschedule(tw, &tcp_death_row);
			inet_twsk_put(tw);
			return TCP_TW_RST;
		}

		/* FIN arrived, enter true time-wait state. */
		tw->tw_substate	  = TCP_TIME_WAIT;
		tcptw->tw_rcv_nxt = TCP_SKB_CB(skb)->end_seq;
		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent_stamp = get_seconds();
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
		}

		/* I am shamed, but failed to make it more elegant.
		 * Yes, it is a direct reference to IP, which is impossible
		 * to generalize to IPv6. Taking into account that IPv6
		 * does not understand recycling in any case, it is not
		 * a big problem in practice. --ANK */
		if (tw->tw_family == AF_INET &&
		    tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
		    tcp_v4_tw_remember_stamp(tw))
			inet_twsk_schedule(tw, &tcp_death_row, tw->tw_timeout,
					   TCP_TIMEWAIT_LEN);
		else
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);
		return TCP_TW_ACK;
	}

	/*
	 * Now real TIME-WAIT state.
	 *
	 * RFC 1122:
	 * "When a connection is [...] on TIME-WAIT state [...]
	 * [a TCP] MAY accept a new SYN from the remote TCP to
	 * reopen the connection directly, if it:
	 *
	 * (1)  assigns its initial sequence number for the new
	 *      connection to be larger than the largest sequence
	 *      number it used on the previous connection incarnation,
	 *      and
	 *
	 * (2)  returns to TIME-WAIT state if the SYN turns out
	 *      to be an old duplicate".
	 */

	if (!paws_reject &&
	    (TCP_SKB_CB(skb)->seq == tcptw->tw_rcv_nxt &&
	     (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq || th->rst))) {
		/* In window segment, it may be only reset or bare ack. */

		if (th->rst) {
			/* This is TIME_WAIT assassination, in two flavors.
			 * Oh well... nobody has a sufficient solution to this
			 * protocol bug yet.
			 */
			if (sysctl_tcp_rfc1337 == 0) {
kill:
				inet_twsk_deschedule(tw, &tcp_death_row);
				inet_twsk_put(tw);
				return TCP_TW_SUCCESS;
			}
		}
		inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
				   TCP_TIMEWAIT_LEN);

		if (tmp_opt.saw_tstamp) {
			tcptw->tw_ts_recent	  = tmp_opt.rcv_tsval;
			tcptw->tw_ts_recent_stamp = get_seconds();
		}

		inet_twsk_put(tw);
		return TCP_TW_SUCCESS;
	}

	/* Out of window segment.

	   All the segments are ACKed immediately.

	   The only exception is a new SYN. We accept it, if it is
	   not an old duplicate and we are not in danger of being killed
	   by delayed old duplicates. The RFC check, that it carries a
	   newer sequence number, works at rates <40Mbit/sec.
	   However, if PAWS works, it is reliable AND even more,
	   we may even relax the silly seq space cutoff.

	   RED-PEN: we violate the main RFC requirement: if this SYN turns
	   out to be an old duplicate (i.e. we receive RST in reply to
	   SYN-ACK), we must return the socket to time-wait state. It is
	   not good, but not fatal yet.
	 */

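	/* A SYN may legitimately reopen the connection (RFC 1122): pick an
	 * ISN above anything the old incarnation can still have in flight;
	 * 0 is skipped because the caller treats a zero "when" as "no
	 * stored ISN".
	 */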
	if (th->syn && !th->rst && !th->ack && !paws_reject &&
	    (after(TCP_SKB_CB(skb)->seq, tcptw->tw_rcv_nxt) ||
	     (tmp_opt.saw_tstamp &&
	      (s32)(tcptw->tw_ts_recent - tmp_opt.rcv_tsval) < 0))) {
		u32 isn = tcptw->tw_snd_nxt + 65535 + 2;
		if (isn == 0)
			isn++;
		TCP_SKB_CB(skb)->when = isn;
		return TCP_TW_SYN;
	}

	if (paws_reject)
		NET_INC_STATS_BH(twsk_net(tw), LINUX_MIB_PAWSESTABREJECTED);

	if (!th->rst) {
		/* In this case we must reset the TIMEWAIT timer.
		 *
		 * If it is an ACKless SYN it may be both an old duplicate
		 * and a new good SYN with random sequence number <rcv_nxt.
		 * Do not reschedule in the last case.
		 */
		if (paws_reject || th->ack)
			inet_twsk_schedule(tw, &tcp_death_row, TCP_TIMEWAIT_LEN,
					   TCP_TIMEWAIT_LEN);

		/* Send ACK. Note, we do not put the bucket,
		 * it will be released by the caller.
		 */
		return TCP_TW_ACK;
	}
	inet_twsk_put(tw);
	return TCP_TW_SUCCESS;
}
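
/* The verdicts above tell the caller what to do with the segment:
 * TCP_TW_SUCCESS - drop it silently, TCP_TW_ACK - reply with an ACK,
 * TCP_TW_RST - reply with a reset, TCP_TW_SYN - hand it to a listening
 * socket as a new connection attempt.
 */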

/*
 * Move a socket to time-wait or dead fin-wait-2 state.
 */
void tcp_time_wait(struct sock *sk, int state, int timeo)
{
	struct inet_timewait_sock *tw = NULL;
	const struct inet_connection_sock *icsk = inet_csk(sk);
	const struct tcp_sock *tp = tcp_sk(sk);
	int recycle_ok = 0;

	if (tcp_death_row.sysctl_tw_recycle && tp->rx_opt.ts_recent_stamp)
		recycle_ok = icsk->icsk_af_ops->remember_stamp(sk);

	if (tcp_death_row.tw_count < tcp_death_row.sysctl_max_tw_buckets)
		tw = inet_twsk_alloc(sk, state);

	if (tw != NULL) {
		struct tcp_timewait_sock *tcptw = tcp_twsk((struct sock *)tw);
		const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
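		/* (x << 2) - (x >> 1) == 3.5*x, so rto here is 3.5 times the
		 * retransmission timeout: comfortably longer than one RTO,
		 * per the TIME-WAIT discussion above tcp_timewait_state_process(). */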

		tw->tw_rcv_wscale	  = tp->rx_opt.rcv_wscale;
		tcptw->tw_rcv_nxt	  = tp->rcv_nxt;
		tcptw->tw_snd_nxt	  = tp->snd_nxt;
		tcptw->tw_rcv_wnd	  = tcp_receive_window(tp);
		tcptw->tw_ts_recent	  = tp->rx_opt.ts_recent;
		tcptw->tw_ts_recent_stamp = tp->rx_opt.ts_recent_stamp;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
		if (tw->tw_family == PF_INET6) {
			struct ipv6_pinfo *np = inet6_sk(sk);
			struct inet6_timewait_sock *tw6;

			tw->tw_ipv6_offset = inet6_tw_offset(sk->sk_prot);
			tw6 = inet6_twsk((struct sock *)tw);
			ipv6_addr_copy(&tw6->tw_v6_daddr, &np->daddr);
			ipv6_addr_copy(&tw6->tw_v6_rcv_saddr, &np->rcv_saddr);
			tw->tw_ipv6only = np->ipv6only;
		}
#endif

#ifdef CONFIG_TCP_MD5SIG
		/*
		 * The timewait bucket does not have the key DB from the
		 * sock structure. We just make a quick copy of the
		 * md5 key being used (if indeed we are using one)
		 * so the timewait ack generating code has the key.
		 */
		do {
			struct tcp_md5sig_key *key;
			memset(tcptw->tw_md5_key, 0, sizeof(tcptw->tw_md5_key));
			tcptw->tw_md5_keylen = 0;
			key = tp->af_specific->md5_lookup(sk, sk);
			if (key != NULL) {
				memcpy(&tcptw->tw_md5_key, key->key, key->keylen);
				tcptw->tw_md5_keylen = key->keylen;
				if (tcp_alloc_md5sig_pool(sk) == NULL)
					BUG();
			}
		} while (0);
#endif

		/* Linkage updates. */
		__inet_twsk_hashdance(tw, sk, &tcp_hashinfo);

		/* Get the TIME_WAIT timeout firing. */
		if (timeo < rto)
			timeo = rto;

		if (recycle_ok) {
			tw->tw_timeout = rto;
		} else {
			tw->tw_timeout = TCP_TIMEWAIT_LEN;
			if (state == TCP_TIME_WAIT)
				timeo = TCP_TIMEWAIT_LEN;
		}
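		/* With recycling the bucket only needs to outlive ~3.5*RTO;
		 * otherwise it is held for the full TCP_TIMEWAIT_LEN (60s). */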

		inet_twsk_schedule(tw, &tcp_death_row, timeo,
				   TCP_TIMEWAIT_LEN);
		inet_twsk_put(tw);
	} else {
		/* Sorry, if we're out of memory, just CLOSE this
		 * socket up.  We've got bigger problems than
		 * non-graceful socket closings.
		 */
		LIMIT_NETDEBUG(KERN_INFO "TCP: time wait bucket table overflow\n");
	}

	tcp_update_metrics(sk);
	tcp_done(sk);
}

void tcp_twsk_destructor(struct sock *sk)
{
#ifdef CONFIG_TCP_MD5SIG
	struct tcp_timewait_sock *twsk = tcp_twsk(sk);
	if (twsk->tw_md5_keylen)
		tcp_free_md5sig_pool();
#endif
}

EXPORT_SYMBOL_GPL(tcp_twsk_destructor);

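/* Propagate the ECN capability negotiated during connection setup from the
 * request block into the newly created child socket.
 */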
static inline void TCP_ECN_openreq_child(struct tcp_sock *tp,
					 struct request_sock *req)
{
	tp->ecn_flags = inet_rsk(req)->ecn_ok ? TCP_ECN_OK : 0;
}

/* This is not only more efficient than what we used to do, it eliminates
 * a lot of code duplication between IPv4/IPv6 SYN recv processing. -DaveM
 *
 * Actually, we could avoid a lot of memory writes here. tp of the listening
 * socket contains all necessary default parameters.
 */
struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req, struct sk_buff *skb)
{
	struct sock *newsk = inet_csk_clone(sk, req, GFP_ATOMIC);

	if (newsk != NULL) {
		const struct inet_request_sock *ireq = inet_rsk(req);
		struct tcp_request_sock *treq = tcp_rsk(req);
		struct inet_connection_sock *newicsk = inet_csk(newsk);
		struct tcp_sock *newtp;

		/* Now setup tcp_sock */
		newtp = tcp_sk(newsk);
		newtp->pred_flags = 0;
		newtp->rcv_wup = newtp->copied_seq = newtp->rcv_nxt = treq->rcv_isn + 1;
		newtp->snd_sml = newtp->snd_una = newtp->snd_nxt = treq->snt_isn + 1;
		newtp->snd_up = treq->snt_isn + 1;

		tcp_prequeue_init(newtp);

		tcp_init_wl(newtp, treq->rcv_isn);

		newtp->srtt = 0;
		newtp->mdev = TCP_TIMEOUT_INIT;
		newicsk->icsk_rto = TCP_TIMEOUT_INIT;

		newtp->packets_out = 0;
		newtp->retrans_out = 0;
		newtp->sacked_out = 0;
		newtp->fackets_out = 0;
		newtp->snd_ssthresh = TCP_INFINITE_SSTHRESH;

		/* So many TCP implementations out there (incorrectly) count the
		 * initial SYN frame in their delayed-ACK and congestion control
		 * algorithms that we must have the following bandaid to talk
		 * efficiently to them.  -DaveM
		 */
		newtp->snd_cwnd = 2;
		newtp->snd_cwnd_cnt = 0;
		newtp->bytes_acked = 0;

		newtp->frto_counter = 0;
		newtp->frto_highmark = 0;

		newicsk->icsk_ca_ops = &tcp_init_congestion_ops;

		tcp_set_ca_state(newsk, TCP_CA_Open);
		tcp_init_xmit_timers(newsk);
		skb_queue_head_init(&newtp->out_of_order_queue);
		newtp->write_seq = treq->snt_isn + 1;
		newtp->pushed_seq = newtp->write_seq;

		newtp->rx_opt.saw_tstamp = 0;

		newtp->rx_opt.dsack = 0;
		newtp->rx_opt.num_sacks = 0;

		newtp->urg_data = 0;

		if (sock_flag(newsk, SOCK_KEEPOPEN))
			inet_csk_reset_keepalive_timer(newsk,
						       keepalive_time_when(newtp));

		newtp->rx_opt.tstamp_ok = ireq->tstamp_ok;
		if ((newtp->rx_opt.sack_ok = ireq->sack_ok) != 0) {
			if (sysctl_tcp_fack)
				tcp_enable_fack(newtp);
		}
		newtp->window_clamp = req->window_clamp;
		newtp->rcv_ssthresh = req->rcv_wnd;
		newtp->rcv_wnd = req->rcv_wnd;
		newtp->rx_opt.wscale_ok = ireq->wscale_ok;
		if (newtp->rx_opt.wscale_ok) {
			newtp->rx_opt.snd_wscale = ireq->snd_wscale;
			newtp->rx_opt.rcv_wscale = ireq->rcv_wscale;
		} else {
			newtp->rx_opt.snd_wscale = newtp->rx_opt.rcv_wscale = 0;
			newtp->window_clamp = min(newtp->window_clamp, 65535U);
		}
		newtp->snd_wnd = (ntohs(tcp_hdr(skb)->window) <<
				  newtp->rx_opt.snd_wscale);
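		/* Only SYN segments carry an unscaled window (RFC 1323); the
		 * handshake-completing ACK processed here must be shifted by
		 * the peer's negotiated snd_wscale to get the real window. */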
		newtp->max_window = newtp->snd_wnd;

		if (newtp->rx_opt.tstamp_ok) {
			newtp->rx_opt.ts_recent = req->ts_recent;
			newtp->rx_opt.ts_recent_stamp = get_seconds();
			newtp->tcp_header_len = sizeof(struct tcphdr) + TCPOLEN_TSTAMP_ALIGNED;
		} else {
			newtp->rx_opt.ts_recent_stamp = 0;
			newtp->tcp_header_len = sizeof(struct tcphdr);
		}
#ifdef CONFIG_TCP_MD5SIG
		newtp->md5sig_info = NULL;	/*XXX*/
		if (newtp->af_specific->md5_lookup(sk, newsk))
			newtp->tcp_header_len += TCPOLEN_MD5SIG_ALIGNED;
#endif
		if (skb->len >= TCP_MIN_RCVMSS + newtp->tcp_header_len)
			newicsk->icsk_ack.last_seg_size = skb->len - newtp->tcp_header_len;
		newtp->rx_opt.mss_clamp = req->mss;
		TCP_ECN_openreq_child(newtp, req);

		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_PASSIVEOPENS);
	}
	return newsk;
}

/*
 * Process an incoming packet for SYN_RECV sockets represented
 * as a request_sock.
 */

struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
			   struct request_sock *req,
			   struct request_sock **prev)
{
	const struct tcphdr *th = tcp_hdr(skb);
	__be32 flg = tcp_flag_word(th) & (TCP_FLAG_RST|TCP_FLAG_SYN|TCP_FLAG_ACK);
	int paws_reject = 0;
	struct tcp_options_received tmp_opt;
	struct sock *child;

	tmp_opt.saw_tstamp = 0;
	if (th->doff > (sizeof(struct tcphdr)>>2)) {
		tcp_parse_options(skb, &tmp_opt, 0);

		if (tmp_opt.saw_tstamp) {
			tmp_opt.ts_recent = req->ts_recent;
			/* We do not store the true stamp, but it is not
			 * required, it can be estimated (approximately)
			 * from other data.
			 */
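			/* Each SYN-ACK retransmission doubles the timer, so
			 * (TCP_TIMEOUT_INIT/HZ) << retrans is roughly the
			 * time elapsed since the stamp was actually seen. */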
			tmp_opt.ts_recent_stamp = get_seconds() - ((TCP_TIMEOUT_INIT/HZ)<<req->retrans);
			paws_reject = tcp_paws_reject(&tmp_opt, th->rst);
		}
	}

	/* Check for pure retransmitted SYN. */
	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn &&
	    flg == TCP_FLAG_SYN &&
	    !paws_reject) {
		/*
		 * RFC793 draws (Incorrectly! It was fixed in RFC1122)
		 * this case on figure 6 and figure 8, but the formal
		 * protocol description says NOTHING.
		 * To be more exact, it says that we should send ACK,
		 * because this segment (at least, if it has no data)
		 * is out of window.
		 *
		 *  CONCLUSION: RFC793 (even with RFC1122) DOES NOT
		 *  describe SYN-RECV state. All the description
		 *  is wrong, we cannot believe it and should
		 *  rely only on common sense and implementation
		 *  experience.
		 *
		 * Enforce "SYN-ACK" according to figure 8, figure 6
		 * of RFC793, fixed by RFC1122.
		 */
		req->rsk_ops->rtx_syn_ack(sk, req);
		return NULL;
	}

	/* Further reproduces the section "SEGMENT ARRIVES"
	   for state SYN-RECEIVED of RFC793.
	   It is broken, however, it does not work only
	   when SYNs are crossed.

	   You would think that SYN crossing is impossible here, since
	   we should have a SYN_SENT socket (from connect()) on our end,
	   but this is not true if the crossed SYNs were sent to both
	   ends by a malicious third party. We must defend against this,
	   and to do that we first verify the ACK (as per RFC793, page
	   36) and reset if it is invalid. Is this a true full defense?
	   To convince ourselves, let us consider a way in which the ACK
	   test can still pass in this 'malicious crossed SYNs' case.
	   Malicious sender sends identical SYNs (and thus identical sequence
	   numbers) to both A and B:

		A: gets SYN, seq=7
		B: gets SYN, seq=7

	   By our good fortune, both A and B select the same initial
	   send sequence number of seven :-)

		A: sends SYN|ACK, seq=7, ack_seq=8
		B: sends SYN|ACK, seq=7, ack_seq=8

	   So we are now A eating this SYN|ACK, ACK test passes.  So
	   does the sequence test, SYN is truncated, and thus we consider
	   it a bare ACK.

	   If icsk->icsk_accept_queue.rskq_defer_accept, we silently drop this
	   bare ACK.  Otherwise, we create an established connection.  Both
	   ends (listening sockets) accept the new incoming connection and try
	   to talk to each other. 8-)

	   Note: This case is both harmless, and rare.  The possibility is about
	   the same as us discovering intelligent life on another planet
	   tomorrow.

	   But generally, we should (the RFC lies!) accept an ACK
	   from a SYNACK both here and in tcp_rcv_state_process().
	   tcp_rcv_state_process() does not, hence, we do not too.

	   Note that the case is absolutely generic:
	   we cannot optimize anything here without
	   violating the protocol. All the checks must be made
	   before an attempt to create a socket.
	 */

	/* RFC793 page 36: "If the connection is in any non-synchronized state ...
	 * and the incoming segment acknowledges something not yet
	 * sent (the segment carries an unacceptable ACK) ...
	 * a reset is sent."
	 *
	 * Invalid ACK: the reset will be sent by the listening socket.
	 */
	if ((flg & TCP_FLAG_ACK) &&
	    (TCP_SKB_CB(skb)->ack_seq != tcp_rsk(req)->snt_isn + 1))
		return sk;

	/* Also, it would not be a bad idea to check rcv_tsecr, which
	 * is essentially an ACK extension, and too early or too late
	 * values should cause a reset in unsynchronized states.
	 */

	/* RFC793: "first check sequence number". */

	if (paws_reject || !tcp_in_window(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb)->end_seq,
					  tcp_rsk(req)->rcv_isn + 1, tcp_rsk(req)->rcv_isn + 1 + req->rcv_wnd)) {
		/* Out of window: send ACK and drop. */
		if (!(flg & TCP_FLAG_RST))
			req->rsk_ops->send_ack(sk, skb, req);
		if (paws_reject)
			NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSESTABREJECTED);
		return NULL;
	}

	/* In sequence, PAWS is OK. */

	if (tmp_opt.saw_tstamp && !after(TCP_SKB_CB(skb)->seq, tcp_rsk(req)->rcv_isn + 1))
		req->ts_recent = tmp_opt.rcv_tsval;

	if (TCP_SKB_CB(skb)->seq == tcp_rsk(req)->rcv_isn) {
		/* Truncate SYN, it is out of window starting
		   at tcp_rsk(req)->rcv_isn + 1. */
		flg &= ~TCP_FLAG_SYN;
	}

	/* RFC793: "second check the RST bit" and
	 *	   "fourth, check the SYN bit"
	 */
	if (flg & (TCP_FLAG_RST|TCP_FLAG_SYN)) {
		TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
		goto embryonic_reset;
	}

	/* ACK sequence verified above, just make sure ACK is
	 * set.  If ACK not set, just silently drop the packet.
	 */
	if (!(flg & TCP_FLAG_ACK))
		return NULL;

	/* If TCP_DEFER_ACCEPT is set, drop bare ACK. */
	if (inet_csk(sk)->icsk_accept_queue.rskq_defer_accept &&
	    TCP_SKB_CB(skb)->end_seq == tcp_rsk(req)->rcv_isn + 1) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

	/* OK, ACK is valid, create big socket and
	 * feed this segment to it. It will repeat all
	 * the tests. THIS SEGMENT MUST MOVE SOCKET TO
	 * ESTABLISHED STATE. If it will be dropped after
	 * socket is created, wait for troubles.
	 */
	child = inet_csk(sk)->icsk_af_ops->syn_recv_sock(sk, skb, req, NULL);
	if (child == NULL)
		goto listen_overflow;

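	/* Unhook the request from the listener's SYN queue and put the
	 * newly created child on its accept queue for accept() to reap. */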
	inet_csk_reqsk_queue_unlink(sk, req, prev);
	inet_csk_reqsk_queue_removed(sk, req);

	inet_csk_reqsk_queue_add(sk, req, child);
	return child;

listen_overflow:
	if (!sysctl_tcp_abort_on_overflow) {
		inet_rsk(req)->acked = 1;
		return NULL;
	}

embryonic_reset:
	NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
	if (!(flg & TCP_FLAG_RST))
		req->rsk_ops->send_reset(sk, skb);

	inet_csk_reqsk_queue_drop(sk, req, prev);
	return NULL;
}

/*
 * Queue the segment on the new socket if the new socket is active,
 * otherwise we just short-circuit this and continue with
 * the new socket.
 */

int tcp_child_process(struct sock *parent, struct sock *child,
		      struct sk_buff *skb)
{
	int ret = 0;
	int state = child->sk_state;

	if (!sock_owned_by_user(child)) {
		ret = tcp_rcv_state_process(child, skb, tcp_hdr(skb),
					    skb->len);
		/* Wakeup parent, send SIGIO */
		if (state == TCP_SYN_RECV && child->sk_state != state)
			parent->sk_data_ready(parent, 0);
	} else {
		/* Alas, it is possible again, because we do the lookup
		 * in the main socket hash table and the lock on the
		 * listening socket does not protect us any more.
		 */
		sk_add_backlog(child, skb);
	}

	bh_unlock_sock(child);
	sock_put(child);
	return ret;
}

EXPORT_SYMBOL(tcp_check_req);
EXPORT_SYMBOL(tcp_child_process);
EXPORT_SYMBOL(tcp_create_openreq_child);
EXPORT_SYMBOL(tcp_timewait_state_process);