/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Implementation of the Transmission Control Protocol (TCP).
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <[email protected]>
 *		Mark Evans, <[email protected]>
 *		Corey Minyard <[email protected]>
 *		Florian La Roche, <[email protected]>
 *		Charles Hedrick, <[email protected]>
 *		Linus Torvalds, <[email protected]>
 *		Alan Cox, <[email protected]>
 *		Matthew Dillon, <[email protected]>
 *		Arnt Gulbrandsen, <[email protected]>
 *		Jorge Cwik, <[email protected]>
 *
 * Fixes:
 *		Alan Cox	:	Numerous verify_area() calls
 *		Alan Cox	:	Set the ACK bit on a reset
 *		Alan Cox	:	Stopped it crashing if it closed while
 *					sk->inuse=1 and was trying to connect
 *					(tcp_err()).
 *		Alan Cox	:	All icmp error handling was broken
 *					pointers passed were wrong and the
 *					socket was looked up backwards. Nobody
 *					tested any icmp error code obviously.
 *		Alan Cox	:	tcp_err() now handled properly. It
 *					wakes people on errors. poll
 *					behaves and the icmp error race
 *					has gone by moving it into sock.c
 *		Alan Cox	:	tcp_send_reset() fixed to work for
 *					everything not just packets for
 *					unknown sockets.
 *		Alan Cox	:	tcp option processing.
 *		Alan Cox	:	Reset tweaked (still not 100%) [Had
 *					syn rule wrong]
 *		Herp Rosmanith	:	More reset fixes
 *		Alan Cox	:	No longer acks invalid rst frames.
 *					Acking any kind of RST is right out.
 *		Alan Cox	:	Sets an ignore me flag on an rst
 *					receive otherwise odd bits of prattle
 *					escape still
 *		Alan Cox	:	Fixed another acking RST frame bug.
 *					Should stop LAN workplace lockups.
 *		Alan Cox	:	Some tidyups using the new skb list
 *					facilities
 *		Alan Cox	:	sk->keepopen now seems to work
 *		Alan Cox	:	Pulls options out correctly on accepts
 *		Alan Cox	:	Fixed assorted sk->rqueue->next errors
 *		Alan Cox	:	PSH doesn't end a TCP read. Switched a
 *					bit to skb ops.
 *		Alan Cox	:	Tidied tcp_data to avoid a potential
 *					nasty.
 *		Alan Cox	:	Added some better commenting, as the
 *					tcp is hard to follow
 *		Alan Cox	:	Removed incorrect check for 20 * psh
 *	Michael O'Reilly	:	ack < copied bug fix.
 *	Johannes Stille		:	Misc tcp fixes (not all in yet).
 *		Alan Cox	:	FIN with no memory -> CRASH
 *		Alan Cox	:	Added socket option proto entries.
 *					Also added awareness of them to accept.
 *		Alan Cox	:	Added TCP options (SOL_TCP)
 *		Alan Cox	:	Switched wakeup calls to callbacks,
 *					so the kernel can layer network
 *					sockets.
 *		Alan Cox	:	Use ip_tos/ip_ttl settings.
 *		Alan Cox	:	Handle FIN (more) properly (we hope).
 *		Alan Cox	:	RST frames sent on unsynchronised
 *					state ack error.
 *		Alan Cox	:	Put in missing check for SYN bit.
 *		Alan Cox	:	Added tcp_select_window() aka NET2E
 *					window non shrink trick.
 *		Alan Cox	:	Added a couple of small NET2E timer
 *					fixes
 *	Charles Hedrick		:	TCP fixes
 *	Toomas Tamm		:	TCP window fixes
 *		Alan Cox	:	Small URG fix to rlogin ^C ack fight
 *	Charles Hedrick		:	Rewrote most of it to actually work
 *	Linus			:	Rewrote tcp_read() and URG handling
 *					completely
 *	Gerhard Koerting	:	Fixed some missing timer handling
 *	Matthew Dillon		:	Reworked TCP machine states as per RFC
 *	Gerhard Koerting	:	PC/TCP workarounds
 *	Adam Caldwell		:	Assorted timer/timing errors
 *	Matthew Dillon		:	Fixed another RST bug
 *		Alan Cox	:	Move to kernel side addressing changes.
 *		Alan Cox	:	Beginning work on TCP fastpathing
 *					(not yet usable)
 *	Arnt Gulbrandsen	:	Turbocharged tcp_check() routine.
 *		Alan Cox	:	TCP fast path debugging
 *		Alan Cox	:	Window clamping
 *	Michael Riepe		:	Bug in tcp_check()
 *	Matt Dillon		:	More TCP improvements and RST bug fixes
 *	Matt Dillon		:	Yet more small nasties removed from the
 *					TCP code (Be very nice to this man if
 *					tcp finally works 100%) 8)
 *		Alan Cox	:	BSD accept semantics.
 *		Alan Cox	:	Reset on closedown bug.
 *	Peter De Schrijver	:	ENOTCONN check missing in tcp_sendto().
 *	Michael Pall		:	Handle poll() after URG properly in
 *					all cases.
 *	Michael Pall		:	Undo the last fix in tcp_read_urg()
 *					(multi URG PUSH broke rlogin).
 *	Michael Pall		:	Fix the multi URG PUSH problem in
 *					tcp_readable(), poll() after URG
 *					works now.
 *	Michael Pall		:	recv(...,MSG_OOB) never blocks in the
 *					BSD api.
 *		Alan Cox	:	Changed the semantics of sk->socket to
 *					fix a race and a signal problem with
 *					accept() and async I/O.
 *		Alan Cox	:	Relaxed the rules on tcp_sendto().
 *	Yury Shevchuk		:	Really fixed accept() blocking problem.
 *	Craig I. Hagan		:	Allow for BSD compatible TIME_WAIT for
 *					clients/servers which listen in on
 *					fixed ports.
 *		Alan Cox	:	Cleaned the above up and shrank it to
 *					a sensible code size.
 *		Alan Cox	:	Self connect lockup fix.
 *		Alan Cox	:	No connect to multicast.
 *	Ross Biro		:	Close unaccepted children on master
 *					socket close.
 *		Alan Cox	:	Reset tracing code.
 *		Alan Cox	:	Spurious resets on shutdown.
 *		Alan Cox	:	Giant 15 minute/60 second timer error
 *		Alan Cox	:	Small whoops in polling before an
 *					accept.
 *		Alan Cox	:	Kept the state trace facility since
 *					it's handy for debugging.
 *		Alan Cox	:	More reset handler fixes.
 *		Alan Cox	:	Started rewriting the code based on
 *					the RFC's for other useful protocol
 *					references see: Comer, KA9Q NOS, and
 *					for a reference on the difference
 *					between specifications and how BSD
 *					works see the 4.4lite source.
 *	A.N.Kuznetsov		:	Don't time wait on completion of tidy
 *					close.
 *	Linus Torvalds		:	Fin/Shutdown & copied_seq changes.
 *	Linus Torvalds		:	Fixed BSD port reuse to work first syn
 *		Alan Cox	:	Reimplemented timers as per the RFC
 *					and using multiple timers for sanity.
 *		Alan Cox	:	Small bug fixes, and a lot of new
 *					comments.
 *		Alan Cox	:	Fixed dual reader crash by locking
 *					the buffers (much like datagram.c)
 *		Alan Cox	:	Fixed stuck sockets in probe. A probe
 *					now gets fed up of retrying without
 *					(even a no space) answer.
 *		Alan Cox	:	Extracted closing code better
 *		Alan Cox	:	Fixed the closing state machine to
 *					resemble the RFC.
 *		Alan Cox	:	More 'per spec' fixes.
 *	Jorge Cwik		:	Even faster checksumming.
 *		Alan Cox	:	tcp_data() doesn't ack illegal PSH
 *					only frames. At least one pc tcp stack
 *					generates them.
 *		Alan Cox	:	Cache last socket.
 *		Alan Cox	:	Per route irtt.
 *	Matt Day		:	poll()->select() match BSD precisely on error
 *		Alan Cox	:	New buffers
 *	Marc Tamsky		:	Various sk->prot->retransmits and
 *					sk->retransmits misupdating fixed.
 *					Fixed tcp_write_timeout: stuck close,
 *					and TCP syn retries gets used now.
 *	Mark Yarvis		:	In tcp_read_wakeup(), don't send an
 *					ack if state is TCP_CLOSED.
 *		Alan Cox	:	Look up device on a retransmit - routes may
 *					change. Doesn't yet cope with MSS shrink right
 *					but it's a start!
 *	Marc Tamsky		:	Closing in closing fixes.
 *	Mike Shaver		:	RFC1122 verifications.
 *		Alan Cox	:	rcv_saddr errors.
 *		Alan Cox	:	Block double connect().
 *		Alan Cox	:	Small hooks for enSKIP.
 *	Alexey Kuznetsov	:	Path MTU discovery.
 *		Alan Cox	:	Support soft errors.
 *		Alan Cox	:	Fix MTU discovery pathological case
 *					when the remote claims no mtu!
 *	Marc Tamsky		:	TCP_CLOSE fix.
 *	Colin (G3TNE)		:	Send a reset on syn ack replies in
 *					window but wrong (fixes NT lpd problems)
 *	Pedro Roque		:	Better TCP window handling, delayed ack.
 *	Joerg Reuter		:	No modification of locked buffers in
 *					tcp_do_retransmit()
 *	Eric Schenk		:	Changed receiver side silly window
 *					avoidance algorithm to BSD style
 *					algorithm. This doubles throughput
 *					against machines running Solaris,
 *					and seems to result in general
 *					improvement.
 *	Stefan Magdalinski	:	adjusted tcp_readable() to fix FIONREAD
 *	Willy Konynenberg	:	Transparent proxying support.
 *	Mike McLagan		:	Routing by source
 *	Keith Owens		:	Do proper merging with partial SKB's in
 *					tcp_do_sendmsg to avoid burstiness.
 *	Eric Schenk		:	Fix fast close down bug with
 *					shutdown() followed by close().
 *	Andi Kleen		:	Make poll agree with SIGIO
 *	Salvatore Sanfilippo	:	Support SO_LINGER with linger == 1 and
 *					lingertime == 0 (RFC 793 ABORT Call)
 *	Hirokazu Takahashi	:	Use copy_from_user() instead of
 *					csum_and_copy_from_user() if possible.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Description of States:
 *
 *	TCP_SYN_SENT		sent a connection request, waiting for ack
 *
 *	TCP_SYN_RECV		received a connection request, sent ack,
 *				waiting for final ack in three-way handshake.
 *
 *	TCP_ESTABLISHED		connection established
 *
 *	TCP_FIN_WAIT1		our side has shutdown, waiting to complete
 *				transmission of remaining buffered data
 *
 *	TCP_FIN_WAIT2		all buffered data sent, waiting for remote
 *				to shutdown
 *
 *	TCP_CLOSING		both sides have shutdown but we still have
 *				data we have to finish sending
 *
 *	TCP_TIME_WAIT		timeout to catch resent junk before entering
 *				closed, can only be entered from FIN_WAIT2
 *				or CLOSING. Required because the other end
 *				may not have gotten our last ACK causing it
 *				to retransmit the data packet (which we ignore)
 *
 *	TCP_CLOSE_WAIT		remote side has shutdown and is waiting for
 *				us to finish writing our data and to shutdown
 *				(we have to close() to move on to LAST_ACK)
 *
 *	TCP_LAST_ACK		our side has shutdown after remote has
 *				shutdown. There may still be data in our
 *				buffer that we have to finish sending
 *
 *	TCP_CLOSE		socket is finished
 */

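/*
 * A compact sketch of the common transitions between the states above
 * (a summary of RFC 793, not an exhaustive diagram; the simultaneous
 * close through CLOSING is omitted):
 *
 *	active close:	ESTABLISHED --close()--> FIN_WAIT1 --ACK--> FIN_WAIT2
 *			FIN_WAIT2 --FIN--> TIME_WAIT --2*MSL timeout--> CLOSE
 *
 *	passive close:	ESTABLISHED --FIN--> CLOSE_WAIT --close()--> LAST_ACK
 *			LAST_ACK --ACK--> CLOSE
 */
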
#define pr_fmt(fmt) "TCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/scatterlist.h>
#include <linux/splice.h>
#include <linux/net.h>
#include <linux/socket.h>
#include <linux/random.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <linux/crypto.h>
#include <linux/time.h>
#include <linux/slab.h>

#include <net/icmp.h>
#include <net/tcp.h>
#include <net/xfrm.h>
#include <net/ip.h>
#include <net/netdma.h>
#include <net/sock.h>

#include <asm/uaccess.h>
#include <asm/ioctls.h>

int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;

struct percpu_counter tcp_orphan_count;
EXPORT_SYMBOL_GPL(tcp_orphan_count);

int sysctl_tcp_wmem[3] __read_mostly;
int sysctl_tcp_rmem[3] __read_mostly;

EXPORT_SYMBOL(sysctl_tcp_rmem);
EXPORT_SYMBOL(sysctl_tcp_wmem);

atomic_long_t tcp_memory_allocated;	/* Current allocated memory. */
EXPORT_SYMBOL(tcp_memory_allocated);

/*
 * Current number of TCP sockets.
 */
struct percpu_counter tcp_sockets_allocated;
EXPORT_SYMBOL(tcp_sockets_allocated);

/*
 * TCP splice context
 */
struct tcp_splice_state {
	struct pipe_inode_info *pipe;
	size_t len;
	unsigned int flags;
};

/*
 * Pressure flag: try to collapse.
 * Technical note: it is used by multiple contexts non atomically.
 * All the __sk_mem_schedule() is of this nature: accounting
 * is strict, actions are advisory and have some latency.
 */
int tcp_memory_pressure __read_mostly;
EXPORT_SYMBOL(tcp_memory_pressure);

void tcp_enter_memory_pressure(struct sock *sk)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
		tcp_memory_pressure = 1;
	}
}
EXPORT_SYMBOL(tcp_enter_memory_pressure);

/* Convert seconds to retransmits based on initial and max timeout */
static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
{
	u8 res = 0;

	if (seconds > 0) {
		int period = timeout;

		res = 1;
		while (seconds > period && res < 255) {
			res++;
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return res;
}

/* Convert retransmits to seconds based on initial and max timeout */
static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
{
	int period = 0;

	if (retrans > 0) {
		period = timeout;
		while (--retrans) {
			timeout <<= 1;
			if (timeout > rto_max)
				timeout = rto_max;
			period += timeout;
		}
	}
	return period;
}

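/*
 * Worked example of the exponential backoff above (illustrative figures,
 * not necessarily the compiled-in defaults): with an initial timeout of
 * 1s and rto_max of 120s, the cumulative period grows 1, 1+2=3, 3+4=7,
 * 7+8=15, ... so secs_to_retrans(13, 1, 120) walks period = 1, 3, 7, 15
 * and returns 4, while the inverse retrans_to_secs(4, 1, 120) gives back
 * 1 + 2 + 4 + 8 = 15 seconds.
 */
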
/* Address-family independent initialization for a tcp_sock.
 *
 * NOTE: A lot of things set to zero explicitly by call to
 * sk_alloc() so need not be done here.
 */
void tcp_init_sock(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct tcp_sock *tp = tcp_sk(sk);

	skb_queue_head_init(&tp->out_of_order_queue);
	tcp_init_xmit_timers(sk);
	tcp_prequeue_init(tp);

	icsk->icsk_rto = TCP_TIMEOUT_INIT;
	tp->mdev = TCP_TIMEOUT_INIT;

	/* So many TCP implementations out there (incorrectly) count the
	 * initial SYN frame in their delayed-ACK and congestion control
	 * algorithms that we must have the following bandaid to talk
	 * efficiently to them.  -DaveM
	 */
	tp->snd_cwnd = TCP_INIT_CWND;

	/* See draft-stevens-tcpca-spec-01 for discussion of the
	 * initialization of these values.
	 */
	tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
	tp->snd_cwnd_clamp = ~0;
	tp->mss_cache = TCP_MSS_DEFAULT;

	tp->reordering = sysctl_tcp_reordering;
	tcp_enable_early_retrans(tp);
	icsk->icsk_ca_ops = &tcp_init_congestion_ops;

	sk->sk_state = TCP_CLOSE;

	sk->sk_write_space = sk_stream_write_space;
	sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);

	icsk->icsk_sync_mss = tcp_sync_mss;

	/* TCP Cookie Transactions */
	if (sysctl_tcp_cookie_size > 0) {
		/* Default, cookies without s_data_payload. */
		tp->cookie_values =
			kzalloc(sizeof(*tp->cookie_values),
				sk->sk_allocation);
		if (tp->cookie_values != NULL)
			kref_init(&tp->cookie_values->kref);
	}
	/* Presumed zeroed, in order of appearance:
	 *	cookie_in_always, cookie_out_never,
	 *	s_data_constant, s_data_in, s_data_out
	 */
	sk->sk_sndbuf = sysctl_tcp_wmem[1];
	sk->sk_rcvbuf = sysctl_tcp_rmem[1];

	local_bh_disable();
	sock_update_memcg(sk);
	sk_sockets_allocated_inc(sk);
	local_bh_enable();
}
EXPORT_SYMBOL(tcp_init_sock);

/*
 *	Wait for a TCP event.
 *
 *	Note that we don't need to lock the socket, as the upper poll layers
 *	take care of normal races (between the test and the event) and we don't
 *	go look at any of the socket buffers directly.
 */
unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
{
	unsigned int mask;
	struct sock *sk = sock->sk;
	const struct tcp_sock *tp = tcp_sk(sk);

	sock_poll_wait(file, sk_sleep(sk), wait);
	if (sk->sk_state == TCP_LISTEN)
		return inet_csk_listen_poll(sk);

	/* Socket is not locked. We are protected from async events
	 * by poll logic and correct handling of state changes
	 * made by other threads is impossible in any case.
	 */

	mask = 0;

	/*
	 * POLLHUP is certainly not done right. But poll() doesn't
	 * have a notion of HUP in just one direction, and for a
	 * socket the read side is more interesting.
	 *
	 * Some poll() documentation says that POLLHUP is incompatible
	 * with the POLLOUT/POLLWR flags, so somebody should check this
	 * all. But careful, it tends to be safer to return too many
	 * bits than too few, and you can easily break real applications
	 * if you don't tell them that something has hung up!
	 *
	 * Check-me.
	 *
	 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
	 * our fs/select.c). It means that after we received EOF,
	 * poll always returns immediately, making impossible poll() on write()
	 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
	 * if and only if shutdown has been made in both directions.
	 * Actually, it is interesting to look how Solaris and DUX
	 * solve this dilemma. I would prefer, if POLLHUP were maskable,
	 * then we could set it on SND_SHUTDOWN. BTW examples given
	 * in Stevens' books assume exactly this behaviour, it explains
	 * why POLLHUP is incompatible with POLLOUT.	--ANK
	 *
	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
	 * blocking on fresh not-connected or disconnected socket. --ANK
	 */
	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
		mask |= POLLHUP;
	if (sk->sk_shutdown & RCV_SHUTDOWN)
		mask |= POLLIN | POLLRDNORM | POLLRDHUP;

	/* Connected? */
	if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
		int target = sock_rcvlowat(sk, 0, INT_MAX);

		if (tp->urg_seq == tp->copied_seq &&
		    !sock_flag(sk, SOCK_URGINLINE) &&
		    tp->urg_data)
			target++;

		/* Potential race condition. If read of tp below will
		 * escape above sk->sk_state, we can be illegally awoken
		 * in SYN_* states. */
		if (tp->rcv_nxt - tp->copied_seq >= target)
			mask |= POLLIN | POLLRDNORM;

		if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
			if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
				mask |= POLLOUT | POLLWRNORM;
			} else {  /* send SIGIO later */
				set_bit(SOCK_ASYNC_NOSPACE,
					&sk->sk_socket->flags);
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);

				/* Race breaker. If space is freed after
				 * wspace test but before the flags are set,
				 * IO signal will be lost.
				 */
				if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
					mask |= POLLOUT | POLLWRNORM;
			}
		} else
			mask |= POLLOUT | POLLWRNORM;

		if (tp->urg_data & TCP_URG_VALID)
			mask |= POLLPRI;
	}
	/* This barrier is coupled with smp_wmb() in tcp_reset() */
	smp_rmb();
	if (sk->sk_err)
		mask |= POLLERR;

	return mask;
}
EXPORT_SYMBOL(tcp_poll);

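/*
 * From user space this surfaces through poll(2)/epoll(7); a minimal,
 * hypothetical consumer would look roughly like:
 *
 *	struct pollfd pfd = { .fd = sockfd, .events = POLLIN | POLLRDHUP };
 *	if (poll(&pfd, 1, timeout_ms) > 0 && (pfd.revents & POLLIN))
 *		n = read(sockfd, buf, sizeof(buf));
 *
 * POLLRDHUP (Linux-specific, needs _GNU_SOURCE) reports the peer's
 * half-close, matching the RCV_SHUTDOWN handling above.
 */
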
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int answ;

	switch (cmd) {
	case SIOCINQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		lock_sock(sk);
		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else if (sock_flag(sk, SOCK_URGINLINE) ||
			 !tp->urg_data ||
			 before(tp->urg_seq, tp->copied_seq) ||
			 !before(tp->urg_seq, tp->rcv_nxt)) {
			struct sk_buff *skb;

			answ = tp->rcv_nxt - tp->copied_seq;

			/* Subtract 1, if FIN is in queue. */
			skb = skb_peek_tail(&sk->sk_receive_queue);
			if (answ && skb)
				answ -= tcp_hdr(skb)->fin;
		} else
			answ = tp->urg_seq - tp->copied_seq;
		release_sock(sk);
		break;
	case SIOCATMARK:
		answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
		break;
	case SIOCOUTQ:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_una;
		break;
	case SIOCOUTQNSD:
		if (sk->sk_state == TCP_LISTEN)
			return -EINVAL;

		if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
			answ = 0;
		else
			answ = tp->write_seq - tp->snd_nxt;
		break;
	default:
		return -ENOIOCTLCMD;
	}

	return put_user(answ, (int __user *)arg);
}
EXPORT_SYMBOL(tcp_ioctl);

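/*
 * Illustrative user-space probe of these commands (a sketch, error
 * handling elided):
 *
 *	int unread, queued;
 *	ioctl(sockfd, SIOCINQ, &unread);  // bytes waiting in receive queue
 *	ioctl(sockfd, SIOCOUTQ, &queued); // bytes queued, not yet acked
 *
 * SIOCOUTQNSD instead reports only bytes not yet sent at all
 * (write_seq - snd_nxt).
 */
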
static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
{
	TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
	tp->pushed_seq = tp->write_seq;
}

static inline bool forced_push(const struct tcp_sock *tp)
{
	return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
}

static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_sock *tp = tcp_sk(sk);
	struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);

	skb->csum = 0;
	tcb->seq = tcb->end_seq = tp->write_seq;
	tcb->tcp_flags = TCPHDR_ACK;
	tcb->sacked = 0;
	skb_header_release(skb);
	tcp_add_write_queue_tail(sk, skb);
	sk->sk_wmem_queued += skb->truesize;
	sk_mem_charge(sk, skb->truesize);
	if (tp->nonagle & TCP_NAGLE_PUSH)
		tp->nonagle &= ~TCP_NAGLE_PUSH;
}

static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
{
	if (flags & MSG_OOB)
		tp->snd_up = tp->write_seq;
}

static inline void tcp_push(struct sock *sk, int flags, int mss_now,
			    int nonagle)
{
	if (tcp_send_head(sk)) {
		struct tcp_sock *tp = tcp_sk(sk);

		if (!(flags & MSG_MORE) || forced_push(tp))
			tcp_mark_push(tp, tcp_write_queue_tail(sk));

		tcp_mark_urg(tp, flags);
		__tcp_push_pending_frames(sk, mss_now,
					  (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
	}
}

static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
				unsigned int offset, size_t len)
{
	struct tcp_splice_state *tss = rd_desc->arg.data;
	int ret;

	ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
			      tss->flags);
	if (ret > 0)
		rd_desc->count -= ret;
	return ret;
}

static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
{
	/* Store TCP splice context information in read_descriptor_t. */
	read_descriptor_t rd_desc = {
		.arg.data = tss,
		.count	  = tss->len,
	};

	return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
}

/**
 *  tcp_splice_read - splice data from TCP socket to a pipe
 *  @sock:	socket to splice from
 *  @ppos:	position (not valid)
 *  @pipe:	pipe to splice to
 *  @len:	number of bytes to splice
 *  @flags:	splice modifier flags
 *
 * Description:
 *    Will read pages from given socket and fill them into a pipe.
 *
 **/
ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
			struct pipe_inode_info *pipe, size_t len,
			unsigned int flags)
{
	struct sock *sk = sock->sk;
	struct tcp_splice_state tss = {
		.pipe = pipe,
		.len = len,
		.flags = flags,
	};
	long timeo;
	ssize_t spliced;
	int ret;

	sock_rps_record_flow(sk);
	/*
	 * We can't seek on a socket input
	 */
	if (unlikely(*ppos))
		return -ESPIPE;

	ret = spliced = 0;

	lock_sock(sk);

	timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
	while (tss.len) {
		ret = __tcp_splice_read(sk, &tss);
		if (ret < 0)
			break;
		else if (!ret) {
			if (spliced)
				break;
			if (sock_flag(sk, SOCK_DONE))
				break;
			if (sk->sk_err) {
				ret = sock_error(sk);
				break;
			}
			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;
			if (sk->sk_state == TCP_CLOSE) {
				/*
				 * This occurs when the user tries to read
				 * from a never-connected socket.
				 */
				if (!sock_flag(sk, SOCK_DONE))
					ret = -ENOTCONN;
				break;
			}
			if (!timeo) {
				ret = -EAGAIN;
				break;
			}
			sk_wait_data(sk, &timeo);
			if (signal_pending(current)) {
				ret = sock_intr_errno(timeo);
				break;
			}
			continue;
		}
		tss.len -= ret;
		spliced += ret;

		if (!timeo)
			break;
		release_sock(sk);
		lock_sock(sk);

		if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
		    (sk->sk_shutdown & RCV_SHUTDOWN) ||
		    signal_pending(current))
			break;
	}

	release_sock(sk);

	if (spliced)
		return spliced;

	return ret;
}
EXPORT_SYMBOL(tcp_splice_read);

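/*
 * User space reaches this path through splice(2) with the socket as the
 * input end of the call; a minimal sketch (setup and error handling
 * omitted):
 *
 *	int pfd[2];
 *	pipe(pfd);
 *	// move up to 64KB from the socket into the pipe, then on to a
 *	// file, without bouncing the payload through user memory
 *	splice(sockfd, NULL, pfd[1], NULL, 65536, SPLICE_F_MOVE);
 *	splice(pfd[0], NULL, filefd, NULL, 65536, SPLICE_F_MOVE);
 */
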
struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
{
	struct sk_buff *skb;

	/* The TCP header must be at least 32-bit aligned. */
	size = ALIGN(size, 4);

	skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
	if (skb) {
		if (sk_wmem_schedule(sk, skb->truesize)) {
			skb_reserve(skb, sk->sk_prot->max_header);
			/*
			 * Make sure that we have exactly size bytes
			 * available to the caller, no more, no less.
			 */
			skb->avail_size = size;
			return skb;
		}
		__kfree_skb(skb);
	} else {
		sk->sk_prot->enter_memory_pressure(sk);
		sk_stream_moderate_sndbuf(sk);
	}
	return NULL;
}

static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
				       int large_allowed)
{
	struct tcp_sock *tp = tcp_sk(sk);
	u32 xmit_size_goal, old_size_goal;

	xmit_size_goal = mss_now;

	if (large_allowed && sk_can_gso(sk)) {
		xmit_size_goal = ((sk->sk_gso_max_size - 1) -
				  inet_csk(sk)->icsk_af_ops->net_header_len -
				  inet_csk(sk)->icsk_ext_hdr_len -
				  tp->tcp_header_len);

		xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);

		/* We try hard to avoid divides here */
		old_size_goal = tp->xmit_size_goal_segs * mss_now;

		if (likely(old_size_goal <= xmit_size_goal &&
			   old_size_goal + mss_now > xmit_size_goal)) {
			xmit_size_goal = old_size_goal;
		} else {
			tp->xmit_size_goal_segs = xmit_size_goal / mss_now;
			xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
		}
	}

	return max(xmit_size_goal, mss_now);
}

static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
{
	int mss_now;

	mss_now = tcp_current_mss(sk);
	*size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));

	return mss_now;
}

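/*
 * Worked example (figures are illustrative): with mss_now = 1448 and a
 * GSO-capable route, the raw goal starts near sk_gso_max_size (64KB)
 * minus header overhead, is clamped to at most half the peer's maximum
 * window, and is then rounded down to a whole number of segments --
 * e.g. a 60000-byte bound yields 60000 / 1448 = 41 segments, so
 * size_goal = 41 * 1448 = 59368 bytes per skb handed to the GSO layer.
 */
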
static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
				size_t psize, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);
	int mss_now, size_goal;
	int err;
	ssize_t copied;
	long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	while (psize > 0) {
		struct sk_buff *skb = tcp_write_queue_tail(sk);
		struct page *page = pages[poffset / PAGE_SIZE];
		int copy, i;
		int offset = poffset % PAGE_SIZE;
		int size = min_t(size_t, psize, PAGE_SIZE - offset);
		bool can_coalesce;

		if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
new_segment:
			if (!sk_stream_memory_free(sk))
				goto wait_for_sndbuf;

			skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
			if (!skb)
				goto wait_for_memory;

			skb_entail(sk, skb);
			copy = size_goal;
		}

		if (copy > size)
			copy = size;

		i = skb_shinfo(skb)->nr_frags;
		can_coalesce = skb_can_coalesce(skb, i, page, offset);
		if (!can_coalesce && i >= MAX_SKB_FRAGS) {
			tcp_mark_push(tp, skb);
			goto new_segment;
		}
		if (!sk_wmem_schedule(sk, copy))
			goto wait_for_memory;

		if (can_coalesce) {
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
		} else {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, copy);
		}

		skb->len += copy;
		skb->data_len += copy;
		skb->truesize += copy;
		sk->sk_wmem_queued += copy;
		sk_mem_charge(sk, copy);
		skb->ip_summed = CHECKSUM_PARTIAL;
		tp->write_seq += copy;
		TCP_SKB_CB(skb)->end_seq += copy;
		skb_shinfo(skb)->gso_segs = 0;

		if (!copied)
			TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

		copied += copy;
		poffset += copy;
		if (!(psize -= copy))
			goto out;

		if (skb->len < size_goal || (flags & MSG_OOB))
			continue;

		if (forced_push(tp)) {
			tcp_mark_push(tp, skb);
			__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
		} else if (skb == tcp_send_head(sk))
			tcp_push_one(sk, mss_now);
		continue;

wait_for_sndbuf:
		set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
		tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

		if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
			goto do_error;

		mss_now = tcp_send_mss(sk, &size_goal, flags);
	}

out:
	if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
		tcp_push(sk, flags, mss_now, tp->nonagle);
	return copied;

do_error:
	if (copied)
		goto out;
out_err:
	return sk_stream_error(sk, flags, err);
}

int tcp_sendpage(struct sock *sk, struct page *page, int offset,
		 size_t size, int flags)
{
	ssize_t res;

	if (!(sk->sk_route_caps & NETIF_F_SG) ||
	    !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
		return sock_no_sendpage(sk->sk_socket, page, offset, size,
					flags);

	lock_sock(sk);
	res = do_tcp_sendpages(sk, &page, offset, size, flags);
	release_sock(sk);
	return res;
}
EXPORT_SYMBOL(tcp_sendpage);

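/*
 * This is the sink for zero-copy transmit paths such as sendfile(2);
 * an illustrative caller (error handling omitted):
 *
 *	off_t off = 0;
 *	sendfile(sockfd, filefd, &off, st.st_size);
 *
 * When the route lacks SG or checksum offload, the fallback above to
 * sock_no_sendpage() copies through the ordinary sendmsg path instead.
 */
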
static inline int select_size(const struct sock *sk, bool sg)
{
	const struct tcp_sock *tp = tcp_sk(sk);
	int tmp = tp->mss_cache;

	if (sg) {
		if (sk_can_gso(sk)) {
			/* Small frames won't use a full page:
			 * Payload will immediately follow tcp header.
			 */
			tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
		} else {
			int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);

			if (tmp >= pgbreak &&
			    tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
				tmp = pgbreak;
		}
	}

	return tmp;
}

int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
		size_t size)
{
	struct iovec *iov;
	struct tcp_sock *tp = tcp_sk(sk);
	struct sk_buff *skb;
	int iovlen, flags, err, copied;
	int mss_now = 0, size_goal;
	bool sg;
	long timeo;

	lock_sock(sk);

	flags = msg->msg_flags;
	timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);

	/* Wait for a connection to finish. */
	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
		if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
			goto out_err;

	if (unlikely(tp->repair)) {
		if (tp->repair_queue == TCP_RECV_QUEUE) {
			copied = tcp_send_rcvq(sk, msg, size);
			goto out;
		}

		err = -EINVAL;
		if (tp->repair_queue == TCP_NO_QUEUE)
			goto out_err;

		/* 'common' sending to sendq */
	}

	/* This should be in poll */
	clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);

	mss_now = tcp_send_mss(sk, &size_goal, flags);

	/* Ok commence sending. */
	iovlen = msg->msg_iovlen;
	iov = msg->msg_iov;
	copied = 0;

	err = -EPIPE;
	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
		goto out_err;

	sg = !!(sk->sk_route_caps & NETIF_F_SG);

	while (--iovlen >= 0) {
		size_t seglen = iov->iov_len;
		unsigned char __user *from = iov->iov_base;

		iov++;

		while (seglen > 0) {
			int copy = 0;
			int max = size_goal;

			skb = tcp_write_queue_tail(sk);
			if (tcp_send_head(sk)) {
				if (skb->ip_summed == CHECKSUM_NONE)
					max = mss_now;
				copy = max - skb->len;
			}

			if (copy <= 0) {
new_segment:
				/* Allocate new segment. If the interface is SG,
				 * allocate skb fitting to single page.
				 */
				if (!sk_stream_memory_free(sk))
					goto wait_for_sndbuf;

				skb = sk_stream_alloc_skb(sk,
							  select_size(sk, sg),
							  sk->sk_allocation);
				if (!skb)
					goto wait_for_memory;

				/*
				 * Check whether we can use HW checksum.
				 */
				if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
					skb->ip_summed = CHECKSUM_PARTIAL;

				skb_entail(sk, skb);
				copy = size_goal;
				max = size_goal;
			}

			/* Try to append data to the end of skb. */
			if (copy > seglen)
				copy = seglen;

			/* Where to copy to? */
			if (skb_availroom(skb) > 0) {
				/* We have some space in skb head. Superb! */
				copy = min_t(int, copy, skb_availroom(skb));
				err = skb_add_data_nocache(sk, skb, from, copy);
				if (err)
					goto do_fault;
			} else {
				bool merge = false;
				int i = skb_shinfo(skb)->nr_frags;
				struct page *page = sk->sk_sndmsg_page;
				int off;

				if (page && page_count(page) == 1)
					sk->sk_sndmsg_off = 0;

				off = sk->sk_sndmsg_off;

				if (skb_can_coalesce(skb, i, page, off) &&
				    off != PAGE_SIZE) {
					/* We can extend the last page
					 * fragment. */
					merge = true;
				} else if (i == MAX_SKB_FRAGS || !sg) {
					/* Need to add new fragment and cannot
					 * do this because interface is non-SG,
					 * or because all the page slots are
					 * busy. */
					tcp_mark_push(tp, skb);
					goto new_segment;
				} else if (page) {
					if (off == PAGE_SIZE) {
						put_page(page);
						sk->sk_sndmsg_page = page = NULL;
						off = 0;
					}
				} else
					off = 0;

				if (copy > PAGE_SIZE - off)
					copy = PAGE_SIZE - off;

				if (!sk_wmem_schedule(sk, copy))
					goto wait_for_memory;

				if (!page) {
					/* Allocate new cache page. */
					if (!(page = sk_stream_alloc_page(sk)))
						goto wait_for_memory;
				}

				/* Time to copy data. We are close to
				 * the end! */
				err = skb_copy_to_page_nocache(sk, from, skb,
							       page, off, copy);
				if (err) {
					/* If this page was new, give it to the
					 * socket so it does not get leaked.
					 */
					if (!sk->sk_sndmsg_page) {
						sk->sk_sndmsg_page = page;
						sk->sk_sndmsg_off = 0;
					}
					goto do_error;
				}

				/* Update the skb. */
				if (merge) {
					skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
				} else {
					skb_fill_page_desc(skb, i, page, off, copy);
					if (sk->sk_sndmsg_page) {
						get_page(page);
					} else if (off + copy < PAGE_SIZE) {
						get_page(page);
						sk->sk_sndmsg_page = page;
					}
				}

				sk->sk_sndmsg_off = off + copy;
			}

			if (!copied)
				TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;

			tp->write_seq += copy;
			TCP_SKB_CB(skb)->end_seq += copy;
			skb_shinfo(skb)->gso_segs = 0;

			from += copy;
			copied += copy;
			if ((seglen -= copy) == 0 && iovlen == 0)
				goto out;

			if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
				continue;

			if (forced_push(tp)) {
				tcp_mark_push(tp, skb);
				__tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
			} else if (skb == tcp_send_head(sk))
				tcp_push_one(sk, mss_now);
			continue;

wait_for_sndbuf:
			set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
wait_for_memory:
			if (copied && likely(!tp->repair))
				tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);

			if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
				goto do_error;

			mss_now = tcp_send_mss(sk, &size_goal, flags);
		}
	}

out:
	if (copied && likely(!tp->repair))
		tcp_push(sk, flags, mss_now, tp->nonagle);
	release_sock(sk);
	return copied;

do_fault:
	if (!skb->len) {
		tcp_unlink_write_queue(skb, sk);
		/* It is the one place in all of TCP, except connection
		 * reset, where we can be unlinking the send_head.
		 */
		tcp_check_send_head(sk, skb);
		sk_wmem_free_skb(sk, skb);
	}

do_error:
	if (copied)
		goto out;
out_err:
	err = sk_stream_error(sk, flags, err);
	release_sock(sk);
	return err;
}
EXPORT_SYMBOL(tcp_sendmsg);

/*
 *	Handle reading urgent data. BSD has very simple semantics for
 *	this, no blocking and very strange errors 8)
 */

static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
{
	struct tcp_sock *tp = tcp_sk(sk);

	/* No URG data to read. */
	if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
	    tp->urg_data == TCP_URG_READ)
		return -EINVAL;	/* Yes this is right ! */

	if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
		return -ENOTCONN;

	if (tp->urg_data & TCP_URG_VALID) {
		int err = 0;
		char c = tp->urg_data;

		if (!(flags & MSG_PEEK))
			tp->urg_data = TCP_URG_READ;

		/* Read urgent data. */
		msg->msg_flags |= MSG_OOB;

		if (len > 0) {
			if (!(flags & MSG_TRUNC))
				err = memcpy_toiovec(msg->msg_iov, &c, 1);
			len = 1;
		} else
			msg->msg_flags |= MSG_TRUNC;

		return err ? -EFAULT : len;
	}

	if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
		return 0;

	/* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
	 * the available implementations agree in this case:
	 * this call should never block, independent of the
	 * blocking state of the socket.
	 * Mike <[email protected]>
	 */
	return -EAGAIN;
}

static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
{
	struct sk_buff *skb;
	int copied = 0, err = 0;

	/* XXX -- need to support SO_PEEK_OFF */

	skb_queue_walk(&sk->sk_write_queue, skb) {
		err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
		if (err)
			break;

		copied += skb->len;
	}

	return err ?: copied;
}

/* Clean up the receive buffer for full frames taken by the user,
 * then send an ACK if necessary.  COPIED is the number of bytes
 * tcp_recvmsg has given to the user so far, it speeds up the
 * calculation of whether or not we must ACK for the sake of
 * a window update.
 */
void tcp_cleanup_rbuf(struct sock *sk, int copied)
{
	struct tcp_sock *tp = tcp_sk(sk);
	bool time_to_ack = false;

	struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);

	WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
	     "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
	     tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);

	if (inet_csk_ack_scheduled(sk)) {
		const struct inet_connection_sock *icsk = inet_csk(sk);
		/* Delayed ACKs frequently hit locked sockets during bulk
		 * receive. */
		if (icsk->icsk_ack.blocked ||
		    /* Once-per-two-segments ACK was not sent by tcp_input.c */
		    tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
		    /*
		     * If this read emptied read buffer, we send ACK, if
		     * connection is not bidirectional, user drained
		     * receive buffer and there was a small segment
		     * in queue.
		     */
		    (copied > 0 &&
		     ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
		      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
		       !icsk->icsk_ack.pingpong)) &&
		     !atomic_read(&sk->sk_rmem_alloc)))
			time_to_ack = true;
	}

	/* We send an ACK if we can now advertise a non-zero window
	 * which has been raised "significantly".
	 *
	 * Even if window raised up to infinity, do not send window open ACK
	 * in states, where we will not receive more. It is useless.
	 */
	if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		__u32 rcv_window_now = tcp_receive_window(tp);

		/* Optimize, __tcp_select_window() is not cheap. */
		if (2 * rcv_window_now <= tp->window_clamp) {
			__u32 new_window = __tcp_select_window(sk);

			/* Send ACK now, if this read freed lots of space
			 * in our buffer. Certainly, new_window is new window.
			 * We can advertise it now, if it is not less than current one.
			 * "Lots" means "at least twice" here.
			 */
			if (new_window && new_window >= 2 * rcv_window_now)
				time_to_ack = true;
		}
	}
	if (time_to_ack)
		tcp_send_ack(sk);
}

static void tcp_prequeue_process(struct sock *sk)
{
	struct sk_buff *skb;
	struct tcp_sock *tp = tcp_sk(sk);

	NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);

	/* RX process wants to run with disabled BHs, though it is not
	 * necessary */
	local_bh_disable();
	while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
		sk_backlog_rcv(sk, skb);
	local_bh_enable();

	/* Clear memory counter. */
	tp->ucopy.memory = 0;
}

#ifdef CONFIG_NET_DMA
static void tcp_service_net_dma(struct sock *sk, bool wait)
{
	dma_cookie_t done, used;
	dma_cookie_t last_issued;
	struct tcp_sock *tp = tcp_sk(sk);

	if (!tp->ucopy.dma_chan)
		return;

	last_issued = tp->ucopy.dma_cookie;
	dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);

	do {
		if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
					      last_issued, &done,
					      &used) == DMA_SUCCESS) {
			/* Safe to free early-copied skbs now */
			__skb_queue_purge(&sk->sk_async_wait_queue);
			break;
		} else {
			struct sk_buff *skb;
			while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
			       (dma_async_is_complete(skb->dma_cookie, done,
						      used) == DMA_SUCCESS)) {
				__skb_dequeue(&sk->sk_async_wait_queue);
				kfree_skb(skb);
			}
		}
	} while (wait);
}
#endif

static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
{
	struct sk_buff *skb;
	u32 offset;

	skb_queue_walk(&sk->sk_receive_queue, skb) {
		offset = seq - TCP_SKB_CB(skb)->seq;
		if (tcp_hdr(skb)->syn)
			offset--;
		if (offset < skb->len || tcp_hdr(skb)->fin) {
			*off = offset;
			return skb;
		}
	}
	return NULL;
}

1418 | /* | |
1419 | * This routine provides an alternative to tcp_recvmsg() for routines | |
1420 | * that would like to handle copying from skbuffs directly in 'sendfile' | |
1421 | * fashion. | |
1422 | * Note: | |
1423 | * - It is assumed that the socket was locked by the caller. | |
1424 | * - The routine does not block. | |
1425 | * - At present, there is no support for reading OOB data | |
1426 | * or for 'peeking' the socket using this routine | |
1427 | * (although both would be easy to implement). | |
1428 | */ | |
1429 | int tcp_read_sock(struct sock *sk, read_descriptor_t *desc, | |
1430 | sk_read_actor_t recv_actor) | |
1431 | { | |
1432 | struct sk_buff *skb; | |
1433 | struct tcp_sock *tp = tcp_sk(sk); | |
1434 | u32 seq = tp->copied_seq; | |
1435 | u32 offset; | |
1436 | int copied = 0; | |
1437 | ||
1438 | if (sk->sk_state == TCP_LISTEN) | |
1439 | return -ENOTCONN; | |
1440 | while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) { | |
1441 | if (offset < skb->len) { | |
374e7b59 OP |
1442 | int used; |
1443 | size_t len; | |
1da177e4 LT |
1444 | |
1445 | len = skb->len - offset; | |
1446 | /* Stop reading if we hit a patch of urgent data */ | |
1447 | if (tp->urg_data) { | |
1448 | u32 urg_offset = tp->urg_seq - seq; | |
1449 | if (urg_offset < len) | |
1450 | len = urg_offset; | |
1451 | if (!len) | |
1452 | break; | |
1453 | } | |
1454 | used = recv_actor(desc, skb, offset, len); | |
ddb61a57 JA |
1455 | if (used < 0) { |
1456 | if (!copied) | |
1457 | copied = used; | |
1458 | break; | |
1459 | } else if (used <= len) { | |
1da177e4 LT |
1460 | seq += used; |
1461 | copied += used; | |
1462 | offset += used; | |
1463 | } | |
293ad604 OP |
1464 | /* |
1465 | * If recv_actor drops the lock (e.g. TCP splice | |
1466 | * receive) the skb pointer might be invalid when | |
1467 | * getting here: tcp_collapse might have deleted it | |
1468 | * while aggregating skbs from the socket queue. | |
1469 | */ | |
1470 | skb = tcp_recv_skb(sk, seq-1, &offset); | |
1471 | if (!skb || (offset+1 != skb->len)) | |
1da177e4 LT |
1472 | break; |
1473 | } | |
aa8223c7 | 1474 | if (tcp_hdr(skb)->fin) { |
dc6b9b78 | 1475 | sk_eat_skb(sk, skb, false); |
1da177e4 LT |
1476 | ++seq; |
1477 | break; | |
1478 | } | |
dc6b9b78 | 1479 | sk_eat_skb(sk, skb, false); |
1da177e4 LT |
1480 | if (!desc->count) |
1481 | break; | |
baff42ab | 1482 | tp->copied_seq = seq; |
1da177e4 LT |
1483 | } |
1484 | tp->copied_seq = seq; | |
1485 | ||
1486 | tcp_rcv_space_adjust(sk); | |
1487 | ||
1488 | /* Clean up data we have read: This will do ACK frames. */ | |
ddb61a57 | 1489 | if (copied > 0) |
0e4b4992 | 1490 | tcp_cleanup_rbuf(sk, copied); |
1da177e4 LT |
1491 | return copied; |
1492 | } | |
4bc2f18b | 1493 | EXPORT_SYMBOL(tcp_read_sock); |
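/*
 * Editor's sketch (not part of the original file): a minimal recv_actor
 * for tcp_read_sock().  The function names and the byte-discarding
 * behaviour are hypothetical; only the read_descriptor_t and
 * sk_read_actor_t calling convention used above is assumed.
 */
#if 0
static int example_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
			      unsigned int offset, size_t len)
{
	size_t used = min_t(size_t, len, desc->count);

	/* A real actor would copy skb data starting at 'offset' here. */
	desc->count -= used;
	desc->written += used;
	return used;		/* bytes consumed; < 0 aborts the loop */
}

static int example_read(struct sock *sk, size_t bytes)
{
	read_descriptor_t desc = { .count = bytes };
	int copied;

	lock_sock(sk);		/* tcp_read_sock() expects the socket locked */
	copied = tcp_read_sock(sk, &desc, example_recv_actor);
	release_sock(sk);
	return copied;		/* bytes read, or a negative errno */
}
#endif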
1da177e4 LT |
1494 | |
1495 | /* | |
1496 | * This routine copies from a sock struct into the user buffer. | |
1497 | * | |
1498 | * Technical note: in 2.3 we work on a _locked_ socket, so that | |
1499 | * tricks with *seq access order and skb->users are not required. | |
1500 | * The code can probably be improved further. | |
1501 | */ | |
1502 | ||
1503 | int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, | |
1504 | size_t len, int nonblock, int flags, int *addr_len) | |
1505 | { | |
1506 | struct tcp_sock *tp = tcp_sk(sk); | |
1507 | int copied = 0; | |
1508 | u32 peek_seq; | |
1509 | u32 *seq; | |
1510 | unsigned long used; | |
1511 | int err; | |
1512 | int target; /* Read at least this many bytes */ | |
1513 | long timeo; | |
1514 | struct task_struct *user_recv = NULL; | |
dc6b9b78 | 1515 | bool copied_early = false; |
2b1244a4 | 1516 | struct sk_buff *skb; |
77527313 | 1517 | u32 urg_hole = 0; |
1da177e4 LT |
1518 | |
1519 | lock_sock(sk); | |
1520 | ||
1da177e4 LT |
1521 | err = -ENOTCONN; |
1522 | if (sk->sk_state == TCP_LISTEN) | |
1523 | goto out; | |
1524 | ||
1525 | timeo = sock_rcvtimeo(sk, nonblock); | |
1526 | ||
1527 | /* Urgent data needs to be handled specially. */ | |
1528 | if (flags & MSG_OOB) | |
1529 | goto recv_urg; | |
1530 | ||
c0e88ff0 PE |
1531 | if (unlikely(tp->repair)) { |
1532 | err = -EPERM; | |
1533 | if (!(flags & MSG_PEEK)) | |
1534 | goto out; | |
1535 | ||
1536 | if (tp->repair_queue == TCP_SEND_QUEUE) | |
1537 | goto recv_sndq; | |
1538 | ||
1539 | err = -EINVAL; | |
1540 | if (tp->repair_queue == TCP_NO_QUEUE) | |
1541 | goto out; | |
1542 | ||
1543 | /* 'common' recv queue MSG_PEEK-ing */ | |
1544 | } | |
1545 | ||
1da177e4 LT |
1546 | seq = &tp->copied_seq; |
1547 | if (flags & MSG_PEEK) { | |
1548 | peek_seq = tp->copied_seq; | |
1549 | seq = &peek_seq; | |
1550 | } | |
1551 | ||
1552 | target = sock_rcvlowat(sk, flags & MSG_WAITALL, len); | |
1553 | ||
1a2449a8 CL |
1554 | #ifdef CONFIG_NET_DMA |
1555 | tp->ucopy.dma_chan = NULL; | |
1556 | preempt_disable(); | |
2b1244a4 | 1557 | skb = skb_peek_tail(&sk->sk_receive_queue); |
e00c5d8b AM |
1558 | { |
1559 | int available = 0; | |
1560 | ||
1561 | if (skb) | |
1562 | available = TCP_SKB_CB(skb)->seq + skb->len - (*seq); | |
1563 | if ((available < target) && | |
1564 | (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) && | |
1565 | !sysctl_tcp_low_latency && | |
a2bd1140 | 1566 | net_dma_find_channel()) { |
e00c5d8b AM |
1567 | preempt_enable_no_resched(); |
1568 | tp->ucopy.pinned_list = | |
1569 | dma_pin_iovec_pages(msg->msg_iov, len); | |
1570 | } else { | |
1571 | preempt_enable_no_resched(); | |
1572 | } | |
1573 | } | |
1a2449a8 CL |
1574 | #endif |
1575 | ||
1da177e4 | 1576 | do { |
1da177e4 LT |
1577 | u32 offset; |
1578 | ||
1579 | /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */ | |
1580 | if (tp->urg_data && tp->urg_seq == *seq) { | |
1581 | if (copied) | |
1582 | break; | |
1583 | if (signal_pending(current)) { | |
1584 | copied = timeo ? sock_intr_errno(timeo) : -EAGAIN; | |
1585 | break; | |
1586 | } | |
1587 | } | |
1588 | ||
1589 | /* Next get a buffer. */ | |
1590 | ||
91521944 | 1591 | skb_queue_walk(&sk->sk_receive_queue, skb) { |
1da177e4 LT |
1592 | /* Now that we have two receive queues this |
1593 | * shouldn't happen. | |
1594 | */ | |
d792c100 | 1595 | if (WARN(before(*seq, TCP_SKB_CB(skb)->seq), |
2af6fd8b JP |
1596 | "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n", |
1597 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, | |
1598 | flags)) | |
1da177e4 | 1599 | break; |
d792c100 | 1600 | |
1da177e4 | 1601 | offset = *seq - TCP_SKB_CB(skb)->seq; |
aa8223c7 | 1602 | if (tcp_hdr(skb)->syn) |
1da177e4 LT |
1603 | offset--; |
1604 | if (offset < skb->len) | |
1605 | goto found_ok_skb; | |
aa8223c7 | 1606 | if (tcp_hdr(skb)->fin) |
1da177e4 | 1607 | goto found_fin_ok; |
2af6fd8b JP |
1608 | WARN(!(flags & MSG_PEEK), |
1609 | "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n", | |
1610 | *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags); | |
91521944 | 1611 | } |
1da177e4 LT |
1612 | |
1613 | /* Well, if we have backlog, try to process it now. */ | |
1614 | ||
1615 | if (copied >= target && !sk->sk_backlog.tail) | |
1616 | break; | |
1617 | ||
1618 | if (copied) { | |
1619 | if (sk->sk_err || | |
1620 | sk->sk_state == TCP_CLOSE || | |
1621 | (sk->sk_shutdown & RCV_SHUTDOWN) || | |
1622 | !timeo || | |
518a09ef | 1623 | signal_pending(current)) |
1da177e4 LT |
1624 | break; |
1625 | } else { | |
1626 | if (sock_flag(sk, SOCK_DONE)) | |
1627 | break; | |
1628 | ||
1629 | if (sk->sk_err) { | |
1630 | copied = sock_error(sk); | |
1631 | break; | |
1632 | } | |
1633 | ||
1634 | if (sk->sk_shutdown & RCV_SHUTDOWN) | |
1635 | break; | |
1636 | ||
1637 | if (sk->sk_state == TCP_CLOSE) { | |
1638 | if (!sock_flag(sk, SOCK_DONE)) { | |
1639 | /* This occurs when the user tries to read | |
1640 | * from a never-connected socket. | |
1641 | */ | |
1642 | copied = -ENOTCONN; | |
1643 | break; | |
1644 | } | |
1645 | break; | |
1646 | } | |
1647 | ||
1648 | if (!timeo) { | |
1649 | copied = -EAGAIN; | |
1650 | break; | |
1651 | } | |
1652 | ||
1653 | if (signal_pending(current)) { | |
1654 | copied = sock_intr_errno(timeo); | |
1655 | break; | |
1656 | } | |
1657 | } | |
1658 | ||
0e4b4992 | 1659 | tcp_cleanup_rbuf(sk, copied); |
1da177e4 | 1660 | |
7df55125 | 1661 | if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) { |
1da177e4 LT |
1662 | /* Install new reader */ |
1663 | if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) { | |
1664 | user_recv = current; | |
1665 | tp->ucopy.task = user_recv; | |
1666 | tp->ucopy.iov = msg->msg_iov; | |
1667 | } | |
1668 | ||
1669 | tp->ucopy.len = len; | |
1670 | ||
547b792c IJ |
1671 | WARN_ON(tp->copied_seq != tp->rcv_nxt && |
1672 | !(flags & (MSG_PEEK | MSG_TRUNC))); | |
1da177e4 LT |
1673 | |
1674 | /* Ugly... If the prequeue is not empty, we have to | |
1675 | * process it before releasing the socket; otherwise | |
1676 | * the order will be broken on the second iteration. | |
1677 | * A more elegant solution is required!!! | |
1678 | * | |
1679 | * Look: we have the following (pseudo)queues: | |
1680 | * | |
1681 | * 1. packets in flight | |
1682 | * 2. backlog | |
1683 | * 3. prequeue | |
1684 | * 4. receive_queue | |
1685 | * | |
1686 | * Each queue can be processed only if the next ones | |
1687 | * are empty. At this point we have empty receive_queue. | |
1688 | * But prequeue _can_ be not empty after 2nd iteration, | |
1689 | * when we jumped to start of loop because backlog | |
1690 | * processing added something to receive_queue. | |
1691 | * We cannot release_sock(), because backlog contains | |
1692 | * packets arrived _after_ prequeued ones. | |
1693 | * | |
1694 | * In short, the algorithm is clear --- process all | |
1695 | * the queues in order. We could do it more directly, | |
1696 | * requeueing packets from the backlog to the prequeue | |
1697 | * if it is not empty. That would be more elegant, but | |
1698 | * it eats cycles, unfortunately. | |
1699 | */ | |
b03efcfb | 1700 | if (!skb_queue_empty(&tp->ucopy.prequeue)) |
1da177e4 LT |
1701 | goto do_prequeue; |
1702 | ||
1703 | /* __ Set realtime policy in scheduler __ */ | |
1704 | } | |
1705 | ||
73852e81 SM |
1706 | #ifdef CONFIG_NET_DMA |
1707 | if (tp->ucopy.dma_chan) | |
1708 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | |
1709 | #endif | |
1da177e4 LT |
1710 | if (copied >= target) { |
1711 | /* Do not sleep, just process backlog. */ | |
1712 | release_sock(sk); | |
1713 | lock_sock(sk); | |
1714 | } else | |
1715 | sk_wait_data(sk, &timeo); | |
1716 | ||
1a2449a8 | 1717 | #ifdef CONFIG_NET_DMA |
73852e81 | 1718 | tcp_service_net_dma(sk, false); /* Don't block */ |
1a2449a8 CL |
1719 | tp->ucopy.wakeup = 0; |
1720 | #endif | |
1721 | ||
1da177e4 LT |
1722 | if (user_recv) { |
1723 | int chunk; | |
1724 | ||
1725 | /* __ Restore normal policy in scheduler __ */ | |
1726 | ||
1727 | if ((chunk = len - tp->ucopy.len) != 0) { | |
ed88098e | 1728 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk); |
1da177e4 LT |
1729 | len -= chunk; |
1730 | copied += chunk; | |
1731 | } | |
1732 | ||
1733 | if (tp->rcv_nxt == tp->copied_seq && | |
b03efcfb | 1734 | !skb_queue_empty(&tp->ucopy.prequeue)) { |
1da177e4 LT |
1735 | do_prequeue: |
1736 | tcp_prequeue_process(sk); | |
1737 | ||
1738 | if ((chunk = len - tp->ucopy.len) != 0) { | |
ed88098e | 1739 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); |
1da177e4 LT |
1740 | len -= chunk; |
1741 | copied += chunk; | |
1742 | } | |
1743 | } | |
1744 | } | |
77527313 IJ |
1745 | if ((flags & MSG_PEEK) && |
1746 | (peek_seq - copied - urg_hole != tp->copied_seq)) { | |
e87cc472 JP |
1747 | net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n", |
1748 | current->comm, | |
1749 | task_pid_nr(current)); | |
1da177e4 LT |
1750 | peek_seq = tp->copied_seq; |
1751 | } | |
1752 | continue; | |
1753 | ||
1754 | found_ok_skb: | |
1755 | /* Ok so how much can we use? */ | |
1756 | used = skb->len - offset; | |
1757 | if (len < used) | |
1758 | used = len; | |
1759 | ||
1760 | /* Do we have urgent data here? */ | |
1761 | if (tp->urg_data) { | |
1762 | u32 urg_offset = tp->urg_seq - *seq; | |
1763 | if (urg_offset < used) { | |
1764 | if (!urg_offset) { | |
1765 | if (!sock_flag(sk, SOCK_URGINLINE)) { | |
1766 | ++*seq; | |
77527313 | 1767 | urg_hole++; |
1da177e4 LT |
1768 | offset++; |
1769 | used--; | |
1770 | if (!used) | |
1771 | goto skip_copy; | |
1772 | } | |
1773 | } else | |
1774 | used = urg_offset; | |
1775 | } | |
1776 | } | |
1777 | ||
1778 | if (!(flags & MSG_TRUNC)) { | |
1a2449a8 CL |
1779 | #ifdef CONFIG_NET_DMA |
1780 | if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list) | |
a2bd1140 | 1781 | tp->ucopy.dma_chan = net_dma_find_channel(); |
1a2449a8 CL |
1782 | |
1783 | if (tp->ucopy.dma_chan) { | |
1784 | tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec( | |
1785 | tp->ucopy.dma_chan, skb, offset, | |
1786 | msg->msg_iov, used, | |
1787 | tp->ucopy.pinned_list); | |
1788 | ||
1789 | if (tp->ucopy.dma_cookie < 0) { | |
1790 | ||
afd46503 JP |
1791 | pr_alert("%s: dma_cookie < 0\n", |
1792 | __func__); | |
1a2449a8 CL |
1793 | |
1794 | /* Exception. Bailout! */ | |
1795 | if (!copied) | |
1796 | copied = -EFAULT; | |
1797 | break; | |
1798 | } | |
73852e81 SM |
1799 | |
1800 | dma_async_memcpy_issue_pending(tp->ucopy.dma_chan); | |
1801 | ||
1a2449a8 | 1802 | if ((offset + used) == skb->len) |
dc6b9b78 | 1803 | copied_early = true; |
1a2449a8 CL |
1804 | |
1805 | } else | |
1806 | #endif | |
1807 | { | |
1808 | err = skb_copy_datagram_iovec(skb, offset, | |
1809 | msg->msg_iov, used); | |
1810 | if (err) { | |
1811 | /* Exception. Bailout! */ | |
1812 | if (!copied) | |
1813 | copied = -EFAULT; | |
1814 | break; | |
1815 | } | |
1da177e4 LT |
1816 | } |
1817 | } | |
1818 | ||
1819 | *seq += used; | |
1820 | copied += used; | |
1821 | len -= used; | |
1822 | ||
1823 | tcp_rcv_space_adjust(sk); | |
1824 | ||
1825 | skip_copy: | |
1826 | if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) { | |
1827 | tp->urg_data = 0; | |
9e412ba7 | 1828 | tcp_fast_path_check(sk); |
1da177e4 LT |
1829 | } |
1830 | if (used + offset < skb->len) | |
1831 | continue; | |
1832 | ||
aa8223c7 | 1833 | if (tcp_hdr(skb)->fin) |
1da177e4 | 1834 | goto found_fin_ok; |
1a2449a8 CL |
1835 | if (!(flags & MSG_PEEK)) { |
1836 | sk_eat_skb(sk, skb, copied_early); | |
dc6b9b78 | 1837 | copied_early = false; |
1a2449a8 | 1838 | } |
1da177e4 LT |
1839 | continue; |
1840 | ||
1841 | found_fin_ok: | |
1842 | /* Process the FIN. */ | |
1843 | ++*seq; | |
1a2449a8 CL |
1844 | if (!(flags & MSG_PEEK)) { |
1845 | sk_eat_skb(sk, skb, copied_early); | |
dc6b9b78 | 1846 | copied_early = false; |
1a2449a8 | 1847 | } |
1da177e4 LT |
1848 | break; |
1849 | } while (len > 0); | |
1850 | ||
1851 | if (user_recv) { | |
b03efcfb | 1852 | if (!skb_queue_empty(&tp->ucopy.prequeue)) { |
1da177e4 LT |
1853 | int chunk; |
1854 | ||
1855 | tp->ucopy.len = copied > 0 ? len : 0; | |
1856 | ||
1857 | tcp_prequeue_process(sk); | |
1858 | ||
1859 | if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) { | |
ed88098e | 1860 | NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk); |
1da177e4 LT |
1861 | len -= chunk; |
1862 | copied += chunk; | |
1863 | } | |
1864 | } | |
1865 | ||
1866 | tp->ucopy.task = NULL; | |
1867 | tp->ucopy.len = 0; | |
1868 | } | |
1869 | ||
1a2449a8 | 1870 | #ifdef CONFIG_NET_DMA |
73852e81 SM |
1871 | tcp_service_net_dma(sk, true); /* Wait for queue to drain */ |
1872 | tp->ucopy.dma_chan = NULL; | |
1a2449a8 | 1873 | |
1a2449a8 CL |
1874 | if (tp->ucopy.pinned_list) { |
1875 | dma_unpin_iovec_pages(tp->ucopy.pinned_list); | |
1876 | tp->ucopy.pinned_list = NULL; | |
1877 | } | |
1878 | #endif | |
1879 | ||
1da177e4 LT |
1880 | /* According to UNIX98, msg_name/msg_namelen are ignored |
1881 | * on a connected socket. I was just happy when I found this 8) --ANK | |
1882 | */ | |
1883 | ||
1884 | /* Clean up data we have read: This will do ACK frames. */ | |
0e4b4992 | 1885 | tcp_cleanup_rbuf(sk, copied); |
1da177e4 | 1886 | |
1da177e4 LT |
1887 | release_sock(sk); |
1888 | return copied; | |
1889 | ||
1890 | out: | |
1da177e4 LT |
1891 | release_sock(sk); |
1892 | return err; | |
1893 | ||
1894 | recv_urg: | |
377f0a08 | 1895 | err = tcp_recv_urg(sk, msg, len, flags); |
1da177e4 | 1896 | goto out; |
c0e88ff0 PE |
1897 | |
1898 | recv_sndq: | |
1899 | err = tcp_peek_sndq(sk, msg, len); | |
1900 | goto out; | |
1da177e4 | 1901 | } |
4bc2f18b | 1902 | EXPORT_SYMBOL(tcp_recvmsg); |
1da177e4 | 1903 | |
490d5046 IJ |
1904 | void tcp_set_state(struct sock *sk, int state) |
1905 | { | |
1906 | int oldstate = sk->sk_state; | |
1907 | ||
1908 | switch (state) { | |
1909 | case TCP_ESTABLISHED: | |
1910 | if (oldstate != TCP_ESTABLISHED) | |
81cc8a75 | 1911 | TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
490d5046 IJ |
1912 | break; |
1913 | ||
1914 | case TCP_CLOSE: | |
1915 | if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED) | |
81cc8a75 | 1916 | TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS); |
490d5046 IJ |
1917 | |
1918 | sk->sk_prot->unhash(sk); | |
1919 | if (inet_csk(sk)->icsk_bind_hash && | |
1920 | !(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) | |
ab1e0a13 | 1921 | inet_put_port(sk); |
490d5046 IJ |
1922 | /* fall through */ |
1923 | default: | |
5a5f3a8d | 1924 | if (oldstate == TCP_ESTABLISHED) |
74688e48 | 1925 | TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB); |
490d5046 IJ |
1926 | } |
1927 | ||
1928 | /* Change state AFTER socket is unhashed to avoid closed | |
1929 | * socket sitting in hash tables. | |
1930 | */ | |
1931 | sk->sk_state = state; | |
1932 | ||
1933 | #ifdef STATE_TRACE | |
5a5f3a8d | 1934 | SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]); |
490d5046 IJ |
1935 | #endif |
1936 | } | |
1937 | EXPORT_SYMBOL_GPL(tcp_set_state); | |
1938 | ||
1da177e4 LT |
1939 | /* |
1940 | * State processing on a close. This implements the state shift for | |
1941 | * sending our FIN frame. Note that we only send a FIN for some | |
1942 | * states. A shutdown() may have already sent the FIN, or we may be | |
1943 | * closed. | |
1944 | */ | |
1945 | ||
9b5b5cff | 1946 | static const unsigned char new_state[16] = { |
1da177e4 LT |
1947 | /* current state: new state: action: */ |
1948 | /* (Invalid) */ TCP_CLOSE, | |
1949 | /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1950 | /* TCP_SYN_SENT */ TCP_CLOSE, | |
1951 | /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1952 | /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1, | |
1953 | /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2, | |
1954 | /* TCP_TIME_WAIT */ TCP_CLOSE, | |
1955 | /* TCP_CLOSE */ TCP_CLOSE, | |
1956 | /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN, | |
1957 | /* TCP_LAST_ACK */ TCP_LAST_ACK, | |
1958 | /* TCP_LISTEN */ TCP_CLOSE, | |
1959 | /* TCP_CLOSING */ TCP_CLOSING, | |
1960 | }; | |
1961 | ||
1962 | static int tcp_close_state(struct sock *sk) | |
1963 | { | |
1964 | int next = (int)new_state[sk->sk_state]; | |
1965 | int ns = next & TCP_STATE_MASK; | |
1966 | ||
1967 | tcp_set_state(sk, ns); | |
1968 | ||
1969 | return next & TCP_ACTION_FIN; | |
1970 | } | |
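/*
 * Editor's worked example: for a socket in TCP_ESTABLISHED,
 * new_state[TCP_ESTABLISHED] is TCP_FIN_WAIT1 | TCP_ACTION_FIN, so
 * tcp_close_state() moves the socket to FIN_WAIT1 and returns non-zero,
 * telling the caller to transmit a FIN.  A socket already in
 * TCP_FIN_WAIT1 maps back to TCP_FIN_WAIT1 with no action bit set:
 * the FIN was sent earlier and must not be queued again.
 */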
1971 | ||
1972 | /* | |
1973 | * Shutdown the sending side of a connection. Much like close except | |
1f29b058 | 1974 | * that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD). |
1da177e4 LT |
1975 | */ |
1976 | ||
1977 | void tcp_shutdown(struct sock *sk, int how) | |
1978 | { | |
1979 | /* We need to grab some memory, and put together a FIN, | |
1980 | * and then put it into the queue to be sent. | |
1981 | * Tim MacKenzie([email protected]) 4 Dec '92. | |
1982 | */ | |
1983 | if (!(how & SEND_SHUTDOWN)) | |
1984 | return; | |
1985 | ||
1986 | /* If we've already sent a FIN, or it's a closed state, skip this. */ | |
1987 | if ((1 << sk->sk_state) & | |
1988 | (TCPF_ESTABLISHED | TCPF_SYN_SENT | | |
1989 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) { | |
1990 | /* Clear out any half completed packets. FIN if needed. */ | |
1991 | if (tcp_close_state(sk)) | |
1992 | tcp_send_fin(sk); | |
1993 | } | |
1994 | } | |
4bc2f18b | 1995 | EXPORT_SYMBOL(tcp_shutdown); |
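/*
 * Editor's userspace sketch (fd and the consume() helper are
 * hypothetical): tcp_shutdown() backs shutdown(2) with SHUT_WR.  This
 * gives a half-close: our FIN goes out, but the peer may keep sending
 * until it closes its own side.
 */
#if 0
	char buf[4096];
	ssize_t n;

	shutdown(fd, SHUT_WR);			/* queue our FIN */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		consume(buf, n);		/* peer can still send */
	close(fd);
#endif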
1da177e4 | 1996 | |
efcdbf24 AS |
1997 | bool tcp_check_oom(struct sock *sk, int shift) |
1998 | { | |
1999 | bool too_many_orphans, out_of_socket_memory; | |
2000 | ||
2001 | too_many_orphans = tcp_too_many_orphans(sk, shift); | |
2002 | out_of_socket_memory = tcp_out_of_memory(sk); | |
2003 | ||
e87cc472 JP |
2004 | if (too_many_orphans) |
2005 | net_info_ratelimited("too many orphaned sockets\n"); | |
2006 | if (out_of_socket_memory) | |
2007 | net_info_ratelimited("out of memory -- consider tuning tcp_mem\n"); | |
efcdbf24 AS |
2008 | return too_many_orphans || out_of_socket_memory; |
2009 | } | |
2010 | ||
1da177e4 LT |
2011 | void tcp_close(struct sock *sk, long timeout) |
2012 | { | |
2013 | struct sk_buff *skb; | |
2014 | int data_was_unread = 0; | |
75c2d907 | 2015 | int state; |
1da177e4 LT |
2016 | |
2017 | lock_sock(sk); | |
2018 | sk->sk_shutdown = SHUTDOWN_MASK; | |
2019 | ||
2020 | if (sk->sk_state == TCP_LISTEN) { | |
2021 | tcp_set_state(sk, TCP_CLOSE); | |
2022 | ||
2023 | /* Special case. */ | |
0a5578cf | 2024 | inet_csk_listen_stop(sk); |
1da177e4 LT |
2025 | |
2026 | goto adjudge_to_death; | |
2027 | } | |
2028 | ||
2029 | /* We need to flush the recv. buffs. We do this only on the | |
2030 | * descriptor close, not protocol-sourced closes, because the | |
2031 | * reader process may not have drained the data yet! | |
2032 | */ | |
2033 | while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) { | |
2034 | u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq - | |
aa8223c7 | 2035 | tcp_hdr(skb)->fin; |
1da177e4 LT |
2036 | data_was_unread += len; |
2037 | __kfree_skb(skb); | |
2038 | } | |
2039 | ||
3ab224be | 2040 | sk_mem_reclaim(sk); |
1da177e4 | 2041 | |
565b7b2d KK |
2042 | /* If the socket has already been reset (e.g. in tcp_reset()) - kill it. */ | |
2043 | if (sk->sk_state == TCP_CLOSE) | |
2044 | goto adjudge_to_death; | |
2045 | ||
65bb723c GR |
2046 | /* As outlined in RFC 2525, section 2.17, we send a RST here because |
2047 | * data was lost. To witness the awful effects of the old behavior of | |
2048 | * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk | |
2049 | * GET in an FTP client, suspend the process, wait for the client to | |
2050 | * advertise a zero window, then kill -9 the FTP client, wheee... | |
2051 | * Note: timeout is always zero in such a case. | |
1da177e4 | 2052 | */ |
ee995283 PE |
2053 | if (unlikely(tcp_sk(sk)->repair)) { |
2054 | sk->sk_prot->disconnect(sk, 0); | |
2055 | } else if (data_was_unread) { | |
1da177e4 | 2056 | /* Unread data was tossed, zap the connection. */ |
6f67c817 | 2057 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE); |
1da177e4 | 2058 | tcp_set_state(sk, TCP_CLOSE); |
aa133076 | 2059 | tcp_send_active_reset(sk, sk->sk_allocation); |
1da177e4 LT |
2060 | } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) { |
2061 | /* Check zero linger _after_ checking for unread data. */ | |
2062 | sk->sk_prot->disconnect(sk, 0); | |
6f67c817 | 2063 | NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA); |
1da177e4 LT |
2064 | } else if (tcp_close_state(sk)) { |
2065 | /* We FIN if the application ate all the data before | |
2066 | * zapping the connection. | |
2067 | */ | |
2068 | ||
2069 | /* RED-PEN. Formally speaking, we have broken TCP state | |
2070 | * machine. State transitions: | |
2071 | * | |
2072 | * TCP_ESTABLISHED -> TCP_FIN_WAIT1 | |
2073 | * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible) | |
2074 | * TCP_CLOSE_WAIT -> TCP_LAST_ACK | |
2075 | * | |
2076 | * are legal only when FIN has been sent (i.e. in window), | |
2077 | * rather than queued out of window. Purists may complain. | |
2078 | * | |
2079 | * F.e. "RFC state" is ESTABLISHED, | |
2080 | * if Linux state is FIN-WAIT-1, but FIN is still not sent. | |
2081 | * | |
2082 | * The visible deviations are that we sometimes enter the | |
2083 | * time-wait state when it is not really required | |
2084 | * (harmless), and do not send active resets when the specs | |
2085 | * require them (TCP_ESTABLISHED and TCP_CLOSE_WAIT, which | |
2086 | * look like CLOSING or LAST_ACK to Linux). | |
2087 | * Probably I missed some more small holes. | |
2088 | * --ANK | |
2089 | */ | |
2090 | tcp_send_fin(sk); | |
2091 | } | |
2092 | ||
2093 | sk_stream_wait_close(sk, timeout); | |
2094 | ||
2095 | adjudge_to_death: | |
75c2d907 HX |
2096 | state = sk->sk_state; |
2097 | sock_hold(sk); | |
2098 | sock_orphan(sk); | |
75c2d907 | 2099 | |
1da177e4 LT |
2100 | /* It is the last release_sock in its life. It will remove backlog. */ |
2101 | release_sock(sk); | |
2102 | ||
2103 | ||
2104 | /* Now the socket is owned by the kernel and we acquire the BH lock | |
2105 | to finish the close. No need to check for user refs. | |
2106 | */ | |
2107 | local_bh_disable(); | |
2108 | bh_lock_sock(sk); | |
547b792c | 2109 | WARN_ON(sock_owned_by_user(sk)); |
1da177e4 | 2110 | |
eb4dea58 HX |
2111 | percpu_counter_inc(sk->sk_prot->orphan_count); |
2112 | ||
75c2d907 HX |
2113 | /* Have we already been destroyed by a softirq or backlog? */ |
2114 | if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE) | |
2115 | goto out; | |
1da177e4 LT |
2116 | |
2117 | /* This is a (useful) BSD violation of the RFC. There is a | |
2118 | * problem with TCP as specified, in that the other end could | |
2119 | * keep a socket open forever with no application left at this end. | |
2120 | * We use a 3 minute timeout (about the same as BSD) then kill | |
2121 | * our end. If they send after that then tough - BUT: the timeout is | |
2122 | * long enough that we won't repeat the old "4*rto = almost no | |
2123 | * time - whoops reset" mistake. | |
2124 | * | |
2125 | * Nope, it was not a mistake. It is really the desired behaviour, | |
2126 | * e.g. on HTTP servers, where such sockets are useless but | |
2127 | * consume significant resources. Let's do it with a special | |
2128 | * linger2 option. --ANK | |
2129 | */ | |
2130 | ||
2131 | if (sk->sk_state == TCP_FIN_WAIT2) { | |
2132 | struct tcp_sock *tp = tcp_sk(sk); | |
2133 | if (tp->linger2 < 0) { | |
2134 | tcp_set_state(sk, TCP_CLOSE); | |
2135 | tcp_send_active_reset(sk, GFP_ATOMIC); | |
de0744af PE |
2136 | NET_INC_STATS_BH(sock_net(sk), |
2137 | LINUX_MIB_TCPABORTONLINGER); | |
1da177e4 | 2138 | } else { |
463c84b9 | 2139 | const int tmo = tcp_fin_time(sk); |
1da177e4 LT |
2140 | |
2141 | if (tmo > TCP_TIMEWAIT_LEN) { | |
52499afe DM |
2142 | inet_csk_reset_keepalive_timer(sk, |
2143 | tmo - TCP_TIMEWAIT_LEN); | |
1da177e4 | 2144 | } else { |
1da177e4 LT |
2145 | tcp_time_wait(sk, TCP_FIN_WAIT2, tmo); |
2146 | goto out; | |
2147 | } | |
2148 | } | |
2149 | } | |
2150 | if (sk->sk_state != TCP_CLOSE) { | |
3ab224be | 2151 | sk_mem_reclaim(sk); |
efcdbf24 | 2152 | if (tcp_check_oom(sk, 0)) { |
1da177e4 LT |
2153 | tcp_set_state(sk, TCP_CLOSE); |
2154 | tcp_send_active_reset(sk, GFP_ATOMIC); | |
de0744af PE |
2155 | NET_INC_STATS_BH(sock_net(sk), |
2156 | LINUX_MIB_TCPABORTONMEMORY); | |
1da177e4 LT |
2157 | } |
2158 | } | |
1da177e4 LT |
2159 | |
2160 | if (sk->sk_state == TCP_CLOSE) | |
0a5578cf | 2161 | inet_csk_destroy_sock(sk); |
1da177e4 LT |
2162 | /* Otherwise, socket is reprieved until protocol close. */ |
2163 | ||
2164 | out: | |
2165 | bh_unlock_sock(sk); | |
2166 | local_bh_enable(); | |
2167 | sock_put(sk); | |
2168 | } | |
4bc2f18b | 2169 | EXPORT_SYMBOL(tcp_close); |
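/*
 * Editor's userspace sketch: SO_LINGER with l_linger == 0 makes
 * tcp_close() take the zero-linger branch above, so close(2) aborts
 * the connection with a RST instead of a FIN (fd is hypothetical).
 */
#if 0
	struct linger lg = { .l_onoff = 1, .l_linger = 0 };

	setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
	close(fd);				/* sends RST, not FIN */
#endif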
1da177e4 LT |
2170 | |
2171 | /* These states need RST on ABORT according to RFC793 */ | |
2172 | ||
a2a385d6 | 2173 | static inline bool tcp_need_reset(int state) |
1da177e4 LT |
2174 | { |
2175 | return (1 << state) & | |
2176 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 | | |
2177 | TCPF_FIN_WAIT2 | TCPF_SYN_RECV); | |
2178 | } | |
2179 | ||
2180 | int tcp_disconnect(struct sock *sk, int flags) | |
2181 | { | |
2182 | struct inet_sock *inet = inet_sk(sk); | |
463c84b9 | 2183 | struct inet_connection_sock *icsk = inet_csk(sk); |
1da177e4 LT |
2184 | struct tcp_sock *tp = tcp_sk(sk); |
2185 | int err = 0; | |
2186 | int old_state = sk->sk_state; | |
2187 | ||
2188 | if (old_state != TCP_CLOSE) | |
2189 | tcp_set_state(sk, TCP_CLOSE); | |
2190 | ||
2191 | /* ABORT function of RFC793 */ | |
2192 | if (old_state == TCP_LISTEN) { | |
0a5578cf | 2193 | inet_csk_listen_stop(sk); |
ee995283 PE |
2194 | } else if (unlikely(tp->repair)) { |
2195 | sk->sk_err = ECONNABORTED; | |
1da177e4 LT |
2196 | } else if (tcp_need_reset(old_state) || |
2197 | (tp->snd_nxt != tp->write_seq && | |
2198 | (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) { | |
caa20d9a | 2199 | /* The last check adjusts for discrepancy of Linux wrt. RFC |
1da177e4 LT |
2200 | * states |
2201 | */ | |
2202 | tcp_send_active_reset(sk, gfp_any()); | |
2203 | sk->sk_err = ECONNRESET; | |
2204 | } else if (old_state == TCP_SYN_SENT) | |
2205 | sk->sk_err = ECONNRESET; | |
2206 | ||
2207 | tcp_clear_xmit_timers(sk); | |
2208 | __skb_queue_purge(&sk->sk_receive_queue); | |
fe067e8a | 2209 | tcp_write_queue_purge(sk); |
1da177e4 | 2210 | __skb_queue_purge(&tp->out_of_order_queue); |
1a2449a8 CL |
2211 | #ifdef CONFIG_NET_DMA |
2212 | __skb_queue_purge(&sk->sk_async_wait_queue); | |
2213 | #endif | |
1da177e4 | 2214 | |
c720c7e8 | 2215 | inet->inet_dport = 0; |
1da177e4 LT |
2216 | |
2217 | if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) | |
2218 | inet_reset_saddr(sk); | |
2219 | ||
2220 | sk->sk_shutdown = 0; | |
2221 | sock_reset_flag(sk, SOCK_DONE); | |
2222 | tp->srtt = 0; | |
2223 | if ((tp->write_seq += tp->max_window + 2) == 0) | |
2224 | tp->write_seq = 1; | |
463c84b9 | 2225 | icsk->icsk_backoff = 0; |
1da177e4 | 2226 | tp->snd_cwnd = 2; |
6687e988 | 2227 | icsk->icsk_probes_out = 0; |
1da177e4 | 2228 | tp->packets_out = 0; |
0b6a05c1 | 2229 | tp->snd_ssthresh = TCP_INFINITE_SSTHRESH; |
1da177e4 | 2230 | tp->snd_cwnd_cnt = 0; |
9772efb9 | 2231 | tp->bytes_acked = 0; |
1fdf475a | 2232 | tp->window_clamp = 0; |
6687e988 | 2233 | tcp_set_ca_state(sk, TCP_CA_Open); |
1da177e4 | 2234 | tcp_clear_retrans(tp); |
463c84b9 | 2235 | inet_csk_delack_init(sk); |
fe067e8a | 2236 | tcp_init_send_head(sk); |
b40b4f79 | 2237 | memset(&tp->rx_opt, 0, sizeof(tp->rx_opt)); |
1da177e4 LT |
2238 | __sk_dst_reset(sk); |
2239 | ||
c720c7e8 | 2240 | WARN_ON(inet->inet_num && !icsk->icsk_bind_hash); |
1da177e4 LT |
2241 | |
2242 | sk->sk_error_report(sk); | |
2243 | return err; | |
2244 | } | |
4bc2f18b | 2245 | EXPORT_SYMBOL(tcp_disconnect); |
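/*
 * Editor's userspace sketch: tcp_disconnect() is normally reached by
 * connecting an existing socket to AF_UNSPEC, which aborts the current
 * connection and returns the socket to an unconnected state
 * (fd is hypothetical).
 */
#if 0
	struct sockaddr sa = { .sa_family = AF_UNSPEC };

	connect(fd, &sa, sizeof(sa));
#endif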
1da177e4 | 2246 | |
a2a385d6 | 2247 | static inline bool tcp_can_repair_sock(const struct sock *sk) |
ee995283 PE |
2248 | { |
2249 | return capable(CAP_NET_ADMIN) && | |
2250 | ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED)); | |
2251 | } | |
2252 | ||
de248a75 PE |
2253 | static int tcp_repair_options_est(struct tcp_sock *tp, |
2254 | struct tcp_repair_opt __user *optbuf, unsigned int len) | |
b139ba4e | 2255 | { |
de248a75 | 2256 | struct tcp_repair_opt opt; |
b139ba4e | 2257 | |
de248a75 PE |
2258 | while (len >= sizeof(opt)) { |
2259 | if (copy_from_user(&opt, optbuf, sizeof(opt))) | |
b139ba4e PE |
2260 | return -EFAULT; |
2261 | ||
2262 | optbuf++; | |
de248a75 | 2263 | len -= sizeof(opt); |
b139ba4e | 2264 | |
de248a75 PE |
2265 | switch (opt.opt_code) { |
2266 | case TCPOPT_MSS: | |
2267 | tp->rx_opt.mss_clamp = opt.opt_val; | |
b139ba4e | 2268 | break; |
de248a75 PE |
2269 | case TCPOPT_WINDOW: |
2270 | if (opt.opt_val > 14) | |
b139ba4e PE |
2271 | return -EFBIG; |
2272 | ||
de248a75 | 2273 | tp->rx_opt.snd_wscale = opt.opt_val; |
b139ba4e | 2274 | break; |
b139ba4e | 2275 | case TCPOPT_SACK_PERM: |
de248a75 PE |
2276 | if (opt.opt_val != 0) |
2277 | return -EINVAL; | |
2278 | ||
b139ba4e PE |
2279 | tp->rx_opt.sack_ok |= TCP_SACK_SEEN; |
2280 | if (sysctl_tcp_fack) | |
2281 | tcp_enable_fack(tp); | |
2282 | break; | |
2283 | case TCPOPT_TIMESTAMP: | |
de248a75 PE |
2284 | if (opt.opt_val != 0) |
2285 | return -EINVAL; | |
2286 | ||
b139ba4e PE |
2287 | tp->rx_opt.tstamp_ok = 1; |
2288 | break; | |
2289 | } | |
2290 | } | |
2291 | ||
2292 | return 0; | |
2293 | } | |
2294 | ||
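/*
 * Editor's userspace sketch of the repair-mode flow implemented above
 * and in the TCP_REPAIR* socket options below (checkpoint/restore
 * style; requires CAP_NET_ADMIN and a socket in TCP_CLOSE, per
 * tcp_can_repair_sock()).  fd and saved_write_seq are hypothetical
 * values taken from an earlier checkpoint.
 */
#if 0
	int on = 1, q = TCP_SEND_QUEUE;
	unsigned int seq = saved_write_seq;

	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR, &on, sizeof(on));
	setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
	setsockopt(fd, IPPROTO_TCP, TCP_QUEUE_SEQ, &seq, sizeof(seq));
	/* ... restore negotiated options via TCP_REPAIR_OPTIONS, connect(),
	 * refill the queues, then clear TCP_REPAIR to resume normally. */
#endif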
1da177e4 LT |
2295 | /* |
2296 | * Socket option code for TCP. | |
2297 | */ | |
3fdadf7d | 2298 | static int do_tcp_setsockopt(struct sock *sk, int level, |
b7058842 | 2299 | int optname, char __user *optval, unsigned int optlen) |
1da177e4 LT |
2300 | { |
2301 | struct tcp_sock *tp = tcp_sk(sk); | |
463c84b9 | 2302 | struct inet_connection_sock *icsk = inet_csk(sk); |
1da177e4 LT |
2303 | int val; |
2304 | int err = 0; | |
2305 | ||
e56fb50f WAS |
2306 | /* These are data/string values, all the others are ints */ |
2307 | switch (optname) { | |
2308 | case TCP_CONGESTION: { | |
5f8ef48d SH |
2309 | char name[TCP_CA_NAME_MAX]; |
2310 | ||
2311 | if (optlen < 1) | |
2312 | return -EINVAL; | |
2313 | ||
2314 | val = strncpy_from_user(name, optval, | |
4fdb78d3 | 2315 | min_t(long, TCP_CA_NAME_MAX-1, optlen)); |
5f8ef48d SH |
2316 | if (val < 0) |
2317 | return -EFAULT; | |
2318 | name[val] = 0; | |
2319 | ||
2320 | lock_sock(sk); | |
6687e988 | 2321 | err = tcp_set_congestion_control(sk, name); |
5f8ef48d SH |
2322 | release_sock(sk); |
2323 | return err; | |
2324 | } | |
e56fb50f WAS |
2325 | case TCP_COOKIE_TRANSACTIONS: { |
2326 | struct tcp_cookie_transactions ctd; | |
2327 | struct tcp_cookie_values *cvp = NULL; | |
2328 | ||
2329 | if (sizeof(ctd) > optlen) | |
2330 | return -EINVAL; | |
2331 | if (copy_from_user(&ctd, optval, sizeof(ctd))) | |
2332 | return -EFAULT; | |
2333 | ||
2334 | if (ctd.tcpct_used > sizeof(ctd.tcpct_value) || | |
2335 | ctd.tcpct_s_data_desired > TCP_MSS_DESIRED) | |
2336 | return -EINVAL; | |
2337 | ||
2338 | if (ctd.tcpct_cookie_desired == 0) { | |
2339 | /* default to global value */ | |
2340 | } else if ((0x1 & ctd.tcpct_cookie_desired) || | |
2341 | ctd.tcpct_cookie_desired > TCP_COOKIE_MAX || | |
2342 | ctd.tcpct_cookie_desired < TCP_COOKIE_MIN) { | |
2343 | return -EINVAL; | |
2344 | } | |
2345 | ||
2346 | if (TCP_COOKIE_OUT_NEVER & ctd.tcpct_flags) { | |
2347 | /* Supersedes all other values */ | |
2348 | lock_sock(sk); | |
2349 | if (tp->cookie_values != NULL) { | |
2350 | kref_put(&tp->cookie_values->kref, | |
2351 | tcp_cookie_values_release); | |
2352 | tp->cookie_values = NULL; | |
2353 | } | |
2354 | tp->rx_opt.cookie_in_always = 0; /* false */ | |
2355 | tp->rx_opt.cookie_out_never = 1; /* true */ | |
2356 | release_sock(sk); | |
2357 | return err; | |
2358 | } | |
2359 | ||
2360 | /* Allocate ancillary memory before locking. | |
2361 | */ | |
2362 | if (ctd.tcpct_used > 0 || | |
2363 | (tp->cookie_values == NULL && | |
2364 | (sysctl_tcp_cookie_size > 0 || | |
2365 | ctd.tcpct_cookie_desired > 0 || | |
2366 | ctd.tcpct_s_data_desired > 0))) { | |
2367 | cvp = kzalloc(sizeof(*cvp) + ctd.tcpct_used, | |
2368 | GFP_KERNEL); | |
2369 | if (cvp == NULL) | |
2370 | return -ENOMEM; | |
a3bdb549 DP |
2371 | |
2372 | kref_init(&cvp->kref); | |
e56fb50f WAS |
2373 | } |
2374 | lock_sock(sk); | |
2375 | tp->rx_opt.cookie_in_always = | |
2376 | (TCP_COOKIE_IN_ALWAYS & ctd.tcpct_flags); | |
2377 | tp->rx_opt.cookie_out_never = 0; /* false */ | |
2378 | ||
2379 | if (tp->cookie_values != NULL) { | |
2380 | if (cvp != NULL) { | |
2381 | /* Changed values are recorded by a changed | |
2382 | * pointer, ensuring the cookie will differ, | |
2383 | * without separately hashing each value later. | |
2384 | */ | |
2385 | kref_put(&tp->cookie_values->kref, | |
2386 | tcp_cookie_values_release); | |
e56fb50f WAS |
2387 | } else { |
2388 | cvp = tp->cookie_values; | |
2389 | } | |
2390 | } | |
a3bdb549 | 2391 | |
e56fb50f WAS |
2392 | if (cvp != NULL) { |
2393 | cvp->cookie_desired = ctd.tcpct_cookie_desired; | |
2394 | ||
2395 | if (ctd.tcpct_used > 0) { | |
2396 | memcpy(cvp->s_data_payload, ctd.tcpct_value, | |
2397 | ctd.tcpct_used); | |
2398 | cvp->s_data_desired = ctd.tcpct_used; | |
2399 | cvp->s_data_constant = 1; /* true */ | |
2400 | } else { | |
2401 | /* No constant payload data. */ | |
2402 | cvp->s_data_desired = ctd.tcpct_s_data_desired; | |
2403 | cvp->s_data_constant = 0; /* false */ | |
2404 | } | |
a3bdb549 DP |
2405 | |
2406 | tp->cookie_values = cvp; | |
e56fb50f WAS |
2407 | } |
2408 | release_sock(sk); | |
2409 | return err; | |
2410 | } | |
2411 | default: | |
2412 | /* fallthru */ | |
2413 | break; | |
ccbd6a5a | 2414 | } |
5f8ef48d | 2415 | |
1da177e4 LT |
2416 | if (optlen < sizeof(int)) |
2417 | return -EINVAL; | |
2418 | ||
2419 | if (get_user(val, (int __user *)optval)) | |
2420 | return -EFAULT; | |
2421 | ||
2422 | lock_sock(sk); | |
2423 | ||
2424 | switch (optname) { | |
2425 | case TCP_MAXSEG: | |
2426 | /* Values greater than the interface MTU won't take effect. However, | |
2427 | * at the point when this call is done we typically don't yet | |
2428 | * know which interface is going to be used. */ | |
c39508d6 | 2429 | if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) { |
1da177e4 LT |
2430 | err = -EINVAL; |
2431 | break; | |
2432 | } | |
2433 | tp->rx_opt.user_mss = val; | |
2434 | break; | |
2435 | ||
2436 | case TCP_NODELAY: | |
2437 | if (val) { | |
2438 | /* TCP_NODELAY is weaker than TCP_CORK, so that | |
2439 | * this option on corked socket is remembered, but | |
2440 | * it is not activated until cork is cleared. | |
2441 | * | |
2442 | * However, when TCP_NODELAY is set we make | |
2443 | * an explicit push, which overrides even TCP_CORK | |
2444 | * for currently queued segments. | |
2445 | */ | |
2446 | tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH; | |
9e412ba7 | 2447 | tcp_push_pending_frames(sk); |
1da177e4 LT |
2448 | } else { |
2449 | tp->nonagle &= ~TCP_NAGLE_OFF; | |
2450 | } | |
2451 | break; | |
2452 | ||
36e31b0a AP |
2453 | case TCP_THIN_LINEAR_TIMEOUTS: |
2454 | if (val < 0 || val > 1) | |
2455 | err = -EINVAL; | |
2456 | else | |
2457 | tp->thin_lto = val; | |
2458 | break; | |
2459 | ||
7e380175 AP |
2460 | case TCP_THIN_DUPACK: |
2461 | if (val < 0 || val > 1) | |
2462 | err = -EINVAL; | |
2463 | else | |
2464 | tp->thin_dupack = val; | |
eed530b6 YC |
2465 | if (tp->thin_dupack) |
2466 | tcp_disable_early_retrans(tp); | |
7e380175 AP |
2467 | break; |
2468 | ||
ee995283 PE |
2469 | case TCP_REPAIR: |
2470 | if (!tcp_can_repair_sock(sk)) | |
2471 | err = -EPERM; | |
2472 | else if (val == 1) { | |
2473 | tp->repair = 1; | |
2474 | sk->sk_reuse = SK_FORCE_REUSE; | |
2475 | tp->repair_queue = TCP_NO_QUEUE; | |
2476 | } else if (val == 0) { | |
2477 | tp->repair = 0; | |
2478 | sk->sk_reuse = SK_NO_REUSE; | |
2479 | tcp_send_window_probe(sk); | |
2480 | } else | |
2481 | err = -EINVAL; | |
2482 | ||
2483 | break; | |
2484 | ||
2485 | case TCP_REPAIR_QUEUE: | |
2486 | if (!tp->repair) | |
2487 | err = -EPERM; | |
2488 | else if (val < TCP_QUEUES_NR) | |
2489 | tp->repair_queue = val; | |
2490 | else | |
2491 | err = -EINVAL; | |
2492 | break; | |
2493 | ||
2494 | case TCP_QUEUE_SEQ: | |
2495 | if (sk->sk_state != TCP_CLOSE) | |
2496 | err = -EPERM; | |
2497 | else if (tp->repair_queue == TCP_SEND_QUEUE) | |
2498 | tp->write_seq = val; | |
2499 | else if (tp->repair_queue == TCP_RECV_QUEUE) | |
2500 | tp->rcv_nxt = val; | |
2501 | else | |
2502 | err = -EINVAL; | |
2503 | break; | |
2504 | ||
b139ba4e PE |
2505 | case TCP_REPAIR_OPTIONS: |
2506 | if (!tp->repair) | |
2507 | err = -EINVAL; | |
2508 | else if (sk->sk_state == TCP_ESTABLISHED) | |
de248a75 PE |
2509 | err = tcp_repair_options_est(tp, |
2510 | (struct tcp_repair_opt __user *)optval, | |
2511 | optlen); | |
b139ba4e PE |
2512 | else |
2513 | err = -EPERM; | |
2514 | break; | |
2515 | ||
1da177e4 LT |
2516 | case TCP_CORK: |
2517 | /* When set indicates to always queue non-full frames. | |
2518 | * Later the user clears this option and we transmit | |
2519 | * any pending partial frames in the queue. This is | |
2520 | * meant to be used alongside sendfile() to get properly | |
2521 | * filled frames when the user (for example) must write | |
2522 | * out headers with a write() call first and then use | |
2523 | * sendfile to send out the data parts. | |
2524 | * | |
2525 | * TCP_CORK can be set together with TCP_NODELAY and it is | |
2526 | * stronger than TCP_NODELAY. | |
2527 | */ | |
2528 | if (val) { | |
2529 | tp->nonagle |= TCP_NAGLE_CORK; | |
2530 | } else { | |
2531 | tp->nonagle &= ~TCP_NAGLE_CORK; | |
2532 | if (tp->nonagle&TCP_NAGLE_OFF) | |
2533 | tp->nonagle |= TCP_NAGLE_PUSH; | |
9e412ba7 | 2534 | tcp_push_pending_frames(sk); |
1da177e4 LT |
2535 | } |
2536 | break; | |
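#if 0
		/* Editor's userspace sketch of the pattern described above
		 * (fd, hdr, hdr_len, file_fd, body_len are hypothetical):
		 * cork, write the headers, sendfile the body, then uncork
		 * so everything leaves in maximally filled frames. */
		int on = 1, off = 0;

		setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
		write(fd, hdr, hdr_len);
		sendfile(fd, file_fd, NULL, body_len);
		setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
#endif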
2537 | ||
2538 | case TCP_KEEPIDLE: | |
2539 | if (val < 1 || val > MAX_TCP_KEEPIDLE) | |
2540 | err = -EINVAL; | |
2541 | else { | |
2542 | tp->keepalive_time = val * HZ; | |
2543 | if (sock_flag(sk, SOCK_KEEPOPEN) && | |
2544 | !((1 << sk->sk_state) & | |
2545 | (TCPF_CLOSE | TCPF_LISTEN))) { | |
6c37e5de | 2546 | u32 elapsed = keepalive_time_elapsed(tp); |
1da177e4 LT |
2547 | if (tp->keepalive_time > elapsed) |
2548 | elapsed = tp->keepalive_time - elapsed; | |
2549 | else | |
2550 | elapsed = 0; | |
463c84b9 | 2551 | inet_csk_reset_keepalive_timer(sk, elapsed); |
1da177e4 LT |
2552 | } |
2553 | } | |
2554 | break; | |
2555 | case TCP_KEEPINTVL: | |
2556 | if (val < 1 || val > MAX_TCP_KEEPINTVL) | |
2557 | err = -EINVAL; | |
2558 | else | |
2559 | tp->keepalive_intvl = val * HZ; | |
2560 | break; | |
2561 | case TCP_KEEPCNT: | |
2562 | if (val < 1 || val > MAX_TCP_KEEPCNT) | |
2563 | err = -EINVAL; | |
2564 | else | |
2565 | tp->keepalive_probes = val; | |
2566 | break; | |
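#if 0
		/* Editor's userspace sketch tying the three knobs above
		 * together (fd is hypothetical): probe after 60s of idle,
		 * every 10s, and drop the connection after 5 missed probes. */
		int on = 1, idle = 60, intvl = 10, cnt = 5;

		setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl));
		setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt));
#endif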
2567 | case TCP_SYNCNT: | |
2568 | if (val < 1 || val > MAX_TCP_SYNCNT) | |
2569 | err = -EINVAL; | |
2570 | else | |
463c84b9 | 2571 | icsk->icsk_syn_retries = val; |
1da177e4 LT |
2572 | break; |
2573 | ||
2574 | case TCP_LINGER2: | |
2575 | if (val < 0) | |
2576 | tp->linger2 = -1; | |
2577 | else if (val > sysctl_tcp_fin_timeout / HZ) | |
2578 | tp->linger2 = 0; | |
2579 | else | |
2580 | tp->linger2 = val * HZ; | |
2581 | break; | |
2582 | ||
2583 | case TCP_DEFER_ACCEPT: | |
b103cf34 JA |
2584 | /* Translate value in seconds to number of retransmits */ |
2585 | icsk->icsk_accept_queue.rskq_defer_accept = | |
2586 | secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ, | |
2587 | TCP_RTO_MAX / HZ); | |
1da177e4 LT |
2588 | break; |
2589 | ||
2590 | case TCP_WINDOW_CLAMP: | |
2591 | if (!val) { | |
2592 | if (sk->sk_state != TCP_CLOSE) { | |
2593 | err = -EINVAL; | |
2594 | break; | |
2595 | } | |
2596 | tp->window_clamp = 0; | |
2597 | } else | |
2598 | tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ? | |
2599 | SOCK_MIN_RCVBUF / 2 : val; | |
2600 | break; | |
2601 | ||
2602 | case TCP_QUICKACK: | |
2603 | if (!val) { | |
463c84b9 | 2604 | icsk->icsk_ack.pingpong = 1; |
1da177e4 | 2605 | } else { |
463c84b9 | 2606 | icsk->icsk_ack.pingpong = 0; |
1da177e4 LT |
2607 | if ((1 << sk->sk_state) & |
2608 | (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) && | |
463c84b9 ACM |
2609 | inet_csk_ack_scheduled(sk)) { |
2610 | icsk->icsk_ack.pending |= ICSK_ACK_PUSHED; | |
0e4b4992 | 2611 | tcp_cleanup_rbuf(sk, 1); |
1da177e4 | 2612 | if (!(val & 1)) |
463c84b9 | 2613 | icsk->icsk_ack.pingpong = 1; |
1da177e4 LT |
2614 | } |
2615 | } | |
2616 | break; | |
2617 | ||
cfb6eeb4 YH |
2618 | #ifdef CONFIG_TCP_MD5SIG |
2619 | case TCP_MD5SIG: | |
2620 | /* Read the IP->Key mappings from userspace */ | |
2621 | err = tp->af_specific->md5_parse(sk, optval, optlen); | |
2622 | break; | |
2623 | #endif | |
dca43c75 JC |
2624 | case TCP_USER_TIMEOUT: |
2625 | /* Cap the max timeout in ms TCP will retry/retrans | |
2626 | * before giving up and aborting (ETIMEDOUT) a connection. | |
2627 | */ | |
2628 | icsk->icsk_user_timeout = msecs_to_jiffies(val); | |
2629 | break; | |
1da177e4 LT |
2630 | default: |
2631 | err = -ENOPROTOOPT; | |
2632 | break; | |
3ff50b79 SH |
2633 | } |
2634 | ||
1da177e4 LT |
2635 | release_sock(sk); |
2636 | return err; | |
2637 | } | |
2638 | ||
3fdadf7d | 2639 | int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval, |
b7058842 | 2640 | unsigned int optlen) |
3fdadf7d | 2641 | { |
cf533ea5 | 2642 | const struct inet_connection_sock *icsk = inet_csk(sk); |
3fdadf7d DM |
2643 | |
2644 | if (level != SOL_TCP) | |
2645 | return icsk->icsk_af_ops->setsockopt(sk, level, optname, | |
2646 | optval, optlen); | |
2647 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); | |
2648 | } | |
4bc2f18b | 2649 | EXPORT_SYMBOL(tcp_setsockopt); |
3fdadf7d DM |
2650 | |
2651 | #ifdef CONFIG_COMPAT | |
543d9cfe | 2652 | int compat_tcp_setsockopt(struct sock *sk, int level, int optname, |
b7058842 | 2653 | char __user *optval, unsigned int optlen) |
3fdadf7d | 2654 | { |
dec73ff0 ACM |
2655 | if (level != SOL_TCP) |
2656 | return inet_csk_compat_setsockopt(sk, level, optname, | |
2657 | optval, optlen); | |
3fdadf7d DM |
2658 | return do_tcp_setsockopt(sk, level, optname, optval, optlen); |
2659 | } | |
543d9cfe | 2660 | EXPORT_SYMBOL(compat_tcp_setsockopt); |
3fdadf7d DM |
2661 | #endif |
2662 | ||
1da177e4 | 2663 | /* Return information about state of tcp endpoint in API format. */ |
cf533ea5 | 2664 | void tcp_get_info(const struct sock *sk, struct tcp_info *info) |
1da177e4 | 2665 | { |
cf533ea5 | 2666 | const struct tcp_sock *tp = tcp_sk(sk); |
463c84b9 | 2667 | const struct inet_connection_sock *icsk = inet_csk(sk); |
1da177e4 LT |
2668 | u32 now = tcp_time_stamp; |
2669 | ||
2670 | memset(info, 0, sizeof(*info)); | |
2671 | ||
2672 | info->tcpi_state = sk->sk_state; | |
6687e988 | 2673 | info->tcpi_ca_state = icsk->icsk_ca_state; |
463c84b9 | 2674 | info->tcpi_retransmits = icsk->icsk_retransmits; |
6687e988 | 2675 | info->tcpi_probes = icsk->icsk_probes_out; |
463c84b9 | 2676 | info->tcpi_backoff = icsk->icsk_backoff; |
1da177e4 LT |
2677 | |
2678 | if (tp->rx_opt.tstamp_ok) | |
2679 | info->tcpi_options |= TCPI_OPT_TIMESTAMPS; | |
e60402d0 | 2680 | if (tcp_is_sack(tp)) |
1da177e4 LT |
2681 | info->tcpi_options |= TCPI_OPT_SACK; |
2682 | if (tp->rx_opt.wscale_ok) { | |
2683 | info->tcpi_options |= TCPI_OPT_WSCALE; | |
2684 | info->tcpi_snd_wscale = tp->rx_opt.snd_wscale; | |
2685 | info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale; | |
e905a9ed | 2686 | } |
1da177e4 | 2687 | |
b5c5693b | 2688 | if (tp->ecn_flags & TCP_ECN_OK) |
1da177e4 | 2689 | info->tcpi_options |= TCPI_OPT_ECN; |
b5c5693b ED |
2690 | if (tp->ecn_flags & TCP_ECN_SEEN) |
2691 | info->tcpi_options |= TCPI_OPT_ECN_SEEN; | |
1da177e4 | 2692 | |
463c84b9 ACM |
2693 | info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto); |
2694 | info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato); | |
c1b4a7e6 | 2695 | info->tcpi_snd_mss = tp->mss_cache; |
463c84b9 | 2696 | info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss; |
1da177e4 | 2697 | |
5ee3afba RJ |
2698 | if (sk->sk_state == TCP_LISTEN) { |
2699 | info->tcpi_unacked = sk->sk_ack_backlog; | |
2700 | info->tcpi_sacked = sk->sk_max_ack_backlog; | |
2701 | } else { | |
2702 | info->tcpi_unacked = tp->packets_out; | |
2703 | info->tcpi_sacked = tp->sacked_out; | |
2704 | } | |
1da177e4 LT |
2705 | info->tcpi_lost = tp->lost_out; |
2706 | info->tcpi_retrans = tp->retrans_out; | |
2707 | info->tcpi_fackets = tp->fackets_out; | |
2708 | ||
2709 | info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime); | |
463c84b9 | 2710 | info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime); |
1da177e4 LT |
2711 | info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp); |
2712 | ||
d83d8461 | 2713 | info->tcpi_pmtu = icsk->icsk_pmtu_cookie; |
1da177e4 LT |
2714 | info->tcpi_rcv_ssthresh = tp->rcv_ssthresh; |
2715 | info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3; | |
2716 | info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2; | |
2717 | info->tcpi_snd_ssthresh = tp->snd_ssthresh; | |
2718 | info->tcpi_snd_cwnd = tp->snd_cwnd; | |
2719 | info->tcpi_advmss = tp->advmss; | |
2720 | info->tcpi_reordering = tp->reordering; | |
2721 | ||
2722 | info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3; | |
2723 | info->tcpi_rcv_space = tp->rcvq_space.space; | |
2724 | ||
2725 | info->tcpi_total_retrans = tp->total_retrans; | |
2726 | } | |
1da177e4 LT |
2727 | EXPORT_SYMBOL_GPL(tcp_get_info); |
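/*
 * Editor's userspace sketch: tcp_get_info() is what getsockopt(TCP_INFO)
 * reports; a minimal caller looks like this (fd is hypothetical).
 */
#if 0
	struct tcp_info info;
	socklen_t len = sizeof(info);

	if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
		printf("rtt %uus cwnd %u total_retrans %u\n",
		       info.tcpi_rtt, info.tcpi_snd_cwnd,
		       info.tcpi_total_retrans);
#endif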
2728 | ||
3fdadf7d DM |
2729 | static int do_tcp_getsockopt(struct sock *sk, int level, |
2730 | int optname, char __user *optval, int __user *optlen) | |
1da177e4 | 2731 | { |
295f7324 | 2732 | struct inet_connection_sock *icsk = inet_csk(sk); |
1da177e4 LT |
2733 | struct tcp_sock *tp = tcp_sk(sk); |
2734 | int val, len; | |
2735 | ||
1da177e4 LT |
2736 | if (get_user(len, optlen)) |
2737 | return -EFAULT; | |
2738 | ||
2739 | len = min_t(unsigned int, len, sizeof(int)); | |
2740 | ||
2741 | if (len < 0) | |
2742 | return -EINVAL; | |
2743 | ||
2744 | switch (optname) { | |
2745 | case TCP_MAXSEG: | |
c1b4a7e6 | 2746 | val = tp->mss_cache; |
1da177e4 LT |
2747 | if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN))) |
2748 | val = tp->rx_opt.user_mss; | |
5e6a3ce6 PE |
2749 | if (tp->repair) |
2750 | val = tp->rx_opt.mss_clamp; | |
1da177e4 LT |
2751 | break; |
2752 | case TCP_NODELAY: | |
2753 | val = !!(tp->nonagle&TCP_NAGLE_OFF); | |
2754 | break; | |
2755 | case TCP_CORK: | |
2756 | val = !!(tp->nonagle&TCP_NAGLE_CORK); | |
2757 | break; | |
2758 | case TCP_KEEPIDLE: | |
df19a626 | 2759 | val = keepalive_time_when(tp) / HZ; |
1da177e4 LT |
2760 | break; |
2761 | case TCP_KEEPINTVL: | |
df19a626 | 2762 | val = keepalive_intvl_when(tp) / HZ; |
1da177e4 LT |
2763 | break; |
2764 | case TCP_KEEPCNT: | |
df19a626 | 2765 | val = keepalive_probes(tp); |
1da177e4 LT |
2766 | break; |
2767 | case TCP_SYNCNT: | |
295f7324 | 2768 | val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries; |
1da177e4 LT |
2769 | break; |
2770 | case TCP_LINGER2: | |
2771 | val = tp->linger2; | |
2772 | if (val >= 0) | |
2773 | val = (val ? : sysctl_tcp_fin_timeout) / HZ; | |
2774 | break; | |
2775 | case TCP_DEFER_ACCEPT: | |
b103cf34 JA |
2776 | val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept, |
2777 | TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ); | |
1da177e4 LT |
2778 | break; |
2779 | case TCP_WINDOW_CLAMP: | |
2780 | val = tp->window_clamp; | |
2781 | break; | |
2782 | case TCP_INFO: { | |
2783 | struct tcp_info info; | |
2784 | ||
2785 | if (get_user(len, optlen)) | |
2786 | return -EFAULT; | |
2787 | ||
2788 | tcp_get_info(sk, &info); | |
2789 | ||
2790 | len = min_t(unsigned int, len, sizeof(info)); | |
2791 | if (put_user(len, optlen)) | |
2792 | return -EFAULT; | |
2793 | if (copy_to_user(optval, &info, len)) | |
2794 | return -EFAULT; | |
2795 | return 0; | |
2796 | } | |
2797 | case TCP_QUICKACK: | |
295f7324 | 2798 | val = !icsk->icsk_ack.pingpong; |
1da177e4 | 2799 | break; |
5f8ef48d SH |
2800 | |
2801 | case TCP_CONGESTION: | |
2802 | if (get_user(len, optlen)) | |
2803 | return -EFAULT; | |
2804 | len = min_t(unsigned int, len, TCP_CA_NAME_MAX); | |
2805 | if (put_user(len, optlen)) | |
2806 | return -EFAULT; | |
6687e988 | 2807 | if (copy_to_user(optval, icsk->icsk_ca_ops->name, len)) |
5f8ef48d SH |
2808 | return -EFAULT; |
2809 | return 0; | |
e56fb50f WAS |
2810 | |
2811 | case TCP_COOKIE_TRANSACTIONS: { | |
2812 | struct tcp_cookie_transactions ctd; | |
2813 | struct tcp_cookie_values *cvp = tp->cookie_values; | |
2814 | ||
2815 | if (get_user(len, optlen)) | |
2816 | return -EFAULT; | |
2817 | if (len < sizeof(ctd)) | |
2818 | return -EINVAL; | |
2819 | ||
2820 | memset(&ctd, 0, sizeof(ctd)); | |
2821 | ctd.tcpct_flags = (tp->rx_opt.cookie_in_always ? | |
2822 | TCP_COOKIE_IN_ALWAYS : 0) | |
2823 | | (tp->rx_opt.cookie_out_never ? | |
2824 | TCP_COOKIE_OUT_NEVER : 0); | |
2825 | ||
2826 | if (cvp != NULL) { | |
2827 | ctd.tcpct_flags |= (cvp->s_data_in ? | |
2828 | TCP_S_DATA_IN : 0) | |
2829 | | (cvp->s_data_out ? | |
2830 | TCP_S_DATA_OUT : 0); | |
2831 | ||
2832 | ctd.tcpct_cookie_desired = cvp->cookie_desired; | |
2833 | ctd.tcpct_s_data_desired = cvp->s_data_desired; | |
2834 | ||
e56fb50f WAS |
2835 | memcpy(&ctd.tcpct_value[0], &cvp->cookie_pair[0], |
2836 | cvp->cookie_pair_size); | |
2837 | ctd.tcpct_used = cvp->cookie_pair_size; | |
2838 | } | |
2839 | ||
2840 | if (put_user(sizeof(ctd), optlen)) | |
2841 | return -EFAULT; | |
2842 | if (copy_to_user(optval, &ctd, sizeof(ctd))) | |
2843 | return -EFAULT; | |
2844 | return 0; | |
2845 | } | |
3c0fef0b JH |
2846 | case TCP_THIN_LINEAR_TIMEOUTS: |
2847 | val = tp->thin_lto; | |
2848 | break; | |
2849 | case TCP_THIN_DUPACK: | |
2850 | val = tp->thin_dupack; | |
2851 | break; | |
dca43c75 | 2852 | |
ee995283 PE |
2853 | case TCP_REPAIR: |
2854 | val = tp->repair; | |
2855 | break; | |
2856 | ||
2857 | case TCP_REPAIR_QUEUE: | |
2858 | if (tp->repair) | |
2859 | val = tp->repair_queue; | |
2860 | else | |
2861 | return -EINVAL; | |
2862 | break; | |
2863 | ||
2864 | case TCP_QUEUE_SEQ: | |
2865 | if (tp->repair_queue == TCP_SEND_QUEUE) | |
2866 | val = tp->write_seq; | |
2867 | else if (tp->repair_queue == TCP_RECV_QUEUE) | |
2868 | val = tp->rcv_nxt; | |
2869 | else | |
2870 | return -EINVAL; | |
2871 | break; | |
2872 | ||
dca43c75 JC |
2873 | case TCP_USER_TIMEOUT: |
2874 | val = jiffies_to_msecs(icsk->icsk_user_timeout); | |
2875 | break; | |
1da177e4 LT |
2876 | default: |
2877 | return -ENOPROTOOPT; | |
3ff50b79 | 2878 | } |
1da177e4 LT |
2879 | |
2880 | if (put_user(len, optlen)) | |
2881 | return -EFAULT; | |
2882 | if (copy_to_user(optval, &val, len)) | |
2883 | return -EFAULT; | |
2884 | return 0; | |
2885 | } | |
2886 | ||
3fdadf7d DM |
2887 | int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval, |
2888 | int __user *optlen) | |
2889 | { | |
2890 | struct inet_connection_sock *icsk = inet_csk(sk); | |
2891 | ||
2892 | if (level != SOL_TCP) | |
2893 | return icsk->icsk_af_ops->getsockopt(sk, level, optname, | |
2894 | optval, optlen); | |
2895 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); | |
2896 | } | |
4bc2f18b | 2897 | EXPORT_SYMBOL(tcp_getsockopt); |
3fdadf7d DM |
2898 | |
2899 | #ifdef CONFIG_COMPAT | |
543d9cfe ACM |
2900 | int compat_tcp_getsockopt(struct sock *sk, int level, int optname, |
2901 | char __user *optval, int __user *optlen) | |
3fdadf7d | 2902 | { |
dec73ff0 ACM |
2903 | if (level != SOL_TCP) |
2904 | return inet_csk_compat_getsockopt(sk, level, optname, | |
2905 | optval, optlen); | |
3fdadf7d DM |
2906 | return do_tcp_getsockopt(sk, level, optname, optval, optlen); |
2907 | } | |
543d9cfe | 2908 | EXPORT_SYMBOL(compat_tcp_getsockopt); |
3fdadf7d | 2909 | #endif |
1da177e4 | 2910 | |
c8f44aff MM |
2911 | struct sk_buff *tcp_tso_segment(struct sk_buff *skb, |
2912 | netdev_features_t features) | |
f4c50d99 HX |
2913 | { |
2914 | struct sk_buff *segs = ERR_PTR(-EINVAL); | |
2915 | struct tcphdr *th; | |
95c96174 | 2916 | unsigned int thlen; |
f4c50d99 | 2917 | unsigned int seq; |
d3bc23e7 | 2918 | __be32 delta; |
f4c50d99 | 2919 | unsigned int oldlen; |
4e704ee3 | 2920 | unsigned int mss; |
f4c50d99 HX |
2921 | |
2922 | if (!pskb_may_pull(skb, sizeof(*th))) | |
2923 | goto out; | |
2924 | ||
aa8223c7 | 2925 | th = tcp_hdr(skb); |
f4c50d99 HX |
2926 | thlen = th->doff * 4; |
2927 | if (thlen < sizeof(*th)) | |
2928 | goto out; | |
2929 | ||
2930 | if (!pskb_may_pull(skb, thlen)) | |
2931 | goto out; | |
2932 | ||
0718bcc0 | 2933 | oldlen = (u16)~skb->len; |
f4c50d99 HX |
2934 | __skb_pull(skb, thlen); |
2935 | ||
4e704ee3 HX |
2936 | mss = skb_shinfo(skb)->gso_size; |
2937 | if (unlikely(skb->len <= mss)) | |
2938 | goto out; | |
2939 | ||
3820c3f3 HX |
2940 | if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) { |
2941 | /* Packet is from an untrusted source, reset gso_segs. */ | |
bbcf467d | 2942 | int type = skb_shinfo(skb)->gso_type; |
bbcf467d HX |
2943 | |
2944 | if (unlikely(type & | |
2945 | ~(SKB_GSO_TCPV4 | | |
2946 | SKB_GSO_DODGY | | |
2947 | SKB_GSO_TCP_ECN | | |
2948 | SKB_GSO_TCPV6 | | |
2949 | 0) || | |
2950 | !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))) | |
2951 | goto out; | |
3820c3f3 | 2952 | |
172589cc | 2953 | skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss); |
3820c3f3 HX |
2954 | |
2955 | segs = NULL; | |
2956 | goto out; | |
2957 | } | |
2958 | ||
576a30eb | 2959 | segs = skb_segment(skb, features); |
f4c50d99 HX |
2960 | if (IS_ERR(segs)) |
2961 | goto out; | |
2962 | ||
4e704ee3 | 2963 | delta = htonl(oldlen + (thlen + mss)); |
f4c50d99 HX |
2964 | |
2965 | skb = segs; | |
aa8223c7 | 2966 | th = tcp_hdr(skb); |
f4c50d99 HX |
2967 | seq = ntohl(th->seq); |
2968 | ||
2969 | do { | |
2970 | th->fin = th->psh = 0; | |
2971 | ||
d3bc23e7 AV |
2972 | th->check = ~csum_fold((__force __wsum)((__force u32)th->check + |
2973 | (__force u32)delta)); | |
84fa7933 | 2974 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
9c70220b ACM |
2975 | th->check = |
2976 | csum_fold(csum_partial(skb_transport_header(skb), | |
2977 | thlen, skb->csum)); | |
f4c50d99 | 2978 | |
4e704ee3 | 2979 | seq += mss; |
f4c50d99 | 2980 | skb = skb->next; |
aa8223c7 | 2981 | th = tcp_hdr(skb); |
f4c50d99 HX |
2982 | |
2983 | th->seq = htonl(seq); | |
2984 | th->cwr = 0; | |
2985 | } while (skb->next); | |
2986 | ||
27a884dc | 2987 | delta = htonl(oldlen + (skb->tail - skb->transport_header) + |
9c70220b | 2988 | skb->data_len); |
d3bc23e7 AV |
2989 | th->check = ~csum_fold((__force __wsum)((__force u32)th->check + |
2990 | (__force u32)delta)); | |
84fa7933 | 2991 | if (skb->ip_summed != CHECKSUM_PARTIAL) |
9c70220b ACM |
2992 | th->check = csum_fold(csum_partial(skb_transport_header(skb), |
2993 | thlen, skb->csum)); | |
f4c50d99 HX |
2994 | |
2995 | out: | |
2996 | return segs; | |
2997 | } | |
adcfc7d0 | 2998 | EXPORT_SYMBOL(tcp_tso_segment); |
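/*
 * Editor's note on the checksum arithmetic above: oldlen is the 16-bit
 * ones' complement of the original total length, so
 *
 *	delta = htonl(oldlen + (thlen + mss))
 *
 * is "new length - old length" in ones'-complement arithmetic.  Folding
 * delta into th->check updates the pseudo-header length contribution of
 * each mss-sized segment without re-summing its payload; the final
 * segment gets the same treatment with its true (possibly shorter)
 * length.
 */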
f4c50d99 | 2999 | |
bf296b12 HX |
3000 | struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb) |
3001 | { | |
3002 | struct sk_buff **pp = NULL; | |
3003 | struct sk_buff *p; | |
3004 | struct tcphdr *th; | |
3005 | struct tcphdr *th2; | |
a0a69a01 | 3006 | unsigned int len; |
bf296b12 | 3007 | unsigned int thlen; |
0eae88f3 | 3008 | __be32 flags; |
bf296b12 | 3009 | unsigned int mss = 1; |
a5b1cf28 HX |
3010 | unsigned int hlen; |
3011 | unsigned int off; | |
bf296b12 | 3012 | int flush = 1; |
aa6320d3 | 3013 | int i; |
bf296b12 | 3014 | |
a5b1cf28 HX |
3015 | off = skb_gro_offset(skb); |
3016 | hlen = off + sizeof(*th); | |
3017 | th = skb_gro_header_fast(skb, off); | |
3018 | if (skb_gro_header_hard(skb, hlen)) { | |
3019 | th = skb_gro_header_slow(skb, hlen, off); | |
3020 | if (unlikely(!th)) | |
3021 | goto out; | |
3022 | } | |
bf296b12 | 3023 | |
bf296b12 HX |
3024 | thlen = th->doff * 4; |
3025 | if (thlen < sizeof(*th)) | |
3026 | goto out; | |
3027 | ||
a5b1cf28 HX |
3028 | hlen = off + thlen; |
3029 | if (skb_gro_header_hard(skb, hlen)) { | |
3030 | th = skb_gro_header_slow(skb, hlen, off); | |
3031 | if (unlikely(!th)) | |
3032 | goto out; | |
3033 | } | |
bf296b12 | 3034 | |
86911732 | 3035 | skb_gro_pull(skb, thlen); |
bf296b12 | 3036 | |
a0a69a01 | 3037 | len = skb_gro_len(skb); |
bf296b12 HX |
3038 | flags = tcp_flag_word(th); |
3039 | ||
3040 | for (; (p = *head); head = &p->next) { | |
3041 | if (!NAPI_GRO_CB(p)->same_flow) | |
3042 | continue; | |
3043 | ||
3044 | th2 = tcp_hdr(p); | |
3045 | ||
745898ea | 3046 | if (*(u32 *)&th->source ^ *(u32 *)&th2->source) { |
bf296b12 HX |
3047 | NAPI_GRO_CB(p)->same_flow = 0; |
3048 | continue; | |
3049 | } | |
3050 | ||
3051 | goto found; | |
3052 | } | |
3053 | ||
3054 | goto out_check_final; | |
3055 | ||
3056 | found: | |
3057 | flush = NAPI_GRO_CB(p)->flush; | |
0eae88f3 ED |
3058 | flush |= (__force int)(flags & TCP_FLAG_CWR); |
3059 | flush |= (__force int)((flags ^ tcp_flag_word(th2)) & | |
3060 | ~(TCP_FLAG_CWR | TCP_FLAG_FIN | TCP_FLAG_PSH)); | |
3061 | flush |= (__force int)(th->ack_seq ^ th2->ack_seq); | |
a2a804cd | 3062 | for (i = sizeof(*th); i < thlen; i += 4) |
aa6320d3 HX |
3063 | flush |= *(u32 *)((u8 *)th + i) ^ |
3064 | *(u32 *)((u8 *)th2 + i); | |
bf296b12 | 3065 | |
b530256d | 3066 | mss = skb_shinfo(p)->gso_size; |
bf296b12 | 3067 | |
30a3ae30 | 3068 | flush |= (len - 1) >= mss; |
aa6320d3 | 3069 | flush |= (ntohl(th2->seq) + skb_gro_len(p)) ^ ntohl(th->seq); |
bf296b12 HX |
3070 | |
3071 | if (flush || skb_gro_receive(head, skb)) { | |
3072 | mss = 1; | |
3073 | goto out_check_final; | |
3074 | } | |
3075 | ||
3076 | p = *head; | |
3077 | th2 = tcp_hdr(p); | |
3078 | tcp_flag_word(th2) |= flags & (TCP_FLAG_FIN | TCP_FLAG_PSH); | |
3079 | ||
3080 | out_check_final: | |
a0a69a01 | 3081 | flush = len < mss; |
0eae88f3 ED |
3082 | flush |= (__force int)(flags & (TCP_FLAG_URG | TCP_FLAG_PSH | |
3083 | TCP_FLAG_RST | TCP_FLAG_SYN | | |
3084 | TCP_FLAG_FIN)); | |
bf296b12 HX |
3085 | |
3086 | if (p && (!NAPI_GRO_CB(skb)->same_flow || flush)) | |
3087 | pp = head; | |
3088 | ||
3089 | out: | |
3090 | NAPI_GRO_CB(skb)->flush |= flush; | |
3091 | ||
3092 | return pp; | |
3093 | } | |
684f2176 | 3094 | EXPORT_SYMBOL(tcp_gro_receive); |
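Two implementation details above deserve a note: the ports of both headers are compared with a single 32-bit XOR, which works because source and dest are adjacent 16-bit fields of struct tcphdr, and every mismatch is OR-ed into flush rather than branched on. A standalone sketch of the port trick (using memcpy to sidestep strict aliasing, which the kernel handles by other means):

    #include <stdint.h>
    #include <string.h>

    struct port_pair {
            uint16_t source;        /* network byte order */
            uint16_t dest;          /* network byte order */
    };

    /* Nonzero iff either port differs: one 32-bit compare covers
     * both adjacent 16-bit fields. */
    static int ports_differ(const struct port_pair *a,
                            const struct port_pair *b)
    {
            uint32_t wa, wb;

            memcpy(&wa, a, sizeof(wa));
            memcpy(&wb, b, sizeof(wb));
            return wa != wb;
    }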
bf296b12 HX |
3095 | |
3096 | int tcp_gro_complete(struct sk_buff *skb) | |
3097 | { | |
3098 | struct tcphdr *th = tcp_hdr(skb); | |
3099 | ||
3100 | skb->csum_start = skb_transport_header(skb) - skb->head; | |
3101 | skb->csum_offset = offsetof(struct tcphdr, check); | |
3102 | skb->ip_summed = CHECKSUM_PARTIAL; | |
3103 | ||
bf296b12 HX |
3104 | skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; |
3105 | ||
3106 | if (th->cwr) | |
3107 | skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; | |
3108 | ||
3109 | return 0; | |
3110 | } | |
684f2176 | 3111 | EXPORT_SYMBOL(tcp_gro_complete); |
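After tcp_gro_complete() the merged skb is handed off with CHECKSUM_PARTIAL: csum_start marks where summing begins (the transport header) and csum_offset where the result is stored (tcphdr.check). A sketch of what a software fallback does with that pair, assuming the checksum field has already been seeded with the pseudo-header sum as the stack arranges; byte-order corner cases are ignored for brevity:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    /* One's-complement sum of a byte range, folded and inverted. */
    static uint16_t csum16(const uint8_t *p, size_t len)
    {
            uint32_t sum = 0;

            while (len > 1) {
                    uint16_t w;

                    memcpy(&w, p, 2);
                    sum += w;
                    p += 2;
                    len -= 2;
            }
            if (len)
                    sum += *p;
            sum = (sum & 0xffff) + (sum >> 16);
            sum = (sum & 0xffff) + (sum >> 16);
            return (uint16_t)~sum;
    }

    /* Complete a partial checksum: sum from 'start' to the end of the
     * buffer and store the result at start + offset (cf. csum_start
     * and csum_offset above). */
    static void finish_partial_csum(uint8_t *buf, size_t len,
                                    size_t start, size_t offset)
    {
            uint16_t sum = csum16(buf + start, len - start);

            memcpy(buf + start + offset, &sum, sizeof(sum));
    }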
bf296b12 | 3112 | |
cfb6eeb4 YH |
3113 | #ifdef CONFIG_TCP_MD5SIG |
3114 | static unsigned long tcp_md5sig_users; | |
765cf997 | 3115 | static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool; |
cfb6eeb4 YH |
3116 | static DEFINE_SPINLOCK(tcp_md5sig_pool_lock); |
3117 | ||
765cf997 | 3118 | static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool) |
cfb6eeb4 YH |
3119 | { |
3120 | int cpu; | |
765cf997 | 3121 | |
cfb6eeb4 | 3122 | for_each_possible_cpu(cpu) { |
765cf997 ED |
3123 | struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu); |
3124 | ||
3125 | if (p->md5_desc.tfm) | |
3126 | crypto_free_hash(p->md5_desc.tfm); | |
cfb6eeb4 YH |
3127 | } |
3128 | free_percpu(pool); | |
3129 | } | |
3130 | ||
3131 | void tcp_free_md5sig_pool(void) | |
3132 | { | |
765cf997 | 3133 | struct tcp_md5sig_pool __percpu *pool = NULL; |
cfb6eeb4 | 3134 | |
2c4f6219 | 3135 | spin_lock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3136 | if (--tcp_md5sig_users == 0) { |
3137 | pool = tcp_md5sig_pool; | |
3138 | tcp_md5sig_pool = NULL; | |
3139 | } | |
2c4f6219 | 3140 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3141 | if (pool) |
3142 | __tcp_free_md5sig_pool(pool); | |
3143 | } | |
cfb6eeb4 YH |
3144 | EXPORT_SYMBOL(tcp_free_md5sig_pool); |
3145 | ||
765cf997 | 3146 | static struct tcp_md5sig_pool __percpu * |
7d720c3e | 3147 | __tcp_alloc_md5sig_pool(struct sock *sk) |
cfb6eeb4 YH |
3148 | { |
3149 | int cpu; | |
765cf997 | 3150 | struct tcp_md5sig_pool __percpu *pool; |
cfb6eeb4 | 3151 | |
765cf997 | 3152 | pool = alloc_percpu(struct tcp_md5sig_pool); |
cfb6eeb4 YH |
3153 | if (!pool) |
3154 | return NULL; | |
3155 | ||
3156 | for_each_possible_cpu(cpu) { | |
cfb6eeb4 YH |
3157 | struct crypto_hash *hash; |
3158 | ||
cfb6eeb4 YH |
3159 | hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC); |
3160 | if (!hash || IS_ERR(hash)) | |
3161 | goto out_free; | |
3162 | ||
765cf997 | 3163 | per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; |
cfb6eeb4 YH |
3164 | } |
3165 | return pool; | |
3166 | out_free: | |
3167 | __tcp_free_md5sig_pool(pool); | |
3168 | return NULL; | |
3169 | } | |
3170 | ||
765cf997 | 3171 | struct tcp_md5sig_pool __percpu *tcp_alloc_md5sig_pool(struct sock *sk) |
cfb6eeb4 | 3172 | { |
765cf997 | 3173 | struct tcp_md5sig_pool __percpu *pool; |
a2a385d6 | 3174 | bool alloc = false; |
cfb6eeb4 YH |
3175 | |
3176 | retry: | |
2c4f6219 | 3177 | spin_lock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3178 | pool = tcp_md5sig_pool; |
3179 | if (tcp_md5sig_users++ == 0) { | |
a2a385d6 | 3180 | alloc = true; |
2c4f6219 | 3181 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3182 | } else if (!pool) { |
3183 | tcp_md5sig_users--; | |
2c4f6219 | 3184 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3185 | cpu_relax(); |
3186 | goto retry; | |
3187 | } else | |
2c4f6219 | 3188 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3189 | |
3190 | if (alloc) { | |
3191 | /* we cannot hold the spinlock here because the allocation may sleep. */ | |
765cf997 | 3192 | struct tcp_md5sig_pool __percpu *p; |
7d720c3e TH |
3193 | |
3194 | p = __tcp_alloc_md5sig_pool(sk); | |
2c4f6219 | 3195 | spin_lock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3196 | if (!p) { |
3197 | tcp_md5sig_users--; | |
2c4f6219 | 3198 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3199 | return NULL; |
3200 | } | |
3201 | pool = tcp_md5sig_pool; | |
3202 | if (pool) { | |
3203 | /* oops, it has already been assigned. */ | |
2c4f6219 | 3204 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3205 | __tcp_free_md5sig_pool(p); |
3206 | } else { | |
3207 | tcp_md5sig_pool = pool = p; | |
2c4f6219 | 3208 | spin_unlock_bh(&tcp_md5sig_pool_lock); |
cfb6eeb4 YH |
3209 | } |
3210 | } | |
3211 | return pool; | |
3212 | } | |
cfb6eeb4 YH |
3213 | EXPORT_SYMBOL(tcp_alloc_md5sig_pool); |
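tcp_alloc_md5sig_pool() is a lock/drop/re-check pattern: take a reference under the lock, release the lock for the sleeping allocation, then re-acquire it and either install the result or free it because another CPU installed one first (callers that find the count nonzero but the pool still NULL spin in the retry loop until the winner finishes). A simplified pthread rendering of the same shape, with hypothetical names and the retry loop reduced to a comment:

    #include <pthread.h>
    #include <stdlib.h>

    static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
    static void *shared_pool;
    static unsigned long pool_users;

    static void *get_pool(void)
    {
            void *p = NULL;
            int first;

            pthread_mutex_lock(&pool_lock);
            first = (pool_users++ == 0);
            if (!first)
                    p = shared_pool;
            pthread_mutex_unlock(&pool_lock);

            if (!first)
                    return p;       /* the kernel retries while this is NULL */

            /* Sleeping allocation: the lock must not be held here. */
            p = malloc(4096);

            pthread_mutex_lock(&pool_lock);
            if (!p) {
                    pool_users--;           /* failed: drop our reference */
            } else if (shared_pool) {
                    free(p);                /* lost the race: use the winner's */
                    p = shared_pool;
            } else {
                    shared_pool = p;
            }
            pthread_mutex_unlock(&pool_lock);
            return p;
    }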
3214 | ||
35790c04 ED |
3215 | |
3216 | /** | |
3217 | * tcp_get_md5sig_pool - get md5sig_pool for this user | |
3218 | * | |
3219 | * We use a per-cpu structure, so on success we return with preemption | |
3220 | * and BH disabled, to make sure another thread or softirq handler | |
3221 | * won't try to use the same context. | |
3222 | */ | |
3223 | struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) | |
cfb6eeb4 | 3224 | { |
765cf997 | 3225 | struct tcp_md5sig_pool __percpu *p; |
35790c04 ED |
3226 | |
3227 | local_bh_disable(); | |
3228 | ||
3229 | spin_lock(&tcp_md5sig_pool_lock); | |
cfb6eeb4 YH |
3230 | p = tcp_md5sig_pool; |
3231 | if (p) | |
3232 | tcp_md5sig_users++; | |
35790c04 ED |
3233 | spin_unlock(&tcp_md5sig_pool_lock); |
3234 | ||
3235 | if (p) | |
765cf997 | 3236 | return this_cpu_ptr(p); |
cfb6eeb4 | 3237 | |
35790c04 ED |
3238 | local_bh_enable(); |
3239 | return NULL; | |
3240 | } | |
3241 | EXPORT_SYMBOL(tcp_get_md5sig_pool); | |
cfb6eeb4 | 3242 | |
35790c04 | 3243 | void tcp_put_md5sig_pool(void) |
6931ba7c | 3244 | { |
35790c04 | 3245 | local_bh_enable(); |
6931ba7c | 3246 | tcp_free_md5sig_pool(); |
cfb6eeb4 | 3247 | } |
35790c04 | 3248 | EXPORT_SYMBOL(tcp_put_md5sig_pool); |
49a72dfb AL |
3249 | |
3250 | int tcp_md5_hash_header(struct tcp_md5sig_pool *hp, | |
ca35a0ef | 3251 | const struct tcphdr *th) |
49a72dfb AL |
3252 | { |
3253 | struct scatterlist sg; | |
ca35a0ef | 3254 | struct tcphdr hdr; |
49a72dfb AL |
3255 | int err; |
3256 | ||
ca35a0ef ED |
3257 | /* We are not allowed to change the tcphdr, so make a local copy */ | |
3258 | memcpy(&hdr, th, sizeof(hdr)); | |
3259 | hdr.check = 0; | |
3260 | ||
49a72dfb | 3261 | /* options aren't included in the hash */ |
ca35a0ef ED |
3262 | sg_init_one(&sg, &hdr, sizeof(hdr)); |
3263 | err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr)); | |
49a72dfb AL |
3264 | return err; |
3265 | } | |
49a72dfb AL |
3266 | EXPORT_SYMBOL(tcp_md5_hash_header); |
3267 | ||
3268 | int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, | |
cf533ea5 | 3269 | const struct sk_buff *skb, unsigned int header_len) |
49a72dfb AL |
3270 | { |
3271 | struct scatterlist sg; | |
3272 | const struct tcphdr *tp = tcp_hdr(skb); | |
3273 | struct hash_desc *desc = &hp->md5_desc; | |
95c96174 ED |
3274 | unsigned int i; |
3275 | const unsigned int head_data_len = skb_headlen(skb) > header_len ? | |
3276 | skb_headlen(skb) - header_len : 0; | |
49a72dfb | 3277 | const struct skb_shared_info *shi = skb_shinfo(skb); |
d7fd1b57 | 3278 | struct sk_buff *frag_iter; |
49a72dfb AL |
3279 | |
3280 | sg_init_table(&sg, 1); | |
3281 | ||
3282 | sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len); | |
3283 | if (crypto_hash_update(desc, &sg, head_data_len)) | |
3284 | return 1; | |
3285 | ||
3286 | for (i = 0; i < shi->nr_frags; ++i) { | |
3287 | const struct skb_frag_struct *f = &shi->frags[i]; | |
aff65da0 | 3288 | struct page *page = skb_frag_page(f); |
9e903e08 ED |
3289 | sg_set_page(&sg, page, skb_frag_size(f), f->page_offset); |
3290 | if (crypto_hash_update(desc, &sg, skb_frag_size(f))) | |
49a72dfb AL |
3291 | return 1; |
3292 | } | |
3293 | ||
d7fd1b57 ED |
3294 | skb_walk_frags(skb, frag_iter) |
3295 | if (tcp_md5_hash_skb_data(hp, frag_iter, 0)) | |
3296 | return 1; | |
3297 | ||
49a72dfb AL |
3298 | return 0; |
3299 | } | |
49a72dfb AL |
3300 | EXPORT_SYMBOL(tcp_md5_hash_skb_data); |
3301 | ||
cf533ea5 | 3302 | int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key) |
49a72dfb AL |
3303 | { |
3304 | struct scatterlist sg; | |
3305 | ||
3306 | sg_init_one(&sg, key->key, key->keylen); | |
3307 | return crypto_hash_update(&hp->md5_desc, &sg, key->keylen); | |
3308 | } | |
49a72dfb AL |
3309 | EXPORT_SYMBOL(tcp_md5_hash_key); |
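Putting the helpers together: a caller computes an RFC 2385 signature by bracketing the three hash helpers with tcp_get_md5sig_pool()/tcp_put_md5sig_pool(). The sketch below is modeled on the in-tree IPv4 caller (tcp_v4_md5_hash_skb); the real caller also hashes an address pseudo-header, omitted here for brevity:

    static int example_tcp_md5_hash(u8 *md5_hash,
                                    const struct tcp_md5sig_key *key,
                                    const struct sk_buff *skb)
    {
            const struct tcphdr *th = tcp_hdr(skb);
            struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();

            if (!hp)                        /* no pool: nothing to put */
                    goto clear_hash_noput;

            if (crypto_hash_init(&hp->md5_desc))
                    goto clear_hash;
            if (tcp_md5_hash_header(hp, th))
                    goto clear_hash;
            if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
                    goto clear_hash;
            if (tcp_md5_hash_key(hp, key))
                    goto clear_hash;
            if (crypto_hash_final(&hp->md5_desc, md5_hash))
                    goto clear_hash;

            tcp_put_md5sig_pool();          /* re-enables BH */
            return 0;

    clear_hash:
            tcp_put_md5sig_pool();
    clear_hash_noput:
            memset(md5_hash, 0, 16);
            return 1;
    }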
3310 | ||
cfb6eeb4 YH |
3311 | #endif |
3312 | ||
da5c78c8 WAS |
3313 | /** |
3314 | * Each Responder maintains up to two secret values concurrently for | |
3315 | * efficient secret rollover. Each secret value has 4 states: | |
3316 | * | |
3317 | * Generating. (tcp_secret_generating != tcp_secret_primary) | |
3318 | * Generates new Responder-Cookies, but not yet used for primary | |
3319 | * verification. This is a short-term state, typically lasting only | |
3320 | * one round trip time (RTT). | |
3321 | * | |
3322 | * Primary. (tcp_secret_generating == tcp_secret_primary) | |
3323 | * Used both for generation and primary verification. | |
3324 | * | |
3325 | * Retiring. (tcp_secret_retiring != tcp_secret_secondary) | |
3326 | * Used for verification, until the first failure that can be | |
3327 | * verified by the newer Generating secret. At that time, this | |
3328 | * cookie's state is changed to Secondary, and the Generating | |
3329 | * cookie's state is changed to Primary. This is a short-term state, | |
3330 | * typically lasting only one round trip time (RTT). | |
3331 | * | |
3332 | * Secondary. (tcp_secret_retiring == tcp_secret_secondary) | |
3333 | * Used for secondary verification, after primary verification | |
3334 | * failures. This state lasts no more than twice the Maximum Segment | |
3335 | * Lifetime (2MSL). Then, the secret is discarded. | |
3336 | */ | |
3337 | struct tcp_cookie_secret { | |
3338 | /* The secret is divided into two parts. The digest part is the | |
3339 | * equivalent of previously hashing a secret and saving the state, | |
3340 | * and serves as an initialization vector (IV). The message part | |
3341 | * serves as the trailing secret. | |
3342 | */ | |
3343 | u32 secrets[COOKIE_WORKSPACE_WORDS]; | |
3344 | unsigned long expires; | |
3345 | }; | |
3346 | ||
3347 | #define TCP_SECRET_1MSL (HZ * TCP_PAWS_MSL) | |
3348 | #define TCP_SECRET_2MSL (HZ * TCP_PAWS_MSL * 2) | |
3349 | #define TCP_SECRET_LIFE (HZ * 600) | |
3350 | ||
3351 | static struct tcp_cookie_secret tcp_secret_one; | |
3352 | static struct tcp_cookie_secret tcp_secret_two; | |
3353 | ||
3354 | /* Essentially a circular list, without dynamic allocation. */ | |
3355 | static struct tcp_cookie_secret *tcp_secret_generating; | |
3356 | static struct tcp_cookie_secret *tcp_secret_primary; | |
3357 | static struct tcp_cookie_secret *tcp_secret_retiring; | |
3358 | static struct tcp_cookie_secret *tcp_secret_secondary; | |
3359 | ||
3360 | static DEFINE_SPINLOCK(tcp_secret_locker); | |
3361 | ||
3362 | /* Select a pseudo-random word in the cookie workspace. | |
3363 | */ | |
3364 | static inline u32 tcp_cookie_work(const u32 *ws, const int n) | |
3365 | { | |
3366 | return ws[COOKIE_DIGEST_WORDS + ((COOKIE_MESSAGE_WORDS-1) & ws[n])]; | |
3367 | } | |
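A worked example with hypothetical sizes (5 digest words plus 8 message words, so the mask is 7): the low bits of ws[n] pseudo-randomly select one of the message words that follow the digest. The mask trick requires the message word count to be a power of two.

    #include <stdint.h>

    enum { DIGEST_WORDS = 5, MESSAGE_WORDS = 8 };   /* hypothetical */

    static uint32_t pick_word(const uint32_t *ws, int n)
    {
            /* e.g. ws[n] = 0x1234567b -> 5 + (7 & 0xb) -> ws[8] */
            return ws[DIGEST_WORDS + ((MESSAGE_WORDS - 1) & ws[n])];
    }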
3368 | ||
3369 | /* Fill bakery[COOKIE_WORKSPACE_WORDS] from the generating secret, refreshing it as needed. | |
3370 | * Called in softirq context. | |
3371 | * Returns: 0 for success. | |
3372 | */ | |
3373 | int tcp_cookie_generator(u32 *bakery) | |
3374 | { | |
3375 | unsigned long jiffy = jiffies; | |
3376 | ||
3377 | if (unlikely(time_after_eq(jiffy, tcp_secret_generating->expires))) { | |
3378 | spin_lock_bh(&tcp_secret_locker); | |
3379 | if (!time_after_eq(jiffy, tcp_secret_generating->expires)) { | |
3380 | /* refreshed by another */ | |
3381 | memcpy(bakery, | |
3382 | &tcp_secret_generating->secrets[0], | |
3383 | COOKIE_WORKSPACE_WORDS); | |
3384 | } else { | |
3385 | /* still needs refreshing */ | |
3386 | get_random_bytes(bakery, COOKIE_WORKSPACE_WORDS); | |
3387 | ||
3388 | /* The first time through, we conservatively assume | |
3389 | * that the randomization function isn't strong. But | |
3390 | * this secret's initialization is delayed until the | |
3391 | * last possible moment (first packet arrival). | |
3392 | * Although that time is observable, it varies | |
3393 | * unpredictably. So mash in the most volatile | |
3394 | * clock bits available, and expire the secret | |
3395 | * extra quickly. | |
3396 | */ | |
3397 | if (unlikely(tcp_secret_primary->expires == | |
3398 | tcp_secret_secondary->expires)) { | |
3399 | struct timespec tv; | |
3400 | ||
3401 | getnstimeofday(&tv); | |
3402 | bakery[COOKIE_DIGEST_WORDS+0] ^= | |
3403 | (u32)tv.tv_nsec; | |
3404 | ||
3405 | tcp_secret_secondary->expires = jiffy | |
3406 | + TCP_SECRET_1MSL | |
3407 | + (0x0f & tcp_cookie_work(bakery, 0)); | |
3408 | } else { | |
3409 | tcp_secret_secondary->expires = jiffy | |
3410 | + TCP_SECRET_LIFE | |
3411 | + (0xff & tcp_cookie_work(bakery, 1)); | |
3412 | tcp_secret_primary->expires = jiffy | |
3413 | + TCP_SECRET_2MSL | |
3414 | + (0x1f & tcp_cookie_work(bakery, 2)); | |
3415 | } | |
3416 | memcpy(&tcp_secret_secondary->secrets[0], | |
3417 | bakery, COOKIE_WORKSPACE_WORDS); | |
3418 | ||
3419 | rcu_assign_pointer(tcp_secret_generating, | |
3420 | tcp_secret_secondary); | |
3421 | rcu_assign_pointer(tcp_secret_retiring, | |
3422 | tcp_secret_primary); | |
3423 | /* | |
3424 | * Neither call_rcu() nor synchronize_rcu() needed. | |
3425 | * Retiring data is not freed. It is replaced after | |
3426 | * further (locked) pointer updates, and a quiet time | |
3427 | * (minimum 1MSL, maximum LIFE - 2MSL). | |
3428 | */ | |
3429 | } | |
3430 | spin_unlock_bh(&tcp_secret_locker); | |
3431 | } else { | |
3432 | rcu_read_lock_bh(); | |
3433 | memcpy(bakery, | |
3434 | &rcu_dereference(tcp_secret_generating)->secrets[0], | |
3435 | COOKIE_WORKSPACE_WORDS); | |
3436 | rcu_read_unlock_bh(); | |
3437 | } | |
3438 | return 0; | |
3439 | } | |
3440 | EXPORT_SYMBOL(tcp_cookie_generator); | |
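Reduced to its skeleton, tcp_cookie_generator() is a double-checked expiry test: probe the deadline locklessly, take the lock only when it appears expired, then test again because another CPU may have refreshed the secret while we waited for the lock. Schematically (names ours):

    if (time_after_eq(jiffies, deadline)) {         /* racy fast path */
            spin_lock_bh(&lock);
            if (time_after_eq(jiffies, deadline))
                    refresh_secret();               /* we won the race */
            /* else: someone refreshed it first; just copy it out */
            spin_unlock_bh(&lock);
    }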
3441 | ||
4ac02bab AK |
3442 | void tcp_done(struct sock *sk) |
3443 | { | |
5a5f3a8d | 3444 | if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV) |
63231bdd | 3445 | TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS); |
4ac02bab AK |
3446 | |
3447 | tcp_set_state(sk, TCP_CLOSE); | |
3448 | tcp_clear_xmit_timers(sk); | |
3449 | ||
3450 | sk->sk_shutdown = SHUTDOWN_MASK; | |
3451 | ||
3452 | if (!sock_flag(sk, SOCK_DEAD)) | |
3453 | sk->sk_state_change(sk); | |
3454 | else | |
3455 | inet_csk_destroy_sock(sk); | |
3456 | } | |
3457 | EXPORT_SYMBOL_GPL(tcp_done); | |
3458 | ||
5f8ef48d | 3459 | extern struct tcp_congestion_ops tcp_reno; |
1da177e4 LT |
3460 | |
3461 | static __initdata unsigned long thash_entries; | |
3462 | static int __init set_thash_entries(char *str) | |
3463 | { | |
413c27d8 EZ |
3464 | ssize_t ret; |
3465 | ||
1da177e4 LT |
3466 | if (!str) |
3467 | return 0; | |
413c27d8 EZ |
3468 | |
3469 | ret = kstrtoul(str, 0, &thash_entries); | |
3470 | if (ret) | |
3471 | return 0; | |
3472 | ||
1da177e4 LT |
3473 | return 1; |
3474 | } | |
3475 | __setup("thash_entries=", set_thash_entries); | |
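set_thash_entries() binds the thash_entries= kernel boot parameter to the hash sizing in tcp_init() below; if kstrtoul() rejects the value, the handler returns 0 and the option is ignored. For example, a (hypothetical) boot line such as:

    linux ... thash_entries=131072

requests a 131072-entry established hash instead of the memory-scaled default picked by alloc_large_system_hash().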
3476 | ||
4acb4190 GC |
3477 | void tcp_init_mem(struct net *net) |
3478 | { | |
4acb4190 GC |
3479 | unsigned long limit = nr_free_buffer_pages() / 8; |
3480 | limit = max(limit, 128UL); | |
3481 | net->ipv4.sysctl_tcp_mem[0] = limit / 4 * 3; | |
3482 | net->ipv4.sysctl_tcp_mem[1] = limit; | |
3483 | net->ipv4.sysctl_tcp_mem[2] = net->ipv4.sysctl_tcp_mem[0] * 2; | |
3484 | } | |
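To make the sizing concrete, here is the arithmetic for a hypothetical machine where nr_free_buffer_pages() returns 1048576 (4 GB of 4 KB pages). Note that all three tcp_mem thresholds are counted in pages, not bytes:

    /*
     * limit      = 1048576 / 8     = 131072 pages
     * tcp_mem[0] = 131072 / 4 * 3  =  98304 pages (below this: no pressure)
     * tcp_mem[1] = 131072          = 131072 pages (pressure threshold)
     * tcp_mem[2] =  98304 * 2      = 196608 pages (hard limit)
     */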
3485 | ||
1da177e4 LT |
3486 | void __init tcp_init(void) |
3487 | { | |
3488 | struct sk_buff *skb = NULL; | |
f03d78db | 3489 | unsigned long limit; |
b49960a0 | 3490 | int max_rshare, max_wshare, cnt; |
074b8517 | 3491 | unsigned int i; |
da5c78c8 | 3492 | unsigned long jiffy = jiffies; |
1da177e4 | 3493 | |
1f9e636e | 3494 | BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb)); |
1da177e4 | 3495 | |
1748376b | 3496 | percpu_counter_init(&tcp_sockets_allocated, 0); |
dd24c001 | 3497 | percpu_counter_init(&tcp_orphan_count, 0); |
6e04e021 ACM |
3498 | tcp_hashinfo.bind_bucket_cachep = |
3499 | kmem_cache_create("tcp_bind_bucket", | |
3500 | sizeof(struct inet_bind_bucket), 0, | |
20c2df83 | 3501 | SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); |
1da177e4 | 3502 | |
1da177e4 LT |
3503 | /* Size and allocate the main established and bind bucket |
3504 | * hash tables. | |
3505 | * | |
3506 | * The methodology is similar to that of the buffer cache. | |
3507 | */ | |
6e04e021 | 3508 | tcp_hashinfo.ehash = |
1da177e4 | 3509 | alloc_large_system_hash("TCP established", |
0f7ff927 | 3510 | sizeof(struct inet_ehash_bucket), |
1da177e4 | 3511 | thash_entries, |
4481374c | 3512 | (totalram_pages >= 128 * 1024) ? |
18955cfc | 3513 | 13 : 15, |
9e950efa | 3514 | 0, |
1da177e4 | 3515 | NULL, |
f373b53b | 3516 | &tcp_hashinfo.ehash_mask, |
31fe62b9 | 3517 | 0, |
0ccfe618 | 3518 | thash_entries ? 0 : 512 * 1024); |
f373b53b | 3519 | for (i = 0; i <= tcp_hashinfo.ehash_mask; i++) { |
3ab5aee7 ED |
3520 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i); |
3521 | INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].twchain, i); | |
1da177e4 | 3522 | } |
230140cf ED |
3523 | if (inet_ehash_locks_alloc(&tcp_hashinfo)) |
3524 | panic("TCP: failed to alloc ehash_locks"); | |
6e04e021 | 3525 | tcp_hashinfo.bhash = |
1da177e4 | 3526 | alloc_large_system_hash("TCP bind", |
0f7ff927 | 3527 | sizeof(struct inet_bind_hashbucket), |
f373b53b | 3528 | tcp_hashinfo.ehash_mask + 1, |
4481374c | 3529 | (totalram_pages >= 128 * 1024) ? |
18955cfc | 3530 | 13 : 15, |
9e950efa | 3531 | 0, |
6e04e021 | 3532 | &tcp_hashinfo.bhash_size, |
1da177e4 | 3533 | NULL, |
31fe62b9 | 3534 | 0, |
1da177e4 | 3535 | 64 * 1024); |
074b8517 | 3536 | tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size; |
6e04e021 ACM |
3537 | for (i = 0; i < tcp_hashinfo.bhash_size; i++) { |
3538 | spin_lock_init(&tcp_hashinfo.bhash[i].lock); | |
3539 | INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain); | |
1da177e4 LT |
3540 | } |
3541 | ||
c5ed63d6 ED |
3542 | |
3543 | cnt = tcp_hashinfo.ehash_mask + 1; | |
3544 | ||
3545 | tcp_death_row.sysctl_max_tw_buckets = cnt / 2; | |
3546 | sysctl_tcp_max_orphans = cnt / 2; | |
3547 | sysctl_max_syn_backlog = max(128, cnt / 256); | |
1da177e4 | 3548 | |
4acb4190 | 3549 | tcp_init_mem(&init_net); |
c43b874d | 3550 | /* Set per-socket limits to no more than 1/128 the pressure threshold */ |
5fb84b14 | 3551 | limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7); |
b49960a0 ED |
3552 | max_wshare = min(4UL*1024*1024, limit); |
3553 | max_rshare = min(6UL*1024*1024, limit); | |
7b4f4b5e | 3554 | |
3ab224be | 3555 | sysctl_tcp_wmem[0] = SK_MEM_QUANTUM; |
7b4f4b5e | 3556 | sysctl_tcp_wmem[1] = 16*1024; |
b49960a0 | 3557 | sysctl_tcp_wmem[2] = max(64*1024, max_wshare); |
7b4f4b5e | 3558 | |
3ab224be | 3559 | sysctl_tcp_rmem[0] = SK_MEM_QUANTUM; |
7b4f4b5e | 3560 | sysctl_tcp_rmem[1] = 87380; |
b49960a0 | 3561 | sysctl_tcp_rmem[2] = max(87380, max_rshare); |
1da177e4 | 3562 | |
afd46503 | 3563 | pr_info("Hash tables configured (established %u bind %u)\n", |
058bd4d2 | 3564 | tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size); |
317a76f9 SH |
3565 | |
3566 | tcp_register_congestion_control(&tcp_reno); | |
da5c78c8 WAS |
3567 | |
3568 | memset(&tcp_secret_one.secrets[0], 0, sizeof(tcp_secret_one.secrets)); | |
3569 | memset(&tcp_secret_two.secrets[0], 0, sizeof(tcp_secret_two.secrets)); | |
3570 | tcp_secret_one.expires = jiffy; /* past due */ | |
3571 | tcp_secret_two.expires = jiffy; /* past due */ | |
3572 | tcp_secret_generating = &tcp_secret_one; | |
3573 | tcp_secret_primary = &tcp_secret_one; | |
3574 | tcp_secret_retiring = &tcp_secret_two; | |
3575 | tcp_secret_secondary = &tcp_secret_two; | |
1da177e4 | 3576 | } |
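The per-socket caps computed above follow the 1/128 rule in the comment: shifting page counts left by (PAGE_SHIFT - 7) yields total low-memory bytes divided by 128. On the same hypothetical 4 GB machine with 4 KB pages (PAGE_SHIFT = 12):

    /*
     * limit      = 1048576 << (12 - 7) = 33554432 bytes (32 MB)
     * max_wshare = min(4 MB, 32 MB)    =  4 MB -> sysctl_tcp_wmem[2]
     * max_rshare = min(6 MB, 32 MB)    =  6 MB -> sysctl_tcp_rmem[2]
     */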