// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

#define MPTCP_SAME_STATE TCP_MAX_STATES

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
        struct mptcp_sock msk;
        struct ipv6_pinfo np;
};
#endif

struct mptcp_skb_cb {
        u32 offset;
};

#define MPTCP_SKB_CB(__skb)     ((struct mptcp_skb_cb *)&((__skb)->cb[0]))
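
/* MPTCP_SKB_CB() overlays a private control block on skb->cb[], the same
 * trick TCP plays with TCP_SKB_CB(). skb->cb is only 48 bytes, so the
 * overlay must stay small; a build-time guard along these lines
 * (hypothetical, not part of this file) would catch accidental growth:
 *
 *      BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) >
 *                   sizeof_field(struct sk_buff, cb));
 */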

static struct percpu_counter mptcp_sockets_allocated;

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
        if (!msk->subflow || READ_ONCE(msk->can_ack))
                return NULL;

        return msk->subflow;
}

static bool mptcp_is_tcpsk(struct sock *sk)
{
        struct socket *sock = sk->sk_socket;

        if (unlikely(sk->sk_prot == &tcp_prot)) {
                /* we are being invoked after mptcp_accept() has
                 * accepted a non-mp-capable flow: sk is a tcp_sk,
                 * not an mptcp one.
                 *
                 * Hand the socket over to tcp so all further socket ops
                 * bypass mptcp.
                 */
                sock->ops = &inet_stream_ops;
                return true;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        } else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
                sock->ops = &inet6_stream_ops;
                return true;
#endif
        }

        return false;
}

static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
        sock_owned_by_me((const struct sock *)msk);

        if (likely(!__mptcp_check_fallback(msk)))
                return NULL;

        return msk->first;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow;
        struct sock *sk = (struct sock *)msk;
        struct socket *ssock;
        int err;

        err = mptcp_subflow_create_socket(sk, &ssock);
        if (err)
                return err;

        msk->first = ssock->sk;
        msk->subflow = ssock;
        subflow = mptcp_subflow_ctx(ssock->sk);
        list_add(&subflow->node, &msk->conn_list);
        subflow->request_mptcp = 1;

        /* accept() will wait on the first subflow sk_wq, and we always wake
         * up via msk->sk_socket
         */
        RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);

        return 0;
}
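
/* Move @skb from @ssk's receive queue to the MPTCP-level receive queue of
 * @sk, advancing msk->ack_seq by the mapped length and transferring the
 * memory accounting from the subflow to the MPTCP socket. When possible the
 * skb is coalesced into the current tail of the msk receive queue.
 */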
static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
                             struct sk_buff *skb,
                             unsigned int offset, size_t copy_len)
{
        struct sock *sk = (struct sock *)msk;
        struct sk_buff *tail;

        __skb_unlink(skb, &ssk->sk_receive_queue);

        skb_ext_reset(skb);
        skb_orphan(skb);
        msk->ack_seq += copy_len;

        tail = skb_peek_tail(&sk->sk_receive_queue);
        if (offset == 0 && tail) {
                bool fragstolen;
                int delta;

                if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
                        kfree_skb_partial(skb, fragstolen);
                        atomic_add(delta, &sk->sk_rmem_alloc);
                        sk_mem_charge(sk, delta);
                        return;
                }
        }

        skb_set_owner_r(skb, sk);
        __skb_queue_tail(&sk->sk_receive_queue, skb);
        MPTCP_SKB_CB(skb)->offset = offset;
}

/* both sockets must be locked */
static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
                                    struct sock *ssk)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);

        /* revalidate data sequence number.
         *
         * mptcp_subflow_data_available() is usually called
         * without msk lock. It's unlikely (but possible)
         * that msk->ack_seq has been advanced since the last
         * call found in-sequence data.
         */
        if (likely(dsn == msk->ack_seq))
                return true;

        subflow->data_avail = 0;
        return mptcp_subflow_data_available(ssk);
}

static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
                                           struct sock *ssk,
                                           unsigned int *bytes)
{
        struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
        struct sock *sk = (struct sock *)msk;
        unsigned int moved = 0;
        bool more_data_avail;
        struct tcp_sock *tp;
        bool done = false;

        if (!mptcp_subflow_dsn_valid(msk, ssk)) {
                *bytes = 0;
                return false;
        }

        tp = tcp_sk(ssk);
        do {
                u32 map_remaining, offset;
                u32 seq = tp->copied_seq;
                struct sk_buff *skb;
                bool fin;

                /* try to move as much data as available */
                map_remaining = subflow->map_data_len -
                                mptcp_subflow_get_map_offset(subflow);

                skb = skb_peek(&ssk->sk_receive_queue);
                if (!skb)
                        break;

                if (__mptcp_check_fallback(msk)) {
                        /* if we are running under the workqueue, TCP could
                         * have collapsed skbs between dummy map creation and
                         * now; be sure to adjust the size
                         */
                        map_remaining = skb->len;
                        subflow->map_data_len = skb->len;
                }

                offset = seq - TCP_SKB_CB(skb)->seq;
                fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
                if (fin) {
                        done = true;
                        seq++;
                }

                if (offset < skb->len) {
                        size_t len = skb->len - offset;

                        if (tp->urg_data)
                                done = true;

                        __mptcp_move_skb(msk, ssk, skb, offset, len);
                        seq += len;
                        moved += len;

                        if (WARN_ON_ONCE(map_remaining < len))
                                break;
                } else {
                        WARN_ON_ONCE(!fin);
                        sk_eat_skb(ssk, skb);
                        done = true;
                }

                WRITE_ONCE(tp->copied_seq, seq);
                more_data_avail = mptcp_subflow_data_available(ssk);

                if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
                        done = true;
                        break;
                }
        } while (more_data_avail);

        *bytes = moved;

        return done;
}

/* In most cases we will be able to lock the mptcp socket. If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
        struct sock *sk = (struct sock *)msk;
        unsigned int moved = 0;

        if (READ_ONCE(sk->sk_lock.owned))
                return false;

        if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
                return false;

        /* must re-check after taking the lock */
        if (!READ_ONCE(sk->sk_lock.owned))
                __mptcp_move_skbs_from_subflow(msk, ssk, &moved);

        spin_unlock_bh(&sk->sk_lock.slock);

        return moved > 0;
}
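
/* The owned-check / spin_trylock_bh() / re-check sequence above means skbs
 * are moved from softirq context only while no process-context owner holds
 * the msk; when that fails, the caller must arrange a retry, which is what
 * mptcp_data_ready() below does via TCP_DELACK_TIMER_DEFERRED so that
 * release_cb can finish the job.
 */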

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
        struct mptcp_sock *msk = mptcp_sk(sk);

        set_bit(MPTCP_DATA_READY, &msk->flags);

        if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
            move_skbs_to_msk(msk, ssk))
                goto wake;

        /* don't schedule if mptcp sk is (still) over limit */
        if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
                goto wake;

        /* mptcp socket is owned, release_cb should retry */
        if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
                              &sk->sk_tsq_flags)) {
                sock_hold(sk);

                /* need to try again, it's possible release_cb() has already
                 * been called after the test_and_set_bit() above.
                 */
                move_skbs_to_msk(msk, ssk);
        }
wake:
        sk->sk_data_ready(sk);
}

static void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
        if (likely(list_empty(&msk->join_list)))
                return;

        spin_lock_bh(&msk->join_list_lock);
        list_splice_tail_init(&msk->join_list, &msk->conn_list);
        spin_unlock_bh(&msk->join_list_lock);
}

static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
{
        long tout = ssk && inet_csk(ssk)->icsk_pending ?
                    inet_csk(ssk)->icsk_timeout - jiffies : 0;

        if (tout <= 0)
                tout = mptcp_sk(sk)->timer_ival;
        mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

static bool mptcp_timer_pending(struct sock *sk)
{
        return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);
        unsigned long tout;

        /* should never be called with mptcp level timer cleared */
        tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
        if (WARN_ON_ONCE(!tout))
                tout = TCP_RTO_MIN;
        sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}

void mptcp_data_acked(struct sock *sk)
{
        mptcp_reset_timer(sk);

        if (!sk_stream_is_writeable(sk) &&
            schedule_work(&mptcp_sk(sk)->work))
                sock_hold(sk);
}

void mptcp_subflow_eof(struct sock *sk)
{
        struct mptcp_sock *msk = mptcp_sk(sk);

        if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
            schedule_work(&msk->work))
                sock_hold(sk);
}

static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow;
        struct sock *sk = (struct sock *)msk;
        int receivers = 0;

        mptcp_for_each_subflow(msk, subflow)
                receivers += !subflow->rx_eof;

        if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
                /* hopefully temporary hack: propagate shutdown status
                 * to msk, when all subflows agree on it
                 */
                sk->sk_shutdown |= RCV_SHUTDOWN;

                smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
                set_bit(MPTCP_DATA_READY, &msk->flags);
                sk->sk_data_ready(sk);
        }
}

static void mptcp_stop_timer(struct sock *sk)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
        mptcp_sk(sk)->timer_ival = 0;
}

static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
        const struct sock *sk = (const struct sock *)msk;

        if (!msk->cached_ext)
                msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);

        return !!msk->cached_ext;
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow;
        struct sock *sk = (struct sock *)msk;

        sock_owned_by_me(sk);

        mptcp_for_each_subflow(msk, subflow) {
                if (subflow->data_avail)
                        return mptcp_subflow_tcp_sock(subflow);
        }

        return NULL;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
                                      const struct sk_buff *skb,
                                      const struct mptcp_ext *mpext)
{
        if (!tcp_skb_can_collapse_to(skb))
                return false;

        /* can collapse only if MPTCP level sequence is in order */
        return mpext && mpext->data_seq + mpext->data_len == write_seq;
}

static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
                                       const struct page_frag *pfrag,
                                       const struct mptcp_data_frag *df)
{
        return df && pfrag->page == df->page &&
                df->data_seq + df->data_len == msk->write_seq;
}

static void dfrag_uncharge(struct sock *sk, int len)
{
        sk_mem_uncharge(sk, len);
        sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
        int len = dfrag->data_len + dfrag->overhead;

        list_del(&dfrag->list);
        dfrag_uncharge(sk, len);
        put_page(dfrag->page);
}
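
/* Drop rtx queue entries that are fully acknowledged at the MPTCP level
 * (msk->snd_una), trim the head fragment when it is only partially acked,
 * and release the corresponding write memory.
 */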
static void mptcp_clean_una(struct sock *sk)
{
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_data_frag *dtmp, *dfrag;
        bool cleaned = false;
        u64 snd_una;

        /* on fallback we just need to ignore snd_una, as this is really
         * plain TCP
         */
        if (__mptcp_check_fallback(msk))
                atomic64_set(&msk->snd_una, msk->write_seq);
        snd_una = atomic64_read(&msk->snd_una);

        list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
                if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
                        break;

                dfrag_clear(sk, dfrag);
                cleaned = true;
        }

        dfrag = mptcp_rtx_head(sk);
        if (dfrag && after64(snd_una, dfrag->data_seq)) {
                u64 delta = snd_una - dfrag->data_seq;

                if (WARN_ON_ONCE(delta > dfrag->data_len))
                        goto out;

                dfrag->data_seq += delta;
                dfrag->offset += delta;
                dfrag->data_len -= delta;

                dfrag_uncharge(sk, delta);
                cleaned = true;
        }

out:
        if (cleaned) {
                sk_mem_reclaim_partial(sk);

                /* Only wake up writers if a subflow is ready */
                if (test_bit(MPTCP_SEND_SPACE, &msk->flags))
                        sk_stream_write_space(sk);
        }
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
        if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
                                        pfrag, sk->sk_allocation)))
                return true;

        sk->sk_prot->enter_memory_pressure(sk);
        sk_stream_moderate_sndbuf(sk);
        return false;
}

static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
                      int orig_offset)
{
        int offset = ALIGN(orig_offset, sizeof(long));
        struct mptcp_data_frag *dfrag;

        dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
        dfrag->data_len = 0;
        dfrag->data_seq = msk->write_seq;
        dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
        dfrag->offset = offset + sizeof(struct mptcp_data_frag);
        dfrag->page = pfrag->page;

        return dfrag;
}
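
/* Resulting page-frag layout (illustrative sketch; the padding depends on
 * how orig_offset rounds up to the next long-aligned boundary):
 *
 *      page_to_virt(pfrag->page)
 *      |.. used ..|pad|struct mptcp_data_frag|user data ...............|
 *                  ^ offset (long-aligned)    ^ dfrag->offset
 *
 * The descriptor lives in the same page as the data it describes;
 * dfrag->overhead accounts for the padding plus the descriptor itself.
 */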

static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
                              struct msghdr *msg, struct mptcp_data_frag *dfrag,
                              long *timeo, int *pmss_now,
                              int *ps_goal)
{
        int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
        bool dfrag_collapsed, can_collapse = false;
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_ext *mpext = NULL;
        bool retransmission = !!dfrag;
        struct sk_buff *skb, *tail;
        struct page_frag *pfrag;
        struct page *page;
        u64 *write_seq;
        size_t psize;

        /* use the mptcp page cache so that we can easily move the data
         * from one substream to another, but do per subflow memory accounting.
         * Note: pfrag is used only if !retransmission, but the compiler is
         * fooled into a warning if we don't init here
         */
        pfrag = sk_page_frag(sk);
        if (!retransmission) {
                write_seq = &msk->write_seq;
                page = pfrag->page;
        } else {
                write_seq = &dfrag->data_seq;
                page = dfrag->page;
        }

        /* compute copy limit */
        mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
        *pmss_now = mss_now;
        *ps_goal = size_goal;
        avail_size = size_goal;
        skb = tcp_write_queue_tail(ssk);
        if (skb) {
                mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

                /* Limit the write to the size available in the
                 * current skb, if any, so that we create at most a new skb.
                 * Explicitly tells TCP internals to avoid collapsing on later
                 * queue management operation, to avoid breaking the ext <->
                 * SSN association set here
                 */
                can_collapse = (size_goal - skb->len > 0) &&
                        mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
                if (!can_collapse)
                        TCP_SKB_CB(skb)->eor = 1;
                else
                        avail_size = size_goal - skb->len;
        }

        if (!retransmission) {
                /* reuse tail pfrag, if possible, or carve a new one from the
                 * page allocator
                 */
                dfrag = mptcp_rtx_tail(sk);
                offset = pfrag->offset;
                dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
                if (!dfrag_collapsed) {
                        dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
                        offset = dfrag->offset;
                        frag_truesize = dfrag->overhead;
                }
                psize = min_t(size_t, pfrag->size - offset, avail_size);

                /* Copy to page */
                pr_debug("left=%zu", msg_data_left(msg));
                psize = copy_page_from_iter(pfrag->page, offset,
                                            min_t(size_t, msg_data_left(msg),
                                                  psize),
                                            &msg->msg_iter);
                pr_debug("left=%zu", msg_data_left(msg));
                if (!psize)
                        return -EINVAL;

                if (!sk_wmem_schedule(sk, psize + dfrag->overhead))
                        return -ENOMEM;
        } else {
                offset = dfrag->offset;
                psize = min_t(size_t, dfrag->data_len, avail_size);
        }

        /* tell the TCP stack to delay the push so that we can safely
         * access the skb after the sendpages call
         */
        ret = do_tcp_sendpages(ssk, page, offset, psize,
                               msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
        if (ret <= 0)
                return ret;

        frag_truesize += ret;
        if (!retransmission) {
                if (unlikely(ret < psize))
                        iov_iter_revert(&msg->msg_iter, psize - ret);

                /* send successful, keep track of sent data for mptcp-level
                 * retransmission
                 */
                dfrag->data_len += ret;
                if (!dfrag_collapsed) {
                        get_page(dfrag->page);
                        list_add_tail(&dfrag->list, &msk->rtx_queue);
                        sk_wmem_queued_add(sk, frag_truesize);
                } else {
                        sk_wmem_queued_add(sk, ret);
                }

                /* charge data on mptcp rtx queue to the master socket
                 * Note: we charge such data both to sk and ssk
                 */
                sk->sk_forward_alloc -= frag_truesize;
        }

        /* if the tail skb extension is still the cached one, collapsing
         * really happened. Note: we can't check for 'same skb' as the sk_buff
         * hdr on tail can be transmitted, freed and re-allocated by the
         * do_tcp_sendpages() call
         */
        tail = tcp_write_queue_tail(ssk);
        if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
                WARN_ON_ONCE(!can_collapse);
                mpext->data_len += ret;
                goto out;
        }

        skb = tcp_write_queue_tail(ssk);
        mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
        msk->cached_ext = NULL;

        memset(mpext, 0, sizeof(*mpext));
        mpext->data_seq = *write_seq;
        mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
        mpext->data_len = ret;
        mpext->use_map = 1;
        mpext->dsn64 = 1;

        pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
                 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
                 mpext->dsn64);

out:
        if (!retransmission)
                pfrag->offset += frag_truesize;
        *write_seq += ret;
        mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

        return ret;
}
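
/* The DSS mapping filled in above covers exactly the bytes just handed to
 * do_tcp_sendpages(): data_seq is the MPTCP-level sequence number,
 * subflow_seq the subflow-relative one, data_len the mapped length.
 * Schematically, the first 1000-byte write on a fresh connection yields
 * something like:
 *
 *      mpext = { .data_seq = idsn + 1, .subflow_seq = 1,
 *                .data_len = 1000, .use_map = 1, .dsn64 = 1 };
 *
 * and the collapse path merely grows data_len of the existing mapping
 * instead of attaching a new extension.
 */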

static void mptcp_nospace(struct mptcp_sock *msk, struct socket *sock)
{
        clear_bit(MPTCP_SEND_SPACE, &msk->flags);
        smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */

        /* enables sk->write_space() callbacks */
        set_bit(SOCK_NOSPACE, &sock->flags);
}
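
/* First-available packet scheduler: pick the first non-backup subflow with
 * free send space, remember the first backup subflow as a fallback, and
 * return NULL (after flagging nospace) as soon as any subflow runs out of
 * send memory, so the caller knows to wait.
 */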
static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow;
        struct sock *backup = NULL;

        sock_owned_by_me((const struct sock *)msk);

        if (!mptcp_ext_cache_refill(msk))
                return NULL;

        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

                if (!sk_stream_memory_free(ssk)) {
                        struct socket *sock = ssk->sk_socket;

                        if (sock)
                                mptcp_nospace(msk, sock);

                        return NULL;
                }

                if (subflow->backup) {
                        if (!backup)
                                backup = ssk;

                        continue;
                }

                return ssk;
        }

        return backup;
}

static void ssk_check_wmem(struct mptcp_sock *msk, struct sock *ssk)
{
        struct socket *sock;

        if (likely(sk_stream_is_writeable(ssk)))
                return;

        sock = READ_ONCE(ssk->sk_socket);
        if (sock)
                mptcp_nospace(msk, sock);
}

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
        int mss_now = 0, size_goal = 0, ret = 0;
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct page_frag *pfrag;
        size_t copied = 0;
        struct sock *ssk;
        bool tx_ok;
        long timeo;

        if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
                return -EOPNOTSUPP;

        lock_sock(sk);

        timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

        if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
                ret = sk_stream_wait_connect(sk, &timeo);
                if (ret)
                        goto out;
        }

        pfrag = sk_page_frag(sk);
restart:
        mptcp_clean_una(sk);

        if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
                ret = -EPIPE;
                goto out;
        }

wait_for_sndbuf:
        __mptcp_flush_join_list(msk);
        ssk = mptcp_subflow_get_send(msk);
        while (!sk_stream_memory_free(sk) ||
               !ssk ||
               !mptcp_page_frag_refill(ssk, pfrag)) {
                if (ssk) {
                        /* make sure retransmit timer is
                         * running before we wait for memory.
                         *
                         * The retransmit timer might be needed
                         * to make the peer send an up-to-date
                         * MPTCP Ack.
                         */
                        mptcp_set_timeout(sk, ssk);
                        if (!mptcp_timer_pending(sk))
                                mptcp_reset_timer(sk);
                }

                ret = sk_stream_wait_memory(sk, &timeo);
                if (ret)
                        goto out;

                mptcp_clean_una(sk);

                ssk = mptcp_subflow_get_send(msk);
                if (list_empty(&msk->conn_list)) {
                        ret = -ENOTCONN;
                        goto out;
                }
        }

        pr_debug("conn_list->subflow=%p", ssk);

        lock_sock(ssk);
        tx_ok = msg_data_left(msg);
        while (tx_ok) {
                ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
                                         &size_goal);
                if (ret < 0) {
                        if (ret == -EAGAIN && timeo > 0) {
                                mptcp_set_timeout(sk, ssk);
                                release_sock(ssk);
                                goto restart;
                        }
                        break;
                }

                copied += ret;

                tx_ok = msg_data_left(msg);
                if (!tx_ok)
                        break;

                if (!sk_stream_memory_free(ssk) ||
                    !mptcp_page_frag_refill(ssk, pfrag) ||
                    !mptcp_ext_cache_refill(msk)) {
                        set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
                        tcp_push(ssk, msg->msg_flags, mss_now,
                                 tcp_sk(ssk)->nonagle, size_goal);
                        mptcp_set_timeout(sk, ssk);
                        release_sock(ssk);
                        goto restart;
                }

                /* memory is charged to mptcp level socket as well, i.e.
                 * if msg is very large, mptcp socket may run out of buffer
                 * space. mptcp_clean_una() will release data that has
                 * been acked at mptcp level in the meantime, so there is
                 * a good chance we can continue sending data right away.
                 *
                 * Normally, when the tcp subflow can accept more data, then
                 * so can the MPTCP socket. However, we need to cope with
                 * peers that might lag behind in their MPTCP-level
                 * acknowledgements, i.e. data might have been acked at
                 * tcp level only. So, we must also check the MPTCP socket
                 * limits before we send more data.
                 */
                if (unlikely(!sk_stream_memory_free(sk))) {
                        tcp_push(ssk, msg->msg_flags, mss_now,
                                 tcp_sk(ssk)->nonagle, size_goal);
                        mptcp_clean_una(sk);
                        if (!sk_stream_memory_free(sk)) {
                                /* can't send more for now, need to wait for
                                 * MPTCP-level ACKs from peer.
                                 *
                                 * Wakeup will happen via mptcp_clean_una().
                                 */
                                mptcp_set_timeout(sk, ssk);
                                release_sock(ssk);
                                goto wait_for_sndbuf;
                        }
                }
        }

        mptcp_set_timeout(sk, ssk);
        if (copied) {
                ret = copied;
                tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
                         size_goal);

                /* start the timer, if it's not pending */
                if (!mptcp_timer_pending(sk))
                        mptcp_reset_timer(sk);
        }

        ssk_check_wmem(msk, ssk);
        release_sock(ssk);
out:
        release_sock(sk);
        return ret;
}
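
/* User-space view (illustrative sketch, not part of this file): the
 * sendmsg() path above is reached through an IPPROTO_MPTCP socket and a
 * plain send(); flags other than MSG_MORE, MSG_DONTWAIT and MSG_NOSIGNAL
 * are rejected with -EOPNOTSUPP:
 *
 *      int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);
 *      connect(fd, (struct sockaddr *)&addr, sizeof(addr));
 *      send(fd, buf, buf_len, 0);
 */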

static void mptcp_wait_data(struct sock *sk, long *timeo)
{
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
        struct mptcp_sock *msk = mptcp_sk(sk);

        add_wait_queue(sk_sleep(sk), &wait);
        sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

        sk_wait_event(sk, timeo,
                      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

        sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
        remove_wait_queue(sk_sleep(sk), &wait);
}

static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
                                struct msghdr *msg,
                                size_t len)
{
        struct sock *sk = (struct sock *)msk;
        struct sk_buff *skb;
        int copied = 0;

        while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
                u32 offset = MPTCP_SKB_CB(skb)->offset;
                u32 data_len = skb->len - offset;
                u32 count = min_t(size_t, len - copied, data_len);
                int err;

                err = skb_copy_datagram_msg(skb, offset, msg, count);
                if (unlikely(err < 0)) {
                        if (!copied)
                                return err;
                        break;
                }

                copied += count;

                if (count < data_len) {
                        MPTCP_SKB_CB(skb)->offset += count;
                        break;
                }

                __skb_unlink(skb, &sk->sk_receive_queue);
                __kfree_skb(skb);

                if (copied >= len)
                        break;
        }

        return copied;
}
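
/* Partial reads are tracked entirely through MPTCP_SKB_CB(skb)->offset: an
 * skb stays on the msk receive queue until every byte has been copied, so
 * e.g. draining a 2000-byte skb with two 1000-byte recv() calls leaves
 * offset at 1000 after the first call and frees the skb on the second.
 */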

/* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
 *
 * Only difference: Use highest rtt estimate of the subflows in use.
 */
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
{
        struct mptcp_subflow_context *subflow;
        struct sock *sk = (struct sock *)msk;
        u32 time, advmss = 1;
        u64 rtt_us, mstamp;

        sock_owned_by_me(sk);

        if (copied <= 0)
                return;

        msk->rcvq_space.copied += copied;

        mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
        time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);

        rtt_us = msk->rcvq_space.rtt_us;
        if (rtt_us && time < (rtt_us >> 3))
                return;

        rtt_us = 0;
        mptcp_for_each_subflow(msk, subflow) {
                const struct tcp_sock *tp;
                u64 sf_rtt_us;
                u32 sf_advmss;

                tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));

                sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
                sf_advmss = READ_ONCE(tp->advmss);

                rtt_us = max(sf_rtt_us, rtt_us);
                advmss = max(sf_advmss, advmss);
        }

        msk->rcvq_space.rtt_us = rtt_us;
        if (time < (rtt_us >> 3) || rtt_us == 0)
                return;

        if (msk->rcvq_space.copied <= msk->rcvq_space.space)
                goto new_measure;

        if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
            !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
                int rcvmem, rcvbuf;
                u64 rcvwin, grow;

                rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;

                grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);

                do_div(grow, msk->rcvq_space.space);
                rcvwin += (grow << 1);

                rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
                while (tcp_win_from_space(sk, rcvmem) < advmss)
                        rcvmem += 128;

                do_div(rcvwin, advmss);
                rcvbuf = min_t(u64, rcvwin * rcvmem,
                               sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);

                if (rcvbuf > sk->sk_rcvbuf) {
                        u32 window_clamp;

                        window_clamp = tcp_win_from_space(sk, rcvbuf);
                        WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);

                        /* Make subflows follow along. If we do not do this, we
                         * get drops at subflow level if skbs can't be moved to
                         * the mptcp rx queue fast enough (announced rcv_win can
                         * exceed ssk->sk_rcvbuf).
                         */
                        mptcp_for_each_subflow(msk, subflow) {
                                struct sock *ssk;

                                ssk = mptcp_subflow_tcp_sock(subflow);
                                WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
                                tcp_sk(ssk)->window_clamp = window_clamp;
                        }
                }
        }

        msk->rcvq_space.space = msk->rcvq_space.copied;
new_measure:
        msk->rcvq_space.copied = 0;
        msk->rcvq_space.time = mstamp;
}
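
/* Sizing sketch for the autotuning above: with 'copied' bytes consumed per
 * measurement window, the target advertised window is
 *
 *      rcvwin = 2 * copied + 16 * advmss
 *
 * further grown by the relative increase over the previous measurement,
 * then converted into a buffer size through the per-skb overhead estimate
 * rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER) and clamped to
 * tcp_rmem[2]. E.g. copied = 64KB with advmss = 1460 gives a ~154KB window
 * before the overhead scaling.
 */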

static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
        unsigned int moved = 0;
        bool done;

        do {
                struct sock *ssk = mptcp_subflow_recv_lookup(msk);

                if (!ssk)
                        break;

                lock_sock(ssk);
                done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
                release_sock(ssk);
        } while (!done);

        return moved > 0;
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
                         int nonblock, int flags, int *addr_len)
{
        struct mptcp_sock *msk = mptcp_sk(sk);
        int copied = 0;
        int target;
        long timeo;

        if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
                return -EOPNOTSUPP;

        lock_sock(sk);
        timeo = sock_rcvtimeo(sk, nonblock);

        len = min_t(size_t, len, INT_MAX);
        target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
        __mptcp_flush_join_list(msk);

        while (len > (size_t)copied) {
                int bytes_read;

                bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
                if (unlikely(bytes_read < 0)) {
                        if (!copied)
                                copied = bytes_read;
                        goto out_err;
                }

                copied += bytes_read;

                if (skb_queue_empty(&sk->sk_receive_queue) &&
                    __mptcp_move_skbs(msk))
                        continue;

                /* only the master socket status is relevant here. The exit
                 * conditions mirror closely tcp_recvmsg()
                 */
                if (copied >= target)
                        break;

                if (copied) {
                        if (sk->sk_err ||
                            sk->sk_state == TCP_CLOSE ||
                            (sk->sk_shutdown & RCV_SHUTDOWN) ||
                            !timeo ||
                            signal_pending(current))
                                break;
                } else {
                        if (sk->sk_err) {
                                copied = sock_error(sk);
                                break;
                        }

                        if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
                                mptcp_check_for_eof(msk);

                        if (sk->sk_shutdown & RCV_SHUTDOWN)
                                break;

                        if (sk->sk_state == TCP_CLOSE) {
                                copied = -ENOTCONN;
                                break;
                        }

                        if (!timeo) {
                                copied = -EAGAIN;
                                break;
                        }

                        if (signal_pending(current)) {
                                copied = sock_intr_errno(timeo);
                                break;
                        }
                }

                pr_debug("block timeout %ld", timeo);
                mptcp_wait_data(sk, &timeo);
        }

        if (skb_queue_empty(&sk->sk_receive_queue)) {
                /* entire backlog drained, clear DATA_READY. */
                clear_bit(MPTCP_DATA_READY, &msk->flags);

                /* .. race-breaker: ssk might have gotten new data
                 * after last __mptcp_move_skbs() returned false.
                 */
                if (unlikely(__mptcp_move_skbs(msk)))
                        set_bit(MPTCP_DATA_READY, &msk->flags);
        } else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
                /* data to read but mptcp_wait_data() cleared DATA_READY */
                set_bit(MPTCP_DATA_READY, &msk->flags);
        }
out_err:
        mptcp_rcv_space_adjust(msk, copied);

        release_sock(sk);
        return copied;
}

static void mptcp_retransmit_handler(struct sock *sk)
{
        struct mptcp_sock *msk = mptcp_sk(sk);

        if (atomic64_read(&msk->snd_una) == msk->write_seq) {
                mptcp_stop_timer(sk);
        } else {
                set_bit(MPTCP_WORK_RTX, &msk->flags);
                if (schedule_work(&msk->work))
                        sock_hold(sk);
        }
}

static void mptcp_retransmit_timer(struct timer_list *t)
{
        struct inet_connection_sock *icsk = from_timer(icsk, t,
                                                       icsk_retransmit_timer);
        struct sock *sk = &icsk->icsk_inet.sk;

        bh_lock_sock(sk);
        if (!sock_owned_by_user(sk)) {
                mptcp_retransmit_handler(sk);
        } else {
                /* delegate our work to tcp_release_cb() */
                if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
                                      &sk->sk_tsq_flags))
                        sock_hold(sk);
        }
        bh_unlock_sock(sk);
        sock_put(sk);
}

/* Find an idle subflow. Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
{
        struct mptcp_subflow_context *subflow;
        struct sock *backup = NULL;

        sock_owned_by_me((const struct sock *)msk);

        mptcp_for_each_subflow(msk, subflow) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

                /* still data outstanding at TCP level? Don't retransmit. */
                if (!tcp_write_queue_empty(ssk))
                        return NULL;

                if (subflow->backup) {
                        if (!backup)
                                backup = ssk;
                        continue;
                }

                return ssk;
        }

        return backup;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
                              struct mptcp_subflow_context *subflow,
                              long timeout)
{
        struct socket *sock = READ_ONCE(ssk->sk_socket);

        list_del(&subflow->node);

        if (sock && sock != sk->sk_socket) {
                /* outgoing subflow */
                sock_release(sock);
        } else {
                /* incoming subflow */
                tcp_close(ssk, timeout);
        }
}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
        return 0;
}

static void pm_work(struct mptcp_sock *msk)
{
        struct mptcp_pm_data *pm = &msk->pm;

        spin_lock_bh(&msk->pm.lock);

        pr_debug("msk=%p status=%x", msk, pm->status);
        if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
                pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
                mptcp_pm_nl_add_addr_received(msk);
        }
        if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
                pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
                mptcp_pm_nl_fully_established(msk);
        }
        if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
                pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
                mptcp_pm_nl_subflow_established(msk);
        }

        spin_unlock_bh(&msk->pm.lock);
}

static void mptcp_worker(struct work_struct *work)
{
        struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
        struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
        int orig_len, orig_offset, mss_now = 0, size_goal = 0;
        struct mptcp_data_frag *dfrag;
        u64 orig_write_seq;
        size_t copied = 0;
        struct msghdr msg;
        long timeo = 0;

        lock_sock(sk);
        mptcp_clean_una(sk);
        __mptcp_flush_join_list(msk);
        __mptcp_move_skbs(msk);

        if (msk->pm.status)
                pm_work(msk);

        if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
                mptcp_check_for_eof(msk);

        if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
                goto unlock;

        dfrag = mptcp_rtx_head(sk);
        if (!dfrag)
                goto unlock;

        if (!mptcp_ext_cache_refill(msk))
                goto reset_unlock;

        ssk = mptcp_subflow_get_retrans(msk);
        if (!ssk)
                goto reset_unlock;

        lock_sock(ssk);

        msg.msg_flags = MSG_DONTWAIT;
        orig_len = dfrag->data_len;
        orig_offset = dfrag->offset;
        orig_write_seq = dfrag->data_seq;
        while (dfrag->data_len > 0) {
                int ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo,
                                             &mss_now, &size_goal);
                if (ret < 0)
                        break;

                MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
                copied += ret;
                dfrag->data_len -= ret;
                dfrag->offset += ret;

                if (!mptcp_ext_cache_refill(msk))
                        break;
        }
        if (copied)
                tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
                         size_goal);

        dfrag->data_seq = orig_write_seq;
        dfrag->offset = orig_offset;
        dfrag->data_len = orig_len;

        mptcp_set_timeout(sk, ssk);
        release_sock(ssk);

reset_unlock:
        if (!mptcp_timer_pending(sk))
                mptcp_reset_timer(sk);

unlock:
        release_sock(sk);
        sock_put(sk);
}

static int __mptcp_init_sock(struct sock *sk)
{
        struct mptcp_sock *msk = mptcp_sk(sk);

        spin_lock_init(&msk->join_list_lock);

        INIT_LIST_HEAD(&msk->conn_list);
        INIT_LIST_HEAD(&msk->join_list);
        INIT_LIST_HEAD(&msk->rtx_queue);
        __set_bit(MPTCP_SEND_SPACE, &msk->flags);
        INIT_WORK(&msk->work, mptcp_worker);

        msk->first = NULL;
        inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;

        mptcp_pm_data_init(msk);

        /* re-use the csk retrans timer for MPTCP-level retrans */
        timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0);

        return 0;
}

static int mptcp_init_sock(struct sock *sk)
{
        struct net *net = sock_net(sk);
        int ret;

        if (!mptcp_is_enabled(net))
                return -ENOPROTOOPT;

        if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net))
                return -ENOMEM;

        ret = __mptcp_init_sock(sk);
        if (ret)
                return ret;

        ret = __mptcp_socket_create(mptcp_sk(sk));
        if (ret)
                return ret;

        sk_sockets_allocated_inc(sk);
        sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1];
        sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[2];

        return 0;
}

static void __mptcp_clear_xmit(struct sock *sk)
{
        struct mptcp_sock *msk = mptcp_sk(sk);
        struct mptcp_data_frag *dtmp, *dfrag;

        sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer);

        list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list)
                dfrag_clear(sk, dfrag);
}

static void mptcp_cancel_work(struct sock *sk)
{
        struct mptcp_sock *msk = mptcp_sk(sk);

        if (cancel_work_sync(&msk->work))
                sock_put(sk);
}

static void mptcp_subflow_shutdown(struct sock *ssk, int how,
                                   bool data_fin_tx_enable, u64 data_fin_tx_seq)
{
        lock_sock(ssk);

        switch (ssk->sk_state) {
        case TCP_LISTEN:
                if (!(how & RCV_SHUTDOWN))
                        break;
                /* fall through */
        case TCP_SYN_SENT:
                tcp_disconnect(ssk, O_NONBLOCK);
                break;
        default:
                if (data_fin_tx_enable) {
                        struct mptcp_subflow_context *subflow;

                        subflow = mptcp_subflow_ctx(ssk);
                        subflow->data_fin_tx_seq = data_fin_tx_seq;
                        subflow->data_fin_tx_enable = 1;
                }

                ssk->sk_shutdown |= how;
                tcp_shutdown(ssk, how);
                break;
        }

        release_sock(ssk);
}

static void mptcp_close(struct sock *sk, long timeout)
{
        struct mptcp_subflow_context *subflow, *tmp;
        struct mptcp_sock *msk = mptcp_sk(sk);
        LIST_HEAD(conn_list);
        u64 data_fin_tx_seq;

        lock_sock(sk);

        inet_sk_state_store(sk, TCP_CLOSE);

        /* be sure to always acquire the join list lock, to sync vs
         * mptcp_finish_join().
         */
        spin_lock_bh(&msk->join_list_lock);
        list_splice_tail_init(&msk->join_list, &msk->conn_list);
        spin_unlock_bh(&msk->join_list_lock);
        list_splice_init(&msk->conn_list, &conn_list);

        data_fin_tx_seq = msk->write_seq;

        __mptcp_clear_xmit(sk);

        release_sock(sk);

        list_for_each_entry_safe(subflow, tmp, &conn_list, node) {
                struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

                subflow->data_fin_tx_seq = data_fin_tx_seq;
                subflow->data_fin_tx_enable = 1;
                __mptcp_close_ssk(sk, ssk, subflow, timeout);
        }

        mptcp_cancel_work(sk);

        __skb_queue_purge(&sk->sk_receive_queue);

        sk_common_release(sk);
}

static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk)
{
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        const struct ipv6_pinfo *ssk6 = inet6_sk(ssk);
        struct ipv6_pinfo *msk6 = inet6_sk(msk);

        msk->sk_v6_daddr = ssk->sk_v6_daddr;
        msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr;

        if (msk6 && ssk6) {
                msk6->saddr = ssk6->saddr;
                msk6->flow_label = ssk6->flow_label;
        }
#endif

        inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num;
        inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport;
        inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport;
        inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr;
        inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr;
        inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr;
}

static int mptcp_disconnect(struct sock *sk, int flags)
{
        /* Should never be called.
         * inet_stream_connect() calls ->disconnect, but that
         * refers to the subflow socket, not the mptcp one.
         */
        WARN_ON_ONCE(1);
        return 0;
}

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
{
        unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo);

        return (struct ipv6_pinfo *)(((u8 *)sk) + offset);
}
#endif
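
/* The pointer arithmetic above leans on the mptcp6_sock layout declared at
 * the top of this file: the ipv6_pinfo area always sits at the tail of the
 * allocation, so for a clone whose sk_family is AF_INET6 the pinet6 pointer
 * can be recomputed from sk alone (schematically):
 *
 *      sk --> struct mptcp6_sock { struct mptcp_sock msk;
 *                                  struct ipv6_pinfo np; }
 *      mptcp_inet6_sk(sk) == &((struct mptcp6_sock *)sk)->np
 */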

struct sock *mptcp_sk_clone(const struct sock *sk,
                            const struct mptcp_options_received *mp_opt,
                            struct request_sock *req)
{
        struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req);
        struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
        struct mptcp_sock *msk;
        u64 ack_seq;

        if (!nsk)
                return NULL;

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
        if (nsk->sk_family == AF_INET6)
                inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk);
#endif

        __mptcp_init_sock(nsk);

        msk = mptcp_sk(nsk);
        msk->local_key = subflow_req->local_key;
        msk->token = subflow_req->token;
        msk->subflow = NULL;
        WRITE_ONCE(msk->fully_established, false);

        msk->write_seq = subflow_req->idsn + 1;
        atomic64_set(&msk->snd_una, msk->write_seq);
        if (mp_opt->mp_capable) {
                msk->can_ack = true;
                msk->remote_key = mp_opt->sndr_key;
                mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq);
                ack_seq++;
                msk->ack_seq = ack_seq;
        }

        sock_reset_flag(nsk, SOCK_RCU_FREE);
        /* will be fully established after successful MPC subflow creation */
        inet_sk_state_store(nsk, TCP_SYN_RECV);
        bh_unlock_sock(nsk);

        /* keep a single reference */
        __sock_put(nsk);
        return nsk;
}

void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk)
{
        const struct tcp_sock *tp = tcp_sk(ssk);

        msk->rcvq_space.copied = 0;
        msk->rcvq_space.rtt_us = 0;

        msk->rcvq_space.time = tp->tcp_mstamp;

        /* initial rcv_space offering made to peer */
        msk->rcvq_space.space = min_t(u32, tp->rcv_wnd,
                                      TCP_INIT_CWND * tp->advmss);
        if (msk->rcvq_space.space == 0)
                msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT;
}
cf7da0d6 PK |
1567 | static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, |
1568 | bool kern) | |
1569 | { | |
1570 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1571 | struct socket *listener; | |
1572 | struct sock *newsk; | |
1573 | ||
1574 | listener = __mptcp_nmpc_socket(msk); | |
1575 | if (WARN_ON_ONCE(!listener)) { | |
1576 | *err = -EINVAL; | |
1577 | return NULL; | |
1578 | } | |
1579 | ||
1580 | pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk)); | |
1581 | newsk = inet_csk_accept(listener->sk, flags, err, kern); | |
1582 | if (!newsk) | |
1583 | return NULL; | |
1584 | ||
1585 | pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk)); | |
cf7da0d6 PK |
1586 | if (sk_is_mptcp(newsk)) { |
1587 | struct mptcp_subflow_context *subflow; | |
1588 | struct sock *new_mptcp_sock; | |
1589 | struct sock *ssk = newsk; | |
1590 | ||
1591 | subflow = mptcp_subflow_ctx(newsk); | |
58b09919 | 1592 | new_mptcp_sock = subflow->conn; |
cf7da0d6 | 1593 | |
58b09919 PA |
1594 | /* is_mptcp should be false if subflow->conn is missing, see |
1595 | * subflow_syn_recv_sock() | |
1596 | */ | |
1597 | if (WARN_ON_ONCE(!new_mptcp_sock)) { | |
1598 | tcp_sk(newsk)->is_mptcp = 0; | |
1599 | return newsk; | |
cf7da0d6 PK |
1600 | } |
1601 | ||
58b09919 PA |
1602 | /* acquire the 2nd reference for the owning socket */ |
1603 | sock_hold(new_mptcp_sock); | |
cf7da0d6 | 1604 | |
58b09919 PA |
1605 | local_bh_disable(); |
1606 | bh_lock_sock(new_mptcp_sock); | |
cf7da0d6 | 1607 | msk = mptcp_sk(new_mptcp_sock); |
8ab183de | 1608 | msk->first = newsk; |
cf7da0d6 PK |
1609 | |
1610 | newsk = new_mptcp_sock; | |
1611 | mptcp_copy_inaddrs(newsk, ssk); | |
1612 | list_add(&subflow->node, &msk->conn_list); | |
1613 | ||
a6b118fe | 1614 | mptcp_rcv_space_init(msk, ssk); |
cf7da0d6 | 1615 | bh_unlock_sock(new_mptcp_sock); |
fc518953 FW |
1616 | |
1617 | __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK); | |
cf7da0d6 | 1618 | local_bh_enable(); |
fc518953 FW |
1619 | } else { |
1620 | MPTCP_INC_STATS(sock_net(sk), | |
1621 | MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); | |
cf7da0d6 PK |
1622 | } |
1623 | ||
1624 | return newsk; | |
1625 | } | |
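/* A minimal userspace sketch of the passive side described above,
 * assuming a kernel with CONFIG_MPTCP and headers exposing
 * IPPROTO_MPTCP: accept() transparently returns either an MPTCP
 * connection or a plain-TCP fallback, so the caller needs no special
 * casing. Illustrative only; error handling is abbreviated.
 */
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP 262	/* Linux UAPI value, if the libc headers lack it */
#endif

static int mptcp_listener(unsigned short port)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port   = htons(port),
	};
	int lfd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (lfd < 0)
		return -1;
	if (bind(lfd, (struct sockaddr *)&addr, sizeof(addr)) < 0 ||
	    listen(lfd, 16) < 0)
		return -1;

	/* usable for both MP_CAPABLE peers and TCP-only fallback peers */
	return accept(lfd, NULL, NULL);
}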
1626 | ||
79c0949e PK |
1627 | static void mptcp_destroy(struct sock *sk) |
1628 | { | |
c9fd9c5f FW |
1629 | struct mptcp_sock *msk = mptcp_sk(sk); |
1630 | ||
2c5ebd00 | 1631 | mptcp_token_destroy(msk); |
c9fd9c5f FW |
1632 | if (msk->cached_ext) |
1633 | __skb_ext_put(msk->cached_ext); | |
d027236c PA |
1634 | |
1635 | sk_sockets_allocated_dec(sk); | |
79c0949e PK |
1636 | } |
1637 | ||
fd1452d8 | 1638 | static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, |
a7b75c5a | 1639 | sockptr_t optval, unsigned int optlen) |
fd1452d8 FW |
1640 | { |
1641 | struct sock *sk = (struct sock *)msk; | |
1642 | struct socket *ssock; | |
1643 | int ret; | |
1644 | ||
1645 | switch (optname) { | |
1646 | case SO_REUSEPORT: | |
1647 | case SO_REUSEADDR: | |
1648 | lock_sock(sk); | |
1649 | ssock = __mptcp_nmpc_socket(msk); | |
1650 | if (!ssock) { | |
1651 | release_sock(sk); | |
1652 | return -EINVAL; | |
1653 | } | |
1654 | ||
a7b75c5a | 1655 | ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen); |
fd1452d8 FW |
1656 | if (ret == 0) { |
1657 | if (optname == SO_REUSEPORT) | |
1658 | sk->sk_reuseport = ssock->sk->sk_reuseport; | |
1659 | else if (optname == SO_REUSEADDR) | |
1660 | sk->sk_reuse = ssock->sk->sk_reuse; | |
1661 | } | |
1662 | release_sock(sk); | |
1663 | return ret; | |
1664 | } | |
1665 | ||
a7b75c5a | 1666 | return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); |
fd1452d8 FW |
1667 | } |
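/* Userspace view of the forwarding above: SO_REUSEADDR/SO_REUSEPORT set
 * on the MPTCP socket before bind() are applied to the initial subflow,
 * mirroring plain TCP usage. A hedged sketch (assumes IPPROTO_MPTCP as
 * in the earlier example):
 */
#include <sys/socket.h>
#include <netinet/in.h>

static int mptcp_socket_reuseaddr(void)
{
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	/* succeeds only while the initial subflow socket still exists */
	if (fd >= 0 &&
	    setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one)) < 0)
		return -1;
	return fd;
}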
1668 | ||
c9b95a13 | 1669 | static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, |
a7b75c5a | 1670 | sockptr_t optval, unsigned int optlen) |
c9b95a13 FW |
1671 | { |
1672 | struct sock *sk = (struct sock *)msk; | |
1673 | int ret = -EOPNOTSUPP; | |
1674 | struct socket *ssock; | |
1675 | ||
1676 | switch (optname) { | |
1677 | case IPV6_V6ONLY: | |
1678 | lock_sock(sk); | |
1679 | ssock = __mptcp_nmpc_socket(msk); | |
1680 | if (!ssock) { | |
1681 | release_sock(sk); | |
1682 | return -EINVAL; | |
1683 | } | |
1684 | ||
1685 | ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen); | |
1686 | if (ret == 0) | |
1687 | sk->sk_ipv6only = ssock->sk->sk_ipv6only; | |
1688 | ||
1689 | release_sock(sk); | |
1690 | break; | |
1691 | } | |
1692 | ||
1693 | return ret; | |
1694 | } | |
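/* Companion sketch for the IPV6_V6ONLY branch above: the option must be
 * set before the MP_CAPABLE handshake completes; the kernel applies it
 * to the subflow and mirrors it back onto the MPTCP socket. Illustrative
 * only.
 */
#include <sys/socket.h>
#include <netinet/in.h>

static int mptcp6_socket_v6only(void)
{
	int one = 1;
	int fd = socket(AF_INET6, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd >= 0 &&
	    setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &one, sizeof(one)) < 0)
		return -1;
	return fd;
}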
1695 | ||
717e79c8 | 1696 | static int mptcp_setsockopt(struct sock *sk, int level, int optname, |
a7b75c5a | 1697 | sockptr_t optval, unsigned int optlen) |
717e79c8 PK |
1698 | { |
1699 | struct mptcp_sock *msk = mptcp_sk(sk); | |
76660afb | 1700 | struct sock *ssk; |
717e79c8 PK |
1701 | |
1702 | pr_debug("msk=%p", msk); | |
1703 | ||
83f0c10b | 1704 | if (level == SOL_SOCKET) |
fd1452d8 | 1705 | return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen); |
83f0c10b | 1706 | |
717e79c8 | 1707 | /* @@ the meaning of setsockopt() when the socket is connected and |
b6e4a1ae MM |
1708 | * there are multiple subflows is not yet defined. It is up to the |
1709 | * MPTCP-level socket to configure the subflows, unless the connection |
1710 | * has fallen back to plain TCP, in which case TCP socket options are |
1711 | * passed through to the one remaining subflow. |
717e79c8 PK |
1712 | */ |
1713 | lock_sock(sk); | |
76660afb | 1714 | ssk = __mptcp_tcp_fallback(msk); |
e154659b | 1715 | release_sock(sk); |
76660afb PA |
1716 | if (ssk) |
1717 | return tcp_setsockopt(ssk, level, optname, optval, optlen); | |
50e741bb | 1718 | |
c9b95a13 FW |
1719 | if (level == SOL_IPV6) |
1720 | return mptcp_setsockopt_v6(msk, optname, optval, optlen); | |
1721 | ||
b6e4a1ae | 1722 | return -EOPNOTSUPP; |
717e79c8 PK |
1723 | } |
1724 | ||
1725 | static int mptcp_getsockopt(struct sock *sk, int level, int optname, | |
50e741bb | 1726 | char __user *optval, int __user *option) |
717e79c8 PK |
1727 | { |
1728 | struct mptcp_sock *msk = mptcp_sk(sk); | |
76660afb | 1729 | struct sock *ssk; |
717e79c8 PK |
1730 | |
1731 | pr_debug("msk=%p", msk); | |
1732 | ||
b6e4a1ae MM |
1733 | /* @@ the meaning of getsockopt() when the socket is connected and |
1734 | * there are multiple subflows is not yet defined. It is up to the |
1735 | * MPTCP-level socket to configure the subflows, unless the connection |
1736 | * has fallen back to plain TCP, in which case socket options are |
1737 | * passed through to the one remaining subflow. |
717e79c8 PK |
1738 | */ |
1739 | lock_sock(sk); | |
76660afb | 1740 | ssk = __mptcp_tcp_fallback(msk); |
e154659b | 1741 | release_sock(sk); |
76660afb PA |
1742 | if (ssk) |
1743 | return tcp_getsockopt(ssk, level, optname, optval, option); | |
50e741bb | 1744 | |
b6e4a1ae | 1745 | return -EOPNOTSUPP; |
717e79c8 PK |
1746 | } |
1747 | ||
b51f9b80 PA |
1748 | #define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \ |
1749 | TCPF_WRITE_TIMER_DEFERRED) | |
14c441b5 PA |
1750 | |
1751 | /* this closely mirrors tcp_release_cb(), but it must handle a |
1752 | * different set of deferred events |
1753 | */ | |
1754 | static void mptcp_release_cb(struct sock *sk) | |
1755 | { | |
1756 | unsigned long flags, nflags; | |
1757 | ||
1758 | do { | |
1759 | flags = sk->sk_tsq_flags; | |
1760 | if (!(flags & MPTCP_DEFERRED_ALL)) | |
1761 | return; | |
1762 | nflags = flags & ~MPTCP_DEFERRED_ALL; | |
1763 | } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags); | |
1764 | ||
b51f9b80 PA |
1765 | sock_release_ownership(sk); |
1766 | ||
14c441b5 PA |
1767 | if (flags & TCPF_DELACK_TIMER_DEFERRED) { |
1768 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1769 | struct sock *ssk; | |
1770 | ||
1771 | ssk = mptcp_subflow_recv_lookup(msk); | |
1772 | if (!ssk || !schedule_work(&msk->work)) | |
1773 | __sock_put(sk); | |
1774 | } | |
b51f9b80 PA |
1775 | |
1776 | if (flags & TCPF_WRITE_TIMER_DEFERRED) { | |
1777 | mptcp_retransmit_handler(sk); | |
1778 | __sock_put(sk); | |
1779 | } | |
14c441b5 PA |
1780 | } |
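/* The cmpxchg() loop above is the usual "claim deferred events" idiom:
 * atomically snapshot and clear the event bits so each deferred action
 * runs exactly once, no matter how many contexts race here. A standalone
 * sketch of the same pattern with C11 atomics (not kernel code; the flag
 * names are made up for illustration):
 */
#include <stdatomic.h>

#define EV_DELACK	0x1UL
#define EV_RETRANS	0x2UL
#define EV_ALL		(EV_DELACK | EV_RETRANS)

static void release_deferred(atomic_ulong *tsq_flags)
{
	unsigned long flags = atomic_load(tsq_flags);

	do {
		if (!(flags & EV_ALL))
			return;
		/* on failure, 'flags' is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(tsq_flags, &flags,
					       flags & ~EV_ALL));

	if (flags & EV_DELACK)
		;	/* run the deferred delayed-ack work */
	if (flags & EV_RETRANS)
		;	/* run the deferred retransmit work */
}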
1781 | ||
2c5ebd00 PA |
1782 | static int mptcp_hash(struct sock *sk) |
1783 | { | |
1784 | /* should never be called, | |
1785 | * we hash the TCP subflows, not the master socket | |
1786 | */ | |
1787 | WARN_ON_ONCE(1); | |
1788 | return 0; | |
1789 | } | |
1790 | ||
1791 | static void mptcp_unhash(struct sock *sk) | |
1792 | { | |
1793 | /* called from sk_common_release(), but nothing to do here */ | |
1794 | } | |
1795 | ||
cec37a6e | 1796 | static int mptcp_get_port(struct sock *sk, unsigned short snum) |
f870fa0b MM |
1797 | { |
1798 | struct mptcp_sock *msk = mptcp_sk(sk); | |
cec37a6e | 1799 | struct socket *ssock; |
f870fa0b | 1800 | |
cec37a6e PK |
1801 | ssock = __mptcp_nmpc_socket(msk); |
1802 | pr_debug("msk=%p, subflow=%p", msk, ssock); | |
1803 | if (WARN_ON_ONCE(!ssock)) | |
1804 | return -EINVAL; | |
f870fa0b | 1805 | |
cec37a6e PK |
1806 | return inet_csk_get_port(ssock->sk, snum); |
1807 | } | |
f870fa0b | 1808 | |
cec37a6e PK |
1809 | void mptcp_finish_connect(struct sock *ssk) |
1810 | { | |
1811 | struct mptcp_subflow_context *subflow; | |
1812 | struct mptcp_sock *msk; | |
1813 | struct sock *sk; | |
6d0060f6 | 1814 | u64 ack_seq; |
f870fa0b | 1815 | |
cec37a6e | 1816 | subflow = mptcp_subflow_ctx(ssk); |
cec37a6e PK |
1817 | sk = subflow->conn; |
1818 | msk = mptcp_sk(sk); | |
1819 | ||
648ef4b8 MM |
1820 | pr_debug("msk=%p, token=%u", sk, subflow->token); |
1821 | ||
6d0060f6 MM |
1822 | mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq); |
1823 | ack_seq++; | |
648ef4b8 MM |
1824 | subflow->map_seq = ack_seq; |
1825 | subflow->map_subflow_seq = 1; | |
6d0060f6 | 1826 | |
cec37a6e PK |
1827 | /* the socket is not connected yet; no msk/subflow ops can access or |
1828 | * race on the fields below |
1829 | */ | |
1830 | WRITE_ONCE(msk->remote_key, subflow->remote_key); | |
1831 | WRITE_ONCE(msk->local_key, subflow->local_key); | |
6d0060f6 MM |
1832 | WRITE_ONCE(msk->write_seq, subflow->idsn + 1); |
1833 | WRITE_ONCE(msk->ack_seq, ack_seq); | |
d22f4988 | 1834 | WRITE_ONCE(msk->can_ack, 1); |
cc9d2566 | 1835 | atomic64_set(&msk->snd_una, msk->write_seq); |
1b1c7a0e PK |
1836 | |
1837 | mptcp_pm_new_connection(msk, 0); | |
a6b118fe FW |
1838 | |
1839 | mptcp_rcv_space_init(msk, ssk); | |
f870fa0b MM |
1840 | } |
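/* The ack_seq computed above is the peer's initial data sequence number
 * (IDSN) plus one. Per RFC 8684, the IDSN is the least significant 64 bits
 * of SHA-256(key). A sketch of that derivation, assuming a hypothetical
 * sha256() helper and eliding the big-endian conversion the kernel does:
 */
#include <stdint.h>
#include <string.h>

void sha256(const void *data, size_t len, uint8_t out[32]);	/* assumed */

static uint64_t mptcp_idsn_from_key(const uint8_t key[8])
{
	uint8_t digest[32];
	uint64_t idsn;

	sha256(key, 8, digest);
	memcpy(&idsn, digest + 24, sizeof(idsn));	/* last 64 bits */
	return idsn;	/* the first data byte is idsn + 1 */
}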
1841 | ||
cf7da0d6 PK |
1842 | static void mptcp_sock_graft(struct sock *sk, struct socket *parent) |
1843 | { | |
1844 | write_lock_bh(&sk->sk_callback_lock); | |
1845 | rcu_assign_pointer(sk->sk_wq, &parent->wq); | |
1846 | sk_set_socket(sk, parent); | |
1847 | sk->sk_uid = SOCK_INODE(parent)->i_uid; | |
1848 | write_unlock_bh(&sk->sk_callback_lock); | |
1849 | } | |
1850 | ||
f296234c PK |
1851 | bool mptcp_finish_join(struct sock *sk) |
1852 | { | |
1853 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
1854 | struct mptcp_sock *msk = mptcp_sk(subflow->conn); | |
1855 | struct sock *parent = (void *)msk; | |
1856 | struct socket *parent_sock; | |
ec3edaa7 | 1857 | bool ret; |
f296234c PK |
1858 | |
1859 | pr_debug("msk=%p, subflow=%p", msk, subflow); | |
1860 | ||
1861 | /* mptcp socket already closing? */ | |
b93df08c | 1862 | if (!mptcp_is_fully_established(parent)) |
f296234c PK |
1863 | return false; |
1864 | ||
1865 | if (!msk->pm.server_side) | |
1866 | return true; | |
1867 | ||
10f6d46c PA |
1868 | if (!mptcp_pm_allow_new_subflow(msk)) |
1869 | return false; | |
1870 | ||
1871 | /* active connections are already on conn_list, and we can't acquire | |
1872 | * msk lock here. | |
1873 | * use the join list lock as synchronization point and double-check | |
1874 | * msk status to avoid racing with mptcp_close() | |
1875 | */ | |
1876 | spin_lock_bh(&msk->join_list_lock); | |
1877 | ret = inet_sk_state_load(parent) == TCP_ESTABLISHED; | |
1878 | if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) | |
1879 | list_add_tail(&subflow->node, &msk->join_list); | |
1880 | spin_unlock_bh(&msk->join_list_lock); | |
1881 | if (!ret) | |
1882 | return false; | |
1883 | ||
1884 | /* attach to msk socket only after we are sure it will deal with us |
1885 | * at close time | |
1886 | */ | |
f296234c PK |
1887 | parent_sock = READ_ONCE(parent->sk_socket); |
1888 | if (parent_sock && !sk->sk_socket) | |
1889 | mptcp_sock_graft(sk, parent_sock); | |
10f6d46c PA |
1890 | subflow->map_seq = msk->ack_seq; |
1891 | return true; | |
f296234c PK |
1892 | } |
1893 | ||
1891c4a0 FW |
1894 | static bool mptcp_memory_free(const struct sock *sk, int wake) |
1895 | { | |
1896 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1897 | ||
1898 | return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true; | |
1899 | } | |
1900 | ||
f870fa0b MM |
1901 | static struct proto mptcp_prot = { |
1902 | .name = "MPTCP", | |
1903 | .owner = THIS_MODULE, | |
1904 | .init = mptcp_init_sock, | |
18b683bf | 1905 | .disconnect = mptcp_disconnect, |
f870fa0b | 1906 | .close = mptcp_close, |
cf7da0d6 | 1907 | .accept = mptcp_accept, |
717e79c8 PK |
1908 | .setsockopt = mptcp_setsockopt, |
1909 | .getsockopt = mptcp_getsockopt, | |
f870fa0b | 1910 | .shutdown = tcp_shutdown, |
79c0949e | 1911 | .destroy = mptcp_destroy, |
f870fa0b MM |
1912 | .sendmsg = mptcp_sendmsg, |
1913 | .recvmsg = mptcp_recvmsg, | |
14c441b5 | 1914 | .release_cb = mptcp_release_cb, |
2c5ebd00 PA |
1915 | .hash = mptcp_hash, |
1916 | .unhash = mptcp_unhash, | |
cec37a6e | 1917 | .get_port = mptcp_get_port, |
d027236c PA |
1918 | .sockets_allocated = &mptcp_sockets_allocated, |
1919 | .memory_allocated = &tcp_memory_allocated, | |
1920 | .memory_pressure = &tcp_memory_pressure, | |
1891c4a0 | 1921 | .stream_memory_free = mptcp_memory_free, |
d027236c PA |
1922 | .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), |
1923 | .sysctl_mem = sysctl_tcp_mem, | |
f870fa0b | 1924 | .obj_size = sizeof(struct mptcp_sock), |
2c5ebd00 | 1925 | .slab_flags = SLAB_TYPESAFE_BY_RCU, |
f870fa0b MM |
1926 | .no_autobind = true, |
1927 | }; | |
1928 | ||
2303f994 PK |
1929 | static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
1930 | { | |
1931 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
1932 | struct socket *ssock; | |
cf7da0d6 | 1933 | int err; |
2303f994 PK |
1934 | |
1935 | lock_sock(sock->sk); | |
fa68018d PA |
1936 | ssock = __mptcp_nmpc_socket(msk); |
1937 | if (!ssock) { | |
1938 | err = -EINVAL; | |
2303f994 PK |
1939 | goto unlock; |
1940 | } | |
1941 | ||
1942 | err = ssock->ops->bind(ssock, uaddr, addr_len); | |
cf7da0d6 PK |
1943 | if (!err) |
1944 | mptcp_copy_inaddrs(sock->sk, ssock->sk); | |
2303f994 PK |
1945 | |
1946 | unlock: | |
1947 | release_sock(sock->sk); | |
1948 | return err; | |
1949 | } | |
1950 | ||
0235d075 PA |
1951 | static void mptcp_subflow_early_fallback(struct mptcp_sock *msk, |
1952 | struct mptcp_subflow_context *subflow) | |
1953 | { | |
1954 | subflow->request_mptcp = 0; | |
1955 | __mptcp_do_fallback(msk); | |
1956 | } | |
1957 | ||
2303f994 PK |
1958 | static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, |
1959 | int addr_len, int flags) | |
1960 | { | |
1961 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2c5ebd00 | 1962 | struct mptcp_subflow_context *subflow; |
2303f994 PK |
1963 | struct socket *ssock; |
1964 | int err; | |
1965 | ||
1966 | lock_sock(sock->sk); | |
41be81a8 PA |
1967 | if (sock->state != SS_UNCONNECTED && msk->subflow) { |
1968 | /* pending connection or invalid state, let existing subflow | |
1969 | * cope with that | |
1970 | */ | |
1971 | ssock = msk->subflow; | |
1972 | goto do_connect; | |
1973 | } | |
1974 | ||
fa68018d PA |
1975 | ssock = __mptcp_nmpc_socket(msk); |
1976 | if (!ssock) { | |
1977 | err = -EINVAL; | |
2303f994 PK |
1978 | goto unlock; |
1979 | } | |
1980 | ||
fa68018d PA |
1981 | mptcp_token_destroy(msk); |
1982 | inet_sk_state_store(sock->sk, TCP_SYN_SENT); | |
2c5ebd00 | 1983 | subflow = mptcp_subflow_ctx(ssock->sk); |
cf7da0d6 PK |
1984 | #ifdef CONFIG_TCP_MD5SIG |
1985 | /* no MPTCP if MD5SIG is enabled on this socket or we may run out of | |
1986 | * TCP option space. | |
1987 | */ | |
1988 | if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info)) | |
0235d075 | 1989 | mptcp_subflow_early_fallback(msk, subflow); |
cf7da0d6 | 1990 | #endif |
2c5ebd00 | 1991 | if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) |
0235d075 | 1992 | mptcp_subflow_early_fallback(msk, subflow); |
cf7da0d6 | 1993 | |
41be81a8 | 1994 | do_connect: |
2303f994 | 1995 | err = ssock->ops->connect(ssock, uaddr, addr_len, flags); |
41be81a8 PA |
1996 | sock->state = ssock->state; |
1997 | ||
1998 | /* on successful connect, the msk state will be moved to established by | |
1999 | * subflow_finish_connect() | |
2000 | */ | |
2001 | if (!err || err == -EINPROGRESS) |
2002 | mptcp_copy_inaddrs(sock->sk, ssock->sk); | |
2003 | else | |
2004 | inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); | |
2303f994 PK |
2005 | |
2006 | unlock: | |
2007 | release_sock(sock->sk); | |
2008 | return err; | |
2009 | } | |
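/* Userspace view of the connect path above, assuming IPPROTO_MPTCP is
 * available: a nonblocking connect() on the MPTCP socket reports
 * EINPROGRESS exactly like plain TCP, and completion is observed by
 * polling for writability. Hedged sketch; error handling abbreviated.
 */
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <sys/socket.h>
#include <netinet/in.h>

static int mptcp_connect_nonblock(const struct sockaddr_in *dst)
{
	struct pollfd pfd;
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0)
		return -1;
	fcntl(fd, F_SETFL, O_NONBLOCK);

	if (connect(fd, (const struct sockaddr *)dst, sizeof(*dst)) < 0 &&
	    errno != EINPROGRESS)
		return -1;

	pfd.fd = fd;
	pfd.events = POLLOUT;	/* writable once the handshake completes */
	poll(&pfd, 1, -1);
	return fd;
}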
2010 | ||
cf7da0d6 PK |
2011 | static int mptcp_listen(struct socket *sock, int backlog) |
2012 | { | |
2013 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2014 | struct socket *ssock; | |
2015 | int err; | |
2016 | ||
2017 | pr_debug("msk=%p", msk); | |
2018 | ||
2019 | lock_sock(sock->sk); | |
fa68018d PA |
2020 | ssock = __mptcp_nmpc_socket(msk); |
2021 | if (!ssock) { | |
2022 | err = -EINVAL; | |
cf7da0d6 PK |
2023 | goto unlock; |
2024 | } | |
2025 | ||
fa68018d PA |
2026 | mptcp_token_destroy(msk); |
2027 | inet_sk_state_store(sock->sk, TCP_LISTEN); | |
5e20087d FW |
2028 | sock_set_flag(sock->sk, SOCK_RCU_FREE); |
2029 | ||
cf7da0d6 PK |
2030 | err = ssock->ops->listen(ssock, backlog); |
2031 | inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); | |
2032 | if (!err) | |
2033 | mptcp_copy_inaddrs(sock->sk, ssock->sk); | |
2034 | ||
2035 | unlock: | |
2036 | release_sock(sock->sk); | |
2037 | return err; | |
2038 | } | |
2039 | ||
cf7da0d6 PK |
2040 | static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, |
2041 | int flags, bool kern) | |
2042 | { | |
2043 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2044 | struct socket *ssock; | |
2045 | int err; | |
2046 | ||
2047 | pr_debug("msk=%p", msk); | |
2048 | ||
2049 | lock_sock(sock->sk); | |
2050 | if (sock->sk->sk_state != TCP_LISTEN) | |
2051 | goto unlock_fail; | |
2052 | ||
2053 | ssock = __mptcp_nmpc_socket(msk); | |
2054 | if (!ssock) | |
2055 | goto unlock_fail; | |
2056 | ||
8a05661b | 2057 | clear_bit(MPTCP_DATA_READY, &msk->flags); |
cf7da0d6 PK |
2058 | sock_hold(ssock->sk); |
2059 | release_sock(sock->sk); | |
2060 | ||
2061 | err = ssock->ops->accept(sock, newsock, flags, kern); | |
d2f77c53 | 2062 | if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) { |
cf7da0d6 PK |
2063 | struct mptcp_sock *msk = mptcp_sk(newsock->sk); |
2064 | struct mptcp_subflow_context *subflow; | |
2065 | ||
2066 | /* set ssk->sk_socket of accept()ed flows to mptcp socket. | |
2067 | * This is needed so NOSPACE flag can be set from tcp stack. | |
2068 | */ | |
ec3edaa7 | 2069 | __mptcp_flush_join_list(msk); |
cf7da0d6 PK |
2070 | list_for_each_entry(subflow, &msk->conn_list, node) { |
2071 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); | |
2072 | ||
2073 | if (!ssk->sk_socket) | |
2074 | mptcp_sock_graft(ssk, newsock); | |
2075 | } | |
cf7da0d6 PK |
2076 | } |
2077 | ||
8a05661b PA |
2078 | if (inet_csk_listen_poll(ssock->sk)) |
2079 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
cf7da0d6 PK |
2080 | sock_put(ssock->sk); |
2081 | return err; | |
2082 | ||
2083 | unlock_fail: | |
2084 | release_sock(sock->sk); | |
2085 | return -EINVAL; | |
2086 | } | |
2087 | ||
8a05661b PA |
2088 | static __poll_t mptcp_check_readable(struct mptcp_sock *msk) |
2089 | { | |
2090 | return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : | |
2091 | 0; | |
2092 | } | |
2093 | ||
2303f994 PK |
2094 | static __poll_t mptcp_poll(struct file *file, struct socket *sock, |
2095 | struct poll_table_struct *wait) | |
2096 | { | |
1891c4a0 | 2097 | struct sock *sk = sock->sk; |
8ab183de | 2098 | struct mptcp_sock *msk; |
2303f994 | 2099 | __poll_t mask = 0; |
8a05661b | 2100 | int state; |
2303f994 | 2101 | |
1891c4a0 | 2102 | msk = mptcp_sk(sk); |
1891c4a0 | 2103 | sock_poll_wait(file, sock, wait); |
1891c4a0 | 2104 | |
8a05661b PA |
2105 | state = inet_sk_state_load(sk); |
2106 | if (state == TCP_LISTEN) | |
2107 | return mptcp_check_readable(msk); | |
2108 | ||
2109 | if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { | |
2110 | mask |= mptcp_check_readable(msk); | |
2111 | if (sk_stream_is_writeable(sk) && | |
2112 | test_bit(MPTCP_SEND_SPACE, &msk->flags)) | |
2113 | mask |= EPOLLOUT | EPOLLWRNORM; | |
2114 | } | |
1891c4a0 FW |
2115 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
2116 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; | |
2117 | ||
2303f994 PK |
2118 | return mask; |
2119 | } | |
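/* As the handler above shows, a listening MPTCP socket only ever reports
 * readability, so a classic poll()-then-accept() loop works unchanged.
 * Sketch, reusing the listener fd from the earlier example:
 */
#include <poll.h>
#include <sys/socket.h>

static int wait_and_accept(int lfd)
{
	struct pollfd pfd = { .fd = lfd, .events = POLLIN };

	if (poll(&pfd, 1, -1) <= 0)	/* POLLIN set once a connection is queued */
		return -1;
	return accept(lfd, NULL, NULL);
}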
2120 | ||
21498490 PK |
2121 | static int mptcp_shutdown(struct socket *sock, int how) |
2122 | { | |
2123 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2124 | struct mptcp_subflow_context *subflow; | |
2125 | int ret = 0; | |
2126 | ||
2127 | pr_debug("msk=%p, how=%d", msk, how); | |
2128 | ||
2129 | lock_sock(sock->sk); | |
21498490 PK |
2130 | if (how == SHUT_WR || how == SHUT_RDWR) |
2131 | inet_sk_state_store(sock->sk, TCP_FIN_WAIT1); | |
2132 | ||
2133 | how++;	/* map SHUT_RD/WR/RDWR (0-2) to RCV/SEND_SHUTDOWN bits (1-3) */ | |
2134 | ||
2135 | if ((how & ~SHUTDOWN_MASK) || !how) { | |
2136 | ret = -EINVAL; | |
2137 | goto out_unlock; | |
2138 | } | |
2139 | ||
2140 | if (sock->state == SS_CONNECTING) { | |
2141 | if ((1 << sock->sk->sk_state) & | |
2142 | (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)) | |
2143 | sock->state = SS_DISCONNECTING; | |
2144 | else | |
2145 | sock->state = SS_CONNECTED; | |
2146 | } | |
2147 | ||
ec3edaa7 | 2148 | __mptcp_flush_join_list(msk); |
21498490 PK |
2149 | mptcp_for_each_subflow(msk, subflow) { |
2150 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); | |
2151 | ||
76c42a29 | 2152 | mptcp_subflow_shutdown(tcp_sk, how, 1, msk->write_seq); |
21498490 PK |
2153 | } |
2154 | ||
e1ff9e82 DC |
2155 | /* Wake up anyone sleeping in poll. */ |
2156 | sock->sk->sk_state_change(sock->sk); | |
2157 | ||
21498490 PK |
2158 | out_unlock: |
2159 | release_sock(sock->sk); | |
2160 | ||
2161 | return ret; | |
2162 | } | |
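/* Half-close from userspace: shutdown(fd, SHUT_WR) on the MPTCP socket
 * maps to the function above, which shuts down the write side of every
 * subflow while leaving the read side open. Minimal sketch:
 */
#include <sys/socket.h>
#include <unistd.h>

static void mptcp_half_close(int fd)
{
	char buf[256];

	shutdown(fd, SHUT_WR);	/* we stop sending; the peer may keep talking */

	while (read(fd, buf, sizeof(buf)) > 0)
		;		/* drain remaining data until the peer's FIN */
	close(fd);
}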
2163 | ||
e42f1ac6 FW |
2164 | static const struct proto_ops mptcp_stream_ops = { |
2165 | .family = PF_INET, | |
2166 | .owner = THIS_MODULE, | |
2167 | .release = inet_release, | |
2168 | .bind = mptcp_bind, | |
2169 | .connect = mptcp_stream_connect, | |
2170 | .socketpair = sock_no_socketpair, | |
2171 | .accept = mptcp_stream_accept, | |
d2f77c53 | 2172 | .getname = inet_getname, |
e42f1ac6 FW |
2173 | .poll = mptcp_poll, |
2174 | .ioctl = inet_ioctl, | |
2175 | .gettstamp = sock_gettstamp, | |
2176 | .listen = mptcp_listen, | |
2177 | .shutdown = mptcp_shutdown, | |
2178 | .setsockopt = sock_common_setsockopt, | |
2179 | .getsockopt = sock_common_getsockopt, | |
2180 | .sendmsg = inet_sendmsg, | |
2181 | .recvmsg = inet_recvmsg, | |
2182 | .mmap = sock_no_mmap, | |
2183 | .sendpage = inet_sendpage, | |
e42f1ac6 | 2184 | }; |
2303f994 | 2185 | |
f870fa0b MM |
2186 | static struct inet_protosw mptcp_protosw = { |
2187 | .type = SOCK_STREAM, | |
2188 | .protocol = IPPROTO_MPTCP, | |
2189 | .prot = &mptcp_prot, | |
2303f994 PK |
2190 | .ops = &mptcp_stream_ops, |
2191 | .flags = INET_PROTOSW_ICSK, | |
f870fa0b MM |
2192 | }; |
2193 | ||
d39dceca | 2194 | void __init mptcp_proto_init(void) |
f870fa0b | 2195 | { |
2303f994 | 2196 | mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo; |
2303f994 | 2197 | |
d027236c PA |
2198 | if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL)) |
2199 | panic("Failed to allocate MPTCP pcpu counter\n"); | |
2200 | ||
2303f994 | 2201 | mptcp_subflow_init(); |
1b1c7a0e | 2202 | mptcp_pm_init(); |
2c5ebd00 | 2203 | mptcp_token_init(); |
2303f994 | 2204 | |
f870fa0b MM |
2205 | if (proto_register(&mptcp_prot, 1) != 0) |
2206 | panic("Failed to register MPTCP proto.\n"); | |
2207 | ||
2208 | inet_register_protosw(&mptcp_protosw); | |
6771bfd9 FW |
2209 | |
2210 | BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb)); | |
f870fa0b MM |
2211 | } |
2212 | ||
2213 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
e42f1ac6 FW |
2214 | static const struct proto_ops mptcp_v6_stream_ops = { |
2215 | .family = PF_INET6, | |
2216 | .owner = THIS_MODULE, | |
2217 | .release = inet6_release, | |
2218 | .bind = mptcp_bind, | |
2219 | .connect = mptcp_stream_connect, | |
2220 | .socketpair = sock_no_socketpair, | |
2221 | .accept = mptcp_stream_accept, | |
d2f77c53 | 2222 | .getname = inet6_getname, |
e42f1ac6 FW |
2223 | .poll = mptcp_poll, |
2224 | .ioctl = inet6_ioctl, | |
2225 | .gettstamp = sock_gettstamp, | |
2226 | .listen = mptcp_listen, | |
2227 | .shutdown = mptcp_shutdown, | |
2228 | .setsockopt = sock_common_setsockopt, | |
2229 | .getsockopt = sock_common_getsockopt, | |
2230 | .sendmsg = inet6_sendmsg, | |
2231 | .recvmsg = inet6_recvmsg, | |
2232 | .mmap = sock_no_mmap, | |
2233 | .sendpage = inet_sendpage, | |
2234 | #ifdef CONFIG_COMPAT | |
3986912f | 2235 | .compat_ioctl = inet6_compat_ioctl, |
e42f1ac6 FW |
2236 | #endif |
2237 | }; | |
2238 | ||
f870fa0b MM |
2239 | static struct proto mptcp_v6_prot; |
2240 | ||
79c0949e PK |
2241 | static void mptcp_v6_destroy(struct sock *sk) |
2242 | { | |
2243 | mptcp_destroy(sk); | |
2244 | inet6_destroy_sock(sk); | |
2245 | } | |
2246 | ||
f870fa0b MM |
2247 | static struct inet_protosw mptcp_v6_protosw = { |
2248 | .type = SOCK_STREAM, | |
2249 | .protocol = IPPROTO_MPTCP, | |
2250 | .prot = &mptcp_v6_prot, | |
2303f994 | 2251 | .ops = &mptcp_v6_stream_ops, |
f870fa0b MM |
2252 | .flags = INET_PROTOSW_ICSK, |
2253 | }; | |
2254 | ||
d39dceca | 2255 | int __init mptcp_proto_v6_init(void) |
f870fa0b MM |
2256 | { |
2257 | int err; | |
2258 | ||
2259 | mptcp_v6_prot = mptcp_prot; | |
2260 | strcpy(mptcp_v6_prot.name, "MPTCPv6"); | |
2261 | mptcp_v6_prot.slab = NULL; | |
79c0949e | 2262 | mptcp_v6_prot.destroy = mptcp_v6_destroy; |
b0519de8 | 2263 | mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); |
f870fa0b MM |
2264 | |
2265 | err = proto_register(&mptcp_v6_prot, 1); | |
2266 | if (err) | |
2267 | return err; | |
2268 | ||
2269 | err = inet6_register_protosw(&mptcp_v6_protosw); | |
2270 | if (err) | |
2271 | proto_unregister(&mptcp_v6_prot); | |
2272 | ||
2273 | return err; | |
2274 | } | |
2275 | #endif |