// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2017 - 2019, Intel Corporation.
 */

#define pr_fmt(fmt) "MPTCP: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/sched/signal.h>
#include <linux/atomic.h>
#include <net/sock.h>
#include <net/inet_common.h>
#include <net/inet_hashtables.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <net/tcp_states.h>
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
#include <net/transp_v6.h>
#endif
#include <net/mptcp.h>
#include "protocol.h"
#include "mib.h"

#if IS_ENABLED(CONFIG_MPTCP_IPV6)
struct mptcp6_sock {
	struct mptcp_sock msk;
	struct ipv6_pinfo np;
};
#endif

struct mptcp_skb_cb {
	u32 offset;
};

#define MPTCP_SKB_CB(__skb)	((struct mptcp_skb_cb *)&((__skb)->cb[0]))

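/* MPTCP_SKB_CB() overlays private per-skb state on skb->cb[], following the
 * same idiom as TCP_SKB_CB(). Only 'offset' is stored: for an skb sitting on
 * the msk receive queue it records where the MPTCP-level stream currently
 * points within the payload - set when the skb is moved from a subflow,
 * advanced as data is copied to userspace (see __mptcp_recvmsg_mskq()).
 */
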
static struct percpu_counter mptcp_sockets_allocated;

/* If msk has an initial subflow socket, and the MP_CAPABLE handshake has not
 * completed yet or has failed, return the subflow socket.
 * Otherwise return NULL.
 */
static struct socket *__mptcp_nmpc_socket(const struct mptcp_sock *msk)
{
	if (!msk->subflow || READ_ONCE(msk->can_ack))
		return NULL;

	return msk->subflow;
}

static bool mptcp_is_tcpsk(struct sock *sk)
{
	struct socket *sock = sk->sk_socket;

	if (unlikely(sk->sk_prot == &tcp_prot)) {
		/* we are being invoked after mptcp_accept() has
		 * accepted a non-mp-capable flow: sk is a tcp_sk,
		 * not an mptcp one.
		 *
		 * Hand the socket over to tcp so all further socket ops
		 * bypass mptcp.
		 */
		sock->ops = &inet_stream_ops;
		return true;
#if IS_ENABLED(CONFIG_MPTCP_IPV6)
	} else if (unlikely(sk->sk_prot == &tcpv6_prot)) {
		sock->ops = &inet6_stream_ops;
		return true;
#endif
	}

	return false;
}

static struct sock *__mptcp_tcp_fallback(struct mptcp_sock *msk)
{
	sock_owned_by_me((const struct sock *)msk);

	if (likely(!__mptcp_check_fallback(msk)))
		return NULL;

	return msk->first;
}

static int __mptcp_socket_create(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct socket *ssock;
	int err;

	err = mptcp_subflow_create_socket(sk, &ssock);
	if (err)
		return err;

	msk->first = ssock->sk;
	msk->subflow = ssock;
	subflow = mptcp_subflow_ctx(ssock->sk);
	list_add(&subflow->node, &msk->conn_list);
	subflow->request_mptcp = 1;

	/* accept() will wait on first subflow sk_wq, and we always wake up
	 * via msk->sk_socket
	 */
	RCU_INIT_POINTER(msk->first->sk_wq, &sk->sk_socket->wq);

	return 0;
}

static void __mptcp_move_skb(struct mptcp_sock *msk, struct sock *ssk,
			     struct sk_buff *skb,
			     unsigned int offset, size_t copy_len)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *tail;

	__skb_unlink(skb, &ssk->sk_receive_queue);

	skb_ext_reset(skb);
	skb_orphan(skb);
	msk->ack_seq += copy_len;

	tail = skb_peek_tail(&sk->sk_receive_queue);
	if (offset == 0 && tail) {
		bool fragstolen;
		int delta;

		if (skb_try_coalesce(tail, skb, &fragstolen, &delta)) {
			kfree_skb_partial(skb, fragstolen);
			atomic_add(delta, &sk->sk_rmem_alloc);
			sk_mem_charge(sk, delta);
			return;
		}
	}

	skb_set_owner_r(skb, sk);
	__skb_queue_tail(&sk->sk_receive_queue, skb);
	MPTCP_SKB_CB(skb)->offset = offset;
}

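/* __mptcp_move_skb() above transfers an in-sequence skb from the subflow
 * receive queue to the msk-level one: the skb is orphaned, so its memory
 * accounting moves from the subflow to the MPTCP socket, and it is coalesced
 * with the current queue tail whenever possible to reduce per-skb overhead.
 */
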
static void mptcp_stop_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	mptcp_sk(sk)->timer_ival = 0;
}

/* both sockets must be locked */
static bool mptcp_subflow_dsn_valid(const struct mptcp_sock *msk,
				    struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	u64 dsn = mptcp_subflow_get_mapped_dsn(subflow);

	/* revalidate data sequence number.
	 *
	 * mptcp_subflow_data_available() is usually called
	 * without msk lock. It's unlikely (but possible)
	 * that msk->ack_seq has been advanced since the last
	 * call found in-sequence data.
	 */
	if (likely(dsn == msk->ack_seq))
		return true;

	subflow->data_avail = 0;
	mptcp_subflow_data_available(ssk);
	return subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
}

static void mptcp_check_data_fin_ack(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (__mptcp_check_fallback(msk))
		return;

	/* Look for an acknowledged DATA_FIN */
	if (((1 << sk->sk_state) &
	     (TCPF_FIN_WAIT1 | TCPF_CLOSING | TCPF_LAST_ACK)) &&
	    msk->write_seq == atomic64_read(&msk->snd_una)) {
		mptcp_stop_timer(sk);

		WRITE_ONCE(msk->snd_data_fin_enable, 0);

		switch (sk->sk_state) {
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_FIN_WAIT2);
			sk->sk_state_change(sk);
			break;
		case TCP_CLOSING:
		case TCP_LAST_ACK:
			inet_sk_state_store(sk, TCP_CLOSE);
			sk->sk_state_change(sk);
			break;
		}

		if (sk->sk_shutdown == SHUTDOWN_MASK ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
}

static bool mptcp_pending_data_fin(struct sock *sk, u64 *seq)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (READ_ONCE(msk->rcv_data_fin) &&
	    ((1 << sk->sk_state) &
	     (TCPF_ESTABLISHED | TCPF_FIN_WAIT1 | TCPF_FIN_WAIT2))) {
		u64 rcv_data_fin_seq = READ_ONCE(msk->rcv_data_fin_seq);

		if (msk->ack_seq == rcv_data_fin_seq) {
			if (seq)
				*seq = rcv_data_fin_seq;

			return true;
		}
	}

	return false;
}

static void mptcp_set_timeout(const struct sock *sk, const struct sock *ssk)
{
	long tout = ssk && inet_csk(ssk)->icsk_pending ?
		    inet_csk(ssk)->icsk_timeout - jiffies : 0;

	if (tout <= 0)
		tout = mptcp_sk(sk)->timer_ival;
	mptcp_sk(sk)->timer_ival = tout > 0 ? tout : TCP_RTO_MIN;
}

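/* mptcp_set_timeout() keys the MPTCP-level retransmit interval on the given
 * subflow: if ssk has an ICSK timer pending, its remaining time is reused;
 * otherwise the previous interval is kept, with TCP_RTO_MIN as the floor.
 */
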
static void mptcp_check_data_fin(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	u64 rcv_data_fin_seq;

	if (__mptcp_check_fallback(msk) || !msk->first)
		return;

	/* Need to ack a DATA_FIN received from a peer while this side
	 * of the connection is in ESTABLISHED, FIN_WAIT1, or FIN_WAIT2.
	 * msk->rcv_data_fin was set when parsing the incoming options
	 * at the subflow level and the msk lock was not held, so this
	 * is the first opportunity to act on the DATA_FIN and change
	 * the msk state.
	 *
	 * If we are caught up to the sequence number of the incoming
	 * DATA_FIN, send the DATA_ACK now and do state transition. If
	 * not caught up, do nothing and let the recv code send DATA_ACK
	 * when catching up.
	 */

	if (mptcp_pending_data_fin(sk, &rcv_data_fin_seq)) {
		struct mptcp_subflow_context *subflow;

		msk->ack_seq++;
		WRITE_ONCE(msk->rcv_data_fin, 0);

		sk->sk_shutdown |= RCV_SHUTDOWN;
		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);

		switch (sk->sk_state) {
		case TCP_ESTABLISHED:
			inet_sk_state_store(sk, TCP_CLOSE_WAIT);
			break;
		case TCP_FIN_WAIT1:
			inet_sk_state_store(sk, TCP_CLOSING);
			break;
		case TCP_FIN_WAIT2:
			inet_sk_state_store(sk, TCP_CLOSE);
			// @@ Close subflows now?
			break;
		default:
			/* Other states not expected */
			WARN_ON_ONCE(1);
			break;
		}

		mptcp_set_timeout(sk, NULL);
		mptcp_for_each_subflow(msk, subflow) {
			struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

			lock_sock(ssk);
			tcp_send_ack(ssk);
			release_sock(ssk);
		}

		sk->sk_state_change(sk);

		if (sk->sk_shutdown == SHUTDOWN_MASK ||
		    sk->sk_state == TCP_CLOSE)
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_HUP);
		else
			sk_wake_async(sk, SOCK_WAKE_WAITD, POLL_IN);
	}
}

static bool __mptcp_move_skbs_from_subflow(struct mptcp_sock *msk,
					   struct sock *ssk,
					   unsigned int *bytes)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;
	bool more_data_avail;
	struct tcp_sock *tp;
	bool done = false;

	pr_debug("msk=%p ssk=%p data avail=%d valid=%d empty=%d",
		 msk, ssk, subflow->data_avail,
		 mptcp_subflow_dsn_valid(msk, ssk),
		 !skb_peek(&ssk->sk_receive_queue));
	if (subflow->data_avail == MPTCP_SUBFLOW_OOO_DATA) {
		mptcp_subflow_discard_data(ssk, subflow->map_data_len);
		return false;
	}

	if (!mptcp_subflow_dsn_valid(msk, ssk))
		return false;

	tp = tcp_sk(ssk);
	do {
		u32 map_remaining, offset;
		u32 seq = tp->copied_seq;
		struct sk_buff *skb;
		bool fin;

		/* try to move as much data as available */
		map_remaining = subflow->map_data_len -
				mptcp_subflow_get_map_offset(subflow);

		skb = skb_peek(&ssk->sk_receive_queue);
		if (!skb)
			break;

		if (__mptcp_check_fallback(msk)) {
			/* if we are running under the workqueue, TCP could
			 * have collapsed skbs between dummy map creation and
			 * now; be sure to adjust the size
			 */
			map_remaining = skb->len;
			subflow->map_data_len = skb->len;
		}

		offset = seq - TCP_SKB_CB(skb)->seq;
		fin = TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN;
		if (fin) {
			done = true;
			seq++;
		}

		if (offset < skb->len) {
			size_t len = skb->len - offset;

			if (tp->urg_data)
				done = true;

			__mptcp_move_skb(msk, ssk, skb, offset, len);
			seq += len;
			moved += len;

			if (WARN_ON_ONCE(map_remaining < len))
				break;
		} else {
			WARN_ON_ONCE(!fin);
			sk_eat_skb(ssk, skb);
			done = true;
		}

		WRITE_ONCE(tp->copied_seq, seq);
		more_data_avail = mptcp_subflow_data_available(ssk);

		if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf)) {
			done = true;
			break;
		}
	} while (more_data_avail);

	*bytes += moved;

	/* If the moves have caught up with the DATA_FIN sequence number
	 * it's time to ack the DATA_FIN and change socket state, but
	 * this is not a good place to change state. Let the workqueue
	 * do it.
	 */
	if (mptcp_pending_data_fin(sk, NULL) &&
	    schedule_work(&msk->work))
		sock_hold(sk);

	return done;
}

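/* Note that subflow data is only moved in MPTCP sequence order: mappings
 * flagged MPTCP_SUBFLOW_OOO_DATA are discarded at the subflow level and
 * are expected to be recovered later via MPTCP-level retransmission.
 */
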
/* In most cases we will be able to lock the mptcp socket. If it's already
 * owned, we need to defer to the work queue to avoid ABBA deadlock.
 */
static bool move_skbs_to_msk(struct mptcp_sock *msk, struct sock *ssk)
{
	struct sock *sk = (struct sock *)msk;
	unsigned int moved = 0;

	if (READ_ONCE(sk->sk_lock.owned))
		return false;

	if (unlikely(!spin_trylock_bh(&sk->sk_lock.slock)))
		return false;

	/* must re-check after taking the lock */
	if (!READ_ONCE(sk->sk_lock.owned))
		__mptcp_move_skbs_from_subflow(msk, ssk, &moved);

	spin_unlock_bh(&sk->sk_lock.slock);

	return moved > 0;
}

void mptcp_data_ready(struct sock *sk, struct sock *ssk)
{
	struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(ssk);
	struct mptcp_sock *msk = mptcp_sk(sk);
	bool wake;

	/* move_skbs_to_msk below can legitimately clear the data_avail flag,
	 * but we will need it later to properly wake the reader, so cache
	 * its value
	 */
	wake = subflow->data_avail == MPTCP_SUBFLOW_DATA_AVAIL;
	if (wake)
		set_bit(MPTCP_DATA_READY, &msk->flags);

	if (atomic_read(&sk->sk_rmem_alloc) < READ_ONCE(sk->sk_rcvbuf) &&
	    move_skbs_to_msk(msk, ssk))
		goto wake;

	/* don't schedule if mptcp sk is (still) over limit */
	if (atomic_read(&sk->sk_rmem_alloc) > READ_ONCE(sk->sk_rcvbuf))
		goto wake;

	/* mptcp socket is owned, release_cb should retry */
	if (!test_and_set_bit(TCP_DELACK_TIMER_DEFERRED,
			      &sk->sk_tsq_flags)) {
		sock_hold(sk);

		/* need to try again, it's possible release_cb() has already
		 * been called after the test_and_set_bit() above.
		 */
		move_skbs_to_msk(msk, ssk);
	}
wake:
	if (wake)
		sk->sk_data_ready(sk);
}

static void __mptcp_flush_join_list(struct mptcp_sock *msk)
{
	if (likely(list_empty(&msk->join_list)))
		return;

	spin_lock_bh(&msk->join_list_lock);
	list_splice_tail_init(&msk->join_list, &msk->conn_list);
	spin_unlock_bh(&msk->join_list_lock);
}

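/* Subflows added via MP_JOIN are parked on msk->join_list, which is
 * protected only by join_list_lock and may thus be updated without the msk
 * socket lock; flushing splices them onto msk->conn_list, which is only
 * traversed with the msk lock held.
 */
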
static bool mptcp_timer_pending(struct sock *sk)
{
	return timer_pending(&inet_csk(sk)->icsk_retransmit_timer);
}

static void mptcp_reset_timer(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	unsigned long tout;

	/* should never be called with mptcp level timer cleared */
	tout = READ_ONCE(mptcp_sk(sk)->timer_ival);
	if (WARN_ON_ONCE(!tout))
		tout = TCP_RTO_MIN;
	sk_reset_timer(sk, &icsk->icsk_retransmit_timer, jiffies + tout);
}

void mptcp_data_acked(struct sock *sk)
{
	mptcp_reset_timer(sk);

	if ((!test_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags) ||
	     (inet_sk_state_load(sk) != TCP_ESTABLISHED)) &&
	    schedule_work(&mptcp_sk(sk)->work))
		sock_hold(sk);
}

void mptcp_subflow_eof(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (!test_and_set_bit(MPTCP_WORK_EOF, &msk->flags) &&
	    schedule_work(&msk->work))
		sock_hold(sk);
}

static void mptcp_check_for_eof(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	int receivers = 0;

	mptcp_for_each_subflow(msk, subflow)
		receivers += !subflow->rx_eof;

	if (!receivers && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
		/* hopefully temporary hack: propagate shutdown status
		 * to msk, when all subflows agree on it
		 */
		sk->sk_shutdown |= RCV_SHUTDOWN;

		smp_mb__before_atomic(); /* SHUTDOWN must be visible first */
		set_bit(MPTCP_DATA_READY, &msk->flags);
		sk->sk_data_ready(sk);
	}
}

static bool mptcp_ext_cache_refill(struct mptcp_sock *msk)
{
	const struct sock *sk = (const struct sock *)msk;

	if (!msk->cached_ext)
		msk->cached_ext = __skb_ext_alloc(sk->sk_allocation);

	return !!msk->cached_ext;
}

static struct sock *mptcp_subflow_recv_lookup(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;

	sock_owned_by_me(sk);

	mptcp_for_each_subflow(msk, subflow) {
		if (subflow->data_avail)
			return mptcp_subflow_tcp_sock(subflow);
	}

	return NULL;
}

static bool mptcp_skb_can_collapse_to(u64 write_seq,
				      const struct sk_buff *skb,
				      const struct mptcp_ext *mpext)
{
	if (!tcp_skb_can_collapse_to(skb))
		return false;

	/* can collapse only if MPTCP level sequence is in order */
	return mpext && mpext->data_seq + mpext->data_len == write_seq;
}

static bool mptcp_frag_can_collapse_to(const struct mptcp_sock *msk,
				       const struct page_frag *pfrag,
				       const struct mptcp_data_frag *df)
{
	return df && pfrag->page == df->page &&
		df->data_seq + df->data_len == msk->write_seq;
}

static void dfrag_uncharge(struct sock *sk, int len)
{
	sk_mem_uncharge(sk, len);
	sk_wmem_queued_add(sk, -len);
}

static void dfrag_clear(struct sock *sk, struct mptcp_data_frag *dfrag)
{
	int len = dfrag->data_len + dfrag->overhead;

	list_del(&dfrag->list);
	dfrag_uncharge(sk, len);
	put_page(dfrag->page);
}

static bool mptcp_is_writeable(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	if (!sk_stream_is_writeable((struct sock *)msk))
		return false;

	mptcp_for_each_subflow(msk, subflow) {
		if (sk_stream_is_writeable(subflow->tcp_sock))
			return true;
	}
	return false;
}

static void mptcp_clean_una(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_data_frag *dtmp, *dfrag;
	bool cleaned = false;
	u64 snd_una;

	/* on fallback we just need to ignore snd_una, as this is really
	 * plain TCP
	 */
	if (__mptcp_check_fallback(msk))
		atomic64_set(&msk->snd_una, msk->write_seq);
	snd_una = atomic64_read(&msk->snd_una);

	list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) {
		if (after64(dfrag->data_seq + dfrag->data_len, snd_una))
			break;

		dfrag_clear(sk, dfrag);
		cleaned = true;
	}

	dfrag = mptcp_rtx_head(sk);
	if (dfrag && after64(snd_una, dfrag->data_seq)) {
		u64 delta = snd_una - dfrag->data_seq;

		if (WARN_ON_ONCE(delta > dfrag->data_len))
			goto out;

		dfrag->data_seq += delta;
		dfrag->offset += delta;
		dfrag->data_len -= delta;

		dfrag_uncharge(sk, delta);
		cleaned = true;
	}

out:
	if (cleaned) {
		sk_mem_reclaim_partial(sk);

		/* Only wake up writers if a subflow is ready */
		if (mptcp_is_writeable(msk)) {
			set_bit(MPTCP_SEND_SPACE, &mptcp_sk(sk)->flags);
			smp_mb__after_atomic();

			/* set SEND_SPACE before sk_stream_write_space clears
			 * NOSPACE
			 */
			sk_stream_write_space(sk);
		}
	}
}

/* ensure we get enough memory for the frag hdr, beyond some minimal amount of
 * data
 */
static bool mptcp_page_frag_refill(struct sock *sk, struct page_frag *pfrag)
{
	if (likely(skb_page_frag_refill(32U + sizeof(struct mptcp_data_frag),
					pfrag, sk->sk_allocation)))
		return true;

	sk->sk_prot->enter_memory_pressure(sk);
	sk_stream_moderate_sndbuf(sk);
	return false;
}

static struct mptcp_data_frag *
mptcp_carve_data_frag(const struct mptcp_sock *msk, struct page_frag *pfrag,
		      int orig_offset)
{
	int offset = ALIGN(orig_offset, sizeof(long));
	struct mptcp_data_frag *dfrag;

	dfrag = (struct mptcp_data_frag *)(page_to_virt(pfrag->page) + offset);
	dfrag->data_len = 0;
	dfrag->data_seq = msk->write_seq;
	dfrag->overhead = offset - orig_offset + sizeof(struct mptcp_data_frag);
	dfrag->offset = offset + sizeof(struct mptcp_data_frag);
	dfrag->page = pfrag->page;

	return dfrag;
}

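/* mptcp_carve_data_frag() places the retransmit metadata in-line in the
 * page fragment, directly ahead of the data it describes:
 *
 *   orig_offset   offset (long-aligned)
 *        |           |
 *        v           v
 *   ... [ pad ][ struct mptcp_data_frag ][ data ... ]
 *
 * dfrag->overhead covers both the alignment padding and the header itself,
 * while dfrag->offset points at the first byte of data.
 */
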
static int mptcp_sendmsg_frag(struct sock *sk, struct sock *ssk,
			      struct msghdr *msg, struct mptcp_data_frag *dfrag,
			      long *timeo, int *pmss_now,
			      int *ps_goal)
{
	int mss_now, avail_size, size_goal, offset, ret, frag_truesize = 0;
	bool dfrag_collapsed, can_collapse = false;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct mptcp_ext *mpext = NULL;
	bool retransmission = !!dfrag;
	struct sk_buff *skb, *tail;
	struct page_frag *pfrag;
	struct page *page;
	u64 *write_seq;
	size_t psize;

	/* use the mptcp page cache so that we can easily move the data
	 * from one substream to another, but do per subflow memory accounting.
	 * Note: pfrag is used only when !retransmission, but the compiler is
	 * fooled into a warning if we don't init here
	 */
	pfrag = sk_page_frag(sk);
	if (!retransmission) {
		write_seq = &msk->write_seq;
		page = pfrag->page;
	} else {
		write_seq = &dfrag->data_seq;
		page = dfrag->page;
	}

	/* compute copy limit */
	mss_now = tcp_send_mss(ssk, &size_goal, msg->msg_flags);
	*pmss_now = mss_now;
	*ps_goal = size_goal;
	avail_size = size_goal;
	skb = tcp_write_queue_tail(ssk);
	if (skb) {
		mpext = skb_ext_find(skb, SKB_EXT_MPTCP);

		/* Limit the write to the size available in the
		 * current skb, if any, so that we create at most one new skb.
		 * Explicitly tells TCP internals to avoid collapsing on later
		 * queue management operation, to avoid breaking the ext <->
		 * SSN association set here
		 */
		can_collapse = (size_goal - skb->len > 0) &&
			       mptcp_skb_can_collapse_to(*write_seq, skb, mpext);
		if (!can_collapse)
			TCP_SKB_CB(skb)->eor = 1;
		else
			avail_size = size_goal - skb->len;
	}

	if (!retransmission) {
		/* reuse tail pfrag, if possible, or carve a new one from the
		 * page allocator
		 */
		dfrag = mptcp_rtx_tail(sk);
		offset = pfrag->offset;
		dfrag_collapsed = mptcp_frag_can_collapse_to(msk, pfrag, dfrag);
		if (!dfrag_collapsed) {
			dfrag = mptcp_carve_data_frag(msk, pfrag, offset);
			offset = dfrag->offset;
			frag_truesize = dfrag->overhead;
		}
		psize = min_t(size_t, pfrag->size - offset, avail_size);

		/* Copy to page */
		pr_debug("left=%zu", msg_data_left(msg));
		psize = copy_page_from_iter(pfrag->page, offset,
					    min_t(size_t, msg_data_left(msg),
						  psize),
					    &msg->msg_iter);
		pr_debug("left=%zu", msg_data_left(msg));
		if (!psize)
			return -EINVAL;

		if (!sk_wmem_schedule(sk, psize + dfrag->overhead)) {
			iov_iter_revert(&msg->msg_iter, psize);
			return -ENOMEM;
		}
	} else {
		offset = dfrag->offset;
		psize = min_t(size_t, dfrag->data_len, avail_size);
	}

	/* tell the TCP stack to delay the push so that we can safely
	 * access the skb after the sendpages call
	 */
	ret = do_tcp_sendpages(ssk, page, offset, psize,
			       msg->msg_flags | MSG_SENDPAGE_NOTLAST | MSG_DONTWAIT);
	if (ret <= 0) {
		if (!retransmission)
			iov_iter_revert(&msg->msg_iter, psize);
		return ret;
	}

	frag_truesize += ret;
	if (!retransmission) {
		if (unlikely(ret < psize))
			iov_iter_revert(&msg->msg_iter, psize - ret);

		/* send successful, keep track of sent data for mptcp-level
		 * retransmission
		 */
		dfrag->data_len += ret;
		if (!dfrag_collapsed) {
			get_page(dfrag->page);
			list_add_tail(&dfrag->list, &msk->rtx_queue);
			sk_wmem_queued_add(sk, frag_truesize);
		} else {
			sk_wmem_queued_add(sk, ret);
		}

		/* charge data on mptcp rtx queue to the master socket
		 * Note: we charge such data both to sk and ssk
		 */
		sk->sk_forward_alloc -= frag_truesize;
	}

	/* if the tail skb extension is still the cached one, collapsing
	 * really happened. Note: we can't check for 'same skb' as the sk_buff
	 * hdr on tail can be transmitted, freed and re-allocated by the
	 * do_tcp_sendpages() call
	 */
	tail = tcp_write_queue_tail(ssk);
	if (mpext && tail && mpext == skb_ext_find(tail, SKB_EXT_MPTCP)) {
		WARN_ON_ONCE(!can_collapse);
		mpext->data_len += ret;
		goto out;
	}

	skb = tcp_write_queue_tail(ssk);
	mpext = __skb_ext_set(skb, SKB_EXT_MPTCP, msk->cached_ext);
	msk->cached_ext = NULL;

	memset(mpext, 0, sizeof(*mpext));
	mpext->data_seq = *write_seq;
	mpext->subflow_seq = mptcp_subflow_ctx(ssk)->rel_write_seq;
	mpext->data_len = ret;
	mpext->use_map = 1;
	mpext->dsn64 = 1;

	pr_debug("data_seq=%llu subflow_seq=%u data_len=%u dsn64=%d",
		 mpext->data_seq, mpext->subflow_seq, mpext->data_len,
		 mpext->dsn64);

out:
	if (!retransmission)
		pfrag->offset += frag_truesize;
	WRITE_ONCE(*write_seq, *write_seq + ret);
	mptcp_subflow_ctx(ssk)->rel_write_seq += ret;

	return ret;
}

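/* mptcp_sendmsg_frag() serves both first transmissions (dfrag == NULL on
 * entry: data is copied from the msg iterator into the page cache) and
 * MPTCP-level retransmissions (data is pushed again from an existing
 * dfrag). Either way, the bytes handed to do_tcp_sendpages() end up covered
 * by a DSS mapping: the skb's MPTCP extension carries the data sequence
 * number, the subflow-relative sequence number and the mapping length.
 */
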
static void mptcp_nospace(struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;

	clear_bit(MPTCP_SEND_SPACE, &msk->flags);
	smp_mb__after_atomic(); /* msk->flags is changed by write_space cb */

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);
		struct socket *sock = READ_ONCE(ssk->sk_socket);

		/* enables ssk->write_space() callbacks */
		if (sock)
			set_bit(SOCK_NOSPACE, &sock->flags);
	}
}

static struct sock *mptcp_subflow_get_send(struct mptcp_sock *msk,
					   u32 *sndbuf)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	struct sock *backup = NULL;
	bool free;

	sock_owned_by_me(sk);

	*sndbuf = 0;
	if (!mptcp_ext_cache_refill(msk))
		return NULL;

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		free = sk_stream_is_writeable(subflow->tcp_sock);
		if (!free) {
			mptcp_nospace(msk);
			return NULL;
		}

		*sndbuf = max(tcp_sk(ssk)->snd_wnd, *sndbuf);
		if (subflow->backup) {
			if (!backup)
				backup = ssk;

			continue;
		}

		return ssk;
	}

	return backup;
}

static void ssk_check_wmem(struct mptcp_sock *msk)
{
	if (unlikely(!mptcp_is_writeable(msk)))
		mptcp_nospace(msk);
}

static int mptcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
{
	int mss_now = 0, size_goal = 0, ret = 0;
	struct mptcp_sock *msk = mptcp_sk(sk);
	struct page_frag *pfrag;
	size_t copied = 0;
	struct sock *ssk;
	u32 sndbuf;
	bool tx_ok;
	long timeo;

	if (msg->msg_flags & ~(MSG_MORE | MSG_DONTWAIT | MSG_NOSIGNAL))
		return -EOPNOTSUPP;

	lock_sock(sk);

	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);

	if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) {
		ret = sk_stream_wait_connect(sk, &timeo);
		if (ret)
			goto out;
	}

	pfrag = sk_page_frag(sk);
restart:
	mptcp_clean_una(sk);

	if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN)) {
		ret = -EPIPE;
		goto out;
	}

	__mptcp_flush_join_list(msk);
	ssk = mptcp_subflow_get_send(msk, &sndbuf);
	while (!sk_stream_memory_free(sk) ||
	       !ssk ||
	       !mptcp_page_frag_refill(ssk, pfrag)) {
		if (ssk) {
			/* make sure retransmit timer is
			 * running before we wait for memory.
			 *
			 * The retransmit timer might be needed
			 * to make the peer send an up-to-date
			 * MPTCP Ack.
			 */
			mptcp_set_timeout(sk, ssk);
			if (!mptcp_timer_pending(sk))
				mptcp_reset_timer(sk);
		}

		mptcp_nospace(msk);
		ret = sk_stream_wait_memory(sk, &timeo);
		if (ret)
			goto out;

		mptcp_clean_una(sk);

		ssk = mptcp_subflow_get_send(msk, &sndbuf);
		if (list_empty(&msk->conn_list)) {
			ret = -ENOTCONN;
			goto out;
		}
	}

	/* do auto tuning */
	if (!(sk->sk_userlocks & SOCK_SNDBUF_LOCK) &&
	    sndbuf > READ_ONCE(sk->sk_sndbuf))
		WRITE_ONCE(sk->sk_sndbuf, sndbuf);

	pr_debug("conn_list->subflow=%p", ssk);

	lock_sock(ssk);
	tx_ok = msg_data_left(msg);
	while (tx_ok) {
		ret = mptcp_sendmsg_frag(sk, ssk, msg, NULL, &timeo, &mss_now,
					 &size_goal);
		if (ret < 0) {
			if (ret == -EAGAIN && timeo > 0) {
				mptcp_set_timeout(sk, ssk);
				release_sock(ssk);
				goto restart;
			}
			break;
		}

		copied += ret;

		tx_ok = msg_data_left(msg);
		if (!tx_ok)
			break;

		if (!sk_stream_memory_free(ssk) ||
		    !mptcp_page_frag_refill(ssk, pfrag) ||
		    !mptcp_ext_cache_refill(msk)) {
			tcp_push(ssk, msg->msg_flags, mss_now,
				 tcp_sk(ssk)->nonagle, size_goal);
			mptcp_set_timeout(sk, ssk);
			release_sock(ssk);
			goto restart;
		}

		/* memory is charged to mptcp level socket as well, i.e.
		 * if msg is very large, mptcp socket may run out of buffer
		 * space. mptcp_clean_una() will release data that has
		 * been acked at mptcp level in the meantime, so there is
		 * a good chance we can continue sending data right away.
		 *
		 * Normally, when the tcp subflow can accept more data, then
		 * so can the MPTCP socket. However, we need to cope with
		 * peers that might lag behind in their MPTCP-level
		 * acknowledgements, i.e. data might have been acked at
		 * tcp level only. So, we must also check the MPTCP socket
		 * limits before we send more data.
		 */
		if (unlikely(!sk_stream_memory_free(sk))) {
			tcp_push(ssk, msg->msg_flags, mss_now,
				 tcp_sk(ssk)->nonagle, size_goal);
			mptcp_clean_una(sk);
			if (!sk_stream_memory_free(sk)) {
				/* can't send more for now, need to wait for
				 * MPTCP-level ACKs from peer.
				 *
				 * Wakeup will happen via mptcp_clean_una().
				 */
				mptcp_set_timeout(sk, ssk);
				release_sock(ssk);
				goto restart;
			}
		}
	}

	mptcp_set_timeout(sk, ssk);
	if (copied) {
		tcp_push(ssk, msg->msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

		/* start the timer, if it's not pending */
		if (!mptcp_timer_pending(sk))
			mptcp_reset_timer(sk);
	}

	release_sock(ssk);
out:
	ssk_check_wmem(msk);
	release_sock(sk);
	return copied ? : ret;
}

static void mptcp_wait_data(struct sock *sk, long *timeo)
{
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	struct mptcp_sock *msk = mptcp_sk(sk);

	add_wait_queue(sk_sleep(sk), &wait);
	sk_set_bit(SOCKWQ_ASYNC_WAITDATA, sk);

	sk_wait_event(sk, timeo,
		      test_and_clear_bit(MPTCP_DATA_READY, &msk->flags), &wait);

	sk_clear_bit(SOCKWQ_ASYNC_WAITDATA, sk);
	remove_wait_queue(sk_sleep(sk), &wait);
}

static int __mptcp_recvmsg_mskq(struct mptcp_sock *msk,
				struct msghdr *msg,
				size_t len)
{
	struct sock *sk = (struct sock *)msk;
	struct sk_buff *skb;
	int copied = 0;

	while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
		u32 offset = MPTCP_SKB_CB(skb)->offset;
		u32 data_len = skb->len - offset;
		u32 count = min_t(size_t, len - copied, data_len);
		int err;

		err = skb_copy_datagram_msg(skb, offset, msg, count);
		if (unlikely(err < 0)) {
			if (!copied)
				return err;
			break;
		}

		copied += count;

		if (count < data_len) {
			MPTCP_SKB_CB(skb)->offset += count;
			break;
		}

		__skb_unlink(skb, &sk->sk_receive_queue);
		__kfree_skb(skb);

		if (copied >= len)
			break;
	}

	return copied;
}

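/* A partially consumed skb stays at the head of the msk receive queue:
 * __mptcp_recvmsg_mskq() only advances MPTCP_SKB_CB(skb)->offset for it,
 * and unlinks and frees an skb only once it has been fully drained.
 */
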
/* receive buffer autotuning. See tcp_rcv_space_adjust for more information.
 *
 * Only difference: Use highest rtt estimate of the subflows in use.
 */
static void mptcp_rcv_space_adjust(struct mptcp_sock *msk, int copied)
{
	struct mptcp_subflow_context *subflow;
	struct sock *sk = (struct sock *)msk;
	u32 time, advmss = 1;
	u64 rtt_us, mstamp;

	sock_owned_by_me(sk);

	if (copied <= 0)
		return;

	msk->rcvq_space.copied += copied;

	mstamp = div_u64(tcp_clock_ns(), NSEC_PER_USEC);
	time = tcp_stamp_us_delta(mstamp, msk->rcvq_space.time);

	rtt_us = msk->rcvq_space.rtt_us;
	if (rtt_us && time < (rtt_us >> 3))
		return;

	rtt_us = 0;
	mptcp_for_each_subflow(msk, subflow) {
		const struct tcp_sock *tp;
		u64 sf_rtt_us;
		u32 sf_advmss;

		tp = tcp_sk(mptcp_subflow_tcp_sock(subflow));

		sf_rtt_us = READ_ONCE(tp->rcv_rtt_est.rtt_us);
		sf_advmss = READ_ONCE(tp->advmss);

		rtt_us = max(sf_rtt_us, rtt_us);
		advmss = max(sf_advmss, advmss);
	}

	msk->rcvq_space.rtt_us = rtt_us;
	if (time < (rtt_us >> 3) || rtt_us == 0)
		return;

	if (msk->rcvq_space.copied <= msk->rcvq_space.space)
		goto new_measure;

	if (sock_net(sk)->ipv4.sysctl_tcp_moderate_rcvbuf &&
	    !(sk->sk_userlocks & SOCK_RCVBUF_LOCK)) {
		int rcvmem, rcvbuf;
		u64 rcvwin, grow;

		rcvwin = ((u64)msk->rcvq_space.copied << 1) + 16 * advmss;

		grow = rcvwin * (msk->rcvq_space.copied - msk->rcvq_space.space);

		do_div(grow, msk->rcvq_space.space);
		rcvwin += (grow << 1);

		rcvmem = SKB_TRUESIZE(advmss + MAX_TCP_HEADER);
		while (tcp_win_from_space(sk, rcvmem) < advmss)
			rcvmem += 128;

		do_div(rcvwin, advmss);
		rcvbuf = min_t(u64, rcvwin * rcvmem,
			       sock_net(sk)->ipv4.sysctl_tcp_rmem[2]);

		if (rcvbuf > sk->sk_rcvbuf) {
			u32 window_clamp;

			window_clamp = tcp_win_from_space(sk, rcvbuf);
			WRITE_ONCE(sk->sk_rcvbuf, rcvbuf);

			/* Make subflows follow along. If we do not do this, we
			 * get drops at subflow level if skbs can't be moved to
			 * the mptcp rx queue fast enough (announced rcv_win can
			 * exceed ssk->sk_rcvbuf).
			 */
			mptcp_for_each_subflow(msk, subflow) {
				struct sock *ssk;

				ssk = mptcp_subflow_tcp_sock(subflow);
				WRITE_ONCE(ssk->sk_rcvbuf, rcvbuf);
				tcp_sk(ssk)->window_clamp = window_clamp;
			}
		}
	}

	msk->rcvq_space.space = msk->rcvq_space.copied;
new_measure:
	msk->rcvq_space.copied = 0;
	msk->rcvq_space.time = mstamp;
}

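/* As in tcp_rcv_space_adjust(), the window target above starts from twice
 * the bytes copied in the last RTT plus 16 * advmss of slack, grows in
 * proportion to how much the reader out-paced the previous measurement,
 * and is converted to a buffer size via the per-skb truesize estimate,
 * clamped to tcp_rmem[2].
 */
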
static bool __mptcp_move_skbs(struct mptcp_sock *msk)
{
	unsigned int moved = 0;
	bool done;

	do {
		struct sock *ssk = mptcp_subflow_recv_lookup(msk);

		if (!ssk)
			break;

		lock_sock(ssk);
		done = __mptcp_move_skbs_from_subflow(msk, ssk, &moved);
		release_sock(ssk);
	} while (!done);

	return moved > 0;
}

static int mptcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
			 int nonblock, int flags, int *addr_len)
{
	struct mptcp_sock *msk = mptcp_sk(sk);
	int copied = 0;
	int target;
	long timeo;

	if (msg->msg_flags & ~(MSG_WAITALL | MSG_DONTWAIT))
		return -EOPNOTSUPP;

	lock_sock(sk);
	timeo = sock_rcvtimeo(sk, nonblock);

	len = min_t(size_t, len, INT_MAX);
	target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
	__mptcp_flush_join_list(msk);

	while (len > (size_t)copied) {
		int bytes_read;

		bytes_read = __mptcp_recvmsg_mskq(msk, msg, len - copied);
		if (unlikely(bytes_read < 0)) {
			if (!copied)
				copied = bytes_read;
			goto out_err;
		}

		copied += bytes_read;

		if (skb_queue_empty(&sk->sk_receive_queue) &&
		    __mptcp_move_skbs(msk))
			continue;

		/* only the master socket status is relevant here. The exit
		 * conditions closely mirror tcp_recvmsg()
		 */
		if (copied >= target)
			break;

		if (copied) {
			if (sk->sk_err ||
			    sk->sk_state == TCP_CLOSE ||
			    (sk->sk_shutdown & RCV_SHUTDOWN) ||
			    !timeo ||
			    signal_pending(current))
				break;
		} else {
			if (sk->sk_err) {
				copied = sock_error(sk);
				break;
			}

			if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
				mptcp_check_for_eof(msk);

			if (sk->sk_shutdown & RCV_SHUTDOWN)
				break;

			if (sk->sk_state == TCP_CLOSE) {
				copied = -ENOTCONN;
				break;
			}

			if (!timeo) {
				copied = -EAGAIN;
				break;
			}

			if (signal_pending(current)) {
				copied = sock_intr_errno(timeo);
				break;
			}
		}

		pr_debug("block timeout %ld", timeo);
		mptcp_wait_data(sk, &timeo);
	}

	if (skb_queue_empty(&sk->sk_receive_queue)) {
		/* entire backlog drained, clear DATA_READY. */
		clear_bit(MPTCP_DATA_READY, &msk->flags);

		/* .. race-breaker: ssk might have gotten new data
		 * after last __mptcp_move_skbs() returned false.
		 */
		if (unlikely(__mptcp_move_skbs(msk)))
			set_bit(MPTCP_DATA_READY, &msk->flags);
	} else if (unlikely(!test_bit(MPTCP_DATA_READY, &msk->flags))) {
		/* data to read but mptcp_wait_data() cleared DATA_READY */
		set_bit(MPTCP_DATA_READY, &msk->flags);
	}
out_err:
	pr_debug("msk=%p data_ready=%d rx queue empty=%d copied=%d",
		 msk, test_bit(MPTCP_DATA_READY, &msk->flags),
		 skb_queue_empty(&sk->sk_receive_queue), copied);
	mptcp_rcv_space_adjust(msk, copied);

	release_sock(sk);
	return copied;
}

static void mptcp_retransmit_handler(struct sock *sk)
{
	struct mptcp_sock *msk = mptcp_sk(sk);

	if (atomic64_read(&msk->snd_una) == READ_ONCE(msk->write_seq)) {
		mptcp_stop_timer(sk);
	} else {
		set_bit(MPTCP_WORK_RTX, &msk->flags);
		if (schedule_work(&msk->work))
			sock_hold(sk);
	}
}

static void mptcp_retransmit_timer(struct timer_list *t)
{
	struct inet_connection_sock *icsk = from_timer(icsk, t,
						       icsk_retransmit_timer);
	struct sock *sk = &icsk->icsk_inet.sk;

	bh_lock_sock(sk);
	if (!sock_owned_by_user(sk)) {
		mptcp_retransmit_handler(sk);
	} else {
		/* delegate our work to tcp_release_cb() */
		if (!test_and_set_bit(TCP_WRITE_TIMER_DEFERRED,
				      &sk->sk_tsq_flags))
			sock_hold(sk);
	}
	bh_unlock_sock(sk);
	sock_put(sk);
}

/* Find an idle subflow. Return NULL if there is unacked data at tcp
 * level.
 *
 * A backup subflow is returned only if that is the only kind available.
 */
static struct sock *mptcp_subflow_get_retrans(const struct mptcp_sock *msk)
{
	struct mptcp_subflow_context *subflow;
	struct sock *backup = NULL;

	sock_owned_by_me((const struct sock *)msk);

	mptcp_for_each_subflow(msk, subflow) {
		struct sock *ssk = mptcp_subflow_tcp_sock(subflow);

		/* still data outstanding at TCP level? Don't retransmit. */
		if (!tcp_write_queue_empty(ssk))
			return NULL;

		if (subflow->backup) {
			if (!backup)
				backup = ssk;
			continue;
		}

		return ssk;
	}

	return backup;
}

/* subflow sockets can be either outgoing (connect) or incoming
 * (accept).
 *
 * Outgoing subflows use in-kernel sockets.
 * Incoming subflows do not have their own 'struct socket' allocated,
 * so we need to use tcp_close() after detaching them from the mptcp
 * parent socket.
 */
static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
			      struct mptcp_subflow_context *subflow,
			      long timeout)
{
	struct socket *sock = READ_ONCE(ssk->sk_socket);

	list_del(&subflow->node);

	if (sock && sock != sk->sk_socket) {
		/* outgoing subflow */
		sock_release(sock);
	} else {
		/* incoming subflow */
		tcp_close(ssk, timeout);
	}
}

static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
{
	return 0;
}

static void pm_work(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
		mptcp_pm_nl_add_addr_received(msk);
	}
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
		mptcp_pm_nl_fully_established(msk);
	}
	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
		mptcp_pm_nl_subflow_established(msk);
	}

	spin_unlock_bh(&msk->pm.lock);
}

static void mptcp_worker(struct work_struct *work)
{
	struct mptcp_sock *msk = container_of(work, struct mptcp_sock, work);
	struct sock *ssk, *sk = &msk->sk.icsk_inet.sk;
	int orig_len, orig_offset, mss_now = 0, size_goal = 0;
	struct mptcp_data_frag *dfrag;
	u64 orig_write_seq;
	size_t copied = 0;
	struct msghdr msg = {
		.msg_flags = MSG_DONTWAIT,
	};
	long timeo = 0;

	lock_sock(sk);
	mptcp_clean_una(sk);
	mptcp_check_data_fin_ack(sk);
	__mptcp_flush_join_list(msk);
	__mptcp_move_skbs(msk);

	if (msk->pm.status)
		pm_work(msk);

	if (test_and_clear_bit(MPTCP_WORK_EOF, &msk->flags))
		mptcp_check_for_eof(msk);

	mptcp_check_data_fin(sk);

	if (!test_and_clear_bit(MPTCP_WORK_RTX, &msk->flags))
		goto unlock;

	dfrag = mptcp_rtx_head(sk);
	if (!dfrag)
		goto unlock;

	if (!mptcp_ext_cache_refill(msk))
		goto reset_unlock;

	ssk = mptcp_subflow_get_retrans(msk);
	if (!ssk)
		goto reset_unlock;

	lock_sock(ssk);

	orig_len = dfrag->data_len;
	orig_offset = dfrag->offset;
	orig_write_seq = dfrag->data_seq;
	while (dfrag->data_len > 0) {
		int ret = mptcp_sendmsg_frag(sk, ssk, &msg, dfrag, &timeo,
					     &mss_now, &size_goal);
		if (ret < 0)
			break;

		MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_RETRANSSEGS);
		copied += ret;
		dfrag->data_len -= ret;
		dfrag->offset += ret;

		if (!mptcp_ext_cache_refill(msk))
			break;
	}
	if (copied)
		tcp_push(ssk, msg.msg_flags, mss_now, tcp_sk(ssk)->nonagle,
			 size_goal);

	dfrag->data_seq = orig_write_seq;
	dfrag->offset = orig_offset;
	dfrag->data_len = orig_len;

	mptcp_set_timeout(sk, ssk);
	release_sock(ssk);

reset_unlock:
	if (!mptcp_timer_pending(sk))
		mptcp_reset_timer(sk);

unlock:
	release_sock(sk);
	sock_put(sk);
}

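/* Every successful schedule_work(&msk->work) in this file is paired with a
 * sock_hold(); mptcp_worker() releases that reference with the final
 * sock_put() above, keeping the msk alive while work is pending.
 */
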
784325e9 | 1515 | static int __mptcp_init_sock(struct sock *sk) |
f870fa0b | 1516 | { |
cec37a6e PK |
1517 | struct mptcp_sock *msk = mptcp_sk(sk); |
1518 | ||
ec3edaa7 PK |
1519 | spin_lock_init(&msk->join_list_lock); |
1520 | ||
cec37a6e | 1521 | INIT_LIST_HEAD(&msk->conn_list); |
ec3edaa7 | 1522 | INIT_LIST_HEAD(&msk->join_list); |
18b683bf | 1523 | INIT_LIST_HEAD(&msk->rtx_queue); |
1891c4a0 | 1524 | __set_bit(MPTCP_SEND_SPACE, &msk->flags); |
80992017 | 1525 | INIT_WORK(&msk->work, mptcp_worker); |
cec37a6e | 1526 | |
8ab183de | 1527 | msk->first = NULL; |
dc24f8b4 | 1528 | inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss; |
8ab183de | 1529 | |
1b1c7a0e PK |
1530 | mptcp_pm_data_init(msk); |
1531 | ||
b51f9b80 PA |
1532 | /* re-use the csk retrans timer for MPTCP-level retrans */ |
1533 | timer_setup(&msk->sk.icsk_retransmit_timer, mptcp_retransmit_timer, 0); | |
1534 | ||
f870fa0b MM |
1535 | return 0; |
1536 | } | |
1537 | ||
784325e9 MB |
1538 | static int mptcp_init_sock(struct sock *sk) |
1539 | { | |
fc518953 FW |
1540 | struct net *net = sock_net(sk); |
1541 | int ret; | |
18b683bf | 1542 | |
fc518953 FW |
1543 | if (!mptcp_is_enabled(net)) |
1544 | return -ENOPROTOOPT; | |
1545 | ||
1546 | if (unlikely(!net->mib.mptcp_statistics) && !mptcp_mib_alloc(net)) | |
1547 | return -ENOMEM; | |
1548 | ||
1549 | ret = __mptcp_init_sock(sk); | |
18b683bf PA |
1550 | if (ret) |
1551 | return ret; | |
1552 | ||
fa68018d PA |
1553 | ret = __mptcp_socket_create(mptcp_sk(sk)); |
1554 | if (ret) | |
1555 | return ret; | |
1556 | ||
d027236c | 1557 | sk_sockets_allocated_inc(sk); |
a6b118fe | 1558 | sk->sk_rcvbuf = sock_net(sk)->ipv4.sysctl_tcp_rmem[1]; |
da51aef5 | 1559 | sk->sk_sndbuf = sock_net(sk)->ipv4.sysctl_tcp_wmem[1]; |
d027236c | 1560 | |
18b683bf PA |
1561 | return 0; |
1562 | } | |
1563 | ||
1564 | static void __mptcp_clear_xmit(struct sock *sk) | |
1565 | { | |
1566 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1567 | struct mptcp_data_frag *dtmp, *dfrag; | |
1568 | ||
b51f9b80 PA |
1569 | sk_stop_timer(sk, &msk->sk.icsk_retransmit_timer); |
1570 | ||
18b683bf | 1571 | list_for_each_entry_safe(dfrag, dtmp, &msk->rtx_queue, list) |
d027236c | 1572 | dfrag_clear(sk, dfrag); |
784325e9 MB |
1573 | } |
1574 | ||
80992017 PA |
1575 | static void mptcp_cancel_work(struct sock *sk) |
1576 | { | |
1577 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1578 | ||
1579 | if (cancel_work_sync(&msk->work)) | |
1580 | sock_put(sk); | |
1581 | } | |
1582 | ||
43b54c6e | 1583 | static void mptcp_subflow_shutdown(struct sock *sk, struct sock *ssk, int how) |
21498490 PK |
1584 | { |
1585 | lock_sock(ssk); | |
1586 | ||
1587 | switch (ssk->sk_state) { | |
1588 | case TCP_LISTEN: | |
1589 | if (!(how & RCV_SHUTDOWN)) | |
1590 | break; | |
df561f66 | 1591 | fallthrough; |
21498490 PK |
1592 | case TCP_SYN_SENT: |
1593 | tcp_disconnect(ssk, O_NONBLOCK); | |
1594 | break; | |
1595 | default: | |
43b54c6e MM |
1596 | if (__mptcp_check_fallback(mptcp_sk(sk))) { |
1597 | pr_debug("Fallback"); | |
1598 | ssk->sk_shutdown |= how; | |
1599 | tcp_shutdown(ssk, how); | |
1600 | } else { | |
1601 | pr_debug("Sending DATA_FIN on subflow %p", ssk); | |
1602 | mptcp_set_timeout(sk, ssk); | |
1603 | tcp_send_ack(ssk); | |
1604 | } | |
21498490 PK |
1605 | break; |
1606 | } | |
1607 | ||
21498490 PK |
1608 | release_sock(ssk); |
1609 | } | |
1610 | ||
6920b851 MM |
1611 | static const unsigned char new_state[16] = { |
1612 | /* current state: new state: action: */ | |
1613 | [0 /* (Invalid) */] = TCP_CLOSE, | |
1614 | [TCP_ESTABLISHED] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1615 | [TCP_SYN_SENT] = TCP_CLOSE, | |
1616 | [TCP_SYN_RECV] = TCP_FIN_WAIT1 | TCP_ACTION_FIN, | |
1617 | [TCP_FIN_WAIT1] = TCP_FIN_WAIT1, | |
1618 | [TCP_FIN_WAIT2] = TCP_FIN_WAIT2, | |
1619 | [TCP_TIME_WAIT] = TCP_CLOSE, /* should not happen! */ | |
1620 | [TCP_CLOSE] = TCP_CLOSE, | |
1621 | [TCP_CLOSE_WAIT] = TCP_LAST_ACK | TCP_ACTION_FIN, | |
1622 | [TCP_LAST_ACK] = TCP_LAST_ACK, | |
1623 | [TCP_LISTEN] = TCP_CLOSE, | |
1624 | [TCP_CLOSING] = TCP_CLOSING, | |
1625 | [TCP_NEW_SYN_RECV] = TCP_CLOSE, /* should not happen! */ | |
1626 | }; | |
1627 | ||
1628 | static int mptcp_close_state(struct sock *sk) | |
1629 | { | |
1630 | int next = (int)new_state[sk->sk_state]; | |
1631 | int ns = next & TCP_STATE_MASK; | |
1632 | ||
1633 | inet_sk_state_store(sk, ns); | |
1634 | ||
1635 | return next & TCP_ACTION_FIN; | |
1636 | } | |
1637 | ||
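As in tcp_close_state(), each new_state[] entry packs the next socket state into the low bits and an "emit a FIN" flag into a high bit. A worked example of the encoding, assuming the usual definitions from include/net/tcp.h (TCP_STATE_MASK is 0xF, TCP_ACTION_FIN is 1 << 7):

/* next = new_state[TCP_ESTABLISHED];	-> TCP_FIN_WAIT1 | TCP_ACTION_FIN
 * ns   = next & TCP_STATE_MASK;	-> TCP_FIN_WAIT1, stored on the msk
 * next & TCP_ACTION_FIN		-> nonzero: the caller emits DATA_FIN
 *
 * new_state[TCP_SYN_SENT], by contrast, is plain TCP_CLOSE: the socket
 * is torn down and no DATA_FIN is put on the wire.
 */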
2c22c06c | 1638 | static void mptcp_close(struct sock *sk, long timeout) |
f870fa0b | 1639 | { |
cec37a6e | 1640 | struct mptcp_subflow_context *subflow, *tmp; |
f870fa0b | 1641 | struct mptcp_sock *msk = mptcp_sk(sk); |
b2c5b614 | 1642 | LIST_HEAD(conn_list); |
f870fa0b | 1643 | |
2c22c06c | 1644 | lock_sock(sk); |
43b54c6e MM |
1645 | sk->sk_shutdown = SHUTDOWN_MASK; |
1646 | ||
1647 | if (sk->sk_state == TCP_LISTEN) { | |
1648 | inet_sk_state_store(sk, TCP_CLOSE); | |
1649 | goto cleanup; | |
1650 | } else if (sk->sk_state == TCP_CLOSE) { | |
1651 | goto cleanup; | |
1652 | } | |
1653 | ||
1654 | if (__mptcp_check_fallback(msk)) { | |
1655 | goto update_state; | |
1656 | } else if (mptcp_close_state(sk)) { | |
1657 | pr_debug("Sending DATA_FIN sk=%p", sk); | |
1658 | WRITE_ONCE(msk->write_seq, msk->write_seq + 1); | |
1659 | WRITE_ONCE(msk->snd_data_fin_enable, 1); | |
1660 | ||
1661 | mptcp_for_each_subflow(msk, subflow) { | |
1662 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); | |
1663 | ||
1664 | mptcp_subflow_shutdown(sk, tcp_sk, SHUTDOWN_MASK); | |
1665 | } | |
1666 | } | |
2c22c06c | 1667 | |
43b54c6e MM |
1668 | sk_stream_wait_close(sk, timeout); |
1669 | ||
1670 | update_state: | |
f870fa0b MM |
1671 | inet_sk_state_store(sk, TCP_CLOSE); |
1672 | ||
43b54c6e | 1673 | cleanup: |
10f6d46c PA |
1674 | /* be sure to always acquire the join list lock, to synchronize with
1675 | * mptcp_finish_join(). | |
1676 | */ | |
1677 | spin_lock_bh(&msk->join_list_lock); | |
1678 | list_splice_tail_init(&msk->join_list, &msk->conn_list); | |
1679 | spin_unlock_bh(&msk->join_list_lock); | |
b2c5b614 FW |
1680 | list_splice_init(&msk->conn_list, &conn_list); |
1681 | ||
18b683bf PA |
1682 | __mptcp_clear_xmit(sk); |
1683 | ||
b2c5b614 FW |
1684 | release_sock(sk); |
1685 | ||
1686 | list_for_each_entry_safe(subflow, tmp, &conn_list, node) { | |
cec37a6e | 1687 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
cec37a6e | 1688 | __mptcp_close_ssk(sk, ssk, subflow, timeout); |
f870fa0b MM |
1689 | } |
1690 | ||
80992017 PA |
1691 | mptcp_cancel_work(sk); |
1692 | ||
6771bfd9 FW |
1693 | __skb_queue_purge(&sk->sk_receive_queue); |
1694 | ||
cec37a6e | 1695 | sk_common_release(sk); |
f870fa0b MM |
1696 | } |
1697 | ||
cf7da0d6 PK |
1698 | static void mptcp_copy_inaddrs(struct sock *msk, const struct sock *ssk) |
1699 | { | |
1700 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
1701 | const struct ipv6_pinfo *ssk6 = inet6_sk(ssk); | |
1702 | struct ipv6_pinfo *msk6 = inet6_sk(msk); | |
1703 | ||
1704 | msk->sk_v6_daddr = ssk->sk_v6_daddr; | |
1705 | msk->sk_v6_rcv_saddr = ssk->sk_v6_rcv_saddr; | |
1706 | ||
1707 | if (msk6 && ssk6) { | |
1708 | msk6->saddr = ssk6->saddr; | |
1709 | msk6->flow_label = ssk6->flow_label; | |
1710 | } | |
1711 | #endif | |
1712 | ||
1713 | inet_sk(msk)->inet_num = inet_sk(ssk)->inet_num; | |
1714 | inet_sk(msk)->inet_dport = inet_sk(ssk)->inet_dport; | |
1715 | inet_sk(msk)->inet_sport = inet_sk(ssk)->inet_sport; | |
1716 | inet_sk(msk)->inet_daddr = inet_sk(ssk)->inet_daddr; | |
1717 | inet_sk(msk)->inet_saddr = inet_sk(ssk)->inet_saddr; | |
1718 | inet_sk(msk)->inet_rcv_saddr = inet_sk(ssk)->inet_rcv_saddr; | |
1719 | } | |
1720 | ||
18b683bf PA |
1721 | static int mptcp_disconnect(struct sock *sk, int flags) |
1722 | { | |
42c556fe FW |
1723 | /* Should never be called. |
1724 | * inet_stream_connect() calls ->disconnect, but that | |
1725 | * refers to the subflow socket, not the mptcp one. | |
1726 | */ | |
1727 | WARN_ON_ONCE(1); | |
1728 | return 0; | |
18b683bf PA |
1729 | } |
1730 | ||
b0519de8 FW |
1731 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) |
1732 | static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk) | |
1733 | { | |
1734 | unsigned int offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo); | |
1735 | ||
1736 | return (struct ipv6_pinfo *)(((u8 *)sk) + offset); | |
1737 | } | |
1738 | #endif | |
1739 | ||
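The pointer arithmetic in mptcp_inet6_sk() relies purely on the layout of struct mptcp6_sock, which places the ipv6_pinfo immediately after the msk; a sketch of that layout:

/*	+------------------------+  <- (u8 *)sk
 *	| struct mptcp_sock msk  |
 *	+------------------------+  <- (u8 *)sk + offset
 *	| struct ipv6_pinfo np   |
 *	+------------------------+  <- (u8 *)sk + sizeof(struct mptcp6_sock)
 *
 * offset = sizeof(struct mptcp6_sock) - sizeof(struct ipv6_pinfo) lands
 * exactly on np, which mptcp_sk_clone() below installs as
 * inet_sk(nsk)->pinet6 for v6 sockets.
 */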
fca5c82c | 1740 | struct sock *mptcp_sk_clone(const struct sock *sk, |
cfde141e | 1741 | const struct mptcp_options_received *mp_opt, |
fca5c82c | 1742 | struct request_sock *req) |
b0519de8 | 1743 | { |
58b09919 | 1744 | struct mptcp_subflow_request_sock *subflow_req = mptcp_subflow_rsk(req); |
b0519de8 | 1745 | struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC); |
58b09919 PA |
1746 | struct mptcp_sock *msk; |
1747 | u64 ack_seq; | |
b0519de8 FW |
1748 | |
1749 | if (!nsk) | |
1750 | return NULL; | |
1751 | ||
1752 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
1753 | if (nsk->sk_family == AF_INET6) | |
1754 | inet_sk(nsk)->pinet6 = mptcp_inet6_sk(nsk); | |
1755 | #endif | |
1756 | ||
58b09919 PA |
1757 | __mptcp_init_sock(nsk); |
1758 | ||
1759 | msk = mptcp_sk(nsk); | |
1760 | msk->local_key = subflow_req->local_key; | |
1761 | msk->token = subflow_req->token; | |
1762 | msk->subflow = NULL; | |
b93df08c | 1763 | WRITE_ONCE(msk->fully_established, false); |
58b09919 | 1764 | |
58b09919 | 1765 | msk->write_seq = subflow_req->idsn + 1; |
cc9d2566 | 1766 | atomic64_set(&msk->snd_una, msk->write_seq); |
cfde141e | 1767 | if (mp_opt->mp_capable) { |
58b09919 | 1768 | msk->can_ack = true; |
cfde141e | 1769 | msk->remote_key = mp_opt->sndr_key; |
58b09919 PA |
1770 | mptcp_crypto_key_sha(msk->remote_key, NULL, &ack_seq); |
1771 | ack_seq++; | |
1772 | msk->ack_seq = ack_seq; | |
1773 | } | |
7f20d5fc | 1774 | |
5e20087d | 1775 | sock_reset_flag(nsk, SOCK_RCU_FREE); |
7f20d5fc PA |
1776 | /* will be fully established after successful MPC subflow creation */ |
1777 | inet_sk_state_store(nsk, TCP_SYN_RECV); | |
58b09919 PA |
1778 | bh_unlock_sock(nsk); |
1779 | ||
1780 | /* keep a single reference */ | |
1781 | __sock_put(nsk); | |
b0519de8 FW |
1782 | return nsk; |
1783 | } | |
1784 | ||
a6b118fe FW |
1785 | void mptcp_rcv_space_init(struct mptcp_sock *msk, const struct sock *ssk) |
1786 | { | |
1787 | const struct tcp_sock *tp = tcp_sk(ssk); | |
1788 | ||
1789 | msk->rcvq_space.copied = 0; | |
1790 | msk->rcvq_space.rtt_us = 0; | |
1791 | ||
1792 | msk->rcvq_space.time = tp->tcp_mstamp; | |
1793 | ||
1794 | /* initial rcv_space offering made to peer */ | |
1795 | msk->rcvq_space.space = min_t(u32, tp->rcv_wnd, | |
1796 | TCP_INIT_CWND * tp->advmss); | |
1797 | if (msk->rcvq_space.space == 0) | |
1798 | msk->rcvq_space.space = TCP_INIT_CWND * TCP_MSS_DEFAULT; | |
1799 | } | |
1800 | ||
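Concretely, with typical Ethernet values the initial offer works out as follows (TCP_INIT_CWND is 10 and TCP_MSS_DEFAULT is 536 in the TCP headers):

/* tp->advmss = 1460, tp->rcv_wnd = 65535:
 *
 *	space = min(65535, 10 * 1460) = 14600 bytes
 *
 * degenerate case, tp->rcv_wnd == 0:
 *
 *	space = 10 * TCP_MSS_DEFAULT = 10 * 536 = 5360 bytes
 */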
cf7da0d6 PK |
1801 | static struct sock *mptcp_accept(struct sock *sk, int flags, int *err, |
1802 | bool kern) | |
1803 | { | |
1804 | struct mptcp_sock *msk = mptcp_sk(sk); | |
1805 | struct socket *listener; | |
1806 | struct sock *newsk; | |
1807 | ||
1808 | listener = __mptcp_nmpc_socket(msk); | |
1809 | if (WARN_ON_ONCE(!listener)) { | |
1810 | *err = -EINVAL; | |
1811 | return NULL; | |
1812 | } | |
1813 | ||
1814 | pr_debug("msk=%p, listener=%p", msk, mptcp_subflow_ctx(listener->sk)); | |
1815 | newsk = inet_csk_accept(listener->sk, flags, err, kern); | |
1816 | if (!newsk) | |
1817 | return NULL; | |
1818 | ||
1819 | pr_debug("msk=%p, subflow is mptcp=%d", msk, sk_is_mptcp(newsk)); | |
cf7da0d6 PK |
1820 | if (sk_is_mptcp(newsk)) { |
1821 | struct mptcp_subflow_context *subflow; | |
1822 | struct sock *new_mptcp_sock; | |
1823 | struct sock *ssk = newsk; | |
1824 | ||
1825 | subflow = mptcp_subflow_ctx(newsk); | |
58b09919 | 1826 | new_mptcp_sock = subflow->conn; |
cf7da0d6 | 1827 | |
58b09919 PA |
1828 | /* is_mptcp should be false if subflow->conn is missing, see |
1829 | * subflow_syn_recv_sock() | |
1830 | */ | |
1831 | if (WARN_ON_ONCE(!new_mptcp_sock)) { | |
1832 | tcp_sk(newsk)->is_mptcp = 0; | |
1833 | return newsk; | |
cf7da0d6 PK |
1834 | } |
1835 | ||
58b09919 PA |
1836 | /* acquire the 2nd reference for the owning socket */ |
1837 | sock_hold(new_mptcp_sock); | |
cf7da0d6 | 1838 | |
58b09919 PA |
1839 | local_bh_disable(); |
1840 | bh_lock_sock(new_mptcp_sock); | |
cf7da0d6 | 1841 | msk = mptcp_sk(new_mptcp_sock); |
8ab183de | 1842 | msk->first = newsk; |
cf7da0d6 PK |
1843 | |
1844 | newsk = new_mptcp_sock; | |
1845 | mptcp_copy_inaddrs(newsk, ssk); | |
1846 | list_add(&subflow->node, &msk->conn_list); | |
1847 | ||
a6b118fe | 1848 | mptcp_rcv_space_init(msk, ssk); |
cf7da0d6 | 1849 | bh_unlock_sock(new_mptcp_sock); |
fc518953 FW |
1850 | |
1851 | __MPTCP_INC_STATS(sock_net(sk), MPTCP_MIB_MPCAPABLEPASSIVEACK); | |
cf7da0d6 | 1852 | local_bh_enable(); |
fc518953 FW |
1853 | } else { |
1854 | MPTCP_INC_STATS(sock_net(sk), | |
1855 | MPTCP_MIB_MPCAPABLEPASSIVEFALLBACK); | |
cf7da0d6 PK |
1856 | } |
1857 | ||
1858 | return newsk; | |
1859 | } | |
1860 | ||
79c0949e PK |
1861 | static void mptcp_destroy(struct sock *sk) |
1862 | { | |
c9fd9c5f FW |
1863 | struct mptcp_sock *msk = mptcp_sk(sk); |
1864 | ||
2c5ebd00 | 1865 | mptcp_token_destroy(msk); |
c9fd9c5f FW |
1866 | if (msk->cached_ext) |
1867 | __skb_ext_put(msk->cached_ext); | |
d027236c PA |
1868 | |
1869 | sk_sockets_allocated_dec(sk); | |
79c0949e PK |
1870 | } |
1871 | ||
fd1452d8 | 1872 | static int mptcp_setsockopt_sol_socket(struct mptcp_sock *msk, int optname, |
a7b75c5a | 1873 | sockptr_t optval, unsigned int optlen) |
fd1452d8 FW |
1874 | { |
1875 | struct sock *sk = (struct sock *)msk; | |
1876 | struct socket *ssock; | |
1877 | int ret; | |
1878 | ||
1879 | switch (optname) { | |
1880 | case SO_REUSEPORT: | |
1881 | case SO_REUSEADDR: | |
1882 | lock_sock(sk); | |
1883 | ssock = __mptcp_nmpc_socket(msk); | |
1884 | if (!ssock) { | |
1885 | release_sock(sk); | |
1886 | return -EINVAL; | |
1887 | } | |
1888 | ||
a7b75c5a | 1889 | ret = sock_setsockopt(ssock, SOL_SOCKET, optname, optval, optlen); |
fd1452d8 FW |
1890 | if (ret == 0) { |
1891 | if (optname == SO_REUSEPORT) | |
1892 | sk->sk_reuseport = ssock->sk->sk_reuseport; | |
1893 | else if (optname == SO_REUSEADDR) | |
1894 | sk->sk_reuse = ssock->sk->sk_reuse; | |
1895 | } | |
1896 | release_sock(sk); | |
1897 | return ret; | |
1898 | } | |
1899 | ||
a7b75c5a | 1900 | return sock_setsockopt(sk->sk_socket, SOL_SOCKET, optname, optval, optlen); |
fd1452d8 FW |
1901 | } |
1902 | ||
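From userspace this path is exercised by setting the option on the MPTCP socket before bind(). A minimal sketch with a hypothetical helper (error handling trimmed; IPPROTO_MPTCP is 262 in the UAPI headers and is defined by hand here for older toolchains):

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP	262	/* include/uapi/linux/in.h */
#endif

static int mptcp_reuse_listener(unsigned short port)
{
	struct sockaddr_in addr;
	int one = 1;
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0)
		return -1;

	/* lands in mptcp_setsockopt_sol_socket(): applied to the first
	 * subflow, then mirrored into sk->sk_reuse on the msk
	 */
	setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &one, sizeof(one));

	memset(&addr, 0, sizeof(addr));
	addr.sin_family = AF_INET;
	addr.sin_port = htons(port);
	if (bind(fd, (struct sockaddr *)&addr, sizeof(addr)) ||
	    listen(fd, 128))
		return -1;
	return fd;
}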
c9b95a13 | 1903 | static int mptcp_setsockopt_v6(struct mptcp_sock *msk, int optname, |
a7b75c5a | 1904 | sockptr_t optval, unsigned int optlen) |
c9b95a13 FW |
1905 | { |
1906 | struct sock *sk = (struct sock *)msk; | |
1907 | int ret = -EOPNOTSUPP; | |
1908 | struct socket *ssock; | |
1909 | ||
1910 | switch (optname) { | |
1911 | case IPV6_V6ONLY: | |
1912 | lock_sock(sk); | |
1913 | ssock = __mptcp_nmpc_socket(msk); | |
1914 | if (!ssock) { | |
1915 | release_sock(sk); | |
1916 | return -EINVAL; | |
1917 | } | |
1918 | ||
1919 | ret = tcp_setsockopt(ssock->sk, SOL_IPV6, optname, optval, optlen); | |
1920 | if (ret == 0) | |
1921 | sk->sk_ipv6only = ssock->sk->sk_ipv6only; | |
1922 | ||
1923 | release_sock(sk); | |
1924 | break; | |
1925 | } | |
1926 | ||
1927 | return ret; | |
1928 | } | |
1929 | ||
717e79c8 | 1930 | static int mptcp_setsockopt(struct sock *sk, int level, int optname, |
a7b75c5a | 1931 | sockptr_t optval, unsigned int optlen) |
717e79c8 PK |
1932 | { |
1933 | struct mptcp_sock *msk = mptcp_sk(sk); | |
76660afb | 1934 | struct sock *ssk; |
717e79c8 PK |
1935 | |
1936 | pr_debug("msk=%p", msk); | |
1937 | ||
83f0c10b | 1938 | if (level == SOL_SOCKET) |
fd1452d8 | 1939 | return mptcp_setsockopt_sol_socket(msk, optname, optval, optlen); |
83f0c10b | 1940 | |
717e79c8 | 1941 | /* @@ the meaning of setsockopt() when the socket is connected and |
b6e4a1ae MM |
1942 | * there are multiple subflows is not yet defined. It is up to the |
1943 | * MPTCP-level socket to configure the subflows until the connection | |
1944 | * is in TCP fallback, at which point TCP socket options are passed through | |
1945 | * to the one remaining subflow. | |
717e79c8 PK |
1946 | */ |
1947 | lock_sock(sk); | |
76660afb | 1948 | ssk = __mptcp_tcp_fallback(msk); |
e154659b | 1949 | release_sock(sk); |
76660afb PA |
1950 | if (ssk) |
1951 | return tcp_setsockopt(ssk, level, optname, optval, optlen); | |
50e741bb | 1952 | |
c9b95a13 FW |
1953 | if (level == SOL_IPV6) |
1954 | return mptcp_setsockopt_v6(msk, optname, optval, optlen); | |
1955 | ||
b6e4a1ae | 1956 | return -EOPNOTSUPP; |
717e79c8 PK |
1957 | } |
1958 | ||
1959 | static int mptcp_getsockopt(struct sock *sk, int level, int optname, | |
50e741bb | 1960 | char __user *optval, int __user *option) |
717e79c8 PK |
1961 | { |
1962 | struct mptcp_sock *msk = mptcp_sk(sk); | |
76660afb | 1963 | struct sock *ssk; |
717e79c8 PK |
1964 | |
1965 | pr_debug("msk=%p", msk); | |
1966 | ||
b6e4a1ae MM |
1967 | /* @@ the meaning of getsockopt() when the socket is connected and
1968 | * there are multiple subflows is not yet defined. It is up to the | |
1969 | * MPTCP-level socket to configure the subflows until the connection | |
1970 | * is in TCP fallback, at which point socket options are passed through | |
1971 | * to the one remaining subflow. | |
717e79c8 PK |
1972 | */ |
1973 | lock_sock(sk); | |
76660afb | 1974 | ssk = __mptcp_tcp_fallback(msk); |
e154659b | 1975 | release_sock(sk); |
76660afb PA |
1976 | if (ssk) |
1977 | return tcp_getsockopt(ssk, level, optname, optval, option); | |
50e741bb | 1978 | |
b6e4a1ae | 1979 | return -EOPNOTSUPP; |
717e79c8 PK |
1980 | } |
1981 | ||
b51f9b80 PA |
1982 | #define MPTCP_DEFERRED_ALL (TCPF_DELACK_TIMER_DEFERRED | \ |
1983 | TCPF_WRITE_TIMER_DEFERRED) | |
14c441b5 PA |
1984 | |
1985 | /* this closely mirrors tcp_release_cb(), but we must handle a | |
1986 | * different set of events | |
1987 | */ | |
1988 | static void mptcp_release_cb(struct sock *sk) | |
1989 | { | |
1990 | unsigned long flags, nflags; | |
1991 | ||
1992 | do { | |
1993 | flags = sk->sk_tsq_flags; | |
1994 | if (!(flags & MPTCP_DEFERRED_ALL)) | |
1995 | return; | |
1996 | nflags = flags & ~MPTCP_DEFERRED_ALL; | |
1997 | } while (cmpxchg(&sk->sk_tsq_flags, flags, nflags) != flags); | |
1998 | ||
b51f9b80 PA |
1999 | sock_release_ownership(sk); |
2000 | ||
14c441b5 PA |
2001 | if (flags & TCPF_DELACK_TIMER_DEFERRED) { |
2002 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2003 | struct sock *ssk; | |
2004 | ||
2005 | ssk = mptcp_subflow_recv_lookup(msk); | |
2006 | if (!ssk || !schedule_work(&msk->work)) | |
2007 | __sock_put(sk); | |
2008 | } | |
b51f9b80 PA |
2009 | |
2010 | if (flags & TCPF_WRITE_TIMER_DEFERRED) { | |
2011 | mptcp_retransmit_handler(sk); | |
2012 | __sock_put(sk); | |
2013 | } | |
14c441b5 PA |
2014 | } |
2015 | ||
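The loop above is the usual lock-free claim-and-clear idiom: snapshot the flags, compute a value with the deferred bits removed, and retry the cmpxchg() until no concurrent writer slipped in between, so each deferred event is consumed exactly once. The same pattern for a single bit, in sketch form (MY_DEFERRED_BIT and handle_event() are placeholders):

/*	do {
 *		old = flags;				// snapshot
 *		if (!(old & MY_DEFERRED_BIT))
 *			return;				// nothing pending for us
 *		new = old & ~MY_DEFERRED_BIT;		// claim the event
 *	} while (cmpxchg(&flags, old, new) != old);
 *	handle_event();					// runs once per set bit
 */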
2c5ebd00 PA |
2016 | static int mptcp_hash(struct sock *sk) |
2017 | { | |
2018 | /* should never be called, | |
2019 | * we hash the TCP subflows, not the master socket | |
2020 | */ | |
2021 | WARN_ON_ONCE(1); | |
2022 | return 0; | |
2023 | } | |
2024 | ||
2025 | static void mptcp_unhash(struct sock *sk) | |
2026 | { | |
2027 | /* called from sk_common_release(), but nothing to do here */ | |
2028 | } | |
2029 | ||
cec37a6e | 2030 | static int mptcp_get_port(struct sock *sk, unsigned short snum) |
f870fa0b MM |
2031 | { |
2032 | struct mptcp_sock *msk = mptcp_sk(sk); | |
cec37a6e | 2033 | struct socket *ssock; |
f870fa0b | 2034 | |
cec37a6e PK |
2035 | ssock = __mptcp_nmpc_socket(msk); |
2036 | pr_debug("msk=%p, subflow=%p", msk, ssock); | |
2037 | if (WARN_ON_ONCE(!ssock)) | |
2038 | return -EINVAL; | |
f870fa0b | 2039 | |
cec37a6e PK |
2040 | return inet_csk_get_port(ssock->sk, snum); |
2041 | } | |
f870fa0b | 2042 | |
cec37a6e PK |
2043 | void mptcp_finish_connect(struct sock *ssk) |
2044 | { | |
2045 | struct mptcp_subflow_context *subflow; | |
2046 | struct mptcp_sock *msk; | |
2047 | struct sock *sk; | |
6d0060f6 | 2048 | u64 ack_seq; |
f870fa0b | 2049 | |
cec37a6e | 2050 | subflow = mptcp_subflow_ctx(ssk); |
cec37a6e PK |
2051 | sk = subflow->conn; |
2052 | msk = mptcp_sk(sk); | |
2053 | ||
648ef4b8 MM |
2054 | pr_debug("msk=%p, token=%u", sk, subflow->token); |
2055 | ||
6d0060f6 MM |
2056 | mptcp_crypto_key_sha(subflow->remote_key, NULL, &ack_seq); |
2057 | ack_seq++; | |
648ef4b8 MM |
2058 | subflow->map_seq = ack_seq; |
2059 | subflow->map_subflow_seq = 1; | |
6d0060f6 | 2060 | |
cec37a6e PK |
2061 | /* the socket is not connected yet, so no msk/subflow ops can race with | |
2062 | * us while accessing the fields below | |
2063 | */ | |
2064 | WRITE_ONCE(msk->remote_key, subflow->remote_key); | |
2065 | WRITE_ONCE(msk->local_key, subflow->local_key); | |
6d0060f6 MM |
2066 | WRITE_ONCE(msk->write_seq, subflow->idsn + 1); |
2067 | WRITE_ONCE(msk->ack_seq, ack_seq); | |
d22f4988 | 2068 | WRITE_ONCE(msk->can_ack, 1); |
cc9d2566 | 2069 | atomic64_set(&msk->snd_una, msk->write_seq); |
1b1c7a0e PK |
2070 | |
2071 | mptcp_pm_new_connection(msk, 0); | |
a6b118fe FW |
2072 | |
2073 | mptcp_rcv_space_init(msk, ssk); | |
f870fa0b MM |
2074 | } |
2075 | ||
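The ack_seq derivation follows RFC 8684, section 3.1: both peers hash the handshake key with SHA-256 and split the digest, so no sequencing state beyond the keys themselves needs to be exchanged:

/* token = most significant 32 bits of SHA-256(key)
 * IDSN  = least significant 64 bits of SHA-256(key)
 *
 * mptcp_crypto_key_sha() is asked only for the IDSN here (the token
 * argument is NULL); the MP_CAPABLE handshake consumes one data
 * sequence number, hence the ack_seq++ above, mirroring
 * write_seq = idsn + 1 on the sending side.
 */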
cf7da0d6 PK |
2076 | static void mptcp_sock_graft(struct sock *sk, struct socket *parent) |
2077 | { | |
2078 | write_lock_bh(&sk->sk_callback_lock); | |
2079 | rcu_assign_pointer(sk->sk_wq, &parent->wq); | |
2080 | sk_set_socket(sk, parent); | |
2081 | sk->sk_uid = SOCK_INODE(parent)->i_uid; | |
2082 | write_unlock_bh(&sk->sk_callback_lock); | |
2083 | } | |
2084 | ||
f296234c PK |
2085 | bool mptcp_finish_join(struct sock *sk) |
2086 | { | |
2087 | struct mptcp_subflow_context *subflow = mptcp_subflow_ctx(sk); | |
2088 | struct mptcp_sock *msk = mptcp_sk(subflow->conn); | |
2089 | struct sock *parent = (void *)msk; | |
2090 | struct socket *parent_sock; | |
ec3edaa7 | 2091 | bool ret; |
f296234c PK |
2092 | |
2093 | pr_debug("msk=%p, subflow=%p", msk, subflow); | |
2094 | ||
2095 | /* mptcp socket already closing? */ | |
b93df08c | 2096 | if (!mptcp_is_fully_established(parent)) |
f296234c PK |
2097 | return false; |
2098 | ||
2099 | if (!msk->pm.server_side) | |
2100 | return true; | |
2101 | ||
10f6d46c PA |
2102 | if (!mptcp_pm_allow_new_subflow(msk)) |
2103 | return false; | |
2104 | ||
2105 | /* active connections are already on conn_list, and we can't acquire | |
2106 | * msk lock here. | |
2107 | * use the join list lock as a synchronization point and double-check | |
2108 | * msk status to avoid racing with mptcp_close() | |
2109 | */ | |
2110 | spin_lock_bh(&msk->join_list_lock); | |
2111 | ret = inet_sk_state_load(parent) == TCP_ESTABLISHED; | |
2112 | if (ret && !WARN_ON_ONCE(!list_empty(&subflow->node))) | |
2113 | list_add_tail(&subflow->node, &msk->join_list); | |
2114 | spin_unlock_bh(&msk->join_list_lock); | |
2115 | if (!ret) | |
2116 | return false; | |
2117 | ||
2118 | /* attach to msk socket only after we are sure it will deal with us | |
2119 | * at close time | |
2120 | */ | |
f296234c PK |
2121 | parent_sock = READ_ONCE(parent->sk_socket); |
2122 | if (parent_sock && !sk->sk_socket) | |
2123 | mptcp_sock_graft(sk, parent_sock); | |
10f6d46c PA |
2124 | subflow->map_seq = msk->ack_seq; |
2125 | return true; | |
f296234c PK |
2126 | } |
2127 | ||
1891c4a0 FW |
2128 | static bool mptcp_memory_free(const struct sock *sk, int wake) |
2129 | { | |
2130 | struct mptcp_sock *msk = mptcp_sk(sk); | |
2131 | ||
2132 | return wake ? test_bit(MPTCP_SEND_SPACE, &msk->flags) : true; | |
2133 | } | |
2134 | ||
f870fa0b MM |
2135 | static struct proto mptcp_prot = { |
2136 | .name = "MPTCP", | |
2137 | .owner = THIS_MODULE, | |
2138 | .init = mptcp_init_sock, | |
18b683bf | 2139 | .disconnect = mptcp_disconnect, |
f870fa0b | 2140 | .close = mptcp_close, |
cf7da0d6 | 2141 | .accept = mptcp_accept, |
717e79c8 PK |
2142 | .setsockopt = mptcp_setsockopt, |
2143 | .getsockopt = mptcp_getsockopt, | |
f870fa0b | 2144 | .shutdown = tcp_shutdown, |
79c0949e | 2145 | .destroy = mptcp_destroy, |
f870fa0b MM |
2146 | .sendmsg = mptcp_sendmsg, |
2147 | .recvmsg = mptcp_recvmsg, | |
14c441b5 | 2148 | .release_cb = mptcp_release_cb, |
2c5ebd00 PA |
2149 | .hash = mptcp_hash, |
2150 | .unhash = mptcp_unhash, | |
cec37a6e | 2151 | .get_port = mptcp_get_port, |
d027236c PA |
2152 | .sockets_allocated = &mptcp_sockets_allocated, |
2153 | .memory_allocated = &tcp_memory_allocated, | |
2154 | .memory_pressure = &tcp_memory_pressure, | |
1891c4a0 | 2155 | .stream_memory_free = mptcp_memory_free, |
d027236c PA |
2156 | .sysctl_wmem_offset = offsetof(struct net, ipv4.sysctl_tcp_wmem), |
2157 | .sysctl_mem = sysctl_tcp_mem, | |
f870fa0b | 2158 | .obj_size = sizeof(struct mptcp_sock), |
2c5ebd00 | 2159 | .slab_flags = SLAB_TYPESAFE_BY_RCU, |
f870fa0b MM |
2160 | .no_autobind = true, |
2161 | }; | |
2162 | ||
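mptcp_prot is what a userspace socket(2) call resolves to once mptcp_protosw below is registered for IPPROTO_MPTCP. A minimal sketch of that entry point (IPPROTO_MPTCP is 262 in the UAPI headers, guarded here for older toolchains):

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>

#ifndef IPPROTO_MPTCP
#define IPPROTO_MPTCP	262	/* include/uapi/linux/in.h */
#endif

int main(void)
{
	/* lands in mptcp_init_sock(); fails with ENOPROTOOPT when MPTCP
	 * is disabled in this netns (net.mptcp.enabled = 0)
	 */
	int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

	if (fd < 0)
		perror("socket(IPPROTO_MPTCP)");
	return fd < 0;
}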
2303f994 PK |
2163 | static int mptcp_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) |
2164 | { | |
2165 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2166 | struct socket *ssock; | |
cf7da0d6 | 2167 | int err; |
2303f994 PK |
2168 | |
2169 | lock_sock(sock->sk); | |
fa68018d PA |
2170 | ssock = __mptcp_nmpc_socket(msk); |
2171 | if (!ssock) { | |
2172 | err = -EINVAL; | |
2303f994 PK |
2173 | goto unlock; |
2174 | } | |
2175 | ||
2176 | err = ssock->ops->bind(ssock, uaddr, addr_len); | |
cf7da0d6 PK |
2177 | if (!err) |
2178 | mptcp_copy_inaddrs(sock->sk, ssock->sk); | |
2303f994 PK |
2179 | |
2180 | unlock: | |
2181 | release_sock(sock->sk); | |
2182 | return err; | |
2183 | } | |
2184 | ||
0235d075 PA |
2185 | static void mptcp_subflow_early_fallback(struct mptcp_sock *msk, |
2186 | struct mptcp_subflow_context *subflow) | |
2187 | { | |
2188 | subflow->request_mptcp = 0; | |
2189 | __mptcp_do_fallback(msk); | |
2190 | } | |
2191 | ||
2303f994 PK |
2192 | static int mptcp_stream_connect(struct socket *sock, struct sockaddr *uaddr, |
2193 | int addr_len, int flags) | |
2194 | { | |
2195 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2c5ebd00 | 2196 | struct mptcp_subflow_context *subflow; |
2303f994 PK |
2197 | struct socket *ssock; |
2198 | int err; | |
2199 | ||
2200 | lock_sock(sock->sk); | |
41be81a8 PA |
2201 | if (sock->state != SS_UNCONNECTED && msk->subflow) { |
2202 | /* pending connection or invalid state, let existing subflow | |
2203 | * cope with that | |
2204 | */ | |
2205 | ssock = msk->subflow; | |
2206 | goto do_connect; | |
2207 | } | |
2208 | ||
fa68018d PA |
2209 | ssock = __mptcp_nmpc_socket(msk); |
2210 | if (!ssock) { | |
2211 | err = -EINVAL; | |
2303f994 PK |
2212 | goto unlock; |
2213 | } | |
2214 | ||
fa68018d PA |
2215 | mptcp_token_destroy(msk); |
2216 | inet_sk_state_store(sock->sk, TCP_SYN_SENT); | |
2c5ebd00 | 2217 | subflow = mptcp_subflow_ctx(ssock->sk); |
cf7da0d6 PK |
2218 | #ifdef CONFIG_TCP_MD5SIG |
2219 | /* no MPTCP if MD5SIG is enabled on this socket or we may run out of | |
2220 | * TCP option space. | |
2221 | */ | |
2222 | if (rcu_access_pointer(tcp_sk(ssock->sk)->md5sig_info)) | |
0235d075 | 2223 | mptcp_subflow_early_fallback(msk, subflow); |
cf7da0d6 | 2224 | #endif |
2c5ebd00 | 2225 | if (subflow->request_mptcp && mptcp_token_new_connect(ssock->sk)) |
0235d075 | 2226 | mptcp_subflow_early_fallback(msk, subflow); |
cf7da0d6 | 2227 | |
41be81a8 | 2228 | do_connect: |
2303f994 | 2229 | err = ssock->ops->connect(ssock, uaddr, addr_len, flags); |
41be81a8 PA |
2230 | sock->state = ssock->state; |
2231 | ||
2232 | /* on successful connect, the msk state will be moved to established by | |
2233 | * subflow_finish_connect() | |
2234 | */ | |
367fe04e | 2235 | if (!err || err == -EINPROGRESS) |
41be81a8 PA |
2236 | mptcp_copy_inaddrs(sock->sk, ssock->sk); |
2237 | else | |
2238 | inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); | |
2303f994 PK |
2239 | |
2240 | unlock: | |
2241 | release_sock(sock->sk); | |
2242 | return err; | |
2243 | } | |
2244 | ||
cf7da0d6 PK |
2245 | static int mptcp_listen(struct socket *sock, int backlog) |
2246 | { | |
2247 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2248 | struct socket *ssock; | |
2249 | int err; | |
2250 | ||
2251 | pr_debug("msk=%p", msk); | |
2252 | ||
2253 | lock_sock(sock->sk); | |
fa68018d PA |
2254 | ssock = __mptcp_nmpc_socket(msk); |
2255 | if (!ssock) { | |
2256 | err = -EINVAL; | |
cf7da0d6 PK |
2257 | goto unlock; |
2258 | } | |
2259 | ||
fa68018d PA |
2260 | mptcp_token_destroy(msk); |
2261 | inet_sk_state_store(sock->sk, TCP_LISTEN); | |
5e20087d FW |
2262 | sock_set_flag(sock->sk, SOCK_RCU_FREE); |
2263 | ||
cf7da0d6 PK |
2264 | err = ssock->ops->listen(ssock, backlog); |
2265 | inet_sk_state_store(sock->sk, inet_sk_state_load(ssock->sk)); | |
2266 | if (!err) | |
2267 | mptcp_copy_inaddrs(sock->sk, ssock->sk); | |
2268 | ||
2269 | unlock: | |
2270 | release_sock(sock->sk); | |
2271 | return err; | |
2272 | } | |
2273 | ||
cf7da0d6 PK |
2274 | static int mptcp_stream_accept(struct socket *sock, struct socket *newsock, |
2275 | int flags, bool kern) | |
2276 | { | |
2277 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2278 | struct socket *ssock; | |
2279 | int err; | |
2280 | ||
2281 | pr_debug("msk=%p", msk); | |
2282 | ||
2283 | lock_sock(sock->sk); | |
2284 | if (sock->sk->sk_state != TCP_LISTEN) | |
2285 | goto unlock_fail; | |
2286 | ||
2287 | ssock = __mptcp_nmpc_socket(msk); | |
2288 | if (!ssock) | |
2289 | goto unlock_fail; | |
2290 | ||
8a05661b | 2291 | clear_bit(MPTCP_DATA_READY, &msk->flags); |
cf7da0d6 PK |
2292 | sock_hold(ssock->sk); |
2293 | release_sock(sock->sk); | |
2294 | ||
2295 | err = ssock->ops->accept(sock, newsock, flags, kern); | |
d2f77c53 | 2296 | if (err == 0 && !mptcp_is_tcpsk(newsock->sk)) { |
cf7da0d6 PK |
2297 | struct mptcp_sock *msk = mptcp_sk(newsock->sk); |
2298 | struct mptcp_subflow_context *subflow; | |
2299 | ||
2300 | /* set ssk->sk_socket of accept()ed flows to the mptcp socket. | |
2301 | * This is needed so the NOSPACE flag can be set from the tcp stack. | |
2302 | */ | |
ec3edaa7 | 2303 | __mptcp_flush_join_list(msk); |
190f8b06 | 2304 | mptcp_for_each_subflow(msk, subflow) { |
cf7da0d6 PK |
2305 | struct sock *ssk = mptcp_subflow_tcp_sock(subflow); |
2306 | ||
2307 | if (!ssk->sk_socket) | |
2308 | mptcp_sock_graft(ssk, newsock); | |
2309 | } | |
cf7da0d6 PK |
2310 | } |
2311 | ||
8a05661b PA |
2312 | if (inet_csk_listen_poll(ssock->sk)) |
2313 | set_bit(MPTCP_DATA_READY, &msk->flags); | |
cf7da0d6 PK |
2314 | sock_put(ssock->sk); |
2315 | return err; | |
2316 | ||
2317 | unlock_fail: | |
2318 | release_sock(sock->sk); | |
2319 | return -EINVAL; | |
2320 | } | |
2321 | ||
8a05661b PA |
2322 | static __poll_t mptcp_check_readable(struct mptcp_sock *msk) |
2323 | { | |
2324 | return test_bit(MPTCP_DATA_READY, &msk->flags) ? EPOLLIN | EPOLLRDNORM : | |
2325 | 0; | |
2326 | } | |
2327 | ||
2303f994 PK |
2328 | static __poll_t mptcp_poll(struct file *file, struct socket *sock, |
2329 | struct poll_table_struct *wait) | |
2330 | { | |
1891c4a0 | 2331 | struct sock *sk = sock->sk; |
8ab183de | 2332 | struct mptcp_sock *msk; |
2303f994 | 2333 | __poll_t mask = 0; |
8a05661b | 2334 | int state; |
2303f994 | 2335 | |
1891c4a0 | 2336 | msk = mptcp_sk(sk); |
1891c4a0 | 2337 | sock_poll_wait(file, sock, wait); |
1891c4a0 | 2338 | |
8a05661b | 2339 | state = inet_sk_state_load(sk); |
6719331c | 2340 | pr_debug("msk=%p state=%d flags=%lx", msk, state, msk->flags); |
8a05661b PA |
2341 | if (state == TCP_LISTEN) |
2342 | return mptcp_check_readable(msk); | |
2343 | ||
2344 | if (state != TCP_SYN_SENT && state != TCP_SYN_RECV) { | |
2345 | mask |= mptcp_check_readable(msk); | |
63561a40 | 2346 | if (test_bit(MPTCP_SEND_SPACE, &msk->flags)) |
8a05661b PA |
2347 | mask |= EPOLLOUT | EPOLLWRNORM; |
2348 | } | |
1891c4a0 FW |
2349 | if (sk->sk_shutdown & RCV_SHUTDOWN) |
2350 | mask |= EPOLLIN | EPOLLRDNORM | EPOLLRDHUP; | |
2351 | ||
2303f994 PK |
2352 | return mask; |
2353 | } | |
2354 | ||
21498490 PK |
2355 | static int mptcp_shutdown(struct socket *sock, int how) |
2356 | { | |
2357 | struct mptcp_sock *msk = mptcp_sk(sock->sk); | |
2358 | struct mptcp_subflow_context *subflow; | |
2359 | int ret = 0; | |
2360 | ||
2361 | pr_debug("sk=%p, how=%d", msk, how); | |
2362 | ||
2363 | lock_sock(sock->sk); | |
21498490 PK |
2364 | |
2365 | how++; | |
21498490 PK |
2366 | if ((how & ~SHUTDOWN_MASK) || !how) { |
2367 | ret = -EINVAL; | |
2368 | goto out_unlock; | |
2369 | } | |
2370 | ||
2371 | if (sock->state == SS_CONNECTING) { | |
2372 | if ((1 << sock->sk->sk_state) & | |
2373 | (TCPF_SYN_SENT | TCPF_SYN_RECV | TCPF_CLOSE)) | |
2374 | sock->state = SS_DISCONNECTING; | |
2375 | else | |
2376 | sock->state = SS_CONNECTED; | |
2377 | } | |
2378 | ||
43b54c6e MM |
2379 | /* If we've already sent a FIN, or it's a closed state, skip this. */ |
2380 | if (__mptcp_check_fallback(msk)) { | |
2381 | if (how == SHUT_WR || how == SHUT_RDWR) | |
2382 | inet_sk_state_store(sock->sk, TCP_FIN_WAIT1); | |
7279da61 | 2383 | |
43b54c6e MM |
2384 | mptcp_for_each_subflow(msk, subflow) { |
2385 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); | |
21498490 | 2386 | |
43b54c6e MM |
2387 | mptcp_subflow_shutdown(sock->sk, tcp_sk, how); |
2388 | } | |
2389 | } else if ((how & SEND_SHUTDOWN) && | |
2390 | ((1 << sock->sk->sk_state) & | |
2391 | (TCPF_ESTABLISHED | TCPF_SYN_SENT | | |
2392 | TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) && | |
2393 | mptcp_close_state(sock->sk)) { | |
2394 | __mptcp_flush_join_list(msk); | |
2395 | ||
2396 | WRITE_ONCE(msk->write_seq, msk->write_seq + 1); | |
2397 | WRITE_ONCE(msk->snd_data_fin_enable, 1); | |
2398 | ||
2399 | mptcp_for_each_subflow(msk, subflow) { | |
2400 | struct sock *tcp_sk = mptcp_subflow_tcp_sock(subflow); | |
2401 | ||
2402 | mptcp_subflow_shutdown(sock->sk, tcp_sk, how); | |
2403 | } | |
21498490 PK |
2404 | } |
2405 | ||
e1ff9e82 DC |
2406 | /* Wake up anyone sleeping in poll. */ |
2407 | sock->sk->sk_state_change(sock->sk); | |
2408 | ||
21498490 PK |
2409 | out_unlock: |
2410 | release_sock(sock->sk); | |
2411 | ||
2412 | return ret; | |
2413 | } | |
2414 | ||
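The how++ near the top of mptcp_shutdown() is the classic trick of turning the userspace SHUT_* enum into the kernel's shutdown bitmask before validating it:

/* userspace:	SHUT_RD = 0	SHUT_WR = 1	SHUT_RDWR = 2
 * after ++:	1 = RCV_SHUTDOWN  2 = SEND_SHUTDOWN  3 = SHUTDOWN_MASK
 *
 * the check ((how & ~SHUTDOWN_MASK) || !how) then rejects any value
 * outside 1..3 with -EINVAL.
 */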
e42f1ac6 FW |
2415 | static const struct proto_ops mptcp_stream_ops = { |
2416 | .family = PF_INET, | |
2417 | .owner = THIS_MODULE, | |
2418 | .release = inet_release, | |
2419 | .bind = mptcp_bind, | |
2420 | .connect = mptcp_stream_connect, | |
2421 | .socketpair = sock_no_socketpair, | |
2422 | .accept = mptcp_stream_accept, | |
d2f77c53 | 2423 | .getname = inet_getname, |
e42f1ac6 FW |
2424 | .poll = mptcp_poll, |
2425 | .ioctl = inet_ioctl, | |
2426 | .gettstamp = sock_gettstamp, | |
2427 | .listen = mptcp_listen, | |
2428 | .shutdown = mptcp_shutdown, | |
2429 | .setsockopt = sock_common_setsockopt, | |
2430 | .getsockopt = sock_common_getsockopt, | |
2431 | .sendmsg = inet_sendmsg, | |
2432 | .recvmsg = inet_recvmsg, | |
2433 | .mmap = sock_no_mmap, | |
2434 | .sendpage = inet_sendpage, | |
e42f1ac6 | 2435 | }; |
2303f994 | 2436 | |
f870fa0b MM |
2437 | static struct inet_protosw mptcp_protosw = { |
2438 | .type = SOCK_STREAM, | |
2439 | .protocol = IPPROTO_MPTCP, | |
2440 | .prot = &mptcp_prot, | |
2303f994 PK |
2441 | .ops = &mptcp_stream_ops, |
2442 | .flags = INET_PROTOSW_ICSK, | |
f870fa0b MM |
2443 | }; |
2444 | ||
d39dceca | 2445 | void __init mptcp_proto_init(void) |
f870fa0b | 2446 | { |
2303f994 | 2447 | mptcp_prot.h.hashinfo = tcp_prot.h.hashinfo; |
2303f994 | 2448 | |
d027236c PA |
2449 | if (percpu_counter_init(&mptcp_sockets_allocated, 0, GFP_KERNEL)) |
2450 | panic("Failed to allocate MPTCP pcpu counter\n"); | |
2451 | ||
2303f994 | 2452 | mptcp_subflow_init(); |
1b1c7a0e | 2453 | mptcp_pm_init(); |
2c5ebd00 | 2454 | mptcp_token_init(); |
2303f994 | 2455 | |
f870fa0b MM |
2456 | if (proto_register(&mptcp_prot, 1) != 0) |
2457 | panic("Failed to register MPTCP proto.\n"); | |
2458 | ||
2459 | inet_register_protosw(&mptcp_protosw); | |
6771bfd9 FW |
2460 | |
2461 | BUILD_BUG_ON(sizeof(struct mptcp_skb_cb) > sizeof_field(struct sk_buff, cb)); | |
f870fa0b MM |
2462 | } |
2463 | ||
2464 | #if IS_ENABLED(CONFIG_MPTCP_IPV6) | |
e42f1ac6 FW |
2465 | static const struct proto_ops mptcp_v6_stream_ops = { |
2466 | .family = PF_INET6, | |
2467 | .owner = THIS_MODULE, | |
2468 | .release = inet6_release, | |
2469 | .bind = mptcp_bind, | |
2470 | .connect = mptcp_stream_connect, | |
2471 | .socketpair = sock_no_socketpair, | |
2472 | .accept = mptcp_stream_accept, | |
d2f77c53 | 2473 | .getname = inet6_getname, |
e42f1ac6 FW |
2474 | .poll = mptcp_poll, |
2475 | .ioctl = inet6_ioctl, | |
2476 | .gettstamp = sock_gettstamp, | |
2477 | .listen = mptcp_listen, | |
2478 | .shutdown = mptcp_shutdown, | |
2479 | .setsockopt = sock_common_setsockopt, | |
2480 | .getsockopt = sock_common_getsockopt, | |
2481 | .sendmsg = inet6_sendmsg, | |
2482 | .recvmsg = inet6_recvmsg, | |
2483 | .mmap = sock_no_mmap, | |
2484 | .sendpage = inet_sendpage, | |
2485 | #ifdef CONFIG_COMPAT | |
3986912f | 2486 | .compat_ioctl = inet6_compat_ioctl, |
e42f1ac6 FW |
2487 | #endif |
2488 | }; | |
2489 | ||
f870fa0b MM |
2490 | static struct proto mptcp_v6_prot; |
2491 | ||
79c0949e PK |
2492 | static void mptcp_v6_destroy(struct sock *sk) |
2493 | { | |
2494 | mptcp_destroy(sk); | |
2495 | inet6_destroy_sock(sk); | |
2496 | } | |
2497 | ||
f870fa0b MM |
2498 | static struct inet_protosw mptcp_v6_protosw = { |
2499 | .type = SOCK_STREAM, | |
2500 | .protocol = IPPROTO_MPTCP, | |
2501 | .prot = &mptcp_v6_prot, | |
2303f994 | 2502 | .ops = &mptcp_v6_stream_ops, |
f870fa0b MM |
2503 | .flags = INET_PROTOSW_ICSK, |
2504 | }; | |
2505 | ||
d39dceca | 2506 | int __init mptcp_proto_v6_init(void) |
f870fa0b MM |
2507 | { |
2508 | int err; | |
2509 | ||
2510 | mptcp_v6_prot = mptcp_prot; | |
2511 | strcpy(mptcp_v6_prot.name, "MPTCPv6"); | |
2512 | mptcp_v6_prot.slab = NULL; | |
79c0949e | 2513 | mptcp_v6_prot.destroy = mptcp_v6_destroy; |
b0519de8 | 2514 | mptcp_v6_prot.obj_size = sizeof(struct mptcp6_sock); |
f870fa0b MM |
2515 | |
2516 | err = proto_register(&mptcp_v6_prot, 1); | |
2517 | if (err) | |
2518 | return err; | |
2519 | ||
2520 | err = inet6_register_protosw(&mptcp_v6_protosw); | |
2521 | if (err) | |
2522 | proto_unregister(&mptcp_v6_prot); | |
2523 | ||
2524 | return err; | |
2525 | } | |
2526 | #endif |