1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (c) 2019, Tessares SA.
8 #include <linux/sysctl.h>
11 #include <net/net_namespace.h>
12 #include <net/netns/generic.h>
17 #define MPTCP_SYSCTL_PATH "net/mptcp"
/* Generic per-netns storage id, assigned by register_pernet_subsys() below. */
19 static int mptcp_pernet_id;
/* Exclusive upper bound for the "pm_type" sysctl; wired as .extra2 in the
 * table below so writes are clamped to valid path-manager types.
 */
22 static int mptcp_pm_type_max = __MPTCP_PM_TYPE_MAX;
/* Per-netns MPTCP state, reached via net_generic(net, mptcp_pernet_id).
 * NOTE(review): only part of the struct is visible in this chunk -- fields
 * such as mptcp_enabled, checksum_enabled and pm_type are read below but
 * their declarations are not shown here.
 */
27 struct ctl_table_header *ctl_table_hdr; /* handle from register_net_sysctl_sz() */
30 unsigned int add_addr_timeout; /* in jiffies (proc_dointvec_jiffies handler) */
31 unsigned int blackhole_timeout; /* in seconds (scaled by HZ below); 0 disables detection */
32 unsigned int close_timeout; /* in jiffies; default TCP_TIMEWAIT_LEN */
33 unsigned int stale_loss_cnt;
34 atomic_t active_disable_times; /* consecutive blackhole-triggered disables */
35 u8 syn_retrans_before_tcp_fallback; /* SYN+MPC retransmits before TCP fallback */
36 unsigned long active_disable_stamp; /* jiffies of last mptcp_active_disable() */
39 u8 allow_join_initial_addr_port; /* boolean knob, default 1 */
41 char scheduler[MPTCP_SCHED_NAME_MAX]; /* current packet scheduler name */
/* Fetch this netns' MPTCP private area from the generic pernet storage. */
44 static struct mptcp_pernet *mptcp_get_pernet(const struct net *net)
46 return net_generic(net, mptcp_pernet_id);
/* Current value of the net.mptcp.enabled sysctl for @net. */
49 int mptcp_is_enabled(const struct net *net)
51 return mptcp_get_pernet(net)->mptcp_enabled;
/* ADD_ADDR retransmission timeout for @net, in jiffies. */
54 unsigned int mptcp_get_add_addr_timeout(const struct net *net)
56 return mptcp_get_pernet(net)->add_addr_timeout;
/* Current value of the net.mptcp.checksum_enabled sysctl for @net. */
59 int mptcp_is_checksum_enabled(const struct net *net)
61 return mptcp_get_pernet(net)->checksum_enabled;
/* Whether joins to the initial address/port (id 0) are allowed in @net. */
64 int mptcp_allow_join_id0(const struct net *net)
66 return mptcp_get_pernet(net)->allow_join_initial_addr_port;
/* Number of losses after which a subflow is considered stale in @net. */
69 unsigned int mptcp_stale_loss_cnt(const struct net *net)
71 return mptcp_get_pernet(net)->stale_loss_cnt;
/* Close timeout for @sk, in jiffies: orphaned (SOCK_DEAD) sockets always
 * use TCP_TIMEWAIT_LEN, others use the per-netns close_timeout sysctl.
 */
74 unsigned int mptcp_close_timeout(const struct sock *sk)
76 if (sock_flag(sk, SOCK_DEAD))
77 return TCP_TIMEWAIT_LEN;
78 return mptcp_get_pernet(sock_net(sk))->close_timeout;
/* Configured path-manager type for @net (see MPTCP_PM_TYPE_*). */
81 int mptcp_get_pm_type(const struct net *net)
83 return mptcp_get_pernet(net)->pm_type;
/* Name of the packet scheduler currently configured for @net. */
86 const char *mptcp_get_scheduler(const struct net *net)
88 return mptcp_get_pernet(net)->scheduler;
/* Install default sysctl values for a freshly created netns; called from
 * mptcp_net_init() before the sysctl table is registered.
 */
91 static void mptcp_pernet_set_defaults(struct mptcp_pernet *pernet)
93 pernet->mptcp_enabled = 1;
94 pernet->add_addr_timeout = TCP_RTO_MAX;
95 pernet->blackhole_timeout = 3600; /* 1h, in seconds */
96 pernet->syn_retrans_before_tcp_fallback = 2;
97 atomic_set(&pernet->active_disable_times, 0);
98 pernet->close_timeout = TCP_TIMEWAIT_LEN;
99 pernet->checksum_enabled = 0;
100 pernet->allow_join_initial_addr_port = 1;
101 pernet->stale_loss_cnt = 4;
102 pernet->pm_type = MPTCP_PM_TYPE_KERNEL;
103 strscpy(pernet->scheduler, "default", sizeof(pernet->scheduler));
/* Look up @name among the registered schedulers and, on success, commit it
 * into the @scheduler buffer (MPTCP_SCHED_NAME_MAX bytes).
 * NOTE(review): the lines between the lookup and the copy are not visible
 * in this chunk -- presumably a NULL check on 'sched' with an error return;
 * confirm against the full file.
 */
107 static int mptcp_set_scheduler(char *scheduler, const char *name)
109 struct mptcp_sched_ops *sched;
113 sched = mptcp_sched_find(name);
115 strscpy(scheduler, name, MPTCP_SCHED_NAME_MAX);
/* sysctl handler for net.mptcp.scheduler.
 * Reads report the current name; writes are parsed into the local 'val'
 * buffer by proc_dostring() and only committed through
 * mptcp_set_scheduler() when parsing succeeded, so an invalid write never
 * clobbers the stored name.
 */
123 static int proc_scheduler(const struct ctl_table *ctl, int write,
124 void *buffer, size_t *lenp, loff_t *ppos)
126 char (*scheduler)[MPTCP_SCHED_NAME_MAX] = ctl->data;
127 char val[MPTCP_SCHED_NAME_MAX];
128 struct ctl_table tbl = {
130 .maxlen = MPTCP_SCHED_NAME_MAX,
/* snapshot the current name so reads see it via the local table */
134 strscpy(val, *scheduler, MPTCP_SCHED_NAME_MAX);
136 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
137 if (write && ret == 0)
138 ret = mptcp_set_scheduler(*scheduler, val);
/* Read-only sysctl listing the registered schedulers: fill a temporary
 * buffer and hand it to proc_dostring().
 * NOTE(review): the kmalloc() NULL check and the matching kfree() are not
 * visible in this chunk -- confirm both exist in the full file.
 */
143 static int proc_available_schedulers(const struct ctl_table *ctl,
144 int write, void *buffer,
145 size_t *lenp, loff_t *ppos)
147 struct ctl_table tbl = { .maxlen = MPTCP_SCHED_BUF_MAX, };
150 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
154 mptcp_get_available_schedulers(tbl.data, MPTCP_SCHED_BUF_MAX);
155 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
/* sysctl handler for net.mptcp.blackhole_timeout.
 * Recovers the owning mptcp_pernet from the field pointer in table->data
 * via container_of(), and on a successful write resets the blackhole
 * disable counter so the new timeout starts from a clean state.
 */
161 static int proc_blackhole_detect_timeout(const struct ctl_table *table,
162 int write, void *buffer, size_t *lenp,
165 struct mptcp_pernet *pernet = container_of(table->data,
170 ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
171 if (write && ret == 0)
172 atomic_set(&pernet->active_disable_times, 0);
/* net.mptcp.* knobs.  Entry order must stay in sync with the table[N].data
 * assignments in mptcp_pernet_new_table() below.
 */
177 static struct ctl_table mptcp_sysctl_table[] = {
179 .procname = "enabled",
180 .maxlen = sizeof(u8),
182 /* users with CAP_NET_ADMIN or root (not and) can change this
183 * value, same as other sysctl or the 'net' tree.
185 .proc_handler = proc_dou8vec_minmax,
186 .extra1 = SYSCTL_ZERO,
190 .procname = "add_addr_timeout",
191 .maxlen = sizeof(unsigned int),
/* value is stored/interpreted in jiffies, exposed in seconds */
193 .proc_handler = proc_dointvec_jiffies,
196 .procname = "checksum_enabled",
197 .maxlen = sizeof(u8),
199 .proc_handler = proc_dou8vec_minmax,
200 .extra1 = SYSCTL_ZERO,
204 .procname = "allow_join_initial_addr_port",
205 .maxlen = sizeof(u8),
207 .proc_handler = proc_dou8vec_minmax,
208 .extra1 = SYSCTL_ZERO,
212 .procname = "stale_loss_cnt",
213 .maxlen = sizeof(unsigned int),
215 .proc_handler = proc_douintvec_minmax,
218 .procname = "pm_type",
219 .maxlen = sizeof(u8),
221 .proc_handler = proc_dou8vec_minmax,
222 .extra1 = SYSCTL_ZERO,
/* clamp to valid path-manager types (exclusive max) */
223 .extra2 = &mptcp_pm_type_max
226 .procname = "scheduler",
227 .maxlen = MPTCP_SCHED_NAME_MAX,
229 .proc_handler = proc_scheduler,
232 .procname = "available_schedulers",
233 .maxlen = MPTCP_SCHED_BUF_MAX,
/* read-only: lists the registered schedulers */
235 .proc_handler = proc_available_schedulers,
238 .procname = "close_timeout",
239 .maxlen = sizeof(unsigned int),
241 .proc_handler = proc_dointvec_jiffies,
244 .procname = "blackhole_timeout",
245 .maxlen = sizeof(unsigned int),
/* custom handler: also resets active_disable_times on write */
247 .proc_handler = proc_blackhole_detect_timeout,
248 .extra1 = SYSCTL_ZERO,
251 .procname = "syn_retrans_before_tcp_fallback",
252 .maxlen = sizeof(u8),
254 .proc_handler = proc_dou8vec_minmax,
/* Register the net.mptcp sysctl table for @net.
 * init_net uses the static template directly; every other netns gets a
 * kmemdup()'d copy so each namespace can point .data at its own fields.
 * The indices below must match the entry order of mptcp_sysctl_table.
 * NOTE(review): the kmemdup() NULL check and the error-path kfree() after
 * a failed registration are not visible in this chunk -- confirm against
 * the full file.
 */
258 static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
260 struct ctl_table_header *hdr;
261 struct ctl_table *table;
263 table = mptcp_sysctl_table;
264 if (!net_eq(net, &init_net)) {
265 table = kmemdup(table, sizeof(mptcp_sysctl_table), GFP_KERNEL);
/* wire each entry to this namespace's backing storage */
270 table[0].data = &pernet->mptcp_enabled;
271 table[1].data = &pernet->add_addr_timeout;
272 table[2].data = &pernet->checksum_enabled;
273 table[3].data = &pernet->allow_join_initial_addr_port;
274 table[4].data = &pernet->stale_loss_cnt;
275 table[5].data = &pernet->pm_type;
276 table[6].data = &pernet->scheduler;
277 /* table[7] is for available_schedulers which is read-only info */
278 table[8].data = &pernet->close_timeout;
279 table[9].data = &pernet->blackhole_timeout;
280 table[10].data = &pernet->syn_retrans_before_tcp_fallback;
282 hdr = register_net_sysctl_sz(net, MPTCP_SYSCTL_PATH, table,
283 ARRAY_SIZE(mptcp_sysctl_table));
/* keep the handle so mptcp_pernet_del_table() can unregister it */
287 pernet->ctl_table_hdr = hdr;
292 if (!net_eq(net, &init_net))
/* Unregister the sysctl table registered by mptcp_pernet_new_table().
 * The table pointer is fetched first because unregistering releases the
 * header.  NOTE(review): the kfree() of the kmemdup()'d table for
 * non-init namespaces is not visible in this chunk -- confirm it follows.
 */
298 static void mptcp_pernet_del_table(struct mptcp_pernet *pernet)
300 const struct ctl_table *table = pernet->ctl_table_hdr->ctl_table_arg;
302 unregister_net_sysctl_table(pernet->ctl_table_hdr);
/* CONFIG_SYSCTL=n stubs: no tables to create or tear down. */
309 static int mptcp_pernet_new_table(struct net *net, struct mptcp_pernet *pernet)
314 static void mptcp_pernet_del_table(struct mptcp_pernet *pernet) {}
316 #endif /* CONFIG_SYSCTL */
318 /* The following code block is to deal with middle box issues with MPTCP,
319 * similar to what is done with TFO.
320 * The proposed solution is to disable active MPTCP globally when SYN+MPC are
321 * dropped, while SYN without MPC aren't. In this case, active side MPTCP is
322 * disabled globally for 1hr at first. Then if it happens again, it is disabled
323 * for 2h, then 4h, 8h, ...
324 * The timeout is reset back to 1hr when a successful active MPTCP connection is fully established.
328 /* Disable active MPTCP and record current jiffies and active_disable_times */
329 void mptcp_active_disable(struct sock *sk)
331 struct net *net = sock_net(sk);
332 struct mptcp_pernet *pernet;
334 pernet = mptcp_get_pernet(net);
/* blackhole_timeout == 0 means detection is administratively off */
336 if (!READ_ONCE(pernet->blackhole_timeout))
339 /* Paired with READ_ONCE() in mptcp_active_should_disable() */
340 WRITE_ONCE(pernet->active_disable_stamp, jiffies);
342 /* Paired with smp_rmb() in mptcp_active_should_disable().
343 * We want pernet->active_disable_stamp to be updated first.
345 smp_mb__before_atomic();
346 atomic_inc(&pernet->active_disable_times);
/* account the event for `nstat`-style diagnostics */
348 MPTCP_INC_STATS(net, MPTCP_MIB_BLACKHOLE);
351 /* Calculate timeout for MPTCP active disable
352 * Return true if we are still in the active MPTCP disable period
353 * Return false if timeout already expired and we should use active MPTCP
355 bool mptcp_active_should_disable(struct sock *ssk)
357 struct net *net = sock_net(ssk);
358 unsigned int blackhole_timeout;
359 struct mptcp_pernet *pernet;
360 unsigned long timeout;
364 pernet = mptcp_get_pernet(net);
365 blackhole_timeout = READ_ONCE(pernet->blackhole_timeout);
/* feature disabled: never hold back active MPTCP */
367 if (!blackhole_timeout)
370 disable_times = atomic_read(&pernet->active_disable_times);
374 /* Paired with smp_mb__before_atomic() in mptcp_active_disable() */
377 /* Limit timeout to max: 2^6 * initial timeout */
378 multiplier = 1 << min(disable_times - 1, 6);
380 /* Paired with the WRITE_ONCE() in mptcp_active_disable(). */
/* blackhole_timeout is in seconds, hence the HZ scaling */
381 timeout = READ_ONCE(pernet->active_disable_stamp) +
382 multiplier * blackhole_timeout * HZ;
384 return time_before(jiffies, timeout);
387 /* Enable active MPTCP and reset active_disable_times if needed */
388 void mptcp_active_enable(struct sock *sk)
390 struct mptcp_pernet *pernet = mptcp_get_pernet(sock_net(sk));
392 if (atomic_read(&pernet->active_disable_times)) {
393 struct dst_entry *dst = sk_dst_get(sk);
/* As shown, the counter is cleared when the route device IS loopback.
 * NOTE(review): the analogous TFO code resets on a NON-loopback path --
 * confirm the polarity here is intended.  Also, sk_dst_get() takes a dst
 * reference; the matching dst_release() is not visible in this chunk.
 */
395 if (dst && dst->dev && (dst->dev->flags & IFF_LOOPBACK))
396 atomic_set(&pernet->active_disable_times, 0);
400 /* Check the number of retransmissions, and fallback to TCP if needed */
401 void mptcp_active_detect_blackhole(struct sock *ssk, bool expired)
403 struct mptcp_subflow_context *subflow;
/* only MPTCP subflows are of interest here */
405 if (!sk_is_mptcp(ssk))
408 subflow = mptcp_subflow_ctx(ssk);
/* an MPC (MP_CAPABLE) SYN that is still unanswered */
410 if (subflow->request_mptcp && ssk->sk_state == TCP_SYN_SENT) {
411 struct net *net = sock_net(ssk);
414 timeouts = inet_csk(ssk)->icsk_retransmits;
415 to_max = mptcp_get_pernet(net)->syn_retrans_before_tcp_fallback;
/* fall back once the retransmit budget is hit, or earlier if the
 * retransmit timer already expired for good
 */
417 if (timeouts == to_max || (timeouts < to_max && expired)) {
418 MPTCP_INC_STATS(net, MPTCP_MIB_MPCAPABLEACTIVEDROP);
419 subflow->mpc_drop = 1;
420 mptcp_subflow_early_fallback(mptcp_sk(subflow->conn), subflow);
421 }
422 } else if (ssk->sk_state == TCP_SYN_SENT) {
423 subflow->mpc_drop = 0;
/* Pernet init hook: seed defaults, then register the sysctl table.
 * A non-zero return aborts creation of the namespace.
 */
427 static int __net_init mptcp_net_init(struct net *net)
429 struct mptcp_pernet *pernet = mptcp_get_pernet(net);
431 mptcp_pernet_set_defaults(pernet);
433 return mptcp_pernet_new_table(net, pernet);
436 /* Note: the callback will only be called per extra netns */
/* Pernet exit hook: tear down the sysctl table registered at init. */
437 static void __net_exit mptcp_net_exit(struct net *net)
439 struct mptcp_pernet *pernet = mptcp_get_pernet(net);
441 mptcp_pernet_del_table(pernet);
/* Pernet registration: .size tells the core to allocate one
 * struct mptcp_pernet per namespace, indexed by mptcp_pernet_id.
 */
444 static struct pernet_operations mptcp_pernet_ops = {
445 .init = mptcp_net_init,
446 .exit = mptcp_net_exit,
447 .id = &mptcp_pernet_id,
448 .size = sizeof(struct mptcp_pernet),
/* Boot-time MPTCP initialization; failure to set up the pernet subsystem
 * is fatal at this stage, hence the panic().
 */
451 void __init mptcp_init(void)
453 mptcp_join_cookie_init();
456 if (register_pernet_subsys(&mptcp_pernet_ops) < 0)
457 panic("Failed to register MPTCP pernet subsystem.\n");
460 #if IS_ENABLED(CONFIG_MPTCP_IPV6)
461 int __init mptcpv6_init(void)
465 err = mptcp_proto_v6_init();