[linux.git] / net / mptcp / pm.c
// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

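/* dedicated workqueue servicing the deferred PM events; see pm_worker() */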
static struct workqueue_struct *pm_wq;

/* path manager command handlers */

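/* record @addr as the local address to advertise; the ADD_ADDR option itself
 * is built later, when mptcp_pm_addr_signal() is invoked from the option
 * writing code
 */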
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
                           const struct mptcp_addr_info *addr)
{
        pr_debug("msk=%p, local_id=%d", msk, addr->id);

        msk->pm.local = *addr;
        WRITE_ONCE(msk->pm.addr_signal, true);
        return 0;
}

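/* address and subflow removal are not implemented yet; the stubs below just
 * report the lack of support to the caller
 */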
int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
        return -ENOTSUPP;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
{
        return -ENOTSUPP;
}

/* path manager event handlers */

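/* invoked for each new MPTCP connection: record whether this peer is the
 * server side, for later PM decisions
 */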
void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
        struct mptcp_pm_data *pm = &msk->pm;

        pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

        WRITE_ONCE(pm->server_side, server_side);
}

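/* invoked on MP_JOIN requests: accept the new subflow and account for it
 * against the configured limit, clearing @accept_subflow once the limit
 * is reached
 */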
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
        struct mptcp_pm_data *pm = &msk->pm;
        bool ret;

        pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
                 pm->subflows_max, READ_ONCE(pm->accept_subflow));

        /* try to avoid acquiring the lock below */
        if (!READ_ONCE(pm->accept_subflow))
                return false;

        spin_lock_bh(&pm->lock);
        ret = pm->subflows < pm->subflows_max;
        if (ret && ++pm->subflows == pm->subflows_max)
                WRITE_ONCE(pm->accept_subflow, false);
        spin_unlock_bh(&pm->lock);

        return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
                                   enum mptcp_pm_status new_status)
{
        pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
                 BIT(new_status));
        if (msk->pm.status & BIT(new_status))
                return false;

        msk->pm.status |= BIT(new_status);
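        /* hold a sock reference for the queued work; it is released by
         * pm_worker() or, if the work gets cancelled, by mptcp_pm_close()
         */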
        if (queue_work(pm_wq, &msk->pm.work))
                sock_hold((struct sock *)msk);
        return true;
}

void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
        struct mptcp_pm_data *pm = &msk->pm;

        pr_debug("msk=%p", msk);

        /* try to avoid acquiring the lock below */
        if (!READ_ONCE(pm->work_pending))
                return;

        spin_lock_bh(&pm->lock);

        if (READ_ONCE(pm->work_pending))
                mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

        spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
        pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
                                  struct mptcp_subflow_context *subflow)
{
        struct mptcp_pm_data *pm = &msk->pm;

        pr_debug("msk=%p", msk);

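        /* try to avoid acquiring the lock below */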
        if (!READ_ONCE(pm->work_pending))
                return;

        spin_lock_bh(&pm->lock);

        if (READ_ONCE(pm->work_pending))
                mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

        spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
        pr_debug("msk=%p", msk);
}

void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
                                const struct mptcp_addr_info *addr)
{
        struct mptcp_pm_data *pm = &msk->pm;

        pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
                 READ_ONCE(pm->accept_addr));

        /* avoid acquiring the lock if there is no room for further addresses */
        if (!READ_ONCE(pm->accept_addr))
                return;

        spin_lock_bh(&pm->lock);

        /* re-check under the PM lock that we can still accept this address */
        if (READ_ONCE(pm->accept_addr) &&
            mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
                pm->remote = *addr;

        spin_unlock_bh(&pm->lock);
}

/* path manager helpers */

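/* invoked from the option writing code: if an ADD_ADDR announcement is
 * pending and fits in the @remaining option space, copy it into @saddr and
 * clear the pending flag
 */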
bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
                          struct mptcp_addr_info *saddr)
{
        bool ret = false;

        spin_lock_bh(&msk->pm.lock);

        /* double check after the lock is acquired */
        if (!mptcp_pm_should_signal(msk))
                goto out_unlock;

        if (remaining < mptcp_add_addr_len(msk->pm.local.family))
                goto out_unlock;

        *saddr = msk->pm.local;
        WRITE_ONCE(msk->pm.addr_signal, false);
        ret = true;

out_unlock:
        spin_unlock_bh(&msk->pm.lock);
        return ret;
}

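/* look up the local address id for @skc; currently always delegated to the
 * in-kernel netlink path manager
 */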
int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
        return mptcp_pm_nl_get_local_id(msk, skc);
}

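/* service the status bits set by mptcp_pm_schedule_work(): runs with the msk
 * socket lock held and releases the sock reference acquired when the work
 * was queued
 */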
static void pm_worker(struct work_struct *work)
{
        struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data,
                                                work);
        struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm);
        struct sock *sk = (struct sock *)msk;

        lock_sock(sk);
        spin_lock_bh(&msk->pm.lock);

        pr_debug("msk=%p status=%x", msk, pm->status);
        if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED)) {
                pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
                mptcp_pm_nl_add_addr_received(msk);
        }
        if (pm->status & BIT(MPTCP_PM_ESTABLISHED)) {
                pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
                mptcp_pm_nl_fully_established(msk);
        }
        if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED)) {
                pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);
                mptcp_pm_nl_subflow_established(msk);
        }

        spin_unlock_bh(&msk->pm.lock);
        release_sock(sk);
        sock_put(sk);
}

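/* initialize the PM state of a freshly created msk */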
void mptcp_pm_data_init(struct mptcp_sock *msk)
{
        msk->pm.add_addr_signaled = 0;
        msk->pm.add_addr_accepted = 0;
        msk->pm.local_addr_used = 0;
        msk->pm.subflows = 0;
        WRITE_ONCE(msk->pm.work_pending, false);
        WRITE_ONCE(msk->pm.addr_signal, false);
        WRITE_ONCE(msk->pm.accept_addr, false);
        WRITE_ONCE(msk->pm.accept_subflow, false);
        msk->pm.status = 0;

        spin_lock_init(&msk->pm.lock);
        INIT_WORK(&msk->pm.work, pm_worker);

        mptcp_pm_nl_data_init(msk);
}

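/* tear down PM state at msk close time: if a work item was still queued,
 * drop the sock reference it held
 */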
void mptcp_pm_close(struct mptcp_sock *msk)
{
        if (cancel_work_sync(&msk->pm.work))
                sock_put((struct sock *)msk);
}

void mptcp_pm_init(void)
{
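        /* WQ_UNBOUND: PM work has no CPU locality requirement;
         * WQ_MEM_RECLAIM guarantees forward progress even under memory
         * pressure; at most 8 work items may be active at any time
         */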
        pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
        if (!pm_wq)
                panic("Failed to allocate workqueue");

        mptcp_pm_nl_init();
}