// SPDX-License-Identifier: GPL-2.0
/* Multipath TCP
 *
 * Copyright (c) 2019, Intel Corporation.
 */
#include <linux/kernel.h>
#include <net/tcp.h>
#include <net/mptcp.h>
#include "protocol.h"

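/* single workqueue shared by all MPTCP sockets for path manager events;
 * allocated in mptcp_pm_init() with at most 8 concurrent work items
 */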
static struct workqueue_struct *pm_wq;

/* path manager command handlers */

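/* record @addr as the address to announce; mptcp_pm_addr_signal() will hand
 * it to the option-writing code once there is room for it
 */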
int mptcp_pm_announce_addr(struct mptcp_sock *msk,
			   const struct mptcp_addr_info *addr)
{
	pr_debug("msk=%p, local_id=%d", msk, addr->id);

	msk->pm.local = *addr;
	WRITE_ONCE(msk->pm.addr_signal, true);
	return 0;
}

int mptcp_pm_remove_addr(struct mptcp_sock *msk, u8 local_id)
{
	return -ENOTSUPP;
}

int mptcp_pm_remove_subflow(struct mptcp_sock *msk, u8 remote_id)
{
	return -ENOTSUPP;
}

/* path manager event handlers */

void mptcp_pm_new_connection(struct mptcp_sock *msk, int server_side)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p, token=%u side=%d", msk, msk->token, server_side);

	WRITE_ONCE(pm->server_side, server_side);
}

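/* check lockless first to avoid the PM lock on the common path, then
 * re-check the subflow limit under the lock; once the limit is reached,
 * accept_subflow is cleared so later checks fail without locking
 */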
bool mptcp_pm_allow_new_subflow(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;
	bool ret;

	pr_debug("msk=%p subflows=%d max=%d allow=%d", msk, pm->subflows,
		 pm->subflows_max, READ_ONCE(pm->accept_subflow));

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->accept_subflow))
		return false;

	spin_lock_bh(&pm->lock);
	ret = pm->subflows < pm->subflows_max;
	if (ret && ++pm->subflows == pm->subflows_max)
		WRITE_ONCE(pm->accept_subflow, false);
	spin_unlock_bh(&pm->lock);

	return ret;
}

/* return true if the new status bit is currently cleared, that is, this event
 * can be served, possibly by an already scheduled work
 */
static bool mptcp_pm_schedule_work(struct mptcp_sock *msk,
				   enum mptcp_pm_status new_status)
{
	pr_debug("msk=%p status=%x new=%lx", msk, msk->pm.status,
		 BIT(new_status));
	if (msk->pm.status & BIT(new_status))
		return false;

	msk->pm.status |= BIT(new_status);
	if (queue_work(pm_wq, &msk->pm.work))
		sock_hold((struct sock *)msk);
	return true;
}

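/* the connection is fully established; mark the event and kick pm_wq so that
 * pm_worker() can process it
 */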
void mptcp_pm_fully_established(struct mptcp_sock *msk)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	/* try to avoid acquiring the lock below */
	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_connection_closed(struct mptcp_sock *msk)
{
	pr_debug("msk=%p", msk);
}

void mptcp_pm_subflow_established(struct mptcp_sock *msk,
				  struct mptcp_subflow_context *subflow)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p", msk);

	if (!READ_ONCE(pm->work_pending))
		return;

	spin_lock_bh(&pm->lock);

	if (READ_ONCE(pm->work_pending))
		mptcp_pm_schedule_work(msk, MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&pm->lock);
}

void mptcp_pm_subflow_closed(struct mptcp_sock *msk, u8 id)
{
	pr_debug("msk=%p", msk);
}

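/* an ADD_ADDR option carrying @addr has been received; if the peer address
 * limit still allows it, store the remote address and let pm_wq react to it
 */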
void mptcp_pm_add_addr_received(struct mptcp_sock *msk,
				const struct mptcp_addr_info *addr)
{
	struct mptcp_pm_data *pm = &msk->pm;

	pr_debug("msk=%p remote_id=%d accept=%d", msk, addr->id,
		 READ_ONCE(pm->accept_addr));

	/* avoid acquiring the lock if there is no room for further addresses */
	if (!READ_ONCE(pm->accept_addr))
		return;

	spin_lock_bh(&pm->lock);

	/* re-check under the PM lock to be sure there is something to signal */
	if (READ_ONCE(pm->accept_addr) &&
	    mptcp_pm_schedule_work(msk, MPTCP_PM_ADD_ADDR_RECEIVED))
		pm->remote = *addr;

	spin_unlock_bh(&pm->lock);
}

/* path manager helpers */

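/* called by the option-writing code with @remaining bytes of TCP option
 * space left; fetch the address to announce, if any fits in that space
 */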
bool mptcp_pm_addr_signal(struct mptcp_sock *msk, unsigned int remaining,
			  struct mptcp_addr_info *saddr)
{
	bool ret = false;

	spin_lock_bh(&msk->pm.lock);

	/* double check after the lock is acquired */
	if (!mptcp_pm_should_signal(msk))
		goto out_unlock;

	if (remaining < mptcp_add_addr_len(msk->pm.local.family))
		goto out_unlock;

	*saddr = msk->pm.local;
	WRITE_ONCE(msk->pm.addr_signal, false);
	ret = true;

out_unlock:
	spin_unlock_bh(&msk->pm.lock);
	return ret;
}

int mptcp_pm_get_local_id(struct mptcp_sock *msk, struct sock_common *skc)
{
	return 0;
}

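/* workqueue callback: runs with the msk socket lock held and consumes the
 * status bits set by mptcp_pm_schedule_work(); for now each event is only
 * acknowledged by clearing its bit. The sock reference taken when the work
 * was queued is released here.
 */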
static void pm_worker(struct work_struct *work)
{
	struct mptcp_pm_data *pm = container_of(work, struct mptcp_pm_data,
						work);
	struct mptcp_sock *msk = container_of(pm, struct mptcp_sock, pm);
	struct sock *sk = (struct sock *)msk;

	lock_sock(sk);
	spin_lock_bh(&msk->pm.lock);

	pr_debug("msk=%p status=%x", msk, pm->status);
	if (pm->status & BIT(MPTCP_PM_ADD_ADDR_RECEIVED))
		pm->status &= ~BIT(MPTCP_PM_ADD_ADDR_RECEIVED);
	if (pm->status & BIT(MPTCP_PM_ESTABLISHED))
		pm->status &= ~BIT(MPTCP_PM_ESTABLISHED);
	if (pm->status & BIT(MPTCP_PM_SUBFLOW_ESTABLISHED))
		pm->status &= ~BIT(MPTCP_PM_SUBFLOW_ESTABLISHED);

	spin_unlock_bh(&msk->pm.lock);
	release_sock(sk);
	sock_put(sk);
}

void mptcp_pm_data_init(struct mptcp_sock *msk)
{
	msk->pm.add_addr_signaled = 0;
	msk->pm.add_addr_accepted = 0;
	msk->pm.local_addr_used = 0;
	msk->pm.subflows = 0;
	WRITE_ONCE(msk->pm.work_pending, false);
	WRITE_ONCE(msk->pm.addr_signal, false);
	WRITE_ONCE(msk->pm.accept_addr, false);
	WRITE_ONCE(msk->pm.accept_subflow, false);
	msk->pm.status = 0;

	spin_lock_init(&msk->pm.lock);
	INIT_WORK(&msk->pm.work, pm_worker);
}

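/* if a work item was still queued, cancel_work_sync() returns true and we
 * must drop the sock reference that pm_worker() would otherwise release
 */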
void mptcp_pm_close(struct mptcp_sock *msk)
{
	if (cancel_work_sync(&msk->pm.work))
		sock_put((struct sock *)msk);
}

void mptcp_pm_init(void)
{
	pm_wq = alloc_workqueue("pm_wq", WQ_UNBOUND | WQ_MEM_RECLAIM, 8);
	if (!pm_wq)
		panic("Failed to allocate workqueue");
}