// SPDX-License-Identifier: GPL-2.0
/*
 * To speed up listener socket lookup, create an array to store all sockets
 * listening on the same port. This allows a decision to be made after finding
 * the first socket. An optional BPF program can also be configured for
 * selecting the socket index from the array of available sockets.
 */

#include <net/sock_reuseport.h>
#include <linux/bpf.h>
#include <linux/idr.h>
#include <linux/filter.h>
#include <linux/rcupdate.h>

#define INIT_SOCKS 128

DEFINE_SPINLOCK(reuseport_lock);

#define REUSEPORT_MIN_ID 1
static DEFINE_IDA(reuseport_ida);

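/* Lazily allocate a stable, nonzero id for a reuseport group.  The id
 * is what lets the BPF map side (see reuseport_detach_sock() below)
 * track groups whose sockets were added to a bpf map.  Callers hold
 * reuseport_lock, hence the GFP_ATOMIC allocation.
 */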
int reuseport_get_id(struct sock_reuseport *reuse)
{
	int id;

	if (reuse->reuseport_id)
		return reuse->reuseport_id;

	id = ida_simple_get(&reuseport_ida, REUSEPORT_MIN_ID, 0,
			    /* Called under reuseport_lock */
			    GFP_ATOMIC);
	if (id < 0)
		return id;

	reuse->reuseport_id = id;

	return reuse->reuseport_id;
}

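/* Allocate a zeroed group with room for @max_socks socket pointers in
 * the trailing socks[] array.  GFP_ATOMIC because this may run from
 * the softirq receive path.
 */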
static struct sock_reuseport *__reuseport_alloc(unsigned int max_socks)
{
	unsigned int size = sizeof(struct sock_reuseport) +
			    sizeof(struct sock *) * max_socks;
	struct sock_reuseport *reuse = kzalloc(size, GFP_ATOMIC);

	if (!reuse)
		return NULL;

	reuse->max_socks = max_socks;

	RCU_INIT_POINTER(reuse->prog, NULL);
	return reuse;
}

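/* Create the reuseport group for @sk (if one does not already exist)
 * and publish it through sk->sk_reuseport_cb.  Returns 0 on success
 * or -ENOMEM if the group could not be allocated.
 */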
int reuseport_alloc(struct sock *sk, bool bind_inany)
{
	struct sock_reuseport *reuse;

	/* bh lock used since this function call may precede hlist lock in
	 * soft irq of receive path or setsockopt from process context
	 */
	spin_lock_bh(&reuseport_lock);

	/* Allocation attempts can occur concurrently via the setsockopt path
	 * and the bind/hash path.  Nothing to do when we lose the race.
	 */
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	if (reuse) {
		/* Only set reuse->bind_inany if the bind_inany is true.
		 * Otherwise, it will overwrite the reuse->bind_inany
		 * which was set by the bind/hash path.
		 */
		if (bind_inany)
			reuse->bind_inany = bind_inany;
		goto out;
	}

	reuse = __reuseport_alloc(INIT_SOCKS);
	if (!reuse) {
		spin_unlock_bh(&reuseport_lock);
		return -ENOMEM;
	}

	reuse->socks[0] = sk;
	reuse->num_socks = 1;
	reuse->bind_inany = bind_inany;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

out:
	spin_unlock_bh(&reuseport_lock);

	return 0;
}
EXPORT_SYMBOL(reuseport_alloc);

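/* Double the socket array (groups larger than U16_MAX sockets are not
 * supported).  Every member's sk_reuseport_cb is switched to the new
 * group before the old one is freed via RCU.
 */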
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;
	more_reuse->reuseport_id = reuse->reuseport_id;
	more_reuse->bind_inany = reuse->bind_inany;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));
	more_reuse->synq_overflow_ts = READ_ONCE(reuse->synq_overflow_ts);

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}

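/* RCU callback: drop the group's reference on its BPF program, release
 * the IDA id if one was allocated, and free the structure.
 */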
static void reuseport_free_rcu(struct rcu_head *head)
{
	struct sock_reuseport *reuse;

	reuse = container_of(head, struct sock_reuseport, rcu);
	sk_reuseport_prog_free(rcu_dereference_protected(reuse->prog, 1));
	if (reuse->reuseport_id)
		ida_simple_remove(&reuseport_ida, reuse->reuseport_id);
	kfree(reuse);
}

/**
 * reuseport_add_sock - Add a socket to the reuseport group of another.
 * @sk:  New socket to add to the group.
 * @sk2: Socket belonging to the existing reuseport group.
 * @bind_inany: Whether or not the group is bound to a local INADDR_ANY address.
 * May return ENOMEM and not add socket to group under memory pressure.
 */
int reuseport_add_sock(struct sock *sk, struct sock *sk2, bool bind_inany)
{
	struct sock_reuseport *old_reuse, *reuse;

	if (!rcu_access_pointer(sk2->sk_reuseport_cb)) {
		int err = reuseport_alloc(sk2, bind_inany);

		if (err)
			return err;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk2->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					      lockdep_is_held(&reuseport_lock));
	if (old_reuse && old_reuse->num_socks != 1) {
		spin_unlock_bh(&reuseport_lock);
		return -EBUSY;
	}

	if (reuse->num_socks == reuse->max_socks) {
		reuse = reuseport_grow(reuse);
		if (!reuse) {
			spin_unlock_bh(&reuseport_lock);
			return -ENOMEM;
		}
	}

	reuse->socks[reuse->num_socks] = sk;
	/* paired with smp_rmb() in reuseport_select_sock() */
	smp_wmb();
	reuse->num_socks++;
	rcu_assign_pointer(sk->sk_reuseport_cb, reuse);

	spin_unlock_bh(&reuseport_lock);

	if (old_reuse)
		call_rcu(&old_reuse->rcu, reuseport_free_rcu);
	return 0;
}

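/* Remove @sk from its group: notify the BPF map side if needed, clear
 * sk->sk_reuseport_cb, compact the socket array, and free the group
 * via RCU once the last member is gone.
 */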
void reuseport_detach_sock(struct sock *sk)
{
	struct sock_reuseport *reuse;
	int i;

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));

	/* At least one of the sk in this reuseport group is added to
	 * a bpf map.  Notify the bpf side.  The bpf map logic will
	 * remove the sk if it is indeed added to a bpf map.
	 */
	if (reuse->reuseport_id)
		bpf_sk_reuseport_detach(sk);

	rcu_assign_pointer(sk->sk_reuseport_cb, NULL);

	for (i = 0; i < reuse->num_socks; i++) {
		if (reuse->socks[i] == sk) {
			reuse->socks[i] = reuse->socks[reuse->num_socks - 1];
			reuse->num_socks--;
			if (reuse->num_socks == 0)
				call_rcu(&reuse->rcu, reuseport_free_rcu);
			break;
		}
	}
	spin_unlock_bh(&reuseport_lock);
}
EXPORT_SYMBOL(reuseport_detach_sock);

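/* Run a BPF program other than BPF_PROG_TYPE_SK_REUSEPORT (i.e. a
 * classic or socket-filter style program) to pick an index into the
 * socket array.  The skb is cloned if shared, and its data pointer is
 * temporarily advanced past the protocol header because the filter
 * expects to see payload data.  An out-of-range index is treated as
 * "no selection" and the caller falls back to hash selection.
 */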
static struct sock *run_bpf_filter(struct sock_reuseport *reuse, u16 socks,
				   struct bpf_prog *prog, struct sk_buff *skb,
				   int hdr_len)
{
	struct sk_buff *nskb = NULL;
	u32 index;

	if (skb_shared(skb)) {
		nskb = skb_clone(skb, GFP_ATOMIC);
		if (!nskb)
			return NULL;
		skb = nskb;
	}

	/* temporarily advance data past protocol header */
	if (!pskb_pull(skb, hdr_len)) {
		kfree_skb(nskb);
		return NULL;
	}
	index = bpf_prog_run_save_cb(prog, skb);
	__skb_push(skb, hdr_len);

	consume_skb(nskb);

	if (index >= socks)
		return NULL;

	return reuse->socks[index];
}

/**
 * reuseport_select_sock - Select a socket from an SO_REUSEPORT group.
 * @sk: First socket in the group.
 * @hash: When no BPF filter is available, use this hash to select.
 * @skb: skb to run through BPF filter.
 * @hdr_len: BPF filter expects skb data pointer at payload data.  If
 *   the skb does not yet point at the payload, this parameter represents
 *   how far the pointer needs to advance to reach the payload.
 * Returns a socket that should receive the packet (or NULL on error).
 */
struct sock *reuseport_select_sock(struct sock *sk,
				   u32 hash,
				   struct sk_buff *skb,
				   int hdr_len)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *prog;
	struct sock *sk2 = NULL;
	u16 socks;

	rcu_read_lock();
	reuse = rcu_dereference(sk->sk_reuseport_cb);

	/* if memory allocation failed or add call is not yet complete */
	if (!reuse)
		goto out;

	prog = rcu_dereference(reuse->prog);
	socks = READ_ONCE(reuse->num_socks);
	if (likely(socks)) {
		/* paired with smp_wmb() in reuseport_add_sock() */
		smp_rmb();

		if (!prog || !skb)
			goto select_by_hash;

		if (prog->type == BPF_PROG_TYPE_SK_REUSEPORT)
			sk2 = bpf_run_sk_reuseport(reuse, sk, prog, skb, hash);
		else
			sk2 = run_bpf_filter(reuse, socks, prog, skb, hdr_len);

select_by_hash:
		/* no bpf or invalid bpf result: fall back to hash usage */
		if (!sk2)
			sk2 = reuse->socks[reciprocal_scale(hash, socks)];
	}

out:
	rcu_read_unlock();
	return sk2;
}
EXPORT_SYMBOL(reuseport_select_sock);

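/* Attach or replace the group's BPF selection program.  If @sk is not
 * yet hashed but has SO_REUSEPORT set, a group is allocated here so
 * that a program can be attached before the socket is hashed; without
 * SO_REUSEPORT the attach is rejected with -EINVAL.  The old program,
 * if any, is released after the swap.
 */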
int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog)
{
	struct sock_reuseport *reuse;
	struct bpf_prog *old_prog;

	if (sk_unhashed(sk) && sk->sk_reuseport) {
		int err = reuseport_alloc(sk, false);

		if (err)
			return err;
	} else if (!rcu_access_pointer(sk->sk_reuseport_cb)) {
		/* The socket wasn't bound with SO_REUSEPORT */
		return -EINVAL;
	}

	spin_lock_bh(&reuseport_lock);
	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
					  lockdep_is_held(&reuseport_lock));
	old_prog = rcu_dereference_protected(reuse->prog,
					     lockdep_is_held(&reuseport_lock));
	rcu_assign_pointer(reuse->prog, prog);
	spin_unlock_bh(&reuseport_lock);

	sk_reuseport_prog_free(old_prog);
	return 0;
}
EXPORT_SYMBOL(reuseport_attach_prog);
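
/* Illustrative user-space sketch (not part of this file; it assumes
 * the SO_ATTACH_REUSEPORT_CBPF socket option introduced alongside this
 * code): a classic BPF program whose return value indexes the socks[]
 * array above, here spreading packets by the CPU that received them.
 *
 *	struct sock_filter code[] = {
 *		// A = raw_smp_processor_id()
 *		{ BPF_LD | BPF_W | BPF_ABS, 0, 0, SKF_AD_OFF + SKF_AD_CPU },
 *		// return A as the socket index
 *		{ BPF_RET | BPF_A, 0, 0, 0 },
 *	};
 *	struct sock_fprog fprog = { .len = 2, .filter = code };
 *	int one = 1;
 *
 *	setsockopt(fd, SOL_SOCKET, SO_REUSEPORT, &one, sizeof(one));
 *	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
 *	setsockopt(fd, SOL_SOCKET, SO_ATTACH_REUSEPORT_CBPF,
 *		   &fprog, sizeof(fprog));
 */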