// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);

static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = bpf_sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata));

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the sk_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the sk_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_sk_storage = bpf_selem_unlink_storage_nolock(sk_storage,
								  selem, true);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
	return &smap->map;
}

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = bpf_sk_storage_del(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}

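/* Editor's sketch (not part of the original file): the fd-keyed syscall
 * path implemented by the three bpf_fd_sk_storage_*_elem() functions above,
 * as seen from userspace via libbpf. It assumes a BPF_MAP_TYPE_SK_STORAGE
 * map with an 8-byte value; map_fd/sock_fd are hypothetical descriptors.
 */
#if 0	/* illustrative only; not compiled with this file */
#include <bpf/bpf.h>

static int touch_sk_storage(int map_fd, int sock_fd)
{
	__u64 val = 0;

	/* create or overwrite this socket's element */
	if (bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_ANY))
		return -1;
	/* read it back ... */
	if (bpf_map_lookup_elem(map_fd, &sock_fd, &val))
		return -1;
	/* ... and drop it; a second delete would return -ENOENT */
	return bpf_map_delete_elem(map_fd, &sock_fd);
}
#endif
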
static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true);
	if (!copy_selem)
		return NULL;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding a new element
		 * here can race with cleanup in bpf_local_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here, the
	 * caller is responsible for calling bpf_sk_storage_free.
	 */

	return ret;
}

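/* Editor's sketch (not part of the original file): the map-create attrs
 * that enable the clone path above. bpf_local_storage_map_alloc_check()
 * requires key_size == 4, max_entries == 0 and BPF_F_NO_PREALLOC; adding
 * BPF_F_CLONE is what makes bpf_sk_storage_clone() copy a listener's
 * storage into the child socket. The 8-byte value type is an assumed
 * example.
 */
#if 0	/* illustrative only; not compiled with this file */
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int create_clonable_sk_storage_map(void)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.map_type = BPF_MAP_TYPE_SK_STORAGE;
	attr.key_size = sizeof(int);	/* socket fd on the syscall side */
	attr.value_size = sizeof(__u64);
	attr.max_entries = 0;
	attr.map_flags = BPF_F_NO_PREALLOC | BPF_F_CLONE;

	return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
}
#endif
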
BPF_CALL_4(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	struct bpf_local_storage_data *sdata;

	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = bpf_sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add a new elem to a going-away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     * destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = bpf_sk_storage_del(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}

static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	struct sock *sk = (struct sock *)owner;

	/* same check as in sock_kmalloc() */
	if (size <= sysctl_optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < sysctl_optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
				    void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

static int sk_storage_map_btf_id;
const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_name = "bpf_local_storage_map",
	.map_btf_id = &sk_storage_map_btf_id,
	.map_local_storage_charge = bpf_sk_storage_charge,
	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
	.map_owner_storage_ptr = bpf_sk_storage_ptr,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func = bpf_sk_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func = bpf_sk_storage_get,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func = bpf_sk_storage_delete,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

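/* Editor's sketch (not part of the original file): a minimal BPF program
 * using the two helpers wired up above. The section, map and counter names
 * are assumptions; with BPF_SK_STORAGE_GET_F_CREATE the helper allocates
 * the per-socket element on first use.
 */
#if 0	/* illustrative only; built separately with clang as a BPF program */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} sk_pkt_cnt SEC(".maps");

SEC("cgroup_skb/egress")
int count_egress(struct __sk_buff *skb)
{
	struct bpf_sock *sk = skb->sk;
	__u64 *cnt;

	if (!sk)
		return 1;	/* no socket; let the packet through */

	cnt = bpf_sk_storage_get(&sk_pkt_cnt, sk, 0,
				 BPF_SK_STORAGE_GET_F_CREATE);
	if (cnt)
		__sync_fetch_and_add(cnt, 1);
	return 1;
}

char LICENSE[] SEC("license") = "GPL";
#endif
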
static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	const struct btf *btf_vmlinux;
	const struct btf_type *t;
	const char *tname;
	u32 btf_id;

	if (prog->aux->dst_prog)
		return false;

	/* Ensure the tracing program is not tracing
	 * any bpf_sk_storage*() function while also
	 * using the bpf_sk_storage_(get|delete) helpers.
	 */
	switch (prog->expected_attach_type) {
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
		/* bpf_sk_storage has no tracepoint */
		return true;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		btf_vmlinux = bpf_get_btf_vmlinux();
		btf_id = prog->aux->attach_btf_id;
		t = btf_type_by_id(btf_vmlinux, btf_id);
		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
		return !!strncmp(tname, "bpf_sk_storage",
				 strlen("bpf_sk_storage"));
	default:
		return false;
	}

	return false;
}

BPF_CALL_4(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags)
{
	if (in_irq() || in_nmi())
		return (unsigned long)NULL;

	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
	   struct sock *, sk)
{
	if (in_irq() || in_nmi())
		return -EPERM;

	return ____bpf_sk_storage_delete(map, sk);
}

const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
	.func = bpf_sk_storage_get_tracing,
	.gpl_only = false,
	.ret_type = RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.arg3_type = ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type = ARG_ANYTHING,
	.allowed = bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
	.func = bpf_sk_storage_delete_tracing,
	.gpl_only = false,
	.ret_type = RET_INTEGER,
	.arg1_type = ARG_CONST_MAP_PTR,
	.arg2_type = ARG_PTR_TO_BTF_ID,
	.arg2_btf_id = &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.allowed = bpf_sk_storage_tracing_allowed,
};

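/* Editor's sketch (not part of the original file): the tracing-side use of
 * the same helper names, which the verifier routes to the *_tracing protos
 * above. The fentry target and map are assumptions; per
 * bpf_sk_storage_tracing_allowed(), the target must not itself be a
 * bpf_sk_storage*() function.
 */
#if 0	/* illustrative only; built separately with clang as a BPF program */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_tracing.h>

struct {
	__uint(type, BPF_MAP_TYPE_SK_STORAGE);
	__uint(map_flags, BPF_F_NO_PREALLOC);
	__type(key, int);
	__type(value, __u64);
} sk_stg SEC(".maps");

SEC("fentry/inet_sock_destruct")
int BPF_PROG(on_sk_destruct, struct sock *sk)
{
	__u64 *val = bpf_sk_storage_get(&sk_stg, sk, 0, 0);

	if (val)
		bpf_printk("sk %p had %llu", sk, *val);
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif
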
struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(value_size);
}

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN,
	 * matching the check on the map_alloc_check() side.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			nr_maps++;
	}

	diag = kzalloc(sizeof(*diag) + sizeof(diag->maps[0]) * nr_maps,
		       GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);

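/* Editor's sketch (not part of the original file): building the request
 * attribute that bpf_sk_storage_diag_alloc() parses, using libmnl. nlh is
 * assumed to be an inet_diag dump request under construction and map_fd an
 * sk_storage map fd; one SK_DIAG_BPF_STORAGE_REQ_MAP_FD attr per map.
 */
#if 0	/* illustrative only; not compiled with this file */
#include <libmnl/libmnl.h>
#include <linux/inet_diag.h>
#include <linux/sock_diag.h>

static void put_sk_storage_req(struct nlmsghdr *nlh, int map_fd)
{
	struct nlattr *nest;

	nest = mnl_attr_nest_start(nlh, INET_DIAG_REQ_SK_BPF_STORAGES);
	mnl_attr_put_u32(nlh, SK_DIAG_BPF_STORAGE_REQ_MAP_FD, map_fd);
	mnl_attr_nest_end(nlh, nest);
}
#endif
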
static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed max nlattr's payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified. Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

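/* Editor's sketch (not part of the original file): a BPF iterator program
 * for the target registered below; the program and variable names are
 * assumptions. ctx->sk and ctx->value are NULL on the final invocation
 * (selem == NULL in __bpf_sk_storage_map_seq_show()), so both must be
 * checked. The loader reads nr_sockets back, e.g. through a skeleton.
 */
#if 0	/* illustrative only; built separately with clang as a BPF program */
#include "vmlinux.h"
#include <bpf/bpf_helpers.h>

__u64 nr_sockets = 0;

SEC("iter/bpf_sk_storage_map")
int count_sk_storage(struct bpf_iter__bpf_sk_storage_map *ctx)
{
	if (ctx->sk && ctx->value)
		nr_sockets++;
	return 0;
}

char LICENSE[] SEC("license") = "GPL";
#endif
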
static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	seq_info->map = aux->map;
	return 0;
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdonly_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start = bpf_sk_storage_map_seq_start,
	.next = bpf_sk_storage_map_seq_next,
	.stop = bpf_sk_storage_map_seq_stop,
	.show = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_sk_storage_map_seq_ops,
	.init_seq_private = bpf_iter_init_sk_storage_map,
	.fini_seq_private = NULL,
	.seq_priv_size = sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target = "bpf_sk_storage_map",
	.attach_target = bpf_iter_attach_map,
	.detach_target = bpf_iter_detach_map,
	.show_fdinfo = bpf_iter_map_show_fdinfo,
	.fill_link_info = bpf_iter_map_fill_link_info,
	.ctx_arg_info_size = 2,
	.ctx_arg_info = {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_RDWR_BUF_OR_NULL },
	},
	.seq_info = &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);