// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}
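
/* For illustration only (a minimal userspace sketch using libbpf; the map
 * name and sizes are arbitrary, error handling elided): key_size must be 4
 * and max_entries non-zero, or the checks above fail with -EINVAL.
 *
 *	int fd = bpf_map_create(BPF_MAP_TYPE_ARRAY, "example",
 *				sizeof(__u32), sizeof(__u64), 256, NULL);
 *	__u32 key = 3;
 *	__u64 val = 42;
 *	bpf_map_update_elem(fd, &key, &val, BPF_ANY);
 */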

static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32-bit archs, roundup_pow_of_two() on a max_entries that has
	 * the uppermost bit set in u32 space is undefined behavior due to
	 * the resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

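	/* Worked example: attr->max_entries = 5
	 *   fls_long(5 - 1) = 3, mask64 = (1ULL << 3) - 1 = 7
	 * so index_mask becomes 7 and, when the Spectre v1 mitigation below
	 * is active, max_entries is rounded up to index_mask + 1 = 8.
	 */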
	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}
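
/* For an in-bounds index the "& array->index_mask" above is a no-op, since
 * max_entries - 1 <= index_mask by construction in array_map_alloc(). It
 * exists to clamp speculative out-of-bounds loads (Spectre v1) in case the
 * CPU speculates past the bounds check.
 */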

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -ENOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}
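
/* The instruction sequence above corresponds roughly to this C (sketch):
 *
 *	elem = &array->value;
 *	index = *(u32 *)key;
 *	if (index >= map->max_entries)
 *		return NULL;
 *	index &= array->index_mask;	 // only without bypass_spec_v1
 *	return elem + index * elem_size; // LSH when elem_size is a power of 2
 */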

/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}
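
/* The value buffer is laid out as one 8-byte-rounded value per possible CPU.
 * Example: value_size = 12 rounds up to 16, so with 4 possible CPUs userspace
 * must supply 4 * 16 = 64 bytes:
 *
 *	[cpu0: 16B][cpu1: 16B][cpu2: 16B][cpu3: 16B]
 */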

/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}
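
/* Iteration example for max_entries = 3: a NULL key (or any out-of-range
 * key such as U32_MAX) yields 0, 0 yields 1, 1 yields 2, and 2 returns
 * -ENOENT, which terminates the walk.
 */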

static void check_and_free_timer_in_array(struct bpf_array *arr, void *val)
{
	if (unlikely(map_value_has_timer(&arr->map)))
		bpf_timer_cancel_and_free(val + arr->map.timer_off);
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		check_and_free_timer_in_array(array, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* userspace will provide round_up(value_size, 8) bytes that will be
	 * copied into the per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks are possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

static void array_map_free_timers(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (likely(!map_value_has_timer(map)))
		return;

	for (i = 0; i < array->map.max_entries; i++)
		bpf_timer_cancel_and_free(array->value + array->elem_size * i +
					  map->timer_off);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}
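
/* Illustrative userspace use of a BPF_F_MMAPABLE array (sketch; the fd and
 * sizes are assumptions): mapping gives direct load/store access to the
 * elements with no bpf() syscall per access:
 *
 *	size_t len = 256 * sizeof(__u64);	// max_entries * value_size
 *	__u64 *vals = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, map_fd, 0);
 *	vals[3] = 42;				// element with key 3
 */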

static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return array->pptrs[index];
	return array->value + array->elem_size * index;
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu **pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = v;
			size = round_up(map->value_size, 8);
			for_each_possible_cpu(cpu) {
				bpf_long_memcpy(info->percpu_value_buf + off,
						per_cpu_ptr(pptr, cpu),
						size);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = round_up(map->value_size, 8) * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start	= bpf_array_map_seq_start,
	.next	= bpf_array_map_seq_next,
	.stop	= bpf_array_map_seq_stop,
	.show	= bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_array_map_seq_ops,
	.init_seq_private	= bpf_iter_init_array_map,
	.fini_seq_private	= bpf_iter_fini_array_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_array_map_info),
};

static int bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				   void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array->value + array->elem_size * i;
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}
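
/* Illustrative BPF-program-side use via the bpf_for_each_map_elem() helper,
 * which lands here for array maps (sketch; the map and callback names are
 * arbitrary):
 *
 *	static long find_nonzero(struct bpf_map *map, __u32 *key,
 *				 __u64 *val, void *ctx)
 *	{
 *		return *val ? 1 : 0;	// 0 - continue, 1 - stop
 *	}
 *	...
 *	bpf_for_each_map_elem(&my_array, find_nonzero, NULL, 0);
 */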

static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &percpu_array_map_btf_id,
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	u8 *old_addr, *new_addr, *old_bypass_addr;
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Programs reaching a refcount of zero while patching
			 *    is in progress are also okay since we're protected
			 *    under poke_mutex and untrack the programs before the
			 *    JIT buffer is freed. When we're still in the middle of
			 *    patching and suddenly the kallsyms entry of the
			 *    program gets evicted, we just skip the rest, which is
			 *    fine due to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			old_bypass_addr = old ? NULL : poke->bypass_addr;
			old_addr = old ? (u8 *)old->bpf_func + poke->adj_off : NULL;
			new_addr = new ? (u8 *)new->bpf_func + poke->adj_off : NULL;

			if (new) {
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, new_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				if (!old) {
					ret = bpf_arch_text_poke(poke->tailcall_bypass,
								 BPF_MOD_JUMP,
								 poke->bypass_addr,
								 NULL);
					BUG_ON(ret < 0 && ret != -EINVAL);
				}
			} else {
				ret = bpf_arch_text_poke(poke->tailcall_bypass,
							 BPF_MOD_JUMP,
							 old_bypass_addr,
							 poke->bypass_addr);
				BUG_ON(ret < 0 && ret != -EINVAL);
				/* let other CPUs finish the execution of the
				 * program so that it will not be possible to
				 * expose them to invalid nop, stack unwind,
				 * nop state
				 */
				if (!ret)
					synchronize_rcu();
				ret = bpf_arch_text_poke(poke->tailcall_target,
							 BPF_MOD_JUMP,
							 old_addr, NULL);
				BUG_ON(ret < 0 && ret != -EINVAL);
			}
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);
	spin_lock_init(&aux->owner.lock);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &prog_array_map_btf_id,
};
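
/* Illustrative BPF-program-side use of a prog array with bpf_tail_call()
 * (sketch; the map, section and program names are arbitrary):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 8);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, sizeof(__u32));
 *	} jmp_table SEC(".maps");
 *
 *	SEC("xdp")
 *	int dispatcher(struct xdp_md *ctx)
 *	{
 *		bpf_tail_call(ctx, &jmp_table, 0);
 *		return XDP_PASS;	// reached only if slot 0 is empty
 *	}
 */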

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &perf_event_array_map_btf_id,
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees the cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &cgroup_array_map_btf_id,
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}
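
/* Compared to array_map_gen_lookup() above, this variant adds a BPF_DW load
 * to fetch the stored inner-map pointer plus a null check on it, which is
 * why the out-of-bounds jump offsets are 6/5 here instead of 4/3.
 */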

static int array_of_maps_map_btf_id;
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_of_maps_map_btf_id,
};