// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = __alloc_percpu_gfp(array->elem_size, 8,
					 GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & BPF_F_MMAPABLE)
		return -EINVAL;

	if (attr->value_size > KMALLOC_MAX_SIZE)
		/* if value_size is bigger, the user space won't be able to
		 * access the elements.
		 */
		return -E2BIG;

	return 0;
}

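/* Illustrative sketch (user-space side, not part of this file): attributes
 * that pass the checks above need a 4-byte key, a non-zero value size and a
 * non-zero max_entries, e.g. via the classic libbpf wrapper around the
 * bpf() syscall (names here are an assumption for illustration only):
 *
 *	int map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY,
 *				    sizeof(__u32),	// key_size, must be 4
 *				    sizeof(long),	// value_size, > 0
 *				    256,		// max_entries, > 0
 *				    0);			// map_flags
 */
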
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int ret, numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1();
	u64 cost, array_size, mask64;
	struct bpf_map_memory mem;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;

	index_mask = mask64;
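	/* Worked example (illustrative): for max_entries == 0xC0000000,
	 * fls_long(0xBFFFFFFF) == 32, so mask64 == (1ULL << 32) - 1 ==
	 * 0xFFFFFFFF; computing 1UL << 32 directly in 32-bit space would be
	 * undefined. The overflow check below then catches the case where
	 * index_mask + 1 wraps around to 0 in u32.
	 */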
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}

	/* make sure there is no u32 overflow later in round_up() */
	cost = array_size;
	if (percpu)
		cost += (u64)attr->max_entries * elem_size * num_possible_cpus();

	ret = bpf_map_charge_init(&mem, cost);
	if (ret < 0)
		return ERR_PTR(ret);

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data) {
			bpf_map_charge_finish(&mem);
			return ERR_PTR(-ENOMEM);
		}
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array) {
		bpf_map_charge_finish(&mem);
		return ERR_PTR(-ENOMEM);
	}
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	bpf_map_charge_move(&array->map.memory, &mem);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_charge_finish(&array->map.memory);
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + array->elem_size * (index & array->index_mask);
}

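/* Illustrative sketch (BPF program side, assumptions: libbpf map definition
 * macros and the map name "my_array"): the lookup above backs the
 * bpf_map_lookup_elem() helper, which returns either a pointer into
 * array->value or NULL for an out-of-range index:
 *
 *	__u32 key = 3;
 *	long *val = bpf_map_lookup_elem(&my_array, &key);
 *
 *	if (val)		// NULL when key >= max_entries
 *		(*val)++;	// direct, lock-free access to the element
 */
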
static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static u32 array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = round_up(map->value_size, 8);
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

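/* For reference, with Spectre v1 mitigation enabled and a power-of-2
 * elem_size the sequence emitted above corresponds roughly to:
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)
 *	if r0 >= max_entries goto +4	// out-of-range -> NULL path
 *	r0 &= index_mask		// clamp speculative access
 *	r0 <<= ilog2(elem_size)
 *	r0 += r1
 *	goto +1
 *	r0 = 0				// NULL result
 */
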
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(value + off, per_cpu_ptr(pptr, cpu), size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

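/* Illustrative sketch (user-space side, assumptions: libbpf helpers): the
 * syscall that lands here must supply a buffer covering every possible CPU,
 * with each slot padded to 8 bytes as described above:
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u32 key = 0;
 *	__u64 *vals = calloc(ncpus, sizeof(__u64));	// assumes value_size <= 8
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, vals))
 *		for (int i = 0; i < ncpus; i++)
 *			printf("cpu%d: %llu\n", i, vals[i]);
 */
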
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_update_elem(struct bpf_map *map, void *key, void *value,
				 u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(map)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		memcpy(this_cpu_ptr(array->pptrs[index & array->index_mask]),
		       value, map->value_size);
	} else {
		val = array->value +
			array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
	}
	return 0;
}

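/* Illustrative sketch (user-space side): the BPF_F_LOCK path above lets the
 * syscall update a value while holding its embedded bpf_spin_lock. The map
 * must have been created with BTF describing the lock field, otherwise
 * map_value_has_spin_lock() rejects the flag:
 *
 *	struct val { struct bpf_spin_lock lock; long data; } v = { .data = 7 };
 *	__u32 key = 0;
 *
 *	bpf_map_update_elem(map_fd, &key, &v, BPF_F_LOCK);
 */
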
int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* the user space will provide round_up(value_size, 8) bytes that
	 * will be copied into per-cpu area. bpf programs can only access
	 * value_size of it. During lookup the same extra bytes will be
	 * returned or zeros which were zero-filled by percpu_alloc,
	 * so no kernel data leaks possible
	 */
	size = round_up(map->value_size, 8);
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value + off, size);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static int array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_puts(m, "\n");

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_puts(m, "\n");
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

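/* Illustrative sketch (user-space side): an array created with
 * BPF_F_MMAPABLE can be mapped directly through its map fd, giving
 * zero-syscall access to the values:
 *
 *	size_t len = max_entries * sizeof(__u64);	// assuming 8-byte values
 *	__u64 *vals = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			   MAP_SHARED, map_fd, 0);
 *
 *	if (vals != MAP_FAILED)
 *		vals[0] = 42;	// writes element 0 in place
 */
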
static int array_map_btf_id;
const struct bpf_map_ops array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_map_btf_id,
};

static int percpu_array_map_btf_id;
const struct bpf_map_ops percpu_array_map_ops = {
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &percpu_array_map_btf_id,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(old_ptr);
	return 0;
}

static int fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(old_ptr);
		return 0;
	} else {
		return -ENOENT;
	}
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_array_compatible(array, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(void *ptr)
{
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		fd_array_map_delete_elem(map, &i);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_puts(m, "\n");
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i, ret;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->ip_stable is not
			 *    active yet. The JIT will do the final fixup before
			 *    setting it stable. The various poke->ip_stable are
			 *    successively activated, so tail call updates can
			 *    arrive from here while JIT is still finishing its
			 *    final fixup for non-activated poke entries.
			 * 3) On program teardown, the program's kallsym entry gets
			 *    removed out of RCU callback, but we can only untrack
			 *    from sleepable context, therefore bpf_arch_text_poke()
			 *    might not see that this is in BPF text section and
			 *    bails out with -EINVAL. As these are unreachable since
			 *    RCU grace period already passed, we simply skip them.
			 * 4) Programs reaching a refcount of zero while patching
			 *    is in progress are also okay since we're protected
			 *    under poke_mutex and untrack the programs before the
			 *    JIT buffer is freed. When we're still in the middle of
			 *    patching and suddenly kallsyms entry of the program
			 *    gets evicted, we just skip the rest which is fine due
			 *    to point 3).
			 * 5) Any other error happening below from bpf_arch_text_poke()
			 *    is an unexpected bug.
			 */
			if (!READ_ONCE(poke->ip_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			ret = bpf_arch_text_poke(poke->ip, BPF_MOD_JUMP,
						 old ? (u8 *)old->bpf_func +
						 poke->adj_off : NULL,
						 new ? (u8 *)new->bpf_func +
						 poke->adj_off : NULL);
			BUG_ON(ret < 0 && ret != -EINVAL);
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

static int prog_array_map_btf_id;
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_btf_name = "bpf_array",
	.map_btf_id = &prog_array_map_btf_id,
};

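/* Illustrative sketch (BPF program side, assumptions: a libbpf-defined
 * BPF_MAP_TYPE_PROG_ARRAY named "jmp_table"): prog arrays are consumed via
 * bpf_tail_call(), which is exactly the site the poke/text_poke machinery
 * above patches into a direct jump when possible:
 *
 *	__u32 slot = 2;
 *
 *	bpf_tail_call(ctx, &jmp_table, slot);
 *	// only reached when the slot is empty or out of range
 *	return 0;
 */
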
static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_ATOMIC);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(void *ptr)
{
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			fd_array_map_delete_elem(map, &i);
	}
	rcu_read_unlock();
}

static int perf_event_array_map_btf_id;
const struct bpf_map_ops perf_event_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &perf_event_array_map_btf_id,
};

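/* Illustrative sketch (BPF program side, assumptions: a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY named "events"): this map type is typically
 * the target of bpf_perf_event_output(), which streams a sample to the perf
 * ring buffer stored in the current CPU's slot:
 *
 *	struct event { __u32 pid; } e = {
 *		.pid = bpf_get_current_pid_tgid() >> 32,
 *	};
 *
 *	bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, &e, sizeof(e));
 *
 * User space then reads the samples, e.g. via libbpf's perf_buffer__poll().
 */
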
#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(void *ptr)
{
	/* cgroup_put() frees cgrp after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static int cgroup_array_map_btf_id;
const struct bpf_map_ops cgroup_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &cgroup_array_map_btf_id,
};
#endif

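/* Illustrative sketch (BPF program side, assumptions: a
 * BPF_MAP_TYPE_CGROUP_ARRAY named "cgrp_map"): the cgroup references pinned
 * by the code above are queried with helpers such as
 * bpf_current_task_under_cgroup():
 *
 *	if (bpf_current_task_under_cgroup(&cgrp_map, 0) == 1)
 *		return 0;	// current task is in the cgroup at index 0
 */
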
static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static u32 array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = round_up(map->value_size, 8);
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

static int array_of_maps_map_btf_id;
const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_check_btf = map_check_no_btf,
	.map_btf_name = "bpf_array",
	.map_btf_id = &array_of_maps_map_btf_id,
};

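/* Illustrative sketch (BPF program side, assumptions: a libbpf-defined
 * BPF_MAP_TYPE_ARRAY_OF_MAPS named "outer_map"): a lookup in an
 * array-of-maps first yields the inner map, which is then queried like any
 * other map:
 *
 *	__u32 outer_key = 0, inner_key = 1;
 *	void *inner = bpf_map_lookup_elem(&outer_map, &outer_key);
 *
 *	if (inner) {
 *		long *val = bpf_map_lookup_elem(inner, &inner_key);
 *		if (val)
 *			(*val)++;
 *	}
 */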