// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016,2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include <linux/btf_ids.h>

#include "map_in_map.h"

#define ARRAY_CREATE_FLAG_MASK \
	(BPF_F_NUMA_NODE | BPF_F_MMAPABLE | BPF_F_ACCESS_MASK | \
	 BPF_F_PRESERVE_ELEMS | BPF_F_INNER_MAP)

static void bpf_array_free_percpu(struct bpf_array *array)
{
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		free_percpu(array->pptrs[i]);
		cond_resched();
	}
}

static int bpf_array_alloc_percpu(struct bpf_array *array)
{
	void __percpu *ptr;
	int i;

	for (i = 0; i < array->map.max_entries; i++) {
		ptr = bpf_map_alloc_percpu(&array->map, array->elem_size, 8,
					   GFP_USER | __GFP_NOWARN);
		if (!ptr) {
			bpf_array_free_percpu(array);
			return -ENOMEM;
		}
		array->pptrs[i] = ptr;
		cond_resched();
	}

	return 0;
}

/* Called from syscall */
int array_map_alloc_check(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    attr->value_size == 0 ||
	    attr->map_flags & ~ARRAY_CREATE_FLAG_MASK ||
	    !bpf_map_flags_access_ok(attr->map_flags) ||
	    (percpu && numa_node != NUMA_NO_NODE))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_ARRAY &&
	    attr->map_flags & (BPF_F_MMAPABLE | BPF_F_INNER_MAP))
		return -EINVAL;

	if (attr->map_type != BPF_MAP_TYPE_PERF_EVENT_ARRAY &&
	    attr->map_flags & BPF_F_PRESERVE_ELEMS)
		return -EINVAL;

	/* avoid overflow on round_up(map->value_size) */
	if (attr->value_size > INT_MAX)
		return -E2BIG;
	/* percpu map value size is bound by PCPU_MIN_UNIT_SIZE */
	if (percpu && round_up(attr->value_size, 8) > PCPU_MIN_UNIT_SIZE)
		return -E2BIG;

	return 0;
}

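/* Illustrative aside (not part of the original source): a minimal map_create
 * attribute that passes the checks above, roughly as user space would issue
 * it via the bpf(2) syscall:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_ARRAY,
 *		.key_size    = 4,	// must be exactly a u32 index
 *		.value_size  = 64,	// any non-zero size up to INT_MAX
 *		.max_entries = 256,
 *	};
 *	int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */
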
static struct bpf_map *array_map_alloc(union bpf_attr *attr)
{
	bool percpu = attr->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	int numa_node = bpf_map_attr_numa_node(attr);
	u32 elem_size, index_mask, max_entries;
	bool bypass_spec_v1 = bpf_bypass_spec_v1(NULL);
	u64 array_size, mask64;
	struct bpf_array *array;

	elem_size = round_up(attr->value_size, 8);

	max_entries = attr->max_entries;

	/* On 32 bit archs roundup_pow_of_two() with max_entries that has
	 * upper most bit set in u32 space is undefined behavior due to
	 * resulting 1U << 32, so do it manually here in u64 space.
	 */
	mask64 = fls_long(max_entries - 1);
	mask64 = 1ULL << mask64;
	mask64 -= 1;
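
	/* Worked example (illustrative, not in the original source):
	 * max_entries = 0x80000001 -> fls_long(0x80000000) = 32, so
	 * mask64 = (1ULL << 32) - 1 = 0xffffffff. In u32 space the shift
	 * 1U << 32 would be undefined, hence the u64 arithmetic above.
	 */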

	index_mask = mask64;
	if (!bypass_spec_v1) {
		/* round up array size to nearest power of 2,
		 * since cpu will speculate within index_mask limits
		 */
		max_entries = index_mask + 1;
		/* Check for overflows. */
		if (max_entries < attr->max_entries)
			return ERR_PTR(-E2BIG);
	}

	array_size = sizeof(*array);
	if (percpu) {
		array_size += (u64) max_entries * sizeof(void *);
	} else {
		/* rely on vmalloc() to return page-aligned memory and
		 * ensure array->value is exactly page-aligned
		 */
		if (attr->map_flags & BPF_F_MMAPABLE) {
			array_size = PAGE_ALIGN(array_size);
			array_size += PAGE_ALIGN((u64) max_entries * elem_size);
		} else {
			array_size += (u64) max_entries * elem_size;
		}
	}
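
	/* Illustrative layout note (not in the original source): for
	 * BPF_F_MMAPABLE the sizing above produces
	 *
	 *	+---------------------+ <- page-aligned vmalloc area
	 *	| struct bpf_array    |   (header, padded to PAGE_SIZE)
	 *	+---------------------+ <- array->value, page-aligned
	 *	| max_entries * elem  |   (padded to PAGE_SIZE)
	 *	+---------------------+
	 *
	 * so that user space can mmap() the value area directly.
	 */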

	/* allocate all map elements and zero-initialize them */
	if (attr->map_flags & BPF_F_MMAPABLE) {
		void *data;

		/* kmalloc'ed memory can't be mmap'ed, use explicit vmalloc */
		data = bpf_map_area_mmapable_alloc(array_size, numa_node);
		if (!data)
			return ERR_PTR(-ENOMEM);
		array = data + PAGE_ALIGN(sizeof(struct bpf_array))
			- offsetof(struct bpf_array, value);
	} else {
		array = bpf_map_area_alloc(array_size, numa_node);
	}
	if (!array)
		return ERR_PTR(-ENOMEM);
	array->index_mask = index_mask;
	array->map.bypass_spec_v1 = bypass_spec_v1;

	/* copy mandatory map attributes */
	bpf_map_init_from_attr(&array->map, attr);
	array->elem_size = elem_size;

	if (percpu && bpf_array_alloc_percpu(array)) {
		bpf_map_area_free(array);
		return ERR_PTR(-ENOMEM);
	}

	return &array->map;
}

static void *array_map_elem_ptr(struct bpf_array *array, u32 index)
{
	return array->value + (u64)array->elem_size * index;
}

/* Called from syscall or from eBPF program */
static void *array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return array->value + (u64)array->elem_size * (index & array->index_mask);
}

static int array_map_direct_value_addr(const struct bpf_map *map, u64 *imm,
				       u32 off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (off >= map->value_size)
		return -EINVAL;

	*imm = (unsigned long)array->value;
	return 0;
}

static int array_map_direct_value_meta(const struct bpf_map *map, u64 imm,
				       u32 *off)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u64 base = (unsigned long)array->value;
	u64 range = array->elem_size;

	if (map->max_entries != 1)
		return -EOPNOTSUPP;
	if (imm < base || imm >= base + range)
		return -ENOENT;

	*off = imm - base;
	return 0;
}

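/* Note (editorial, hedged): the two helpers above back the verifier's direct
 * value access for single-entry arrays (e.g. global data sections), mapping
 * between a map-value immediate in a program and array->value.
 */
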
/* emit BPF instructions equivalent to C code of array_map_lookup_elem() */
static int array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;
	u32 elem_size = array->elem_size;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 4);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 3);
	}

	if (is_power_of_2(elem_size)) {
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	} else {
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	}
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);
	return insn - insn_buf;
}

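/* Illustrative aside (not in the original source): with the Spectre v1
 * mitigation active and a power-of-2 elem_size, the sequence emitted above
 * corresponds roughly to:
 *
 *	r1 += offsetof(struct bpf_array, value)
 *	r0 = *(u32 *)(r2 + 0)		// load the key
 *	if r0 >= max_entries goto miss
 *	r0 &= index_mask		// bound speculation
 *	r0 <<= ilog2(elem_size)
 *	r0 += r1			// &value[index]
 *	goto out
 * miss:
 *	r0 = 0				// NULL
 * out:
 */
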
/* Called from eBPF program */
static void *percpu_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return this_cpu_ptr(array->pptrs[index & array->index_mask]);
}

/* emit BPF instructions equivalent to C code of percpu_array_map_lookup_elem() */
static int percpu_array_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_insn *insn = insn_buf;

	if (!bpf_jit_supports_percpu_insn())
		return -EOPNOTSUPP;

	if (map->map_flags & BPF_F_INNER_MAP)
		return -EOPNOTSUPP;

	BUILD_BUG_ON(offsetof(struct bpf_array, map) != 0);
	*insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, offsetof(struct bpf_array, pptrs));

	*insn++ = BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_2, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_0, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, BPF_REG_0, map->max_entries, 5);
	}

	*insn++ = BPF_ALU64_IMM(BPF_LSH, BPF_REG_0, 3);
	*insn++ = BPF_ALU64_REG(BPF_ADD, BPF_REG_0, BPF_REG_1);
	*insn++ = BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0);
	*insn++ = BPF_MOV64_PERCPU_REG(BPF_REG_0, BPF_REG_0);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(BPF_REG_0, 0);
	return insn - insn_buf;
}

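/* Note (editorial, hedged): the shift by 3 above scales the index by
 * sizeof(void *) to step through the pptrs[] pointer array, and
 * BPF_MOV64_PERCPU_REG then converts the fetched per-cpu pointer into this
 * CPU's address, mirroring this_cpu_ptr() in the C path.
 */
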
static void *percpu_array_map_lookup_percpu_elem(struct bpf_map *map, void *key, u32 cpu)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;

	if (cpu >= nr_cpu_ids)
		return NULL;

	if (unlikely(index >= array->map.max_entries))
		return NULL;

	return per_cpu_ptr(array->pptrs[index & array->index_mask], cpu);
}

int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(index >= array->map.max_entries))
		return -ENOENT;

	/* per_cpu areas are zero-filled and bpf programs can only
	 * access 'value_size' of them, so copying rounded areas
	 * will not leak any kernel data
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, value + off, per_cpu_ptr(pptr, cpu));
		check_and_init_map_value(map, value + off);
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

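/* Worked example (illustrative, not in the original source): for
 * value_size = 12, elem_size = round_up(12, 8) = 16, so user space must
 * supply a buffer of num_possible_cpus() * 16 bytes; the 4 padding bytes
 * per CPU come back zeroed, as the comment above explains.
 */
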
/* Called from syscall */
static int array_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = (u32 *)next_key;

	if (index >= array->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == array->map.max_entries - 1)
		return -ENOENT;

	*next = index + 1;
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_update_elem(struct bpf_map *map, void *key, void *value,
				  u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	char *val;

	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags & BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	if (unlikely((map_flags & BPF_F_LOCK) &&
		     !btf_record_has_field(map->record, BPF_SPIN_LOCK)))
		return -EINVAL;

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		val = this_cpu_ptr(array->pptrs[index & array->index_mask]);
		copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	} else {
		val = array->value +
			(u64)array->elem_size * (index & array->index_mask);
		if (map_flags & BPF_F_LOCK)
			copy_map_value_locked(map, val, value, false);
		else
			copy_map_value(map, val, value);
		bpf_obj_free_fields(array->map.record, val);
	}
	return 0;
}

int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value,
			    u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu, off = 0;
	u32 size;

	if (unlikely(map_flags > BPF_EXIST))
		/* unknown flags */
		return -EINVAL;

	if (unlikely(index >= array->map.max_entries))
		/* all elements were pre-allocated, cannot insert a new one */
		return -E2BIG;

	if (unlikely(map_flags == BPF_NOEXIST))
		/* all elements already exist */
		return -EEXIST;

	/* User space provides round_up(value_size, 8) bytes that are copied
	 * into the per-cpu area. BPF programs can only access value_size of
	 * it. During lookup the same extra bytes are returned, or zeros that
	 * were zero-filled by percpu_alloc, so no kernel data can leak.
	 */
	size = array->elem_size;
	rcu_read_lock();
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		copy_map_value_long(map, per_cpu_ptr(pptr, cpu), value + off);
		bpf_obj_free_fields(array->map.record, per_cpu_ptr(pptr, cpu));
		off += size;
	}
	rcu_read_unlock();
	return 0;
}

/* Called from syscall or from eBPF program */
static long array_map_delete_elem(struct bpf_map *map, void *key)
{
	return -EINVAL;
}

static void *array_map_vmalloc_addr(struct bpf_array *array)
{
	return (void *)round_down((unsigned long)array, PAGE_SIZE);
}

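/* Note (editorial, hedged): this mirrors the BPF_F_MMAPABLE math in
 * array_map_alloc(). The struct bpf_array header was placed so that
 * array->value lands on a page boundary, so rounding the array pointer
 * down to PAGE_SIZE recovers the start of the original vmalloc area.
 */
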
static void array_map_free_timers_wq(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* We don't reset or free fields other than timer and workqueue
	 * on uref dropping to zero.
	 */
	if (btf_record_has_field(map->record, BPF_TIMER | BPF_WORKQUEUE)) {
		for (i = 0; i < array->map.max_entries; i++) {
			if (btf_record_has_field(map->record, BPF_TIMER))
				bpf_obj_free_timer(map->record, array_map_elem_ptr(array, i));
			if (btf_record_has_field(map->record, BPF_WORKQUEUE))
				bpf_obj_free_workqueue(map->record, array_map_elem_ptr(array, i));
		}
	}
}

/* Called when map->refcnt goes to zero, either from workqueue or from syscall */
static void array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	if (!IS_ERR_OR_NULL(map->record)) {
		if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
			for (i = 0; i < array->map.max_entries; i++) {
				void __percpu *pptr = array->pptrs[i & array->index_mask];
				int cpu;

				for_each_possible_cpu(cpu) {
					bpf_obj_free_fields(map->record, per_cpu_ptr(pptr, cpu));
					cond_resched();
				}
			}
		} else {
			for (i = 0; i < array->map.max_entries; i++)
				bpf_obj_free_fields(map->record, array_map_elem_ptr(array, i));
		}
	}

	if (array->map.map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		bpf_array_free_percpu(array);

	if (array->map.map_flags & BPF_F_MMAPABLE)
		bpf_map_area_free(array_map_vmalloc_addr(array));
	else
		bpf_map_area_free(array);
}

static void array_map_seq_show_elem(struct bpf_map *map, void *key,
				    struct seq_file *m)
{
	void *value;

	rcu_read_lock();

	value = array_map_lookup_elem(map, key);
	if (!value) {
		rcu_read_unlock();
		return;
	}

	if (map->btf_key_type_id)
		seq_printf(m, "%u: ", *(u32 *)key);
	btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
	seq_putc(m, '\n');

	rcu_read_unlock();
}

static void percpu_array_map_seq_show_elem(struct bpf_map *map, void *key,
					   struct seq_file *m)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 index = *(u32 *)key;
	void __percpu *pptr;
	int cpu;

	rcu_read_lock();

	seq_printf(m, "%u: {\n", *(u32 *)key);
	pptr = array->pptrs[index & array->index_mask];
	for_each_possible_cpu(cpu) {
		seq_printf(m, "\tcpu%d: ", cpu);
		btf_type_seq_show(map->btf, map->btf_value_type_id,
				  per_cpu_ptr(pptr, cpu), m);
		seq_putc(m, '\n');
	}
	seq_puts(m, "}\n");

	rcu_read_unlock();
}

static int array_map_check_btf(const struct bpf_map *map,
			       const struct btf *btf,
			       const struct btf_type *key_type,
			       const struct btf_type *value_type)
{
	u32 int_data;

	/* One exception for keyless BTF: .bss/.data/.rodata map */
	if (btf_type_is_void(key_type)) {
		if (map->map_type != BPF_MAP_TYPE_ARRAY ||
		    map->max_entries != 1)
			return -EINVAL;

		if (BTF_INFO_KIND(value_type->info) != BTF_KIND_DATASEC)
			return -EINVAL;

		return 0;
	}

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	/* bpf array can only take a u32 key. This check makes sure
	 * that the btf matches the attr used during map_create.
	 */
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}

static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	pgoff_t pgoff = PAGE_ALIGN(sizeof(*array)) >> PAGE_SHIFT;

	if (!(map->map_flags & BPF_F_MMAPABLE))
		return -EINVAL;

	if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
	    PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
		return -EINVAL;

	return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
				   vma->vm_pgoff + pgoff);
}

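/* Note (editorial, hedged): adding pgoff skips the struct bpf_array header
 * pages inside the vmalloc area, so page offset 0 of the user mapping lands
 * exactly on array->value; the size check above keeps the mapping within
 * the page-aligned value region.
 */
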
static bool array_map_meta_equal(const struct bpf_map *meta0,
				 const struct bpf_map *meta1)
{
	if (!bpf_map_meta_equal(meta0, meta1))
		return false;
	return meta0->map_flags & BPF_F_INNER_MAP ? true :
	       meta0->max_entries == meta1->max_entries;
}

struct bpf_iter_seq_array_map_info {
	struct bpf_map *map;
	void *percpu_value_buf;
	u32 index;
};

static void *bpf_array_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	if (info->index >= map->max_entries)
		return NULL;

	if (*pos == 0)
		++*pos;
	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static void *bpf_array_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_map *map = info->map;
	struct bpf_array *array;
	u32 index;

	++*pos;
	++info->index;
	if (info->index >= map->max_entries)
		return NULL;

	array = container_of(map, struct bpf_array, map);
	index = info->index & array->index_mask;
	if (info->percpu_value_buf)
		return (void *)(uintptr_t)array->pptrs[index];
	return array_map_elem_ptr(array, index);
}

static int __bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	struct bpf_iter_seq_array_map_info *info = seq->private;
	struct bpf_iter__bpf_map_elem ctx = {};
	struct bpf_map *map = info->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int off = 0, cpu = 0;
	void __percpu *pptr;
	u32 size;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, v == NULL);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.map = info->map;
	if (v) {
		ctx.key = &info->index;

		if (!info->percpu_value_buf) {
			ctx.value = v;
		} else {
			pptr = (void __percpu *)(uintptr_t)v;
			size = array->elem_size;
			for_each_possible_cpu(cpu) {
				copy_map_value_long(map, info->percpu_value_buf + off,
						    per_cpu_ptr(pptr, cpu));
				check_and_init_map_value(map, info->percpu_value_buf + off);
				off += size;
			}
			ctx.value = info->percpu_value_buf;
		}
	}

	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_array_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_array_map_seq_show(seq, v);
}

static void bpf_array_map_seq_stop(struct seq_file *seq, void *v)
{
	if (!v)
		(void)__bpf_array_map_seq_show(seq, NULL);
}

static int bpf_iter_init_array_map(void *priv_data,
				   struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;
	struct bpf_map *map = aux->map;
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *value_buf;
	u32 buf_size;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		buf_size = array->elem_size * num_possible_cpus();
		value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
		if (!value_buf)
			return -ENOMEM;

		seq_info->percpu_value_buf = value_buf;
	}

	/* bpf_iter_attach_map() acquires a map uref, and the uref may be
	 * released before or in the middle of iterating map elements, so
	 * acquire an extra map uref for iterator.
	 */
	bpf_map_inc_with_uref(map);
	seq_info->map = map;
	return 0;
}

static void bpf_iter_fini_array_map(void *priv_data)
{
	struct bpf_iter_seq_array_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
	kfree(seq_info->percpu_value_buf);
}

static const struct seq_operations bpf_array_map_seq_ops = {
	.start = bpf_array_map_seq_start,
	.next = bpf_array_map_seq_next,
	.stop = bpf_array_map_seq_stop,
	.show = bpf_array_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops = &bpf_array_map_seq_ops,
	.init_seq_private = bpf_iter_init_array_map,
	.fini_seq_private = bpf_iter_fini_array_map,
	.seq_priv_size = sizeof(struct bpf_iter_seq_array_map_info),
};

static long bpf_for_each_array_elem(struct bpf_map *map, bpf_callback_t callback_fn,
				    void *callback_ctx, u64 flags)
{
	u32 i, key, num_elems = 0;
	struct bpf_array *array;
	bool is_percpu;
	u64 ret = 0;
	void *val;

	if (flags != 0)
		return -EINVAL;

	is_percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	array = container_of(map, struct bpf_array, map);
	if (is_percpu)
		migrate_disable();
	for (i = 0; i < map->max_entries; i++) {
		if (is_percpu)
			val = this_cpu_ptr(array->pptrs[i]);
		else
			val = array_map_elem_ptr(array, i);
		num_elems++;
		key = i;
		ret = callback_fn((u64)(long)map, (u64)(long)&key,
				  (u64)(long)val, (u64)(long)callback_ctx, 0);
		/* return value: 0 - continue, 1 - stop and return */
		if (ret)
			break;
	}

	if (is_percpu)
		migrate_enable();
	return num_elems;
}

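/* Illustrative aside (not in the original source): BPF programs reach this
 * through the bpf_for_each_map_elem() helper, roughly:
 *
 *	static long cb(struct bpf_map *map, u32 *key, void *val, void *ctx)
 *	{
 *		return 0;	// 0 = continue, 1 = stop iterating
 *	}
 *	...
 *	bpf_for_each_map_elem(&my_array_map, cb, &my_ctx, 0);
 */
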
static u64 array_map_mem_usage(const struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	bool percpu = map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY;
	u32 elem_size = array->elem_size;
	u64 entries = map->max_entries;
	u64 usage = sizeof(*array);

	if (percpu) {
		usage += entries * sizeof(void *);
		usage += entries * elem_size * num_possible_cpus();
	} else {
		if (map->map_flags & BPF_F_MMAPABLE) {
			usage = PAGE_ALIGN(usage);
			usage += PAGE_ALIGN(entries * elem_size);
		} else {
			usage += entries * elem_size;
		}
	}
	return usage;
}

BTF_ID_LIST_SINGLE(array_map_btf_ids, struct, bpf_array)
const struct bpf_map_ops array_map_ops = {
	.map_meta_equal = array_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_release_uref = array_map_free_timers_wq,
	.map_lookup_elem = array_map_lookup_elem,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_gen_lookup = array_map_gen_lookup,
	.map_direct_value_addr = array_map_direct_value_addr,
	.map_direct_value_meta = array_map_direct_value_meta,
	.map_mmap = array_map_mmap,
	.map_seq_show_elem = array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

const struct bpf_map_ops percpu_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = percpu_array_map_lookup_elem,
	.map_gen_lookup = percpu_array_map_gen_lookup,
	.map_update_elem = array_map_update_elem,
	.map_delete_elem = array_map_delete_elem,
	.map_lookup_percpu_elem = percpu_array_map_lookup_percpu_elem,
	.map_seq_show_elem = percpu_array_map_seq_show_elem,
	.map_check_btf = array_map_check_btf,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_set_for_each_callback_args = map_set_for_each_callback_args,
	.map_for_each_callback = bpf_for_each_array_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
	.iter_seq_info = &iter_seq_info,
};

static int fd_array_map_alloc_check(union bpf_attr *attr)
{
	/* only file descriptors can be stored in this type of map */
	if (attr->value_size != sizeof(u32))
		return -EINVAL;
	/* Program read-only/write-only not supported for special maps yet. */
	if (attr->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG))
		return -EINVAL;
	return array_map_alloc_check(attr);
}

static void fd_array_map_free(struct bpf_map *map)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	/* make sure it's empty */
	for (i = 0; i < array->map.max_entries; i++)
		BUG_ON(array->ptrs[i] != NULL);

	bpf_map_area_free(array);
}

static void *fd_array_map_lookup_elem(struct bpf_map *map, void *key)
{
	return ERR_PTR(-EOPNOTSUPP);
}

/* only called from syscall */
int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
{
	void **elem, *ptr;
	int ret = 0;

	if (!map->ops->map_fd_sys_lookup_elem)
		return -ENOTSUPP;

	rcu_read_lock();
	elem = array_map_lookup_elem(map, key);
	if (elem && (ptr = READ_ONCE(*elem)))
		*value = map->ops->map_fd_sys_lookup_elem(ptr);
	else
		ret = -ENOENT;
	rcu_read_unlock();

	return ret;
}

/* only called from syscall */
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
				 void *key, void *value, u64 map_flags)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *new_ptr, *old_ptr;
	u32 index = *(u32 *)key, ufd;

	if (map_flags != BPF_ANY)
		return -EINVAL;

	if (index >= array->map.max_entries)
		return -E2BIG;

	ufd = *(u32 *)value;
	new_ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
	if (IS_ERR(new_ptr))
		return PTR_ERR(new_ptr);

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, new_ptr);
		map->ops->map_poke_run(map, index, old_ptr, new_ptr);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, new_ptr);
	}

	if (old_ptr)
		map->ops->map_fd_put_ptr(map, old_ptr, true);
	return 0;
}

static long __fd_array_map_delete_elem(struct bpf_map *map, void *key, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	void *old_ptr;
	u32 index = *(u32 *)key;

	if (index >= array->map.max_entries)
		return -E2BIG;

	if (map->ops->map_poke_run) {
		mutex_lock(&array->aux->poke_mutex);
		old_ptr = xchg(array->ptrs + index, NULL);
		map->ops->map_poke_run(map, index, old_ptr, NULL);
		mutex_unlock(&array->aux->poke_mutex);
	} else {
		old_ptr = xchg(array->ptrs + index, NULL);
	}

	if (old_ptr) {
		map->ops->map_fd_put_ptr(map, old_ptr, need_defer);
		return 0;
	} else {
		return -ENOENT;
	}
}

static long fd_array_map_delete_elem(struct bpf_map *map, void *key)
{
	return __fd_array_map_delete_elem(map, key, true);
}

static void *prog_fd_array_get_ptr(struct bpf_map *map,
				   struct file *map_file, int fd)
{
	struct bpf_prog *prog = bpf_prog_get(fd);

	if (IS_ERR(prog))
		return prog;

	if (!bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return ERR_PTR(-EINVAL);
	}

	return prog;
}

static void prog_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* bpf_prog is freed after one RCU or tasks trace grace period */
	bpf_prog_put(ptr);
}

static u32 prog_fd_array_sys_lookup_elem(void *ptr)
{
	return ((struct bpf_prog *)ptr)->aux->id;
}

/* decrement refcnt of all bpf_progs that are stored in this map */
static void bpf_fd_array_map_clear(struct bpf_map *map, bool need_defer)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	int i;

	for (i = 0; i < array->map.max_entries; i++)
		__fd_array_map_delete_elem(map, &i, need_defer);
}

static void prog_array_map_seq_show_elem(struct bpf_map *map, void *key,
					 struct seq_file *m)
{
	void **elem, *ptr;
	u32 prog_id;

	rcu_read_lock();

	elem = array_map_lookup_elem(map, key);
	if (elem) {
		ptr = READ_ONCE(*elem);
		if (ptr) {
			seq_printf(m, "%u: ", *(u32 *)key);
			prog_id = prog_fd_array_sys_lookup_elem(ptr);
			btf_type_seq_show(map->btf, map->btf_value_type_id,
					  &prog_id, m);
			seq_putc(m, '\n');
		}
	}

	rcu_read_unlock();
}

struct prog_poke_elem {
	struct list_head list;
	struct bpf_prog_aux *aux;
};

static int prog_array_map_poke_track(struct bpf_map *map,
				     struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;
	int ret = 0;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry(elem, &aux->poke_progs, list) {
		if (elem->aux == prog_aux)
			goto out;
	}

	elem = kmalloc(sizeof(*elem), GFP_KERNEL);
	if (!elem) {
		ret = -ENOMEM;
		goto out;
	}

	INIT_LIST_HEAD(&elem->list);
	/* We must track the program's aux info at this point in time
	 * since the program pointer itself may not be stable yet, see
	 * also comment in prog_array_map_poke_run().
	 */
	elem->aux = prog_aux;

	list_add_tail(&elem->list, &aux->poke_progs);
out:
	mutex_unlock(&aux->poke_mutex);
	return ret;
}

static void prog_array_map_poke_untrack(struct bpf_map *map,
					struct bpf_prog_aux *prog_aux)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	mutex_lock(&aux->poke_mutex);
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		if (elem->aux == prog_aux) {
			list_del_init(&elem->list);
			kfree(elem);
			break;
		}
	}
	mutex_unlock(&aux->poke_mutex);
}

void __weak bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke,
				      struct bpf_prog *new, struct bpf_prog *old)
{
	WARN_ON_ONCE(1);
}

static void prog_array_map_poke_run(struct bpf_map *map, u32 key,
				    struct bpf_prog *old,
				    struct bpf_prog *new)
{
	struct prog_poke_elem *elem;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	WARN_ON_ONCE(!mutex_is_locked(&aux->poke_mutex));

	list_for_each_entry(elem, &aux->poke_progs, list) {
		struct bpf_jit_poke_descriptor *poke;
		int i;

		for (i = 0; i < elem->aux->size_poke_tab; i++) {
			poke = &elem->aux->poke_tab[i];

			/* Few things to be aware of:
			 *
			 * 1) We can only ever access aux in this context, but
			 *    not aux->prog since it might not be stable yet and
			 *    there could be danger of use after free otherwise.
			 * 2) Initially when we start tracking aux, the program
			 *    is not JITed yet and also does not have a kallsyms
			 *    entry. We skip these as poke->tailcall_target_stable
			 *    is not active yet. The JIT will do the final fixup
			 *    before setting it stable. The various
			 *    poke->tailcall_target_stable are successively
			 *    activated, so tail call updates can arrive from here
			 *    while JIT is still finishing its final fixup for
			 *    non-activated poke entries.
			 * 3) Programs reaching a refcount of zero while patching
			 *    is in progress are also fine, since we're protected
			 *    by poke_mutex and untrack the programs before the
			 *    JIT buffer is freed.
			 */
			if (!READ_ONCE(poke->tailcall_target_stable))
				continue;
			if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
				continue;
			if (poke->tail_call.map != map ||
			    poke->tail_call.key != key)
				continue;

			bpf_arch_poke_desc_update(poke, new, old);
		}
	}
}

static void prog_array_map_clear_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_array_aux,
					   work)->map;
	bpf_fd_array_map_clear(map, true);
	bpf_map_put(map);
}

static void prog_array_map_clear(struct bpf_map *map)
{
	struct bpf_array_aux *aux = container_of(map, struct bpf_array,
						 map)->aux;
	bpf_map_inc(map);
	schedule_work(&aux->work);
}

static struct bpf_map *prog_array_map_alloc(union bpf_attr *attr)
{
	struct bpf_array_aux *aux;
	struct bpf_map *map;

	aux = kzalloc(sizeof(*aux), GFP_KERNEL_ACCOUNT);
	if (!aux)
		return ERR_PTR(-ENOMEM);

	INIT_WORK(&aux->work, prog_array_map_clear_deferred);
	INIT_LIST_HEAD(&aux->poke_progs);
	mutex_init(&aux->poke_mutex);

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		kfree(aux);
		return map;
	}

	container_of(map, struct bpf_array, map)->aux = aux;
	aux->map = map;

	return map;
}

static void prog_array_map_free(struct bpf_map *map)
{
	struct prog_poke_elem *elem, *tmp;
	struct bpf_array_aux *aux;

	aux = container_of(map, struct bpf_array, map)->aux;
	list_for_each_entry_safe(elem, tmp, &aux->poke_progs, list) {
		list_del_init(&elem->list);
		kfree(elem);
	}
	kfree(aux);
	fd_array_map_free(map);
}

/* prog_array->aux->{type,jited} is a runtime binding.
 * Doing static check alone in the verifier is not enough.
 * Thus, prog_array_map cannot be used as an inner_map
 * and map_meta_equal is not implemented.
 */
const struct bpf_map_ops prog_array_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = prog_array_map_alloc,
	.map_free = prog_array_map_free,
	.map_poke_track = prog_array_map_poke_track,
	.map_poke_untrack = prog_array_map_poke_untrack,
	.map_poke_run = prog_array_map_poke_run,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = prog_fd_array_get_ptr,
	.map_fd_put_ptr = prog_fd_array_put_ptr,
	.map_fd_sys_lookup_elem = prog_fd_array_sys_lookup_elem,
	.map_release_uref = prog_array_map_clear,
	.map_seq_show_elem = prog_array_map_seq_show_elem,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

static struct bpf_event_entry *bpf_event_entry_gen(struct file *perf_file,
						   struct file *map_file)
{
	struct bpf_event_entry *ee;

	ee = kzalloc(sizeof(*ee), GFP_KERNEL);
	if (ee) {
		ee->event = perf_file->private_data;
		ee->perf_file = perf_file;
		ee->map_file = map_file;
	}

	return ee;
}

static void __bpf_event_entry_free(struct rcu_head *rcu)
{
	struct bpf_event_entry *ee;

	ee = container_of(rcu, struct bpf_event_entry, rcu);
	fput(ee->perf_file);
	kfree(ee);
}

static void bpf_event_entry_free_rcu(struct bpf_event_entry *ee)
{
	call_rcu(&ee->rcu, __bpf_event_entry_free);
}

static void *perf_event_fd_array_get_ptr(struct bpf_map *map,
					 struct file *map_file, int fd)
{
	struct bpf_event_entry *ee;
	struct perf_event *event;
	struct file *perf_file;
	u64 value;

	perf_file = perf_event_get(fd);
	if (IS_ERR(perf_file))
		return perf_file;

	ee = ERR_PTR(-EOPNOTSUPP);
	event = perf_file->private_data;
	if (perf_event_read_local(event, &value, NULL, NULL) == -EOPNOTSUPP)
		goto err_out;

	ee = bpf_event_entry_gen(perf_file, map_file);
	if (ee)
		return ee;
	ee = ERR_PTR(-ENOMEM);
err_out:
	fput(perf_file);
	return ee;
}

static void perf_event_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* bpf_perf_event is freed after one RCU grace period */
	bpf_event_entry_free_rcu(ptr);
}

static void perf_event_fd_array_release(struct bpf_map *map,
					struct file *map_file)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	struct bpf_event_entry *ee;
	int i;

	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		return;

	rcu_read_lock();
	for (i = 0; i < array->map.max_entries; i++) {
		ee = READ_ONCE(array->ptrs[i]);
		if (ee && ee->map_file == map_file)
			__fd_array_map_delete_elem(map, &i, true);
	}
	rcu_read_unlock();
}

static void perf_event_fd_array_map_free(struct bpf_map *map)
{
	if (map->map_flags & BPF_F_PRESERVE_ELEMS)
		bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

const struct bpf_map_ops perf_event_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = perf_event_fd_array_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = perf_event_fd_array_get_ptr,
	.map_fd_put_ptr = perf_event_fd_array_put_ptr,
	.map_release = perf_event_fd_array_release,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};

#ifdef CONFIG_CGROUPS
static void *cgroup_fd_array_get_ptr(struct bpf_map *map,
				     struct file *map_file /* not used */,
				     int fd)
{
	return cgroup_get_from_fd(fd);
}

static void cgroup_fd_array_put_ptr(struct bpf_map *map, void *ptr, bool need_defer)
{
	/* cgroup_put() frees the cgroup after an RCU grace period */
	cgroup_put(ptr);
}

static void cgroup_fd_array_free(struct bpf_map *map)
{
	bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

const struct bpf_map_ops cgroup_array_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_map_alloc,
	.map_free = cgroup_fd_array_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = fd_array_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = cgroup_fd_array_get_ptr,
	.map_fd_put_ptr = cgroup_fd_array_put_ptr,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};
#endif

static struct bpf_map *array_of_map_alloc(union bpf_attr *attr)
{
	struct bpf_map *map, *inner_map_meta;

	inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
	if (IS_ERR(inner_map_meta))
		return inner_map_meta;

	map = array_map_alloc(attr);
	if (IS_ERR(map)) {
		bpf_map_meta_free(inner_map_meta);
		return map;
	}

	map->inner_map_meta = inner_map_meta;

	return map;
}

static void array_of_map_free(struct bpf_map *map)
{
	/* map->inner_map_meta is only accessed by syscall which
	 * is protected by fdget/fdput.
	 */
	bpf_map_meta_free(map->inner_map_meta);
	bpf_fd_array_map_clear(map, false);
	fd_array_map_free(map);
}

static void *array_of_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_map **inner_map = array_map_lookup_elem(map, key);

	if (!inner_map)
		return NULL;

	return READ_ONCE(*inner_map);
}

static int array_of_map_gen_lookup(struct bpf_map *map,
				   struct bpf_insn *insn_buf)
{
	struct bpf_array *array = container_of(map, struct bpf_array, map);
	u32 elem_size = array->elem_size;
	struct bpf_insn *insn = insn_buf;
	const int ret = BPF_REG_0;
	const int map_ptr = BPF_REG_1;
	const int index = BPF_REG_2;

	*insn++ = BPF_ALU64_IMM(BPF_ADD, map_ptr, offsetof(struct bpf_array, value));
	*insn++ = BPF_LDX_MEM(BPF_W, ret, index, 0);
	if (!map->bypass_spec_v1) {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 6);
		*insn++ = BPF_ALU32_IMM(BPF_AND, ret, array->index_mask);
	} else {
		*insn++ = BPF_JMP_IMM(BPF_JGE, ret, map->max_entries, 5);
	}
	if (is_power_of_2(elem_size))
		*insn++ = BPF_ALU64_IMM(BPF_LSH, ret, ilog2(elem_size));
	else
		*insn++ = BPF_ALU64_IMM(BPF_MUL, ret, elem_size);
	*insn++ = BPF_ALU64_REG(BPF_ADD, ret, map_ptr);
	*insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
	*insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
	*insn++ = BPF_JMP_IMM(BPF_JA, 0, 0, 1);
	*insn++ = BPF_MOV64_IMM(ret, 0);

	return insn - insn_buf;
}

const struct bpf_map_ops array_of_maps_map_ops = {
	.map_alloc_check = fd_array_map_alloc_check,
	.map_alloc = array_of_map_alloc,
	.map_free = array_of_map_free,
	.map_get_next_key = array_map_get_next_key,
	.map_lookup_elem = array_of_map_lookup_elem,
	.map_delete_elem = fd_array_map_delete_elem,
	.map_fd_get_ptr = bpf_map_fd_get_ptr,
	.map_fd_put_ptr = bpf_map_fd_put_ptr,
	.map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
	.map_gen_lookup = array_of_map_gen_lookup,
	.map_lookup_batch = generic_map_lookup_batch,
	.map_update_batch = generic_map_update_batch,
	.map_check_btf = map_check_no_btf,
	.map_mem_usage = array_map_mem_usage,
	.map_btf_id = &array_map_btf_ids[0],
};