/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/bpf_trace.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/vmalloc.h>
#include <linux/mmzone.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
#include <linux/kernel.h>

DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static const struct bpf_map_ops * const bpf_map_types[] = {
#define BPF_PROG_TYPE(_id, _ops)
#define BPF_MAP_TYPE(_id, _ops) \
	[_id] = &_ops,
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map *map;

	if (attr->map_type >= ARRAY_SIZE(bpf_map_types) ||
	    !bpf_map_types[attr->map_type])
		return ERR_PTR(-EINVAL);

	map = bpf_map_types[attr->map_type]->map_alloc(attr);
	if (IS_ERR(map))
		return map;
	map->ops = bpf_map_types[attr->map_type];
	map->map_type = attr->map_type;
	return map;
}

void *bpf_map_area_alloc(size_t size)
{
	/* We definitely need __GFP_NORETRY, so OOM killer doesn't
	 * trigger under memory pressure as we really just want to
	 * fail instead.
	 */
	const gfp_t flags = __GFP_NOWARN | __GFP_NORETRY | __GFP_ZERO;
	void *area;

	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER)) {
		area = kmalloc(size, GFP_USER | flags);
		if (area != NULL)
			return area;
	}

	return __vmalloc(size, GFP_KERNEL | flags, PAGE_KERNEL);
}

void bpf_map_area_free(void *area)
{
	kvfree(area);
}

int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}

static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}

/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}

static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}

/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}

void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}

static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;
	const struct bpf_array *array;
	u32 owner_prog_type = 0;

	if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		array = container_of(map, struct bpf_array, map);
		owner_prog_type = array->owner_prog_type;
	}

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n"
		   "memlock:\t%llu\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags,
		   map->pages * 1ULL << PAGE_SHIFT);

	if (owner_prog_type)
		seq_printf(m, "owner_prog_type:\t%u\n",
			   owner_prog_type);
}
#endif

static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};

int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}

/* helper macro to check that unused fields of 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL

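/* Illustrative expansion (an editorial sketch, not part of the original
 * source): for BPF_MAP_CREATE, whose last used field is inner_map_fd,
 * CHECK_ATTR(BPF_MAP_CREATE) roughly becomes:
 *
 *	memchr_inv((void *) &attr->inner_map_fd +
 *		   sizeof(attr->inner_map_fd), 0,
 *		   sizeof(*attr) -
 *		   offsetof(union bpf_attr, inner_map_fd) -
 *		   sizeof(attr->inner_map_fd)) != NULL
 *
 * i.e. it evaluates to true, and the command is rejected with -EINVAL,
 * if any byte beyond the last field this command knows about is non-zero.
 */
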
#define BPF_MAP_CREATE_LAST_FIELD inner_map_fd
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	trace_bpf_map_create(map, err);
	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}

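/* Illustrative user-space counterpart (an editorial sketch; create_array_map
 * is a hypothetical helper, not part of the kernel sources): creating a small
 * array map through the raw bpf(2) syscall.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int create_array_map(void)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));	// unused tail bytes must be zero
 *		attr.map_type    = BPF_MAP_TYPE_ARRAY;
 *		attr.key_size    = sizeof(__u32);
 *		attr.value_size  = sizeof(__u64);
 *		attr.max_entries = 16;
 *
 *		return syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 *	}
 *
 * A negative return is an errno-style failure; otherwise the value is the new
 * map file descriptor produced by bpf_map_new_fd() above. Non-zero bytes past
 * the last known field would be rejected by CHECK_ATTR() with -EINVAL.
 */
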
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}

struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}

int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		   map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		err = -ENOTSUPP;
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	trace_bpf_map_lookup_elem(map, ufd, key, value);
	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

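/* Illustrative user-space counterpart (an editorial sketch; lookup_u64 is a
 * hypothetical helper, not a kernel or libbpf symbol): reading one value out
 * of a map whose values are __u64.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int lookup_u64(int map_fd, __u32 key, __u64 *value)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.map_fd = map_fd;
 *		attr.key    = (__u64)(unsigned long)&key;
 *		attr.value  = (__u64)(unsigned long)value;
 *
 *		return syscall(__NR_bpf, BPF_MAP_LOOKUP_ELEM, &attr, sizeof(attr));
 *	}
 *
 * Note that for the per-CPU map types handled above, user space must instead
 * supply room for num_possible_cpus() values, each rounded up to 8 bytes.
 */
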
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *uvalue = u64_to_user_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_CGROUP_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else if (map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
		rcu_read_lock();
		err = bpf_fd_htab_map_update_elem(map, f.file, key, value,
						  attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_update_elem(map, ufd, key, value);
free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

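/* For reference, the other side of the bpf_prog_active handshake: the tracing
 * entry point bails out when this per-CPU counter shows a BPF program (or a
 * map operation like the one above) is already active on the CPU. A rough
 * sketch of that check, based on trace_call_bpf() in kernel/trace/bpf_trace.c
 * of the same era (editorial illustration, not part of this file):
 *
 *	preempt_disable();
 *	if (unlikely(__this_cpu_inc_return(bpf_prog_active) != 1)) {
 *		// don't recurse into another BPF program on this CPU
 *		ret = 0;
 *		goto out;
 *	}
 *
 * This is why map_update_elem() and map_delete_elem() bump the counter with
 * preemption disabled around the actual map operation.
 */
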
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	key = memdup_user(ukey, map->key_size);
	if (IS_ERR(key)) {
		err = PTR_ERR(key);
		goto err_put;
	}

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

	if (!err)
		trace_bpf_map_delete_elem(map, ufd, key);
	kfree(key);
err_put:
	fdput(f);
	return err;
}

/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_user_ptr(attr->key);
	void __user *unext_key = u64_to_user_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (ukey) {
		key = memdup_user(ukey, map->key_size);
		if (IS_ERR(key)) {
			err = PTR_ERR(key);
			goto err_put;
		}
	} else {
		key = NULL;
	}

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	trace_bpf_map_next_key(map, ufd, key, next_key);
	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}

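/* Illustrative user-space counterpart (an editorial sketch; for_each_key is a
 * hypothetical helper): walking all keys of a map with __u32 keys by chaining
 * BPF_MAP_GET_NEXT_KEY calls. Passing no key (attr.key == 0) retrieves the
 * first key, matching the ukey check above.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	void for_each_key(int map_fd)
 *	{
 *		union bpf_attr attr;
 *		__u32 key, next_key;
 *		int has_key = 0;
 *
 *		for (;;) {
 *			memset(&attr, 0, sizeof(attr));
 *			attr.map_fd   = map_fd;
 *			attr.key      = has_key ? (__u64)(unsigned long)&key : 0;
 *			attr.next_key = (__u64)(unsigned long)&next_key;
 *			if (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *				    sizeof(attr)))
 *				break;	// -ENOENT: no more keys
 *			key = next_key;
 *			has_key = 1;
 *			// use key here, e.g. via BPF_MAP_LOOKUP_ELEM
 *		}
 *	}
 */
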
static const struct bpf_verifier_ops * const bpf_prog_types[] = {
#define BPF_PROG_TYPE(_id, _ops) \
	[_id] = &_ops,
#define BPF_MAP_TYPE(_id, _ops)
#include <linux/bpf_types.h>
#undef BPF_PROG_TYPE
#undef BPF_MAP_TYPE
};

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	if (type >= ARRAY_SIZE(bpf_prog_types) || !bpf_prog_types[type])
		return -EINVAL;

	prog->aux->ops = bpf_prog_types[type];
	prog->type = type;
	return 0;
}

/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}

int __bpf_prog_charge(struct user_struct *user, u32 pages)
{
	unsigned long memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	unsigned long user_bufs;

	if (user) {
		user_bufs = atomic_long_add_return(pages, &user->locked_vm);
		if (user_bufs > memlock_limit) {
			atomic_long_sub(pages, &user->locked_vm);
			return -EPERM;
		}
	}

	return 0;
}

void __bpf_prog_uncharge(struct user_struct *user, u32 pages)
{
	if (user)
		atomic_long_sub(pages, &user->locked_vm);
}

static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	int ret;

	ret = __bpf_prog_charge(user, prog->pages);
	if (ret) {
		free_uid(user);
		return ret;
	}

	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	__bpf_prog_uncharge(user, prog->pages);
	free_uid(user);
}

static void __bpf_prog_put_rcu(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt)) {
		trace_bpf_prog_put_rcu(prog);
		bpf_prog_kallsyms_del(prog);
		call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
	}
}
EXPORT_SYMBOL_GPL(bpf_prog_put);

static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put(prog);
	return 0;
}

#ifdef CONFIG_PROC_FS
static void bpf_prog_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_prog *prog = filp->private_data;
	char prog_tag[sizeof(prog->tag) * 2 + 1] = { };

	bin2hex(prog_tag, prog->tag, sizeof(prog->tag));
	seq_printf(m,
		   "prog_type:\t%u\n"
		   "prog_jited:\t%u\n"
		   "prog_tag:\t%s\n"
		   "memlock:\t%llu\n",
		   prog->type,
		   prog->jited,
		   prog_tag,
		   prog->pages * 1ULL << PAGE_SHIFT);
}
#endif

static const struct file_operations bpf_prog_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_prog_show_fdinfo,
#endif
	.release	= bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}

static struct bpf_prog *____bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}

struct bpf_prog *bpf_prog_add(struct bpf_prog *prog, int i)
{
	if (atomic_add_return(i, &prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_sub(i, &prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_add);

void bpf_prog_sub(struct bpf_prog *prog, int i)
{
	/* Only to be used for undoing previous bpf_prog_add() in some
	 * error path. We still know that another entity in our call
	 * path holds a reference to the program, thus atomic_sub() can
	 * be safely used in such cases!
	 */
	WARN_ON(atomic_sub_return(i, &prog->aux->refcnt) == 0);
}
EXPORT_SYMBOL_GPL(bpf_prog_sub);

struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	return bpf_prog_add(prog, 1);
}
EXPORT_SYMBOL_GPL(bpf_prog_inc);

static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = ____bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;
	if (type && prog->type != *type) {
		prog = ERR_PTR(-EINVAL);
		goto out;
	}

	prog = bpf_prog_inc(prog);
out:
	fdput(f);
	return prog;
}

struct bpf_prog *bpf_prog_get(u32 ufd)
{
	return __bpf_prog_get(ufd, NULL);
}

struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type)
{
	struct bpf_prog *prog = __bpf_prog_get(ufd, &type);

	if (!IS_ERR(prog))
		trace_bpf_prog_get_type(prog);
	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get_type);

/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt == 0 || attr->insn_cnt > BPF_MAXINSNS)
		return -E2BIG;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_user_ptr(attr->insns),
			   bpf_prog_insn_size(prog)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	bpf_prog_kallsyms_add(prog);
	trace_bpf_prog_load(prog, err);
	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}

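/* Illustrative user-space counterpart (an editorial sketch; load_trivial_prog
 * is a hypothetical helper): loading the smallest possible socket filter,
 * "r0 = 0; exit", which the type check above allows without CAP_SYS_ADMIN
 * (subject to the unprivileged_bpf_disabled sysctl).
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int load_trivial_prog(void)
 *	{
 *		struct bpf_insn insns[] = {
 *			{ .code = BPF_ALU64 | BPF_MOV | BPF_K,
 *			  .dst_reg = BPF_REG_0, .imm = 0 },	// r0 = 0
 *			{ .code = BPF_JMP | BPF_EXIT },		// return r0
 *		};
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
 *		attr.insns     = (__u64)(unsigned long)insns;
 *		attr.insn_cnt  = 2;
 *		attr.license   = (__u64)(unsigned long)"GPL";
 *
 *		return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 *	}
 *
 * On success the return value is the program fd created by bpf_prog_new_fd().
 */
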
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_user_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_user_ptr(attr->pathname));
}

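/* Illustrative user-space counterpart (an editorial sketch; pin_obj is a
 * hypothetical helper): pinning a map or program fd under the bpf filesystem,
 * typically mounted at /sys/fs/bpf, so that it outlives the creating process.
 *
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int pin_obj(int fd, const char *path)
 *	{
 *		union bpf_attr attr;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.bpf_fd   = fd;
 *		attr.pathname = (__u64)(unsigned long)path;
 *
 *		return syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *	}
 *
 * The object can later be retrieved with BPF_OBJ_GET, which takes only the
 * pathname and must leave bpf_fd zero, as enforced above.
 */
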
#ifdef CONFIG_CGROUP_BPF

#define BPF_PROG_ATTACH_LAST_FIELD attach_flags

static int bpf_prog_attach(const union bpf_attr *attr)
{
	enum bpf_prog_type ptype;
	struct bpf_prog *prog;
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_ATTACH))
		return -EINVAL;

	if (attr->attach_flags & ~BPF_F_ALLOW_OVERRIDE)
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
		ptype = BPF_PROG_TYPE_CGROUP_SKB;
		break;
	case BPF_CGROUP_INET_SOCK_CREATE:
		ptype = BPF_PROG_TYPE_CGROUP_SOCK;
		break;
	default:
		return -EINVAL;
	}

	prog = bpf_prog_get_type(attr->attach_bpf_fd, ptype);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	cgrp = cgroup_get_from_fd(attr->target_fd);
	if (IS_ERR(cgrp)) {
		bpf_prog_put(prog);
		return PTR_ERR(cgrp);
	}

	ret = cgroup_bpf_update(cgrp, prog, attr->attach_type,
				attr->attach_flags & BPF_F_ALLOW_OVERRIDE);
	if (ret)
		bpf_prog_put(prog);
	cgroup_put(cgrp);

	return ret;
}

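/* Illustrative user-space counterpart (an editorial sketch; attach_to_cgroup
 * is a hypothetical helper): attaching an already-loaded CGROUP_SKB program
 * to a cgroup v2 directory for ingress filtering.
 *
 *	#include <fcntl.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *	#include <sys/syscall.h>
 *	#include <linux/bpf.h>
 *
 *	int attach_to_cgroup(int prog_fd, const char *cgroup_path)
 *	{
 *		union bpf_attr attr;
 *		int cgroup_fd, ret;
 *
 *		cgroup_fd = open(cgroup_path, O_RDONLY | O_DIRECTORY);
 *		if (cgroup_fd < 0)
 *			return -1;
 *
 *		memset(&attr, 0, sizeof(attr));
 *		attr.target_fd     = cgroup_fd;
 *		attr.attach_bpf_fd = prog_fd;
 *		attr.attach_type   = BPF_CGROUP_INET_INGRESS;
 *		attr.attach_flags  = BPF_F_ALLOW_OVERRIDE;
 *
 *		ret = syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
 *		close(cgroup_fd);
 *		return ret;
 *	}
 *
 * CAP_NET_ADMIN is required, and attach_flags other than BPF_F_ALLOW_OVERRIDE
 * are rejected, matching the checks at the top of bpf_prog_attach().
 */
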
#define BPF_PROG_DETACH_LAST_FIELD attach_type

static int bpf_prog_detach(const union bpf_attr *attr)
{
	struct cgroup *cgrp;
	int ret;

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (CHECK_ATTR(BPF_PROG_DETACH))
		return -EINVAL;

	switch (attr->attach_type) {
	case BPF_CGROUP_INET_INGRESS:
	case BPF_CGROUP_INET_EGRESS:
	case BPF_CGROUP_INET_SOCK_CREATE:
		cgrp = cgroup_get_from_fd(attr->target_fd);
		if (IS_ERR(cgrp))
			return PTR_ERR(cgrp);

		ret = cgroup_bpf_update(cgrp, NULL, attr->attach_type, false);
		cgroup_put(cgrp);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
#endif /* CONFIG_CGROUP_BPF */

#define BPF_PROG_TEST_RUN_LAST_FIELD test.duration

static int bpf_prog_test_run(const union bpf_attr *attr,
			     union bpf_attr __user *uattr)
{
	struct bpf_prog *prog;
	int ret = -ENOTSUPP;

	if (CHECK_ATTR(BPF_PROG_TEST_RUN))
		return -EINVAL;

	prog = bpf_prog_get(attr->test.prog_fd);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->aux->ops->test_run)
		ret = prog->aux->ops->test_run(prog, attr, uattr);

	bpf_prog_put(prog);
	return ret;
}

SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
#ifdef CONFIG_CGROUP_BPF
	case BPF_PROG_ATTACH:
		err = bpf_prog_attach(&attr);
		break;
	case BPF_PROG_DETACH:
		err = bpf_prog_detach(&attr);
		break;
#endif
	case BPF_PROG_TEST_RUN:
		err = bpf_prog_test_run(&attr, uattr);