/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/bpf.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>
#include <linux/license.h>
#include <linux/filter.h>
#include <linux/version.h>
DEFINE_PER_CPU(int, bpf_prog_active);

int sysctl_unprivileged_bpf_disabled __read_mostly;

static LIST_HEAD(bpf_map_types);
static struct bpf_map *find_and_alloc_map(union bpf_attr *attr)
{
	struct bpf_map_type_list *tl;
	struct bpf_map *map;

	list_for_each_entry(tl, &bpf_map_types, list_node) {
		if (tl->type == attr->map_type) {
			map = tl->ops->map_alloc(attr);
			if (IS_ERR(map))
				return map;
			map->ops = tl->ops;
			map->map_type = attr->map_type;
			return map;
		}
	}
	return ERR_PTR(-EINVAL);
}
/* boot time registration of different map implementations */
void bpf_register_map_type(struct bpf_map_type_list *tl)
{
	list_add(&tl->list_node, &bpf_map_types);
}
int bpf_map_precharge_memlock(u32 pages)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit, cur;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	cur = atomic_long_read(&user->locked_vm);
	free_uid(user);
	if (cur + pages > memlock_limit)
		return -EPERM;
	return 0;
}
static int bpf_map_charge_memlock(struct bpf_map *map)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(map->pages, &user->locked_vm);

	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(map->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	map->user = user;
	return 0;
}

static void bpf_map_uncharge_memlock(struct bpf_map *map)
{
	struct user_struct *user = map->user;

	atomic_long_sub(map->pages, &user->locked_vm);
	free_uid(user);
}
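/* Example (illustrative only, not part of this file): the charge above is
 * expressed in pages, so a map whose backing storage needs roughly 1 MB
 * counts as 1 MB >> PAGE_SHIFT == 256 pages against RLIMIT_MEMLOCK on a
 * 4 KB-page system.  A user with a 64 KB memlock limit (16 pages) would
 * get -EPERM for such a map unless the limit is raised from user space:
 *
 *	struct rlimit r = { RLIM_INFINITY, RLIM_INFINITY };
 *	setrlimit(RLIMIT_MEMLOCK, &r);
 */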
/* called from workqueue */
static void bpf_map_free_deferred(struct work_struct *work)
{
	struct bpf_map *map = container_of(work, struct bpf_map, work);

	bpf_map_uncharge_memlock(map);
	/* implementation dependent freeing */
	map->ops->map_free(map);
}
static void bpf_map_put_uref(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->usercnt)) {
		if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY)
			bpf_fd_array_map_clear(map);
	}
}
/* decrement map refcnt and schedule it for freeing via workqueue
 * (underlying map implementation ops->map_free() might sleep)
 */
void bpf_map_put(struct bpf_map *map)
{
	if (atomic_dec_and_test(&map->refcnt)) {
		INIT_WORK(&map->work, bpf_map_free_deferred);
		schedule_work(&map->work);
	}
}
void bpf_map_put_with_uref(struct bpf_map *map)
{
	bpf_map_put_uref(map);
	bpf_map_put(map);
}
static int bpf_map_release(struct inode *inode, struct file *filp)
{
	struct bpf_map *map = filp->private_data;

	if (map->ops->map_release)
		map->ops->map_release(map, filp);

	bpf_map_put_with_uref(map);

	return 0;
}
#ifdef CONFIG_PROC_FS
static void bpf_map_show_fdinfo(struct seq_file *m, struct file *filp)
{
	const struct bpf_map *map = filp->private_data;

	seq_printf(m,
		   "map_type:\t%u\n"
		   "key_size:\t%u\n"
		   "value_size:\t%u\n"
		   "max_entries:\t%u\n"
		   "map_flags:\t%#x\n",
		   map->map_type,
		   map->key_size,
		   map->value_size,
		   map->max_entries,
		   map->map_flags);
}
#endif
static const struct file_operations bpf_map_fops = {
#ifdef CONFIG_PROC_FS
	.show_fdinfo	= bpf_map_show_fdinfo,
#endif
	.release	= bpf_map_release,
};
int bpf_map_new_fd(struct bpf_map *map)
{
	return anon_inode_getfd("bpf-map", &bpf_map_fops, map,
				O_RDWR | O_CLOEXEC);
}
/* helper macro to check that unused fields 'union bpf_attr' are zero */
#define CHECK_ATTR(CMD) \
	memchr_inv((void *) &attr->CMD##_LAST_FIELD + \
		   sizeof(attr->CMD##_LAST_FIELD), 0, \
		   sizeof(*attr) - \
		   offsetof(union bpf_attr, CMD##_LAST_FIELD) - \
		   sizeof(attr->CMD##_LAST_FIELD)) != NULL
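/* Example (illustrative only): with BPF_MAP_CREATE_LAST_FIELD defined as
 * map_flags below, CHECK_ATTR(BPF_MAP_CREATE) scans the bytes of *attr
 * that lie past attr->map_flags and evaluates to true if any of them is
 * non-zero, i.e. if user space set a field this command does not use:
 *
 *	memchr_inv((void *) &attr->map_flags + sizeof(attr->map_flags), 0,
 *		   sizeof(*attr) - offsetof(union bpf_attr, map_flags) -
 *		   sizeof(attr->map_flags)) != NULL
 */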
#define BPF_MAP_CREATE_LAST_FIELD map_flags
/* called via syscall */
static int map_create(union bpf_attr *attr)
{
	struct bpf_map *map;
	int err;

	err = CHECK_ATTR(BPF_MAP_CREATE);
	if (err)
		return -EINVAL;

	/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
	map = find_and_alloc_map(attr);
	if (IS_ERR(map))
		return PTR_ERR(map);

	atomic_set(&map->refcnt, 1);
	atomic_set(&map->usercnt, 1);

	err = bpf_map_charge_memlock(map);
	if (err)
		goto free_map_nouncharge;

	err = bpf_map_new_fd(map);
	if (err < 0)
		/* failed to allocate fd */
		goto free_map;

	return err;

free_map:
	bpf_map_uncharge_memlock(map);
free_map_nouncharge:
	map->ops->map_free(map);
	return err;
}
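/* Example (illustrative only, user-space code, not part of this file):
 * creating a hash map via the raw syscall.  Unused trailing fields of
 * union bpf_attr must stay zeroed or CHECK_ATTR() rejects the command:
 *
 *	union bpf_attr attr = {
 *		.map_type    = BPF_MAP_TYPE_HASH,
 *		.key_size    = sizeof(__u32),
 *		.value_size  = sizeof(__u64),
 *		.max_entries = 1024,
 *	};
 *	int map_fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */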
/* if error is returned, fd is released.
 * On success caller should complete fd access with matching fdput()
 */
struct bpf_map *__bpf_map_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_map_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
/* prog's and map's refcnt limit */
#define BPF_MAX_REFCNT 32768

struct bpf_map *bpf_map_inc(struct bpf_map *map, bool uref)
{
	if (atomic_inc_return(&map->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&map->refcnt);
		return ERR_PTR(-EBUSY);
	}
	if (uref)
		atomic_inc(&map->usercnt);
	return map;
}
struct bpf_map *bpf_map_get_with_uref(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_map *map;

	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return map;

	map = bpf_map_inc(map, true);
	fdput(f);

	return map;
}
/* helper to convert user pointers passed inside __aligned_u64 fields */
static void __user *u64_to_ptr(__u64 val)
{
	return (void __user *) (unsigned long) val;
}
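/* Example (illustrative only): union bpf_attr carries pointers as
 * __aligned_u64 so its layout is identical for 32-bit and 64-bit user
 * space.  The intermediate (unsigned long) cast truncates the value to
 * the native pointer width before it is used as a __user pointer, as in
 * map_lookup_elem() below:
 *
 *	void __user *ukey = u64_to_ptr(attr->key);
 *	copy_from_user(key, ukey, map->key_size);
 */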
int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
{
	return -ENOTSUPP;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_LOOKUP_ELEM_LAST_FIELD value

static int map_lookup_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value, *ptr;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_LOOKUP_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_copy(map, key, value);
	} else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
		err = bpf_stackmap_copy(map, key, value);
	} else {
		rcu_read_lock();
		ptr = map->ops->map_lookup_elem(map, key);
		if (ptr)
			memcpy(value, ptr, value_size);
		rcu_read_unlock();
		err = ptr ? 0 : -ENOENT;
	}

	if (err)
		goto free_value;

	err = -EFAULT;
	if (copy_to_user(uvalue, value, value_size) != 0)
		goto free_value;

	err = 0;

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
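/* Example (illustrative only, user-space code): for per-cpu maps the
 * lookup above fills one value slot per possible CPU, each slot rounded
 * up to 8 bytes, so the user buffer must be sized for all CPUs:
 *
 *	int ncpus = get_nprocs_conf();	// approximation of possible CPUs
 *	__u64 *values = calloc(ncpus, sizeof(__u64));
 *	attr.value = (__u64)(unsigned long)values;
 *
 * A short buffer would be overrun by the copy_to_user() of value_size
 * bytes computed above.
 */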
#define BPF_MAP_UPDATE_ELEM_LAST_FIELD flags

static int map_update_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *uvalue = u64_to_ptr(attr->value);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *value;
	u32 value_size;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_UPDATE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
	    map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
		value_size = round_up(map->value_size, 8) * num_possible_cpus();
	else
		value_size = map->value_size;

	err = -ENOMEM;
	value = kmalloc(value_size, GFP_USER | __GFP_NOWARN);
	if (!value)
		goto free_key;

	err = -EFAULT;
	if (copy_from_user(value, uvalue, value_size) != 0)
		goto free_value;

	/* must increment bpf_prog_active to avoid kprobe+bpf triggering from
	 * inside bpf map update or delete otherwise deadlocks are possible
	 */
	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH) {
		err = bpf_percpu_hash_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
		err = bpf_percpu_array_update(map, key, value, attr->flags);
	} else if (map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY ||
		   map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
		rcu_read_lock();
		err = bpf_fd_array_map_update_elem(map, f.file, key, value,
						   attr->flags);
		rcu_read_unlock();
	} else {
		rcu_read_lock();
		err = map->ops->map_update_elem(map, key, value, attr->flags);
		rcu_read_unlock();
	}
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_value:
	kfree(value);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
#define BPF_MAP_DELETE_ELEM_LAST_FIELD key

static int map_delete_elem(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	struct fd f;
	void *key;
	int err;

	if (CHECK_ATTR(BPF_MAP_DELETE_ELEM))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	preempt_disable();
	__this_cpu_inc(bpf_prog_active);
	rcu_read_lock();
	err = map->ops->map_delete_elem(map, key);
	rcu_read_unlock();
	__this_cpu_dec(bpf_prog_active);
	preempt_enable();

free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
/* last field in 'union bpf_attr' used by this command */
#define BPF_MAP_GET_NEXT_KEY_LAST_FIELD next_key

static int map_get_next_key(union bpf_attr *attr)
{
	void __user *ukey = u64_to_ptr(attr->key);
	void __user *unext_key = u64_to_ptr(attr->next_key);
	int ufd = attr->map_fd;
	struct bpf_map *map;
	void *key, *next_key;
	struct fd f;
	int err;

	if (CHECK_ATTR(BPF_MAP_GET_NEXT_KEY))
		return -EINVAL;

	f = fdget(ufd);
	map = __bpf_map_get(f);
	if (IS_ERR(map))
		return PTR_ERR(map);

	err = -ENOMEM;
	key = kmalloc(map->key_size, GFP_USER);
	if (!key)
		goto err_put;

	err = -EFAULT;
	if (copy_from_user(key, ukey, map->key_size) != 0)
		goto free_key;

	err = -ENOMEM;
	next_key = kmalloc(map->key_size, GFP_USER);
	if (!next_key)
		goto free_key;

	rcu_read_lock();
	err = map->ops->map_get_next_key(map, key, next_key);
	rcu_read_unlock();
	if (err)
		goto free_next_key;

	err = -EFAULT;
	if (copy_to_user(unext_key, next_key, map->key_size) != 0)
		goto free_next_key;

	err = 0;

free_next_key:
	kfree(next_key);
free_key:
	kfree(key);
err_put:
	fdput(f);
	return err;
}
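/* Example (illustrative only, user-space code): BPF_MAP_GET_NEXT_KEY is
 * the building block for iterating over all keys of a map; the walk ends
 * when the kernel reports that no key follows the current one:
 *
 *	__u32 key = 0, next_key;
 *	attr.map_fd = map_fd;
 *	attr.key = (__u64)(unsigned long)&key;
 *	attr.next_key = (__u64)(unsigned long)&next_key;
 *	while (syscall(__NR_bpf, BPF_MAP_GET_NEXT_KEY, &attr,
 *		       sizeof(attr)) == 0) {
 *		// process next_key ...
 *		key = next_key;
 *	}
 */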
static LIST_HEAD(bpf_prog_types);

static int find_prog_type(enum bpf_prog_type type, struct bpf_prog *prog)
{
	struct bpf_prog_type_list *tl;

	list_for_each_entry(tl, &bpf_prog_types, list_node) {
		if (tl->type == type) {
			prog->aux->ops = tl->ops;
			prog->type = type;
			return 0;
		}
	}

	return -EINVAL;
}

void bpf_register_prog_type(struct bpf_prog_type_list *tl)
{
	list_add(&tl->list_node, &bpf_prog_types);
}
/* fixup insn->imm field of bpf_call instructions:
 * if (insn->imm == BPF_FUNC_map_lookup_elem)
 *      insn->imm = bpf_map_lookup_elem - __bpf_call_base;
 * else if (insn->imm == BPF_FUNC_map_update_elem)
 *      insn->imm = bpf_map_update_elem - __bpf_call_base;
 * else ...
 *
 * this function is called after eBPF program passed verification
 */
static void fixup_bpf_calls(struct bpf_prog *prog)
{
	const struct bpf_func_proto *fn;
	int i;

	for (i = 0; i < prog->len; i++) {
		struct bpf_insn *insn = &prog->insnsi[i];

		if (insn->code == (BPF_JMP | BPF_CALL)) {
			/* we reach here when program has bpf_call instructions
			 * and it passed bpf_check(), means that
			 * ops->get_func_proto must have been supplied, check it
			 */
			BUG_ON(!prog->aux->ops->get_func_proto);

			if (insn->imm == BPF_FUNC_get_route_realm)
				prog->dst_needed = 1;
			if (insn->imm == BPF_FUNC_get_prandom_u32)
				bpf_user_rnd_init_once();
			if (insn->imm == BPF_FUNC_tail_call) {
				/* mark bpf_tail_call as different opcode
				 * to avoid conditional branch in
				 * interpreter for every normal call
				 * and to prevent accidental JITing by
				 * JIT compiler that doesn't support
				 * bpf_tail_call yet
				 */
				insn->imm = 0;
				insn->code |= BPF_X;
				continue;
			}

			fn = prog->aux->ops->get_func_proto(insn->imm);
			/* all functions that have prototype and verifier allowed
			 * programs to call them, must be real in-kernel functions
			 */
			BUG_ON(!fn->func);
			insn->imm = fn->func - __bpf_call_base;
		}
	}
}
/* drop refcnt on maps used by eBPF program and free auxiliary data */
static void free_used_maps(struct bpf_prog_aux *aux)
{
	int i;

	for (i = 0; i < aux->used_map_cnt; i++)
		bpf_map_put(aux->used_maps[i]);

	kfree(aux->used_maps);
}
static int bpf_prog_charge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = get_current_user();
	unsigned long memlock_limit;

	memlock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	atomic_long_add(prog->pages, &user->locked_vm);
	if (atomic_long_read(&user->locked_vm) > memlock_limit) {
		atomic_long_sub(prog->pages, &user->locked_vm);
		free_uid(user);
		return -EPERM;
	}
	prog->aux->user = user;
	return 0;
}

static void bpf_prog_uncharge_memlock(struct bpf_prog *prog)
{
	struct user_struct *user = prog->aux->user;

	atomic_long_sub(prog->pages, &user->locked_vm);
	free_uid(user);
}
static void __prog_put_common(struct rcu_head *rcu)
{
	struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);

	free_used_maps(aux);
	bpf_prog_uncharge_memlock(aux->prog);
	bpf_prog_free(aux->prog);
}
/* version of bpf_prog_put() that is called after a grace period */
void bpf_prog_put_rcu(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		call_rcu(&prog->aux->rcu, __prog_put_common);
}

void bpf_prog_put(struct bpf_prog *prog)
{
	if (atomic_dec_and_test(&prog->aux->refcnt))
		__prog_put_common(&prog->aux->rcu);
}
EXPORT_SYMBOL_GPL(bpf_prog_put);
static int bpf_prog_release(struct inode *inode, struct file *filp)
{
	struct bpf_prog *prog = filp->private_data;

	bpf_prog_put_rcu(prog);
	return 0;
}

static const struct file_operations bpf_prog_fops = {
	.release = bpf_prog_release,
};

int bpf_prog_new_fd(struct bpf_prog *prog)
{
	return anon_inode_getfd("bpf-prog", &bpf_prog_fops, prog,
				O_RDWR | O_CLOEXEC);
}
static struct bpf_prog *__bpf_prog_get(struct fd f)
{
	if (!f.file)
		return ERR_PTR(-EBADF);
	if (f.file->f_op != &bpf_prog_fops) {
		fdput(f);
		return ERR_PTR(-EINVAL);
	}

	return f.file->private_data;
}
struct bpf_prog *bpf_prog_inc(struct bpf_prog *prog)
{
	if (atomic_inc_return(&prog->aux->refcnt) > BPF_MAX_REFCNT) {
		atomic_dec(&prog->aux->refcnt);
		return ERR_PTR(-EBUSY);
	}
	return prog;
}
/* called by sockets/tracing/seccomp before attaching program to an event
 * pairs with bpf_prog_put()
 */
struct bpf_prog *bpf_prog_get(u32 ufd)
{
	struct fd f = fdget(ufd);
	struct bpf_prog *prog;

	prog = __bpf_prog_get(f);
	if (IS_ERR(prog))
		return prog;

	prog = bpf_prog_inc(prog);
	fdput(f);

	return prog;
}
EXPORT_SYMBOL_GPL(bpf_prog_get);
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD kern_version

static int bpf_prog_load(union bpf_attr *attr)
{
	enum bpf_prog_type type = attr->prog_type;
	struct bpf_prog *prog;
	int err;
	char license[128];
	bool is_gpl;

	if (CHECK_ATTR(BPF_PROG_LOAD))
		return -EINVAL;

	/* copy eBPF program license from user space */
	if (strncpy_from_user(license, u64_to_ptr(attr->license),
			      sizeof(license) - 1) < 0)
		return -EFAULT;
	license[sizeof(license) - 1] = 0;

	/* eBPF programs must be GPL compatible to use GPL-ed functions */
	is_gpl = license_is_gpl_compatible(license);

	if (attr->insn_cnt >= BPF_MAXINSNS)
		return -EINVAL;

	if (type == BPF_PROG_TYPE_KPROBE &&
	    attr->kern_version != LINUX_VERSION_CODE)
		return -EINVAL;

	if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	/* plain bpf_prog allocation */
	prog = bpf_prog_alloc(bpf_prog_size(attr->insn_cnt), GFP_USER);
	if (!prog)
		return -ENOMEM;

	err = bpf_prog_charge_memlock(prog);
	if (err)
		goto free_prog_nouncharge;

	prog->len = attr->insn_cnt;

	err = -EFAULT;
	if (copy_from_user(prog->insns, u64_to_ptr(attr->insns),
			   prog->len * sizeof(struct bpf_insn)) != 0)
		goto free_prog;

	prog->orig_prog = NULL;
	prog->jited = 0;

	atomic_set(&prog->aux->refcnt, 1);
	prog->gpl_compatible = is_gpl ? 1 : 0;

	/* find program type: socket_filter vs tracing_filter */
	err = find_prog_type(type, prog);
	if (err < 0)
		goto free_prog;

	/* run eBPF verifier */
	err = bpf_check(&prog, attr);
	if (err < 0)
		goto free_used_maps;

	/* fixup BPF_CALL->imm field */
	fixup_bpf_calls(prog);

	/* eBPF program is ready to be JITed */
	prog = bpf_prog_select_runtime(prog, &err);
	if (err < 0)
		goto free_used_maps;

	err = bpf_prog_new_fd(prog);
	if (err < 0)
		/* failed to allocate fd */
		goto free_used_maps;

	return err;

free_used_maps:
	free_used_maps(prog->aux);
free_prog:
	bpf_prog_uncharge_memlock(prog);
free_prog_nouncharge:
	bpf_prog_free(prog);
	return err;
}
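/* Example (illustrative only, user-space code): loading a trivial socket
 * filter program ("return 0", i.e. drop everything).  kern_version only
 * matters for BPF_PROG_TYPE_KPROBE, where it must match
 * LINUX_VERSION_CODE exactly:
 *
 *	struct bpf_insn insns[] = {
 *		{ BPF_ALU64 | BPF_MOV | BPF_K, 0, 0, 0, 0 },	// r0 = 0
 *		{ BPF_JMP | BPF_EXIT, 0, 0, 0, 0 },		// return r0
 *	};
 *	union bpf_attr attr = {
 *		.prog_type = BPF_PROG_TYPE_SOCKET_FILTER,
 *		.insn_cnt  = 2,
 *		.insns     = (__u64)(unsigned long)insns,
 *		.license   = (__u64)(unsigned long)"GPL",
 *	};
 *	int prog_fd = syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
 */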
#define BPF_OBJ_LAST_FIELD bpf_fd

static int bpf_obj_pin(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ))
		return -EINVAL;

	return bpf_obj_pin_user(attr->bpf_fd, u64_to_ptr(attr->pathname));
}

static int bpf_obj_get(const union bpf_attr *attr)
{
	if (CHECK_ATTR(BPF_OBJ) || attr->bpf_fd != 0)
		return -EINVAL;

	return bpf_obj_get_user(u64_to_ptr(attr->pathname));
}
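/* Example (illustrative only, user-space code, hypothetical path name):
 * pinning a map under the bpf filesystem (conventionally mounted at
 * /sys/fs/bpf) so it outlives the creating process, then re-opening it
 * later by path:
 *
 *	attr.pathname = (__u64)(unsigned long)"/sys/fs/bpf/my_map";
 *	attr.bpf_fd = map_fd;
 *	syscall(__NR_bpf, BPF_OBJ_PIN, &attr, sizeof(attr));
 *
 *	attr.bpf_fd = 0;
 *	int fd = syscall(__NR_bpf, BPF_OBJ_GET, &attr, sizeof(attr));
 */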
SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
{
	union bpf_attr attr = {};
	int err;

	if (!capable(CAP_SYS_ADMIN) && sysctl_unprivileged_bpf_disabled)
		return -EPERM;

	if (!access_ok(VERIFY_READ, uattr, 1))
		return -EFAULT;

	if (size > PAGE_SIZE)	/* silly large */
		return -E2BIG;

	/* If we're handed a bigger struct than we know of,
	 * ensure all the unknown bits are 0 - i.e. new
	 * user-space does not rely on any kernel feature
	 * extensions we don't know about yet.
	 */
	if (size > sizeof(attr)) {
		unsigned char __user *addr;
		unsigned char __user *end;
		unsigned char val;

		addr = (void __user *)uattr + sizeof(attr);
		end  = (void __user *)uattr + size;

		for (; addr < end; addr++) {
			err = get_user(val, addr);
			if (err)
				return err;
			if (val)
				return -E2BIG;
		}
		size = sizeof(attr);
	}

	/* copy attributes from user space, may be less than sizeof(bpf_attr) */
	if (copy_from_user(&attr, uattr, size) != 0)
		return -EFAULT;

	switch (cmd) {
	case BPF_MAP_CREATE:
		err = map_create(&attr);
		break;
	case BPF_MAP_LOOKUP_ELEM:
		err = map_lookup_elem(&attr);
		break;
	case BPF_MAP_UPDATE_ELEM:
		err = map_update_elem(&attr);
		break;
	case BPF_MAP_DELETE_ELEM:
		err = map_delete_elem(&attr);
		break;
	case BPF_MAP_GET_NEXT_KEY:
		err = map_get_next_key(&attr);
		break;
	case BPF_PROG_LOAD:
		err = bpf_prog_load(&attr);
		break;
	case BPF_OBJ_PIN:
		err = bpf_obj_pin(&attr);
		break;
	case BPF_OBJ_GET:
		err = bpf_obj_get(&attr);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}
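/* Example (illustrative only): the trailing-zero check above is what
 * keeps old kernels compatible with newer user space.  Suppose a later
 * union bpf_attr grows a new field and a binary passes the larger size:
 *
 *	syscall(__NR_bpf, BPF_MAP_CREATE, &bigger_attr, sizeof(bigger_attr));
 *
 * This succeeds on this kernel as long as the unknown trailing bytes are
 * all zero; a non-zero byte yields -E2BIG instead of silently ignoring a
 * feature request the kernel cannot honour.
 */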