Merge branch 'work.memdup_user' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
author	Linus Torvalds <[email protected]>	Wed, 5 Jul 2017 23:05:24 +0000 (16:05 -0700)
committer	Linus Torvalds <[email protected]>	Wed, 5 Jul 2017 23:05:24 +0000 (16:05 -0700)
Pull memdup_user() conversions from Al Viro:
 "A fairly self-contained series - hunting down open-coded memdup_user()
  and memdup_user_nul() instances"

* 'work.memdup_user' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs:
  bpf: don't open-code memdup_user()
  kimage_file_prepare_segments(): don't open-code memdup_user()
  ethtool: don't open-code memdup_user()
  do_ip_setsockopt(): don't open-code memdup_user()
  do_ipv6_setsockopt(): don't open-code memdup_user()
  irda: don't open-code memdup_user()
  xfrm_user_policy(): don't open-code memdup_user()
  ima_write_policy(): don't open-code memdup_user_nul()
  sel_write_validatetrans(): don't open-code memdup_user_nul()
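
All nine commits replace the same hand-rolled kmalloc()+copy_from_user()
sequence with a single helper call. For reference, the shape of the
conversion looks roughly like this (a minimal sketch with illustrative
names, not a quote from any one commit):

	/* Before: open-coded duplication of a user buffer. */
	kbuf = kmalloc(len, GFP_USER);
	if (!kbuf)
		return -ENOMEM;
	if (copy_from_user(kbuf, ubuf, len)) {
		kfree(kbuf);
		return -EFAULT;
	}

	/* After: memdup_user() allocates and copies in one step and
	 * reports failure as ERR_PTR(-ENOMEM) or ERR_PTR(-EFAULT).
	 */
	kbuf = memdup_user(ubuf, len);
	if (IS_ERR(kbuf))
		return PTR_ERR(kbuf);

memdup_user_nul() is the text-oriented variant: it allocates one extra
byte and NUL-terminates the copy.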

kernel/bpf/syscall.c
net/ipv6/ipv6_sockglue.c
security/selinux/selinuxfs.c

diff --combined kernel/bpf/syscall.c
index 18980472f5b06d1cda703e56efacd67a37640d3c,4b8b10bddfdeeb24d78fe18bfbfb8304b8c00d9d..045646da97cc5dc55a5063301a2e76e4178a8ea3
  #include <linux/filter.h>
  #include <linux/version.h>
  #include <linux/kernel.h>
 +#include <linux/idr.h>
 +
 +#define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PROG_ARRAY || \
 +                         (map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
 +                         (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
 +                         (map)->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS)
 +#define IS_FD_HASH(map) ((map)->map_type == BPF_MAP_TYPE_HASH_OF_MAPS)
 +#define IS_FD_MAP(map) (IS_FD_ARRAY(map) || IS_FD_HASH(map))
  
  DEFINE_PER_CPU(int, bpf_prog_active);
 +static DEFINE_IDR(prog_idr);
 +static DEFINE_SPINLOCK(prog_idr_lock);
 +static DEFINE_IDR(map_idr);
 +static DEFINE_SPINLOCK(map_idr_lock);
  
  int sysctl_unprivileged_bpf_disabled __read_mostly;
  
@@@ -126,37 -114,6 +126,37 @@@ static void bpf_map_uncharge_memlock(st
        free_uid(user);
  }
  
 +static int bpf_map_alloc_id(struct bpf_map *map)
 +{
 +      int id;
 +
 +      spin_lock_bh(&map_idr_lock);
 +      id = idr_alloc_cyclic(&map_idr, map, 1, INT_MAX, GFP_ATOMIC);
 +      if (id > 0)
 +              map->id = id;
 +      spin_unlock_bh(&map_idr_lock);
 +
 +      if (WARN_ON_ONCE(!id))
 +              return -ENOSPC;
 +
 +      return id > 0 ? 0 : id;
 +}
 +
 +static void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock)
 +{
 +      if (do_idr_lock)
 +              spin_lock_bh(&map_idr_lock);
 +      else
 +              __acquire(&map_idr_lock);
 +
 +      idr_remove(&map_idr, map->id);
 +
 +      if (do_idr_lock)
 +              spin_unlock_bh(&map_idr_lock);
 +      else
 +              __release(&map_idr_lock);
 +}
 +
  /* called from workqueue */
  static void bpf_map_free_deferred(struct work_struct *work)
  {
@@@ -178,21 -135,14 +178,21 @@@ static void bpf_map_put_uref(struct bpf
  /* decrement map refcnt and schedule it for freeing via workqueue
   * (underlying map implementation ops->map_free() might sleep)
   */
 -void bpf_map_put(struct bpf_map *map)
 +static void __bpf_map_put(struct bpf_map *map, bool do_idr_lock)
  {
        if (atomic_dec_and_test(&map->refcnt)) {
 +              /* bpf_map_free_id() must be called first */
 +              bpf_map_free_id(map, do_idr_lock);
                INIT_WORK(&map->work, bpf_map_free_deferred);
                schedule_work(&map->work);
        }
  }
  
 +void bpf_map_put(struct bpf_map *map)
 +{
 +      __bpf_map_put(map, true);
 +}
 +
  void bpf_map_put_with_uref(struct bpf_map *map)
  {
        bpf_map_put_uref(map);
@@@ -216,12 -166,10 +216,12 @@@ static void bpf_map_show_fdinfo(struct 
        const struct bpf_map *map = filp->private_data;
        const struct bpf_array *array;
        u32 owner_prog_type = 0;
 +      u32 owner_jited = 0;
  
        if (map->map_type == BPF_MAP_TYPE_PROG_ARRAY) {
                array = container_of(map, struct bpf_array, map);
                owner_prog_type = array->owner_prog_type;
 +              owner_jited = array->owner_jited;
        }
  
        seq_printf(m,
                   map->map_flags,
                   map->pages * 1ULL << PAGE_SHIFT);
  
 -      if (owner_prog_type)
 +      if (owner_prog_type) {
                seq_printf(m, "owner_prog_type:\t%u\n",
                           owner_prog_type);
 +              seq_printf(m, "owner_jited:\t%u\n",
 +                         owner_jited);
 +      }
  }
  #endif
  
@@@ -291,22 -236,11 +291,22 @@@ static int map_create(union bpf_attr *a
        if (err)
                goto free_map_nouncharge;
  
 -      err = bpf_map_new_fd(map);
 -      if (err < 0)
 -              /* failed to allocate fd */
 +      err = bpf_map_alloc_id(map);
 +      if (err)
                goto free_map;
  
 +      err = bpf_map_new_fd(map);
 +      if (err < 0) {
 +              /* failed to allocate fd.
 +               * bpf_map_put() is needed because the above
 +               * bpf_map_alloc_id() has published the map
 +               * to the userspace and the userspace may
 +               * have refcnt-ed it through BPF_MAP_GET_FD_BY_ID.
 +               */
 +              bpf_map_put(map);
 +              return err;
 +      }
 +
        trace_bpf_map_create(map, err);
        return err;
  
@@@ -361,28 -295,6 +361,28 @@@ struct bpf_map *bpf_map_get_with_uref(u
        return map;
  }
  
 +/* map_idr_lock should have been held */
 +static struct bpf_map *bpf_map_inc_not_zero(struct bpf_map *map,
 +                                          bool uref)
 +{
 +      int refold;
 +
 +      refold = __atomic_add_unless(&map->refcnt, 1, 0);
 +
 +      if (refold >= BPF_MAX_REFCNT) {
 +              __bpf_map_put(map, false);
 +              return ERR_PTR(-EBUSY);
 +      }
 +
 +      if (!refold)
 +              return ERR_PTR(-ENOENT);
 +
 +      if (uref)
 +              atomic_inc(&map->usercnt);
 +
 +      return map;
 +}
 +
  int __weak bpf_stackmap_copy(struct bpf_map *map, void *key, void *value)
  {
        return -ENOTSUPP;
@@@ -410,21 -322,16 +410,18 @@@ static int map_lookup_elem(union bpf_at
        if (IS_ERR(map))
                return PTR_ERR(map);
  
-       err = -ENOMEM;
-       key = kmalloc(map->key_size, GFP_USER);
-       if (!key)
+       key = memdup_user(ukey, map->key_size);
+       if (IS_ERR(key)) {
+               err = PTR_ERR(key);
                goto err_put;
-       err = -EFAULT;
-       if (copy_from_user(key, ukey, map->key_size) != 0)
-               goto free_key;
+       }
  
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY)
                value_size = round_up(map->value_size, 8) * num_possible_cpus();
 +      else if (IS_FD_MAP(map))
 +              value_size = sizeof(u32);
        else
                value_size = map->value_size;
  
                err = bpf_percpu_array_copy(map, key, value);
        } else if (map->map_type == BPF_MAP_TYPE_STACK_TRACE) {
                err = bpf_stackmap_copy(map, key, value);
 -      } else if (map->map_type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
 -                 map->map_type == BPF_MAP_TYPE_HASH_OF_MAPS) {
 -              err = -ENOTSUPP;
 +      } else if (IS_FD_ARRAY(map)) {
 +              err = bpf_fd_array_map_lookup_elem(map, key, value);
 +      } else if (IS_FD_HASH(map)) {
 +              err = bpf_fd_htab_map_lookup_elem(map, key, value);
        } else {
                rcu_read_lock();
                ptr = map->ops->map_lookup_elem(map, key);
@@@ -493,14 -399,11 +490,11 @@@ static int map_update_elem(union bpf_at
        if (IS_ERR(map))
                return PTR_ERR(map);
  
-       err = -ENOMEM;
-       key = kmalloc(map->key_size, GFP_USER);
-       if (!key)
+       key = memdup_user(ukey, map->key_size);
+       if (IS_ERR(key)) {
+               err = PTR_ERR(key);
                goto err_put;
-       err = -EFAULT;
-       if (copy_from_user(key, ukey, map->key_size) != 0)
-               goto free_key;
+       }
  
        if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
            map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH ||
@@@ -579,14 -482,11 +573,11 @@@ static int map_delete_elem(union bpf_at
        if (IS_ERR(map))
                return PTR_ERR(map);
  
-       err = -ENOMEM;
-       key = kmalloc(map->key_size, GFP_USER);
-       if (!key)
+       key = memdup_user(ukey, map->key_size);
+       if (IS_ERR(key)) {
+               err = PTR_ERR(key);
                goto err_put;
-       err = -EFAULT;
-       if (copy_from_user(key, ukey, map->key_size) != 0)
-               goto free_key;
+       }
  
        preempt_disable();
        __this_cpu_inc(bpf_prog_active);
  
        if (!err)
                trace_bpf_map_delete_elem(map, ufd, key);
- free_key:
        kfree(key);
  err_put:
        fdput(f);
@@@ -627,14 -526,11 +617,11 @@@ static int map_get_next_key(union bpf_a
                return PTR_ERR(map);
  
        if (ukey) {
-               err = -ENOMEM;
-               key = kmalloc(map->key_size, GFP_USER);
-               if (!key)
+               key = memdup_user(ukey, map->key_size);
+               if (IS_ERR(key)) {
+                       err = PTR_ERR(key);
                        goto err_put;
-               err = -EFAULT;
-               if (copy_from_user(key, ukey, map->key_size) != 0)
-                       goto free_key;
+               }
        } else {
                key = NULL;
        }
@@@ -741,42 -637,6 +728,42 @@@ static void bpf_prog_uncharge_memlock(s
        free_uid(user);
  }
  
 +static int bpf_prog_alloc_id(struct bpf_prog *prog)
 +{
 +      int id;
 +
 +      spin_lock_bh(&prog_idr_lock);
 +      id = idr_alloc_cyclic(&prog_idr, prog, 1, INT_MAX, GFP_ATOMIC);
 +      if (id > 0)
 +              prog->aux->id = id;
 +      spin_unlock_bh(&prog_idr_lock);
 +
 +      /* id is in [1, INT_MAX) */
 +      if (WARN_ON_ONCE(!id))
 +              return -ENOSPC;
 +
 +      return id > 0 ? 0 : id;
 +}
 +
 +static void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock)
 +{
 +      /* cBPF to eBPF migrations are currently not in the idr store. */
 +      if (!prog->aux->id)
 +              return;
 +
 +      if (do_idr_lock)
 +              spin_lock_bh(&prog_idr_lock);
 +      else
 +              __acquire(&prog_idr_lock);
 +
 +      idr_remove(&prog_idr, prog->aux->id);
 +
 +      if (do_idr_lock)
 +              spin_unlock_bh(&prog_idr_lock);
 +      else
 +              __release(&prog_idr_lock);
 +}
 +
  static void __bpf_prog_put_rcu(struct rcu_head *rcu)
  {
        struct bpf_prog_aux *aux = container_of(rcu, struct bpf_prog_aux, rcu);
        bpf_prog_free(aux->prog);
  }
  
 -void bpf_prog_put(struct bpf_prog *prog)
 +static void __bpf_prog_put(struct bpf_prog *prog, bool do_idr_lock)
  {
        if (atomic_dec_and_test(&prog->aux->refcnt)) {
                trace_bpf_prog_put_rcu(prog);
 +              /* bpf_prog_free_id() must be called first */
 +              bpf_prog_free_id(prog, do_idr_lock);
                bpf_prog_kallsyms_del(prog);
                call_rcu(&prog->aux->rcu, __bpf_prog_put_rcu);
        }
  }
 +
 +void bpf_prog_put(struct bpf_prog *prog)
 +{
 +      __bpf_prog_put(prog, true);
 +}
  EXPORT_SYMBOL_GPL(bpf_prog_put);
  
  static int bpf_prog_release(struct inode *inode, struct file *filp)
@@@ -882,24 -735,6 +869,24 @@@ struct bpf_prog *bpf_prog_inc(struct bp
  }
  EXPORT_SYMBOL_GPL(bpf_prog_inc);
  
 +/* prog_idr_lock should have been held */
 +static struct bpf_prog *bpf_prog_inc_not_zero(struct bpf_prog *prog)
 +{
 +      int refold;
 +
 +      refold = __atomic_add_unless(&prog->aux->refcnt, 1, 0);
 +
 +      if (refold >= BPF_MAX_REFCNT) {
 +              __bpf_prog_put(prog, false);
 +              return ERR_PTR(-EBUSY);
 +      }
 +
 +      if (!refold)
 +              return ERR_PTR(-ENOENT);
 +
 +      return prog;
 +}
 +
  static struct bpf_prog *__bpf_prog_get(u32 ufd, enum bpf_prog_type *type)
  {
        struct fd f = fdget(ufd);
@@@ -935,7 -770,7 +922,7 @@@ struct bpf_prog *bpf_prog_get_type(u32 
  EXPORT_SYMBOL_GPL(bpf_prog_get_type);
  
  /* last field in 'union bpf_attr' used by this command */
 -#define       BPF_PROG_LOAD_LAST_FIELD kern_version
 +#define       BPF_PROG_LOAD_LAST_FIELD prog_flags
  
  static int bpf_prog_load(union bpf_attr *attr)
  {
        if (CHECK_ATTR(BPF_PROG_LOAD))
                return -EINVAL;
  
 +      if (attr->prog_flags & ~BPF_F_STRICT_ALIGNMENT)
 +              return -EINVAL;
 +
        /* copy eBPF program license from user space */
        if (strncpy_from_user(license, u64_to_user_ptr(attr->license),
                              sizeof(license) - 1) < 0)
            attr->kern_version != LINUX_VERSION_CODE)
                return -EINVAL;
  
 -      if (type != BPF_PROG_TYPE_SOCKET_FILTER && !capable(CAP_SYS_ADMIN))
 +      if (type != BPF_PROG_TYPE_SOCKET_FILTER &&
 +          type != BPF_PROG_TYPE_CGROUP_SKB &&
 +          !capable(CAP_SYS_ADMIN))
                return -EPERM;
  
        /* plain bpf_prog allocation */
        if (err < 0)
                goto free_used_maps;
  
 -      err = bpf_prog_new_fd(prog);
 -      if (err < 0)
 -              /* failed to allocate fd */
 +      err = bpf_prog_alloc_id(prog);
 +      if (err)
                goto free_used_maps;
  
 +      err = bpf_prog_new_fd(prog);
 +      if (err < 0) {
 +              /* failed to allocate fd.
 +               * bpf_prog_put() is needed because the above
 +               * bpf_prog_alloc_id() has published the prog
 +               * to the userspace and the userspace may
 +               * have refcnt-ed it through BPF_PROG_GET_FD_BY_ID.
 +               */
 +              bpf_prog_put(prog);
 +              return err;
 +      }
 +
        bpf_prog_kallsyms_add(prog);
        trace_bpf_prog_load(prog, err);
        return err;
@@@ -1084,9 -903,6 +1071,9 @@@ static int bpf_prog_attach(const union 
        case BPF_CGROUP_INET_SOCK_CREATE:
                ptype = BPF_PROG_TYPE_CGROUP_SOCK;
                break;
 +      case BPF_CGROUP_SOCK_OPS:
 +              ptype = BPF_PROG_TYPE_SOCK_OPS;
 +              break;
        default:
                return -EINVAL;
        }
@@@ -1127,7 -943,6 +1114,7 @@@ static int bpf_prog_detach(const union 
        case BPF_CGROUP_INET_INGRESS:
        case BPF_CGROUP_INET_EGRESS:
        case BPF_CGROUP_INET_SOCK_CREATE:
 +      case BPF_CGROUP_SOCK_OPS:
                cgrp = cgroup_get_from_fd(attr->target_fd);
                if (IS_ERR(cgrp))
                        return PTR_ERR(cgrp);
  
        return ret;
  }
 +
  #endif /* CONFIG_CGROUP_BPF */
  
  #define BPF_PROG_TEST_RUN_LAST_FIELD test.duration
@@@ -1167,237 -981,6 +1154,237 @@@ static int bpf_prog_test_run(const unio
        return ret;
  }
  
 +#define BPF_OBJ_GET_NEXT_ID_LAST_FIELD next_id
 +
 +static int bpf_obj_get_next_id(const union bpf_attr *attr,
 +                             union bpf_attr __user *uattr,
 +                             struct idr *idr,
 +                             spinlock_t *lock)
 +{
 +      u32 next_id = attr->start_id;
 +      int err = 0;
 +
 +      if (CHECK_ATTR(BPF_OBJ_GET_NEXT_ID) || next_id >= INT_MAX)
 +              return -EINVAL;
 +
 +      if (!capable(CAP_SYS_ADMIN))
 +              return -EPERM;
 +
 +      next_id++;
 +      spin_lock_bh(lock);
 +      if (!idr_get_next(idr, &next_id))
 +              err = -ENOENT;
 +      spin_unlock_bh(lock);
 +
 +      if (!err)
 +              err = put_user(next_id, &uattr->next_id);
 +
 +      return err;
 +}
 +
 +#define BPF_PROG_GET_FD_BY_ID_LAST_FIELD prog_id
 +
 +static int bpf_prog_get_fd_by_id(const union bpf_attr *attr)
 +{
 +      struct bpf_prog *prog;
 +      u32 id = attr->prog_id;
 +      int fd;
 +
 +      if (CHECK_ATTR(BPF_PROG_GET_FD_BY_ID))
 +              return -EINVAL;
 +
 +      if (!capable(CAP_SYS_ADMIN))
 +              return -EPERM;
 +
 +      spin_lock_bh(&prog_idr_lock);
 +      prog = idr_find(&prog_idr, id);
 +      if (prog)
 +              prog = bpf_prog_inc_not_zero(prog);
 +      else
 +              prog = ERR_PTR(-ENOENT);
 +      spin_unlock_bh(&prog_idr_lock);
 +
 +      if (IS_ERR(prog))
 +              return PTR_ERR(prog);
 +
 +      fd = bpf_prog_new_fd(prog);
 +      if (fd < 0)
 +              bpf_prog_put(prog);
 +
 +      return fd;
 +}
 +
 +#define BPF_MAP_GET_FD_BY_ID_LAST_FIELD map_id
 +
 +static int bpf_map_get_fd_by_id(const union bpf_attr *attr)
 +{
 +      struct bpf_map *map;
 +      u32 id = attr->map_id;
 +      int fd;
 +
 +      if (CHECK_ATTR(BPF_MAP_GET_FD_BY_ID))
 +              return -EINVAL;
 +
 +      if (!capable(CAP_SYS_ADMIN))
 +              return -EPERM;
 +
 +      spin_lock_bh(&map_idr_lock);
 +      map = idr_find(&map_idr, id);
 +      if (map)
 +              map = bpf_map_inc_not_zero(map, true);
 +      else
 +              map = ERR_PTR(-ENOENT);
 +      spin_unlock_bh(&map_idr_lock);
 +
 +      if (IS_ERR(map))
 +              return PTR_ERR(map);
 +
 +      fd = bpf_map_new_fd(map);
 +      if (fd < 0)
 +              bpf_map_put(map);
 +
 +      return fd;
 +}
 +
 +static int check_uarg_tail_zero(void __user *uaddr,
 +                              size_t expected_size,
 +                              size_t actual_size)
 +{
 +      unsigned char __user *addr;
 +      unsigned char __user *end;
 +      unsigned char val;
 +      int err;
 +
 +      if (actual_size <= expected_size)
 +              return 0;
 +
 +      addr = uaddr + expected_size;
 +      end  = uaddr + actual_size;
 +
 +      for (; addr < end; addr++) {
 +              err = get_user(val, addr);
 +              if (err)
 +                      return err;
 +              if (val)
 +                      return -E2BIG;
 +      }
 +
 +      return 0;
 +}
 +
 +static int bpf_prog_get_info_by_fd(struct bpf_prog *prog,
 +                                 const union bpf_attr *attr,
 +                                 union bpf_attr __user *uattr)
 +{
 +      struct bpf_prog_info __user *uinfo = u64_to_user_ptr(attr->info.info);
 +      struct bpf_prog_info info = {};
 +      u32 info_len = attr->info.info_len;
 +      char __user *uinsns;
 +      u32 ulen;
 +      int err;
 +
 +      err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
 +      if (err)
 +              return err;
 +      info_len = min_t(u32, sizeof(info), info_len);
 +
 +      if (copy_from_user(&info, uinfo, info_len))
 +              return -EFAULT;
 +
 +      info.type = prog->type;
 +      info.id = prog->aux->id;
 +
 +      memcpy(info.tag, prog->tag, sizeof(prog->tag));
 +
 +      if (!capable(CAP_SYS_ADMIN)) {
 +              info.jited_prog_len = 0;
 +              info.xlated_prog_len = 0;
 +              goto done;
 +      }
 +
 +      ulen = info.jited_prog_len;
 +      info.jited_prog_len = prog->jited_len;
 +      if (info.jited_prog_len && ulen) {
 +              uinsns = u64_to_user_ptr(info.jited_prog_insns);
 +              ulen = min_t(u32, info.jited_prog_len, ulen);
 +              if (copy_to_user(uinsns, prog->bpf_func, ulen))
 +                      return -EFAULT;
 +      }
 +
 +      ulen = info.xlated_prog_len;
 +      info.xlated_prog_len = bpf_prog_size(prog->len);
 +      if (info.xlated_prog_len && ulen) {
 +              uinsns = u64_to_user_ptr(info.xlated_prog_insns);
 +              ulen = min_t(u32, info.xlated_prog_len, ulen);
 +              if (copy_to_user(uinsns, prog->insnsi, ulen))
 +                      return -EFAULT;
 +      }
 +
 +done:
 +      if (copy_to_user(uinfo, &info, info_len) ||
 +          put_user(info_len, &uattr->info.info_len))
 +              return -EFAULT;
 +
 +      return 0;
 +}
 +
 +static int bpf_map_get_info_by_fd(struct bpf_map *map,
 +                                const union bpf_attr *attr,
 +                                union bpf_attr __user *uattr)
 +{
 +      struct bpf_map_info __user *uinfo = u64_to_user_ptr(attr->info.info);
 +      struct bpf_map_info info = {};
 +      u32 info_len = attr->info.info_len;
 +      int err;
 +
 +      err = check_uarg_tail_zero(uinfo, sizeof(info), info_len);
 +      if (err)
 +              return err;
 +      info_len = min_t(u32, sizeof(info), info_len);
 +
 +      info.type = map->map_type;
 +      info.id = map->id;
 +      info.key_size = map->key_size;
 +      info.value_size = map->value_size;
 +      info.max_entries = map->max_entries;
 +      info.map_flags = map->map_flags;
 +
 +      if (copy_to_user(uinfo, &info, info_len) ||
 +          put_user(info_len, &uattr->info.info_len))
 +              return -EFAULT;
 +
 +      return 0;
 +}
 +
 +#define BPF_OBJ_GET_INFO_BY_FD_LAST_FIELD info.info
 +
 +static int bpf_obj_get_info_by_fd(const union bpf_attr *attr,
 +                                union bpf_attr __user *uattr)
 +{
 +      int ufd = attr->info.bpf_fd;
 +      struct fd f;
 +      int err;
 +
 +      if (CHECK_ATTR(BPF_OBJ_GET_INFO_BY_FD))
 +              return -EINVAL;
 +
 +      f = fdget(ufd);
 +      if (!f.file)
 +              return -EBADFD;
 +
 +      if (f.file->f_op == &bpf_prog_fops)
 +              err = bpf_prog_get_info_by_fd(f.file->private_data, attr,
 +                                            uattr);
 +      else if (f.file->f_op == &bpf_map_fops)
 +              err = bpf_map_get_info_by_fd(f.file->private_data, attr,
 +                                           uattr);
 +      else
 +              err = -EINVAL;
 +
 +      fdput(f);
 +      return err;
 +}
 +
  SYSCALL_DEFINE3(bpf, int, cmd, union bpf_attr __user *, uattr, unsigned int, size)
  {
        union bpf_attr attr = {};
         * user-space does not rely on any kernel feature
         * extensions we don't know about yet.
         */
 -      if (size > sizeof(attr)) {
 -              unsigned char __user *addr;
 -              unsigned char __user *end;
 -              unsigned char val;
 -
 -              addr = (void __user *)uattr + sizeof(attr);
 -              end  = (void __user *)uattr + size;
 -
 -              for (; addr < end; addr++) {
 -                      err = get_user(val, addr);
 -                      if (err)
 -                              return err;
 -                      if (val)
 -                              return -E2BIG;
 -              }
 -              size = sizeof(attr);
 -      }
 +      err = check_uarg_tail_zero(uattr, sizeof(attr), size);
 +      if (err)
 +              return err;
 +      size = min_t(u32, size, sizeof(attr));
  
        /* copy attributes from user space, may be less than sizeof(bpf_attr) */
        if (copy_from_user(&attr, uattr, size) != 0)
        case BPF_PROG_TEST_RUN:
                err = bpf_prog_test_run(&attr, uattr);
                break;
 +      case BPF_PROG_GET_NEXT_ID:
 +              err = bpf_obj_get_next_id(&attr, uattr,
 +                                        &prog_idr, &prog_idr_lock);
 +              break;
 +      case BPF_MAP_GET_NEXT_ID:
 +              err = bpf_obj_get_next_id(&attr, uattr,
 +                                        &map_idr, &map_idr_lock);
 +              break;
 +      case BPF_PROG_GET_FD_BY_ID:
 +              err = bpf_prog_get_fd_by_id(&attr);
 +              break;
 +      case BPF_MAP_GET_FD_BY_ID:
 +              err = bpf_map_get_fd_by_id(&attr);
 +              break;
 +      case BPF_OBJ_GET_INFO_BY_FD:
 +              err = bpf_obj_get_info_by_fd(&attr, uattr);
 +              break;
        default:
                err = -EINVAL;
                break;
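
The new commands give userspace a way to enumerate and inspect loaded BPF
objects by ID. A hedged usage sketch (not part of this merge): iterating
all loaded programs through BPF_PROG_GET_NEXT_ID and BPF_PROG_GET_FD_BY_ID,
assuming a uapi <linux/bpf.h> new enough to carry these commands and the
start_id/next_id/prog_id attr fields, and a caller with CAP_SYS_ADMIN:

	#include <linux/bpf.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static long sys_bpf(int cmd, union bpf_attr *attr)
	{
		return syscall(__NR_bpf, cmd, attr, sizeof(*attr));
	}

	int main(void)
	{
		union bpf_attr attr;
		uint32_t id = 0;
		int fd;

		for (;;) {
			/* CHECK_ATTR() rejects non-zero bytes past the last
			 * field a command knows about, so clear the whole
			 * union before every call.
			 */
			memset(&attr, 0, sizeof(attr));
			attr.start_id = id;
			if (sys_bpf(BPF_PROG_GET_NEXT_ID, &attr))
				break;		/* -ENOENT: no more programs */
			id = attr.next_id;

			memset(&attr, 0, sizeof(attr));
			attr.prog_id = id;
			fd = sys_bpf(BPF_PROG_GET_FD_BY_ID, &attr);
			if (fd < 0)
				continue;	/* prog unloaded in between */
			printf("prog id %u -> fd %d\n", id, fd);
			close(fd);
		}
		return 0;
	}
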
diff --combined net/ipv6/ipv6_sockglue.c
index 85404e7c3114bbbb5bcde23ae59bb1ccbb1c5092,f8298c9a3160f9eab6f260e7d8b8ddcce4af5fea..02d795fe3d7f2c5e6e922a25dbbe69c8139919b6
@@@ -505,7 -505,7 +505,7 @@@ sticky_done
                        break;
  
                memset(opt, 0, sizeof(*opt));
 -              atomic_set(&opt->refcnt, 1);
 +              refcount_set(&opt->refcnt, 1);
                opt->tot_len = sizeof(*opt) + optlen;
                retv = -EFAULT;
                if (copy_from_user(opt+1, optval, optlen))
@@@ -735,14 -735,9 +735,9 @@@ done
                        retv = -ENOBUFS;
                        break;
                }
-               gsf = kmalloc(optlen, GFP_KERNEL);
-               if (!gsf) {
-                       retv = -ENOBUFS;
-                       break;
-               }
-               retv = -EFAULT;
-               if (copy_from_user(gsf, optval, optlen)) {
-                       kfree(gsf);
+               gsf = memdup_user(optval, optlen);
+               if (IS_ERR(gsf)) {
+                       retv = PTR_ERR(gsf);
                        break;
                }
                /* numsrc >= (4G-140)/128 overflow in 32 bits */
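
The selinuxfs conversion below uses the string-oriented helper:
memdup_user_nul() behaves like memdup_user() but allocates count + 1 bytes
and NUL-terminates the copy, so a write() handler receives a proper C
string. Roughly (a sketch mirroring the hunk below):

	req = memdup_user_nul(buf, count);	/* count bytes + '\0' */
	if (IS_ERR(req))
		return PTR_ERR(req);
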
diff --combined security/selinux/selinuxfs.c
index 9010a3632d6f79bec2f7a59b78b7d4964df0d401,0940892de84de7bc94cd85a0afae6f246d54a015..00eed842c491c3585b68e7b23e2ee63885f02196
  #include "objsec.h"
  #include "conditional.h"
  
 -/* Policy capability filenames */
 -static char *policycap_names[] = {
 -      "network_peer_controls",
 -      "open_perms",
 -      "extended_socket_class",
 -      "always_check_network",
 -      "cgroup_seclabel"
 -};
 -
  unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
  
  static int __init checkreqprot_setup(char *str)
@@@ -154,8 -163,6 +154,8 @@@ static ssize_t sel_write_enforce(struc
                        avc_ss_reset(0);
                selnl_notify_setenforce(selinux_enforcing);
                selinux_status_update_setenforce(selinux_enforcing);
 +              if (!selinux_enforcing)
 +                      call_lsm_notifier(LSM_POLICY_CHANGE, NULL);
        }
        length = count;
  out:
@@@ -649,14 -656,12 +649,12 @@@ static ssize_t sel_write_validatetrans(
        if (*ppos != 0)
                goto out;
  
-       rc = -ENOMEM;
-       req = kzalloc(count + 1, GFP_KERNEL);
-       if (!req)
-               goto out;
-       rc = -EFAULT;
-       if (copy_from_user(req, buf, count))
+       req = memdup_user_nul(buf, count);
+       if (IS_ERR(req)) {
+               rc = PTR_ERR(req);
+               req = NULL;
                goto out;
+       }
  
        rc = -ENOMEM;
        oldcon = kzalloc(count + 1, GFP_KERNEL);
@@@ -1743,9 -1748,9 +1741,9 @@@ static int sel_make_policycap(void
        sel_remove_entries(policycap_dir);
  
        for (iter = 0; iter <= POLICYDB_CAPABILITY_MAX; iter++) {
 -              if (iter < ARRAY_SIZE(policycap_names))
 +              if (iter < ARRAY_SIZE(selinux_policycap_names))
                        dentry = d_alloc_name(policycap_dir,
 -                                            policycap_names[iter]);
 +                                            selinux_policycap_names[iter]);
                else
                        dentry = d_alloc_name(policycap_dir, "unknown");
  