// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2018 Facebook
 */
#include <linux/err.h>
#include <linux/sock_diag.h>
#include <net/sock_reuseport.h>
#include <linux/btf_ids.h>

struct reuseport_array {
        struct bpf_map map;
        struct sock __rcu *ptrs[];
};
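/* Layout note: "map" must remain the first member so that the generic
 * struct bpf_map pointer handed to the ops below can be cast straight
 * to a struct reuseport_array (see reuseport_array()). "ptrs" is a
 * flexible array holding one RCU-protected socket pointer per map slot.
 */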
static struct reuseport_array *reuseport_array(struct bpf_map *map)
{
        return (struct reuseport_array *)map;
}
/* The caller must hold the reuseport_lock */
void bpf_sk_reuseport_detach(struct sock *sk)
{
        struct sock __rcu **socks;

        write_lock_bh(&sk->sk_callback_lock);
        socks = __locked_read_sk_user_data_with_flags(sk, SK_USER_DATA_BPF);
        if (socks) {
                WRITE_ONCE(sk->sk_user_data, NULL);
                /* Do not move this NULL assignment outside of
                 * sk->sk_callback_lock because there is
                 * a race with reuseport_array_free()
                 * which does not hold the reuseport_lock.
                 */
                RCU_INIT_POINTER(*socks, NULL);
        }
        write_unlock_bh(&sk->sk_callback_lock);
}
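/* For context (behavior of the caller, enforced outside this file):
 * the detach above is reached from the reuseport teardown path, e.g.
 * reuseport_detach_sock(), when a socket stored in the map is close()d
 * or disconnected. __locked_read_sk_user_data_with_flags() returns the
 * slot pointer only when sk_user_data carries the SK_USER_DATA_BPF
 * tag, i.e. only when it was installed by this map.
 */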
static int reuseport_array_alloc_check(union bpf_attr *attr)
{
        /* The value is a socket fd on update; with a u64 value_size,
         * syscall lookup returns the socket cookie instead.
         */
        if (attr->value_size != sizeof(u32) &&
            attr->value_size != sizeof(u64))
                return -EINVAL;

        return array_map_alloc_check(attr);
}
static void *reuseport_array_lookup_elem(struct bpf_map *map, void *key)
{
        struct reuseport_array *array = reuseport_array(map);
        u32 index = *(u32 *)key;

        if (unlikely(index >= array->map.max_entries))
                return NULL;

        return rcu_dereference(array->ptrs[index]);
}
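/* The raw socket pointer returned above is only valid under
 * rcu_read_lock(). It is what the BPF side consumes when a
 * sk_reuseport program picks a socket out of this map; the
 * syscall-side lookup (bpf_fd_reuseport_array_lookup_elem() below)
 * instead converts the pointer into a stable socket cookie rather
 * than exposing a kernel pointer to userspace.
 */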
/* Called from syscall only */
static int reuseport_array_delete_elem(struct bpf_map *map, void *key)
{
        struct reuseport_array *array = reuseport_array(map);
        u32 index = *(u32 *)key;
        struct sock *sk;
        int err;

        if (index >= map->max_entries)
                return -E2BIG;

        if (!rcu_access_pointer(array->ptrs[index]))
                return -ENOENT;

        spin_lock_bh(&reuseport_lock);

        sk = rcu_dereference_protected(array->ptrs[index],
                                       lockdep_is_held(&reuseport_lock));
        if (sk) {
                write_lock_bh(&sk->sk_callback_lock);
                WRITE_ONCE(sk->sk_user_data, NULL);
                RCU_INIT_POINTER(array->ptrs[index], NULL);
                write_unlock_bh(&sk->sk_callback_lock);
                err = 0;
        } else {
                err = -ENOENT;
        }

        spin_unlock_bh(&reuseport_lock);

        return err;
}
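/* Note that deleting an element only clears the map slot and the
 * socket's sk_user_data back-pointer. The socket itself is untouched
 * and its fd remains open in userspace.
 */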
static void reuseport_array_free(struct bpf_map *map)
{
        struct reuseport_array *array = reuseport_array(map);
        struct sock *sk;
        u32 i;

        /* ops->map_*_elem() will not be able to access this
         * array now. Hence, this function only races with
         * bpf_sk_reuseport_detach() which was triggered by
         * close() or disconnect().
         *
         * This function and bpf_sk_reuseport_detach() are
         * both removing sk from "array". Who removes it
         * first does not matter.
         *
         * The only concern here is that bpf_sk_reuseport_detach()
         * may access "array" which is being freed here.
         * bpf_sk_reuseport_detach() accesses this "array"
         * through sk->sk_user_data _and_ with sk->sk_callback_lock
         * held, which is enough because this "array" is not freed
         * until every sk->sk_user_data has stopped referencing it.
         *
         * Hence, due to the above, taking "reuseport_lock" is not
         * needed here.
         */

        /* Since reuseport_lock is not taken, sk is accessed under
         * rcu_read_lock().
         */
        rcu_read_lock();
        for (i = 0; i < map->max_entries; i++) {
                sk = rcu_dereference(array->ptrs[i]);
                if (sk) {
                        write_lock_bh(&sk->sk_callback_lock);
                        /* No need for WRITE_ONCE(). At this point,
                         * no one is reading it without taking the
                         * sk->sk_callback_lock.
                         */
                        sk->sk_user_data = NULL;
                        write_unlock_bh(&sk->sk_callback_lock);
                        RCU_INIT_POINTER(array->ptrs[i], NULL);
                }
        }
        rcu_read_unlock();

        /* Once we reach here, no sk->sk_user_data is referencing
         * this "array" anymore; it can be freed.
         */
        bpf_map_area_free(array);
}
static struct bpf_map *reuseport_array_alloc(union bpf_attr *attr)
{
        int numa_node = bpf_map_attr_numa_node(attr);
        struct reuseport_array *array;

        if (!bpf_capable())
                return ERR_PTR(-EPERM);

        /* allocate all map elements and zero-initialize them */
        array = bpf_map_area_alloc(struct_size(array, ptrs, attr->max_entries),
                                   numa_node);
        if (!array)
                return ERR_PTR(-ENOMEM);

        /* copy mandatory map attributes */
        bpf_map_init_from_attr(&array->map, attr);

        return &array->map;
}
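/* For illustration only, a minimal userspace sketch of this map type
 * (assuming libbpf and an already-listening SO_REUSEPORT socket sfd;
 * none of these names come from this file):
 *
 *      int map_fd = bpf_map_create(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
 *                                  NULL, sizeof(__u32), sizeof(__u64),
 *                                  16, NULL);
 *      __u32 key = 0;
 *      __u64 val = sfd;        // an fd on update, a cookie on lookup
 *      bpf_map_update_elem(map_fd, &key, &val, BPF_ANY);
 */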
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
                                       void *value)
{
        struct sock *sk;
        int err;

        if (map->value_size != sizeof(u64))
                return -ENOSPC;

        rcu_read_lock();
        sk = reuseport_array_lookup_elem(map, key);
        if (sk) {
                *(u64 *)value = __sock_gen_cookie(sk);
                err = 0;
        } else {
                err = -ENOENT;
        }
        rcu_read_unlock();

        return err;
}
static int
reuseport_array_update_check(const struct reuseport_array *array,
                             const struct sock *nsk,
                             const struct sock *osk,
                             const struct sock_reuseport *nsk_reuse,
                             u32 map_flags)
{
        if (osk && map_flags == BPF_NOEXIST)
                return -EEXIST;

        if (!osk && map_flags == BPF_EXIST)
                return -ENOENT;

        if (nsk->sk_protocol != IPPROTO_UDP && nsk->sk_protocol != IPPROTO_TCP)
                return -ENOTSUPP;

        if (nsk->sk_family != AF_INET && nsk->sk_family != AF_INET6)
                return -ENOTSUPP;

        if (nsk->sk_type != SOCK_STREAM && nsk->sk_type != SOCK_DGRAM)
                return -ENOTSUPP;

        /* sk must be hashed (i.e. listening in the TCP case or bound
         * in the UDP case) and
         * it must also be a SO_REUSEPORT sk (i.e. reuse cannot be NULL).
         *
         * Also, sk will be used by a bpf helper that runs under
         * rcu_read_lock() protection.
         */
        if (!sock_flag(nsk, SOCK_RCU_FREE) || !sk_hashed(nsk) || !nsk_reuse)
                return -EINVAL;

        /* READ_ONCE because the sk->sk_callback_lock may not be held here */
        if (READ_ONCE(nsk->sk_user_data))
                return -EBUSY;

        return 0;
}
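/* The check above is deliberately run twice by the update path below:
 * first before taking reuseport_lock, as a cheap lockless pre-check
 * using rcu_access_pointer() values that may be stale, and then again
 * under reuseport_lock and sk_callback_lock where the result is
 * authoritative.
 */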
/* Called from syscall only.
 * The fd passed in as the value holds a refcnt on "nsk".
 * The "osk" and "reuse" are protected by reuseport_lock.
 */
int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
                                       void *value, u64 map_flags)
{
        struct reuseport_array *array = reuseport_array(map);
        struct sock *free_osk = NULL, *osk, *nsk;
        struct sock_reuseport *reuse;
        u32 index = *(u32 *)key;
        uintptr_t sk_user_data;
        struct socket *socket;
        int err, fd;
        if (map_flags > BPF_EXIST)
                return -EINVAL;

        if (index >= map->max_entries)
                return -E2BIG;

        if (map->value_size == sizeof(u64)) {
                u64 fd64 = *(u64 *)value;

                if (fd64 > S32_MAX)
                        return -EINVAL;
                fd = fd64;
        } else {
                fd = *(int *)value;
        }

        socket = sockfd_lookup(fd, &err);
        if (!socket)
                return err;

        nsk = socket->sk;
        if (!nsk) {
                err = -EINVAL;
                goto put_file;
        }
        /* Quick checks before taking reuseport_lock */
        err = reuseport_array_update_check(array, nsk,
                                           rcu_access_pointer(array->ptrs[index]),
                                           rcu_access_pointer(nsk->sk_reuseport_cb),
                                           map_flags);
        if (err)
                goto put_file;

        spin_lock_bh(&reuseport_lock);
        /* Some of the checks only need reuseport_lock,
         * but they are done under sk_callback_lock as well
         * for simplicity.
         */
        write_lock_bh(&nsk->sk_callback_lock);

        osk = rcu_dereference_protected(array->ptrs[index],
                                        lockdep_is_held(&reuseport_lock));
        reuse = rcu_dereference_protected(nsk->sk_reuseport_cb,
                                          lockdep_is_held(&reuseport_lock));
        err = reuseport_array_update_check(array, nsk, osk, reuse, map_flags);
        if (err)
                goto put_file_unlock;
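        /* The store below encodes the address of the map slot (not the
         * map itself) into sk_user_data, tagged with SK_USER_DATA_NOCOPY
         * (do not copy the pointer when the socket is cloned) and
         * SK_USER_DATA_BPF (the pointer is owned by BPF). The tag bits
         * fit in the low bits because the slot address is at least
         * pointer-aligned.
         */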
        sk_user_data = (uintptr_t)&array->ptrs[index] | SK_USER_DATA_NOCOPY |
                SK_USER_DATA_BPF;
        WRITE_ONCE(nsk->sk_user_data, (void *)sk_user_data);
        rcu_assign_pointer(array->ptrs[index], nsk);
        free_osk = osk;
        err = 0;

put_file_unlock:
        write_unlock_bh(&nsk->sk_callback_lock);

        if (free_osk) {
                write_lock_bh(&free_osk->sk_callback_lock);
                WRITE_ONCE(free_osk->sk_user_data, NULL);
                write_unlock_bh(&free_osk->sk_callback_lock);
        }

        spin_unlock_bh(&reuseport_lock);
put_file:
        fput(socket->file);
        return err;
}
/* Called from syscall */
static int reuseport_array_get_next_key(struct bpf_map *map, void *key,
                                        void *next_key)
{
        struct reuseport_array *array = reuseport_array(map);
        u32 index = key ? *(u32 *)key : U32_MAX;
        u32 *next = (u32 *)next_key;

        if (index >= array->map.max_entries) {
                *next = 0;
                return 0;
        }

        if (index == array->map.max_entries - 1)
                return -ENOENT;

        *next = index + 1;
        return 0;
}
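/* Iteration semantics: a NULL or out-of-range key restarts at index 0,
 * the last valid index returns -ENOENT to end a BPF_MAP_GET_NEXT_KEY
 * loop, and any other key advances to index + 1.
 */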
BTF_ID_LIST_SINGLE(reuseport_array_map_btf_ids, struct, reuseport_array)
const struct bpf_map_ops reuseport_array_ops = {
        .map_meta_equal = bpf_map_meta_equal,
        .map_alloc_check = reuseport_array_alloc_check,
        .map_alloc = reuseport_array_alloc,
        .map_free = reuseport_array_free,
        .map_lookup_elem = reuseport_array_lookup_elem,
        .map_get_next_key = reuseport_array_get_next_key,
        .map_delete_elem = reuseport_array_delete_elem,
        .map_btf_id = &reuseport_array_map_btf_ids[0],
};