]>
Commit | Line | Data |
---|---|---|
25763b3c | 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
99c55f7d | 2 | /* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com |
99c55f7d AS |
3 | */ |
4 | #ifndef _LINUX_BPF_H | |
5 | #define _LINUX_BPF_H 1 | |
6 | ||
7 | #include <uapi/linux/bpf.h> | |
d687f621 | 8 | #include <uapi/linux/filter.h> |
74451e66 | 9 | |
99c55f7d | 10 | #include <linux/workqueue.h> |
db20fd2b | 11 | #include <linux/file.h> |
b121d1e7 | 12 | #include <linux/percpu.h> |
002245cc | 13 | #include <linux/err.h> |
74451e66 | 14 | #include <linux/rbtree_latch.h> |
d6e1e46f | 15 | #include <linux/numa.h> |
fc970227 | 16 | #include <linux/mm_types.h> |
ab3f0063 | 17 | #include <linux/wait.h> |
fec56f58 AS |
18 | #include <linux/refcount.h> |
19 | #include <linux/mutex.h> | |
85d33df3 | 20 | #include <linux/module.h> |
bfea9a85 | 21 | #include <linux/kallsyms.h> |
2c78ee89 | 22 | #include <linux/capability.h> |
48edc1f7 RG |
23 | #include <linux/sched/mm.h> |
24 | #include <linux/slab.h> | |
e21aa341 | 25 | #include <linux/percpu-refcount.h> |
d687f621 | 26 | #include <linux/stddef.h> |
af2ac3e1 | 27 | #include <linux/bpfptr.h> |
14a324f6 | 28 | #include <linux/btf.h> |
8c7dcb84 | 29 | #include <linux/rcupdate_trace.h> |
c86df29d | 30 | #include <linux/static_call.h> |
ee53cbfb | 31 | #include <linux/memcontrol.h> |
4f9087f1 | 32 | #include <linux/cfi.h> |
99c55f7d | 33 | |
cae1927c | 34 | struct bpf_verifier_env; |
9e15db66 | 35 | struct bpf_verifier_log; |
3b1efb19 | 36 | struct perf_event; |
174a79ff | 37 | struct bpf_prog; |
da765a2f | 38 | struct bpf_prog_aux; |
99c55f7d | 39 | struct bpf_map; |
31746031 | 40 | struct bpf_arena; |
4f738adb | 41 | struct sock; |
a26ca7c9 | 42 | struct seq_file; |
1b2b234b | 43 | struct btf; |
e8d2bec0 | 44 | struct btf_type; |
3dec541b | 45 | struct exception_table_entry; |
ae24345d | 46 | struct seq_operations; |
f9c79272 | 47 | struct bpf_iter_aux_info; |
f836a56e KS |
48 | struct bpf_local_storage; |
49 | struct bpf_local_storage_map; | |
36e68442 | 50 | struct kobject; |
48edc1f7 | 51 | struct mem_cgroup; |
861de02e | 52 | struct module; |
69c087ba | 53 | struct bpf_func_state; |
00963a2e | 54 | struct ftrace_ops; |
d4ccaf58 | 55 | struct cgroup; |
35f96de0 AN |
56 | struct bpf_token; |
57 | struct user_namespace; | |
58 | struct super_block; | |
59 | struct inode; | |
99c55f7d | 60 | |
1b9ed84e QM |
61 | extern struct idr btf_idr; |
62 | extern spinlock_t btf_idr_lock; | |
36e68442 | 63 | extern struct kobject *btf_kobj; |
41a5db8d | 64 | extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma; |
1fda5bb6 | 65 | extern bool bpf_global_ma_set; |
1b9ed84e | 66 | |
102acbac | 67 | typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64); |
f9c79272 YS |
68 | typedef int (*bpf_iter_init_seq_priv_t)(void *private_data, |
69 | struct bpf_iter_aux_info *aux); | |
14fc6bd6 | 70 | typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data); |
af3f4134 SF |
71 | typedef unsigned int (*bpf_func_t)(const void *, |
72 | const struct bpf_insn *); | |
14fc6bd6 YS |
73 | struct bpf_iter_seq_info { |
74 | const struct seq_operations *seq_ops; | |
75 | bpf_iter_init_seq_priv_t init_seq_private; | |
76 | bpf_iter_fini_seq_priv_t fini_seq_private; | |
77 | u32 seq_priv_size; | |
78 | }; | |
79 | ||
5d903493 | 80 | /* map is generic key/value storage optionally accessible by eBPF programs */ |
99c55f7d AS |
81 | struct bpf_map_ops { |
82 | /* funcs callable from userspace (via syscall) */ | |
1110f3a9 | 83 | int (*map_alloc_check)(union bpf_attr *attr); |
99c55f7d | 84 | struct bpf_map *(*map_alloc)(union bpf_attr *attr); |
61d1b6a4 DB |
85 | void (*map_release)(struct bpf_map *map, struct file *map_file); |
86 | void (*map_free)(struct bpf_map *map); | |
db20fd2b | 87 | int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key); |
ba6b8de4 | 88 | void (*map_release_uref)(struct bpf_map *map); |
c6110222 | 89 | void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key); |
cb4d03ab BV |
90 | int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr, |
91 | union bpf_attr __user *uattr); | |
3e87f192 DS |
92 | int (*map_lookup_and_delete_elem)(struct bpf_map *map, void *key, |
93 | void *value, u64 flags); | |
05799638 YS |
94 | int (*map_lookup_and_delete_batch)(struct bpf_map *map, |
95 | const union bpf_attr *attr, | |
96 | union bpf_attr __user *uattr); | |
3af43ba4 HT |
97 | int (*map_update_batch)(struct bpf_map *map, struct file *map_file, |
98 | const union bpf_attr *attr, | |
aa2e93b8 BV |
99 | union bpf_attr __user *uattr); |
100 | int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr, | |
101 | union bpf_attr __user *uattr); | |
db20fd2b AS |
102 | |
103 | /* funcs callable from userspace and from eBPF programs */ | |
104 | void *(*map_lookup_elem)(struct bpf_map *map, void *key); | |
d7ba4cc9 JK |
105 | long (*map_update_elem)(struct bpf_map *map, void *key, void *value, u64 flags); |
106 | long (*map_delete_elem)(struct bpf_map *map, void *key); | |
107 | long (*map_push_elem)(struct bpf_map *map, void *value, u64 flags); | |
108 | long (*map_pop_elem)(struct bpf_map *map, void *value); | |
109 | long (*map_peek_elem)(struct bpf_map *map, void *value); | |
07343110 | 110 | void *(*map_lookup_percpu_elem)(struct bpf_map *map, void *key, u32 cpu); |
2a36f0b9 WN |
111 | |
112 | /* funcs called by prog_array and perf_event_array map */ | |
d056a788 DB |
113 | void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file, |
114 | int fd); | |
20c20bd1 HT |
115 | /* If need_defer is true, the implementation should guarantee that |
116 | * the to-be-put element is still alive before the bpf program, which | |
117 | * may manipulate it, exists. | |
118 | */ | |
119 | void (*map_fd_put_ptr)(struct bpf_map *map, void *ptr, bool need_defer); | |
4a8f87e6 | 120 | int (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf); |
14dc6f04 | 121 | u32 (*map_fd_sys_lookup_elem)(void *ptr); |
a26ca7c9 MKL |
122 | void (*map_seq_show_elem)(struct bpf_map *map, void *key, |
123 | struct seq_file *m); | |
e8d2bec0 | 124 | int (*map_check_btf)(const struct bpf_map *map, |
1b2b234b | 125 | const struct btf *btf, |
e8d2bec0 DB |
126 | const struct btf_type *key_type, |
127 | const struct btf_type *value_type); | |
d8eca5bb | 128 | |
da765a2f DB |
129 | /* Prog poke tracking helpers. */ |
130 | int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux); | |
131 | void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux); | |
132 | void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old, | |
133 | struct bpf_prog *new); | |
134 | ||
d8eca5bb DB |
135 | /* Direct value access helpers. */ |
136 | int (*map_direct_value_addr)(const struct bpf_map *map, | |
137 | u64 *imm, u32 off); | |
138 | int (*map_direct_value_meta)(const struct bpf_map *map, | |
139 | u64 imm, u32 *off); | |
fc970227 | 140 | int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma); |
457f4436 AN |
141 | __poll_t (*map_poll)(struct bpf_map *map, struct file *filp, |
142 | struct poll_table_struct *pts); | |
cf2c2e4a AS |
143 | unsigned long (*map_get_unmapped_area)(struct file *filep, unsigned long addr, |
144 | unsigned long len, unsigned long pgoff, | |
145 | unsigned long flags); | |
41c48f3a | 146 | |
f836a56e KS |
147 | /* Functions called by bpf_local_storage maps */ |
148 | int (*map_local_storage_charge)(struct bpf_local_storage_map *smap, | |
149 | void *owner, u32 size); | |
150 | void (*map_local_storage_uncharge)(struct bpf_local_storage_map *smap, | |
151 | void *owner, u32 size); | |
152 | struct bpf_local_storage __rcu ** (*map_owner_storage_ptr)(void *owner); | |
f4d05259 | 153 | |
e6a4750f | 154 | /* Misc helpers.*/ |
d7ba4cc9 | 155 | long (*map_redirect)(struct bpf_map *map, u64 key, u64 flags); |
e6a4750f | 156 | |
f4d05259 MKL |
157 | /* map_meta_equal must be implemented for maps that can be |
158 | * used as an inner map. It is a runtime check to ensure | |
159 | * an inner map can be inserted to an outer map. | |
160 | * | |
161 | * Some properties of the inner map has been used during the | |
162 | * verification time. When inserting an inner map at the runtime, | |
163 | * map_meta_equal has to ensure the inserting map has the same | |
164 | * properties that the verifier has used earlier. | |
165 | */ | |
166 | bool (*map_meta_equal)(const struct bpf_map *meta0, | |
167 | const struct bpf_map *meta1); | |
168 | ||
69c087ba YS |
169 | |
170 | int (*map_set_for_each_callback_args)(struct bpf_verifier_env *env, | |
171 | struct bpf_func_state *caller, | |
172 | struct bpf_func_state *callee); | |
d7ba4cc9 | 173 | long (*map_for_each_callback)(struct bpf_map *map, |
102acbac | 174 | bpf_callback_t callback_fn, |
69c087ba YS |
175 | void *callback_ctx, u64 flags); |
176 | ||
90a5527d YS |
177 | u64 (*map_mem_usage)(const struct bpf_map *map); |
178 | ||
c317ab71 | 179 | /* BTF id of struct allocated by map_alloc */ |
41c48f3a | 180 | int *map_btf_id; |
a5cbe05a YS |
181 | |
182 | /* bpf_iter info used to open a seq_file */ | |
183 | const struct bpf_iter_seq_info *iter_seq_info; | |
99c55f7d AS |
184 | }; |
185 | ||
61df10c7 | 186 | enum { |
95c07d58 | 187 | /* Support at most 11 fields in a BTF type */ |
d56b63cf | 188 | BTF_FIELDS_MAX = 11, |
61df10c7 KKD |
189 | }; |
190 | ||
aa3496ac | 191 | enum btf_field_type { |
db559117 KKD |
192 | BPF_SPIN_LOCK = (1 << 0), |
193 | BPF_TIMER = (1 << 1), | |
aa3496ac KKD |
194 | BPF_KPTR_UNREF = (1 << 2), |
195 | BPF_KPTR_REF = (1 << 3), | |
55db92f4 YS |
196 | BPF_KPTR_PERCPU = (1 << 4), |
197 | BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU, | |
198 | BPF_LIST_HEAD = (1 << 5), | |
199 | BPF_LIST_NODE = (1 << 6), | |
200 | BPF_RB_ROOT = (1 << 7), | |
201 | BPF_RB_NODE = (1 << 8), | |
790ce3cf DM |
202 | BPF_GRAPH_NODE = BPF_RB_NODE | BPF_LIST_NODE, |
203 | BPF_GRAPH_ROOT = BPF_RB_ROOT | BPF_LIST_HEAD, | |
55db92f4 | 204 | BPF_REFCOUNT = (1 << 9), |
d56b63cf | 205 | BPF_WORKQUEUE = (1 << 10), |
1cb80d9e | 206 | BPF_UPTR = (1 << 11), |
c0a5a21c KKD |
207 | }; |
208 | ||
c8e18754 | 209 | typedef void (*btf_dtor_kfunc_t)(void *); |
c8e18754 | 210 | |
aa3496ac KKD |
211 | struct btf_field_kptr { |
212 | struct btf *btf; | |
213 | struct module *module; | |
9e36a204 DM |
214 | /* dtor used if btf_is_kernel(btf), otherwise the type is |
215 | * program-allocated, dtor is NULL, and __bpf_obj_drop_impl is used | |
216 | */ | |
217 | btf_dtor_kfunc_t dtor; | |
aa3496ac KKD |
218 | u32 btf_id; |
219 | }; | |
220 | ||
30465003 | 221 | struct btf_field_graph_root { |
f0c5941f KKD |
222 | struct btf *btf; |
223 | u32 value_btf_id; | |
224 | u32 node_offset; | |
865ce09a | 225 | struct btf_record *value_rec; |
f0c5941f KKD |
226 | }; |
227 | ||
aa3496ac | 228 | struct btf_field { |
61df10c7 | 229 | u32 offset; |
cd2a8079 | 230 | u32 size; |
aa3496ac KKD |
231 | enum btf_field_type type; |
232 | union { | |
233 | struct btf_field_kptr kptr; | |
30465003 | 234 | struct btf_field_graph_root graph_root; |
aa3496ac | 235 | }; |
61df10c7 KKD |
236 | }; |
237 | ||
aa3496ac KKD |
238 | struct btf_record { |
239 | u32 cnt; | |
240 | u32 field_mask; | |
db559117 KKD |
241 | int spin_lock_off; |
242 | int timer_off; | |
d56b63cf | 243 | int wq_off; |
d54730b5 | 244 | int refcount_off; |
aa3496ac | 245 | struct btf_field fields[]; |
61df10c7 KKD |
246 | }; |
247 | ||
0a1f7bfe DM |
248 | /* Non-opaque version of bpf_rb_node in uapi/linux/bpf.h */ |
249 | struct bpf_rb_node_kern { | |
250 | struct rb_node rb_node; | |
c3c510ce | 251 | void *owner; |
0a1f7bfe DM |
252 | } __attribute__((aligned(8))); |
253 | ||
254 | /* Non-opaque version of bpf_list_node in uapi/linux/bpf.h */ | |
255 | struct bpf_list_node_kern { | |
256 | struct list_head list_head; | |
c3c510ce | 257 | void *owner; |
0a1f7bfe DM |
258 | } __attribute__((aligned(8))); |
259 | ||
99c55f7d | 260 | struct bpf_map { |
a3c70a3c | 261 | const struct bpf_map_ops *ops; |
be95a845 DB |
262 | struct bpf_map *inner_map_meta; |
263 | #ifdef CONFIG_SECURITY | |
264 | void *security; | |
265 | #endif | |
99c55f7d AS |
266 | enum bpf_map_type map_type; |
267 | u32 key_size; | |
268 | u32 value_size; | |
269 | u32 max_entries; | |
9330986c | 270 | u64 map_extra; /* any per-map-type extra fields */ |
6c905981 | 271 | u32 map_flags; |
f3f1c054 | 272 | u32 id; |
db559117 | 273 | struct btf_record *record; |
96eabe7a | 274 | int numa_node; |
9b2cf328 MKL |
275 | u32 btf_key_type_id; |
276 | u32 btf_value_type_id; | |
8845b468 | 277 | u32 btf_vmlinux_value_type_id; |
a26ca7c9 | 278 | struct btf *btf; |
3a3b7fec | 279 | #ifdef CONFIG_MEMCG |
4201d9ab | 280 | struct obj_cgroup *objcg; |
48edc1f7 | 281 | #endif |
fc970227 | 282 | char name[BPF_OBJ_NAME_LEN]; |
a3c70a3c AS |
283 | struct mutex freeze_mutex; |
284 | atomic64_t refcnt; | |
1e0bd5a0 | 285 | atomic64_t usercnt; |
87667336 HT |
286 | /* rcu is used before freeing and work is only used during freeing */ |
287 | union { | |
288 | struct work_struct work; | |
289 | struct rcu_head rcu; | |
290 | }; | |
353050be | 291 | atomic64_t writecnt; |
f45d5b6c THJ |
292 | /* 'Ownership' of program-containing map is claimed by the first program |
293 | * that is going to use this map or by the first program which FD is | |
294 | * stored in the map to make sure that all callers and callees have the | |
295 | * same prog type, JITed flag and xdp_has_frags flag. | |
296 | */ | |
297 | struct { | |
28ead3ea | 298 | const struct btf_type *attach_func_proto; |
f45d5b6c THJ |
299 | spinlock_t lock; |
300 | enum bpf_prog_type type; | |
301 | bool jited; | |
302 | bool xdp_has_frags; | |
303 | } owner; | |
4d7d7f69 KKD |
304 | bool bypass_spec_v1; |
305 | bool frozen; /* write-once; write-protected by freeze_mutex */ | |
87667336 | 306 | bool free_after_mult_rcu_gp; |
af66bfd3 HT |
307 | bool free_after_rcu_gp; |
308 | atomic64_t sleepable_refcnt; | |
25954730 | 309 | s64 __percpu *elem_count; |
99c55f7d AS |
310 | }; |
311 | ||
db559117 KKD |
312 | static inline const char *btf_field_type_name(enum btf_field_type type) |
313 | { | |
314 | switch (type) { | |
315 | case BPF_SPIN_LOCK: | |
316 | return "bpf_spin_lock"; | |
317 | case BPF_TIMER: | |
318 | return "bpf_timer"; | |
d56b63cf BT |
319 | case BPF_WORKQUEUE: |
320 | return "bpf_wq"; | |
db559117 KKD |
321 | case BPF_KPTR_UNREF: |
322 | case BPF_KPTR_REF: | |
323 | return "kptr"; | |
55db92f4 YS |
324 | case BPF_KPTR_PERCPU: |
325 | return "percpu_kptr"; | |
1cb80d9e KFL |
326 | case BPF_UPTR: |
327 | return "uptr"; | |
f0c5941f KKD |
328 | case BPF_LIST_HEAD: |
329 | return "bpf_list_head"; | |
8ffa5cc1 KKD |
330 | case BPF_LIST_NODE: |
331 | return "bpf_list_node"; | |
9c395c1b DM |
332 | case BPF_RB_ROOT: |
333 | return "bpf_rb_root"; | |
334 | case BPF_RB_NODE: | |
335 | return "bpf_rb_node"; | |
d54730b5 DM |
336 | case BPF_REFCOUNT: |
337 | return "bpf_refcount"; | |
db559117 KKD |
338 | default: |
339 | WARN_ON_ONCE(1); | |
340 | return "unknown"; | |
341 | } | |
342 | } | |
343 | ||
aa3496ac KKD |
344 | static inline u32 btf_field_type_size(enum btf_field_type type) |
345 | { | |
346 | switch (type) { | |
db559117 KKD |
347 | case BPF_SPIN_LOCK: |
348 | return sizeof(struct bpf_spin_lock); | |
349 | case BPF_TIMER: | |
350 | return sizeof(struct bpf_timer); | |
d56b63cf BT |
351 | case BPF_WORKQUEUE: |
352 | return sizeof(struct bpf_wq); | |
aa3496ac KKD |
353 | case BPF_KPTR_UNREF: |
354 | case BPF_KPTR_REF: | |
55db92f4 | 355 | case BPF_KPTR_PERCPU: |
1cb80d9e | 356 | case BPF_UPTR: |
aa3496ac | 357 | return sizeof(u64); |
f0c5941f KKD |
358 | case BPF_LIST_HEAD: |
359 | return sizeof(struct bpf_list_head); | |
8ffa5cc1 KKD |
360 | case BPF_LIST_NODE: |
361 | return sizeof(struct bpf_list_node); | |
9c395c1b DM |
362 | case BPF_RB_ROOT: |
363 | return sizeof(struct bpf_rb_root); | |
364 | case BPF_RB_NODE: | |
365 | return sizeof(struct bpf_rb_node); | |
d54730b5 DM |
366 | case BPF_REFCOUNT: |
367 | return sizeof(struct bpf_refcount); | |
aa3496ac KKD |
368 | default: |
369 | WARN_ON_ONCE(1); | |
370 | return 0; | |
371 | } | |
372 | } | |
373 | ||
374 | static inline u32 btf_field_type_align(enum btf_field_type type) | |
375 | { | |
376 | switch (type) { | |
db559117 KKD |
377 | case BPF_SPIN_LOCK: |
378 | return __alignof__(struct bpf_spin_lock); | |
379 | case BPF_TIMER: | |
380 | return __alignof__(struct bpf_timer); | |
d56b63cf BT |
381 | case BPF_WORKQUEUE: |
382 | return __alignof__(struct bpf_wq); | |
aa3496ac KKD |
383 | case BPF_KPTR_UNREF: |
384 | case BPF_KPTR_REF: | |
55db92f4 | 385 | case BPF_KPTR_PERCPU: |
1cb80d9e | 386 | case BPF_UPTR: |
aa3496ac | 387 | return __alignof__(u64); |
f0c5941f KKD |
388 | case BPF_LIST_HEAD: |
389 | return __alignof__(struct bpf_list_head); | |
8ffa5cc1 KKD |
390 | case BPF_LIST_NODE: |
391 | return __alignof__(struct bpf_list_node); | |
9c395c1b DM |
392 | case BPF_RB_ROOT: |
393 | return __alignof__(struct bpf_rb_root); | |
394 | case BPF_RB_NODE: | |
395 | return __alignof__(struct bpf_rb_node); | |
d54730b5 DM |
396 | case BPF_REFCOUNT: |
397 | return __alignof__(struct bpf_refcount); | |
aa3496ac KKD |
398 | default: |
399 | WARN_ON_ONCE(1); | |
400 | return 0; | |
401 | } | |
402 | } | |
403 | ||
3e81740a DM |
404 | static inline void bpf_obj_init_field(const struct btf_field *field, void *addr) |
405 | { | |
406 | memset(addr, 0, field->size); | |
407 | ||
408 | switch (field->type) { | |
409 | case BPF_REFCOUNT: | |
410 | refcount_set((refcount_t *)addr, 1); | |
411 | break; | |
412 | case BPF_RB_NODE: | |
413 | RB_CLEAR_NODE((struct rb_node *)addr); | |
414 | break; | |
415 | case BPF_LIST_HEAD: | |
416 | case BPF_LIST_NODE: | |
417 | INIT_LIST_HEAD((struct list_head *)addr); | |
418 | break; | |
419 | case BPF_RB_ROOT: | |
420 | /* RB_ROOT_CACHED 0-inits, no need to do anything after memset */ | |
421 | case BPF_SPIN_LOCK: | |
422 | case BPF_TIMER: | |
d56b63cf | 423 | case BPF_WORKQUEUE: |
3e81740a DM |
424 | case BPF_KPTR_UNREF: |
425 | case BPF_KPTR_REF: | |
55db92f4 | 426 | case BPF_KPTR_PERCPU: |
ba512b00 | 427 | case BPF_UPTR: |
3e81740a DM |
428 | break; |
429 | default: | |
430 | WARN_ON_ONCE(1); | |
431 | return; | |
432 | } | |
433 | } | |
434 | ||
aa3496ac KKD |
435 | static inline bool btf_record_has_field(const struct btf_record *rec, enum btf_field_type type) |
436 | { | |
437 | if (IS_ERR_OR_NULL(rec)) | |
438 | return false; | |
439 | return rec->field_mask & type; | |
440 | } | |
441 | ||
cd2a8079 | 442 | static inline void bpf_obj_init(const struct btf_record *rec, void *obj) |
68134668 | 443 | { |
958cf2e2 | 444 | int i; |
4d7d7f69 | 445 | |
cd2a8079 | 446 | if (IS_ERR_OR_NULL(rec)) |
958cf2e2 | 447 | return; |
cd2a8079 | 448 | for (i = 0; i < rec->cnt; i++) |
3e81740a | 449 | bpf_obj_init_field(&rec->fields[i], obj + rec->fields[i].offset); |
958cf2e2 KKD |
450 | } |
451 | ||
997849c4 HT |
452 | /* 'dst' must be a temporary buffer and should not point to memory that is being |
453 | * used in parallel by a bpf program or bpf syscall, otherwise the access from | |
454 | * the bpf program or bpf syscall may be corrupted by the reinitialization, | |
455 | * leading to weird problems. Even 'dst' is newly-allocated from bpf memory | |
456 | * allocator, it is still possible for 'dst' to be used in parallel by a bpf | |
457 | * program or bpf syscall. | |
458 | */ | |
958cf2e2 KKD |
459 | static inline void check_and_init_map_value(struct bpf_map *map, void *dst) |
460 | { | |
cd2a8079 | 461 | bpf_obj_init(map->record, dst); |
68134668 AS |
462 | } |
463 | ||
44832519 KKD |
464 | /* memcpy that is used with 8-byte aligned pointers, power-of-8 size and |
465 | * forced to use 'long' read/writes to try to atomically copy long counters. | |
466 | * Best-effort only. No barriers here, since it _will_ race with concurrent | |
467 | * updates from BPF programs. Called from bpf syscall and mostly used with | |
468 | * size 8 or 16 bytes, so ask compiler to inline it. | |
469 | */ | |
470 | static inline void bpf_long_memcpy(void *dst, const void *src, u32 size) | |
471 | { | |
472 | const long *lsrc = src; | |
473 | long *ldst = dst; | |
474 | ||
475 | size /= sizeof(long); | |
476 | while (size--) | |
6a86b5b5 | 477 | data_race(*ldst++ = *lsrc++); |
44832519 KKD |
478 | } |
479 | ||
480 | /* copy everything but bpf_spin_lock, bpf_timer, and kptrs. There could be one of each. */ | |
cd2a8079 | 481 | static inline void bpf_obj_memcpy(struct btf_record *rec, |
f71b2f64 KKD |
482 | void *dst, void *src, u32 size, |
483 | bool long_memcpy) | |
d83525ca | 484 | { |
4d7d7f69 KKD |
485 | u32 curr_off = 0; |
486 | int i; | |
68134668 | 487 | |
cd2a8079 | 488 | if (IS_ERR_OR_NULL(rec)) { |
44832519 | 489 | if (long_memcpy) |
f71b2f64 | 490 | bpf_long_memcpy(dst, src, round_up(size, 8)); |
44832519 | 491 | else |
f71b2f64 | 492 | memcpy(dst, src, size); |
4d7d7f69 | 493 | return; |
68134668 | 494 | } |
d83525ca | 495 | |
cd2a8079 DM |
496 | for (i = 0; i < rec->cnt; i++) { |
497 | u32 next_off = rec->fields[i].offset; | |
aa3496ac | 498 | u32 sz = next_off - curr_off; |
4d7d7f69 | 499 | |
aa3496ac | 500 | memcpy(dst + curr_off, src + curr_off, sz); |
cd2a8079 | 501 | curr_off += rec->fields[i].size + sz; |
d83525ca | 502 | } |
f71b2f64 | 503 | memcpy(dst + curr_off, src + curr_off, size - curr_off); |
d83525ca | 504 | } |
44832519 KKD |
505 | |
506 | static inline void copy_map_value(struct bpf_map *map, void *dst, void *src) | |
507 | { | |
cd2a8079 | 508 | bpf_obj_memcpy(map->record, dst, src, map->value_size, false); |
44832519 KKD |
509 | } |
510 | ||
511 | static inline void copy_map_value_long(struct bpf_map *map, void *dst, void *src) | |
512 | { | |
cd2a8079 | 513 | bpf_obj_memcpy(map->record, dst, src, map->value_size, true); |
44832519 KKD |
514 | } |
515 | ||
ba512b00 MKL |
516 | static inline void bpf_obj_swap_uptrs(const struct btf_record *rec, void *dst, void *src) |
517 | { | |
518 | unsigned long *src_uptr, *dst_uptr; | |
519 | const struct btf_field *field; | |
520 | int i; | |
521 | ||
522 | if (!btf_record_has_field(rec, BPF_UPTR)) | |
523 | return; | |
524 | ||
525 | for (i = 0, field = rec->fields; i < rec->cnt; i++, field++) { | |
526 | if (field->type != BPF_UPTR) | |
527 | continue; | |
528 | ||
529 | src_uptr = src + field->offset; | |
530 | dst_uptr = dst + field->offset; | |
531 | swap(*src_uptr, *dst_uptr); | |
532 | } | |
533 | } | |
534 | ||
cd2a8079 | 535 | static inline void bpf_obj_memzero(struct btf_record *rec, void *dst, u32 size) |
cc487558 KKD |
536 | { |
537 | u32 curr_off = 0; | |
538 | int i; | |
539 | ||
cd2a8079 | 540 | if (IS_ERR_OR_NULL(rec)) { |
f71b2f64 | 541 | memset(dst, 0, size); |
cc487558 KKD |
542 | return; |
543 | } | |
544 | ||
cd2a8079 DM |
545 | for (i = 0; i < rec->cnt; i++) { |
546 | u32 next_off = rec->fields[i].offset; | |
aa3496ac | 547 | u32 sz = next_off - curr_off; |
cc487558 | 548 | |
aa3496ac | 549 | memset(dst + curr_off, 0, sz); |
cd2a8079 | 550 | curr_off += rec->fields[i].size + sz; |
cc487558 | 551 | } |
f71b2f64 KKD |
552 | memset(dst + curr_off, 0, size - curr_off); |
553 | } | |
554 | ||
555 | static inline void zero_map_value(struct bpf_map *map, void *dst) | |
556 | { | |
cd2a8079 | 557 | bpf_obj_memzero(map->record, dst, map->value_size); |
cc487558 KKD |
558 | } |
559 | ||
96049f3a AS |
560 | void copy_map_value_locked(struct bpf_map *map, void *dst, void *src, |
561 | bool lock_src); | |
b00628b1 | 562 | void bpf_timer_cancel_and_free(void *timer); |
246331e3 | 563 | void bpf_wq_cancel_and_free(void *timer); |
f0c5941f KKD |
564 | void bpf_list_head_free(const struct btf_field *field, void *list_head, |
565 | struct bpf_spin_lock *spin_lock); | |
9c395c1b DM |
566 | void bpf_rb_root_free(const struct btf_field *field, void *rb_root, |
567 | struct bpf_spin_lock *spin_lock); | |
31746031 AS |
568 | u64 bpf_arena_get_kern_vm_start(struct bpf_arena *arena); |
569 | u64 bpf_arena_get_user_vm_start(struct bpf_arena *arena); | |
8e7ae251 | 570 | int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size); |
d83525ca | 571 | |
602144c2 | 572 | struct bpf_offload_dev; |
a3884572 JK |
573 | struct bpf_offloaded_map; |
574 | ||
575 | struct bpf_map_dev_ops { | |
576 | int (*map_get_next_key)(struct bpf_offloaded_map *map, | |
577 | void *key, void *next_key); | |
578 | int (*map_lookup_elem)(struct bpf_offloaded_map *map, | |
579 | void *key, void *value); | |
580 | int (*map_update_elem)(struct bpf_offloaded_map *map, | |
581 | void *key, void *value, u64 flags); | |
582 | int (*map_delete_elem)(struct bpf_offloaded_map *map, void *key); | |
583 | }; | |
584 | ||
585 | struct bpf_offloaded_map { | |
586 | struct bpf_map map; | |
587 | struct net_device *netdev; | |
588 | const struct bpf_map_dev_ops *dev_ops; | |
589 | void *dev_priv; | |
590 | struct list_head offloads; | |
591 | }; | |
592 | ||
593 | static inline struct bpf_offloaded_map *map_to_offmap(struct bpf_map *map) | |
594 | { | |
595 | return container_of(map, struct bpf_offloaded_map, map); | |
596 | } | |
597 | ||
0cd3cbed JK |
598 | static inline bool bpf_map_offload_neutral(const struct bpf_map *map) |
599 | { | |
600 | return map->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY; | |
601 | } | |
602 | ||
a26ca7c9 MKL |
603 | static inline bool bpf_map_support_seq_show(const struct bpf_map *map) |
604 | { | |
85d33df3 MKL |
605 | return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) && |
606 | map->ops->map_seq_show_elem; | |
a26ca7c9 MKL |
607 | } |
608 | ||
e8d2bec0 | 609 | int map_check_no_btf(const struct bpf_map *map, |
1b2b234b | 610 | const struct btf *btf, |
e8d2bec0 DB |
611 | const struct btf_type *key_type, |
612 | const struct btf_type *value_type); | |
613 | ||
f4d05259 MKL |
614 | bool bpf_map_meta_equal(const struct bpf_map *meta0, |
615 | const struct bpf_map *meta1); | |
616 | ||
a3884572 JK |
617 | extern const struct bpf_map_ops bpf_map_offload_ops; |
618 | ||
d639b9d1 HL |
619 | /* bpf_type_flag contains a set of flags that are applicable to the values of |
620 | * arg_type, ret_type and reg_type. For example, a pointer value may be null, | |
621 | * or a memory is read-only. We classify types into two categories: base types | |
622 | * and extended types. Extended types are base types combined with a type flag. | |
623 | * | |
624 | * Currently there are no more than 32 base types in arg_type, ret_type and | |
625 | * reg_types. | |
626 | */ | |
627 | #define BPF_BASE_TYPE_BITS 8 | |
628 | ||
629 | enum bpf_type_flag { | |
630 | /* PTR may be NULL. */ | |
631 | PTR_MAYBE_NULL = BIT(0 + BPF_BASE_TYPE_BITS), | |
632 | ||
216e3cd2 HL |
633 | /* MEM is read-only. When applied on bpf_arg, it indicates the arg is |
634 | * compatible with both mutable and immutable memory. | |
635 | */ | |
20b2aff4 HL |
636 | MEM_RDONLY = BIT(1 + BPF_BASE_TYPE_BITS), |
637 | ||
894f2a8b KKD |
638 | /* MEM points to BPF ring buffer reservation. */ |
639 | MEM_RINGBUF = BIT(2 + BPF_BASE_TYPE_BITS), | |
a672b2e3 | 640 | |
c6f1bfe8 YS |
641 | /* MEM is in user address space. */ |
642 | MEM_USER = BIT(3 + BPF_BASE_TYPE_BITS), | |
643 | ||
5844101a HL |
644 | /* MEM is a percpu memory. MEM_PERCPU tags PTR_TO_BTF_ID. When tagged |
645 | * with MEM_PERCPU, PTR_TO_BTF_ID _cannot_ be directly accessed. In | |
646 | * order to drop this tag, it must be passed into bpf_per_cpu_ptr() | |
647 | * or bpf_this_cpu_ptr(), which will return the pointer corresponding | |
648 | * to the specified cpu. | |
649 | */ | |
650 | MEM_PERCPU = BIT(4 + BPF_BASE_TYPE_BITS), | |
651 | ||
8f14852e KKD |
652 | /* Indicates that the argument will be released. */ |
653 | OBJ_RELEASE = BIT(5 + BPF_BASE_TYPE_BITS), | |
654 | ||
6efe152d KKD |
655 | /* PTR is not trusted. This is only used with PTR_TO_BTF_ID, to mark |
656 | * unreferenced and referenced kptr loaded from map value using a load | |
657 | * instruction, so that they can only be dereferenced but not escape the | |
658 | * BPF program into the kernel (i.e. cannot be passed as arguments to | |
659 | * kfunc or bpf helpers). | |
660 | */ | |
661 | PTR_UNTRUSTED = BIT(6 + BPF_BASE_TYPE_BITS), | |
662 | ||
6fad274f | 663 | /* MEM can be uninitialized. */ |
16d1e00c JK |
664 | MEM_UNINIT = BIT(7 + BPF_BASE_TYPE_BITS), |
665 | ||
97e03f52 JK |
666 | /* DYNPTR points to memory local to the bpf program. */ |
667 | DYNPTR_TYPE_LOCAL = BIT(8 + BPF_BASE_TYPE_BITS), | |
668 | ||
20571567 | 669 | /* DYNPTR points to a kernel-produced ringbuf record. */ |
bc34dee6 JK |
670 | DYNPTR_TYPE_RINGBUF = BIT(9 + BPF_BASE_TYPE_BITS), |
671 | ||
508362ac MM |
672 | /* Size is known at compile time. */ |
673 | MEM_FIXED_SIZE = BIT(10 + BPF_BASE_TYPE_BITS), | |
674 | ||
282de143 KKD |
675 | /* MEM is of an allocated object of type in program BTF. This is used to |
676 | * tag PTR_TO_BTF_ID allocated using bpf_obj_new. | |
677 | */ | |
678 | MEM_ALLOC = BIT(11 + BPF_BASE_TYPE_BITS), | |
679 | ||
3f00c523 DV |
680 | /* PTR was passed from the kernel in a trusted context, and may be |
681 | * passed to KF_TRUSTED_ARGS kfuncs or BPF helper functions. | |
682 | * Confusingly, this is _not_ the opposite of PTR_UNTRUSTED above. | |
683 | * PTR_UNTRUSTED refers to a kptr that was read directly from a map | |
684 | * without invoking bpf_kptr_xchg(). What we really need to know is | |
685 | * whether a pointer is safe to pass to a kfunc or BPF helper function. | |
686 | * While PTR_UNTRUSTED pointers are unsafe to pass to kfuncs and BPF | |
687 | * helpers, they do not cover all possible instances of unsafe | |
688 | * pointers. For example, a pointer that was obtained from walking a | |
689 | * struct will _not_ get the PTR_UNTRUSTED type modifier, despite the | |
690 | * fact that it may be NULL, invalid, etc. This is due to backwards | |
691 | * compatibility requirements, as this was the behavior that was first | |
692 | * introduced when kptrs were added. The behavior is now considered | |
693 | * deprecated, and PTR_UNTRUSTED will eventually be removed. | |
694 | * | |
695 | * PTR_TRUSTED, on the other hand, is a pointer that the kernel | |
696 | * guarantees to be valid and safe to pass to kfuncs and BPF helpers. | |
697 | * For example, pointers passed to tracepoint arguments are considered | |
698 | * PTR_TRUSTED, as are pointers that are passed to struct_ops | |
699 | * callbacks. As alluded to above, pointers that are obtained from | |
700 | * walking PTR_TRUSTED pointers are _not_ trusted. For example, if a | |
701 | * struct task_struct *task is PTR_TRUSTED, then accessing | |
702 | * task->last_wakee will lose the PTR_TRUSTED modifier when it's stored | |
703 | * in a BPF register. Similarly, pointers passed to certain programs | |
704 | * types such as kretprobes are not guaranteed to be valid, as they may | |
705 | * for example contain an object that was recently freed. | |
706 | */ | |
707 | PTR_TRUSTED = BIT(12 + BPF_BASE_TYPE_BITS), | |
708 | ||
9bb00b28 YS |
709 | /* MEM is tagged with rcu and memory access needs rcu_read_lock protection. */ |
710 | MEM_RCU = BIT(13 + BPF_BASE_TYPE_BITS), | |
711 | ||
6a3cd331 | 712 | /* Used to tag PTR_TO_BTF_ID | MEM_ALLOC references which are non-owning. |
0816b8c6 DM |
713 | * Currently only valid for linked-list and rbtree nodes. If the nodes |
714 | * have a bpf_refcount_field, they must be tagged MEM_RCU as well. | |
6a3cd331 DM |
715 | */ |
716 | NON_OWN_REF = BIT(14 + BPF_BASE_TYPE_BITS), | |
717 | ||
b5964b96 JK |
718 | /* DYNPTR points to sk_buff */ |
719 | DYNPTR_TYPE_SKB = BIT(15 + BPF_BASE_TYPE_BITS), | |
720 | ||
05421aec JK |
721 | /* DYNPTR points to xdp_buff */ |
722 | DYNPTR_TYPE_XDP = BIT(16 + BPF_BASE_TYPE_BITS), | |
723 | ||
32556ce9 DB |
724 | /* Memory must be aligned on some architectures, used in combination with |
725 | * MEM_FIXED_SIZE. | |
726 | */ | |
727 | MEM_ALIGNED = BIT(17 + BPF_BASE_TYPE_BITS), | |
728 | ||
6fad274f DB |
729 | /* MEM is being written to, often combined with MEM_UNINIT. Non-presence |
730 | * of MEM_WRITE means that MEM is only being read. MEM_WRITE without the | |
731 | * MEM_UNINIT means that memory needs to be initialized since it is also | |
732 | * read. | |
733 | */ | |
734 | MEM_WRITE = BIT(18 + BPF_BASE_TYPE_BITS), | |
735 | ||
16d1e00c JK |
736 | __BPF_TYPE_FLAG_MAX, |
737 | __BPF_TYPE_LAST_FLAG = __BPF_TYPE_FLAG_MAX - 1, | |
d639b9d1 HL |
738 | }; |
739 | ||
05421aec JK |
740 | #define DYNPTR_TYPE_FLAG_MASK (DYNPTR_TYPE_LOCAL | DYNPTR_TYPE_RINGBUF | DYNPTR_TYPE_SKB \ |
741 | | DYNPTR_TYPE_XDP) | |
97e03f52 | 742 | |
d639b9d1 HL |
743 | /* Max number of base types. */ |
744 | #define BPF_BASE_TYPE_LIMIT (1UL << BPF_BASE_TYPE_BITS) | |
745 | ||
746 | /* Max number of all types. */ | |
747 | #define BPF_TYPE_LIMIT (__BPF_TYPE_LAST_FLAG | (__BPF_TYPE_LAST_FLAG - 1)) | |
748 | ||
17a52670 AS |
749 | /* function argument constraints */ |
750 | enum bpf_arg_type { | |
80f1d68c | 751 | ARG_DONTCARE = 0, /* unused argument in helper function */ |
17a52670 AS |
752 | |
753 | /* the following constraints used to prototype | |
754 | * bpf_map_lookup/update/delete_elem() functions | |
755 | */ | |
756 | ARG_CONST_MAP_PTR, /* const argument used as pointer to bpf_map */ | |
757 | ARG_PTR_TO_MAP_KEY, /* pointer to stack used as map key */ | |
758 | ARG_PTR_TO_MAP_VALUE, /* pointer to stack used as map value */ | |
759 | ||
16d1e00c JK |
760 | /* Used to prototype bpf_memcmp() and other functions that access data |
761 | * on eBPF program stack | |
17a52670 | 762 | */ |
39f19ebb | 763 | ARG_PTR_TO_MEM, /* pointer to valid memory (stack, packet, map value) */ |
2edc3de6 | 764 | ARG_PTR_TO_ARENA, |
435faee1 | 765 | |
39f19ebb AS |
766 | ARG_CONST_SIZE, /* number of bytes accessed from memory */ |
767 | ARG_CONST_SIZE_OR_ZERO, /* number of bytes accessed from memory or 0 */ | |
80f1d68c | 768 | |
608cd71a | 769 | ARG_PTR_TO_CTX, /* pointer to context */ |
80f1d68c | 770 | ARG_ANYTHING, /* any (initialized) argument is ok */ |
d83525ca | 771 | ARG_PTR_TO_SPIN_LOCK, /* pointer to bpf_spin_lock */ |
46f8bc92 | 772 | ARG_PTR_TO_SOCK_COMMON, /* pointer to sock_common */ |
6ac99e8f | 773 | ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */ |
a7658e1a | 774 | ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */ |
894f2a8b | 775 | ARG_PTR_TO_RINGBUF_MEM, /* pointer to dynamically reserved ringbuf memory */ |
457f4436 | 776 | ARG_CONST_ALLOC_SIZE_OR_ZERO, /* number of allocated bytes requested */ |
1df8f55a | 777 | ARG_PTR_TO_BTF_ID_SOCK_COMMON, /* pointer to in-kernel sock_common or bpf-mirrored bpf_sock */ |
eaa6bcb7 | 778 | ARG_PTR_TO_PERCPU_BTF_ID, /* pointer to in-kernel percpu type */ |
69c087ba | 779 | ARG_PTR_TO_FUNC, /* pointer to a bpf program function */ |
48946bd6 | 780 | ARG_PTR_TO_STACK, /* pointer to stack */ |
fff13c4b | 781 | ARG_PTR_TO_CONST_STR, /* pointer to a null terminated read-only string */ |
b00628b1 | 782 | ARG_PTR_TO_TIMER, /* pointer to bpf_timer */ |
d59232af | 783 | ARG_KPTR_XCHG_DEST, /* pointer to destination that kptrs are bpf_kptr_xchg'd into */ |
97e03f52 | 784 | ARG_PTR_TO_DYNPTR, /* pointer to bpf_dynptr. See bpf_type_flag for dynptr type */ |
f79e7ea5 | 785 | __BPF_ARG_TYPE_MAX, |
d639b9d1 | 786 | |
48946bd6 HL |
787 | /* Extended arg_types. */ |
788 | ARG_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MAP_VALUE, | |
789 | ARG_PTR_TO_MEM_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_MEM, | |
790 | ARG_PTR_TO_CTX_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_CTX, | |
791 | ARG_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_SOCKET, | |
48946bd6 | 792 | ARG_PTR_TO_STACK_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_STACK, |
c0a5a21c | 793 | ARG_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | ARG_PTR_TO_BTF_ID, |
6fad274f DB |
794 | /* Pointer to memory does not need to be initialized, since helper function |
795 | * fills all bytes or clears them in error case. | |
16d1e00c | 796 | */ |
6fad274f | 797 | ARG_PTR_TO_UNINIT_MEM = MEM_UNINIT | MEM_WRITE | ARG_PTR_TO_MEM, |
508362ac MM |
798 | /* Pointer to valid memory of size known at compile time. */ |
799 | ARG_PTR_TO_FIXED_SIZE_MEM = MEM_FIXED_SIZE | ARG_PTR_TO_MEM, | |
48946bd6 | 800 | |
d639b9d1 HL |
801 | /* This must be the last entry. Its purpose is to ensure the enum is |
802 | * wide enough to hold the higher bits reserved for bpf_type_flag. | |
803 | */ | |
804 | __BPF_ARG_TYPE_LIMIT = BPF_TYPE_LIMIT, | |
17a52670 | 805 | }; |
d639b9d1 | 806 | static_assert(__BPF_ARG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); |
17a52670 AS |
807 | |
808 | /* type of values returned from helper functions */ | |
809 | enum bpf_return_type { | |
810 | RET_INTEGER, /* function returns integer */ | |
811 | RET_VOID, /* function doesn't return anything */ | |
3e6a4b3e | 812 | RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */ |
3c480732 HL |
813 | RET_PTR_TO_SOCKET, /* returns a pointer to a socket */ |
814 | RET_PTR_TO_TCP_SOCK, /* returns a pointer to a tcp_sock */ | |
815 | RET_PTR_TO_SOCK_COMMON, /* returns a pointer to a sock_common */ | |
2de2669b | 816 | RET_PTR_TO_MEM, /* returns a pointer to memory */ |
63d9b80d | 817 | RET_PTR_TO_MEM_OR_BTF_ID, /* returns a pointer to a valid memory or a btf_id */ |
3ca1032a | 818 | RET_PTR_TO_BTF_ID, /* returns a pointer to a btf_id */ |
d639b9d1 HL |
819 | __BPF_RET_TYPE_MAX, |
820 | ||
3c480732 HL |
821 | /* Extended ret_types. */ |
822 | RET_PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MAP_VALUE, | |
823 | RET_PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCKET, | |
824 | RET_PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_TCP_SOCK, | |
825 | RET_PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_SOCK_COMMON, | |
894f2a8b | 826 | RET_PTR_TO_RINGBUF_MEM_OR_NULL = PTR_MAYBE_NULL | MEM_RINGBUF | RET_PTR_TO_MEM, |
2de2669b | 827 | RET_PTR_TO_DYNPTR_MEM_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_MEM, |
3c480732 | 828 | RET_PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | RET_PTR_TO_BTF_ID, |
3f00c523 | 829 | RET_PTR_TO_BTF_ID_TRUSTED = PTR_TRUSTED | RET_PTR_TO_BTF_ID, |
3c480732 | 830 | |
d639b9d1 HL |
831 | /* This must be the last entry. Its purpose is to ensure the enum is |
832 | * wide enough to hold the higher bits reserved for bpf_type_flag. | |
833 | */ | |
834 | __BPF_RET_TYPE_LIMIT = BPF_TYPE_LIMIT, | |
17a52670 | 835 | }; |
d639b9d1 | 836 | static_assert(__BPF_RET_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); |
17a52670 | 837 | |
09756af4 AS |
838 | /* eBPF function prototype used by verifier to allow BPF_CALLs from eBPF programs |
839 | * to in-kernel helper functions and for adjusting imm32 field in BPF_CALL | |
840 | * instructions after verifying | |
841 | */ | |
842 | struct bpf_func_proto { | |
843 | u64 (*func)(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | |
844 | bool gpl_only; | |
36bbef52 | 845 | bool pkt_access; |
01685c5b | 846 | bool might_sleep; |
ae010757 EZ |
847 | /* set to true if helper follows contract for llvm |
848 | * attribute bpf_fastcall: | |
5b5f51bf EZ |
849 | * - void functions do not scratch r0 |
850 | * - functions taking N arguments scratch only registers r1-rN | |
851 | */ | |
ae010757 | 852 | bool allow_fastcall; |
17a52670 | 853 | enum bpf_return_type ret_type; |
a7658e1a AS |
854 | union { |
855 | struct { | |
856 | enum bpf_arg_type arg1_type; | |
857 | enum bpf_arg_type arg2_type; | |
858 | enum bpf_arg_type arg3_type; | |
859 | enum bpf_arg_type arg4_type; | |
860 | enum bpf_arg_type arg5_type; | |
861 | }; | |
862 | enum bpf_arg_type arg_type[5]; | |
863 | }; | |
9436ef6e LB |
864 | union { |
865 | struct { | |
866 | u32 *arg1_btf_id; | |
867 | u32 *arg2_btf_id; | |
868 | u32 *arg3_btf_id; | |
869 | u32 *arg4_btf_id; | |
870 | u32 *arg5_btf_id; | |
871 | }; | |
872 | u32 *arg_btf_id[5]; | |
508362ac MM |
873 | struct { |
874 | size_t arg1_size; | |
875 | size_t arg2_size; | |
876 | size_t arg3_size; | |
877 | size_t arg4_size; | |
878 | size_t arg5_size; | |
879 | }; | |
880 | size_t arg_size[5]; | |
9436ef6e | 881 | }; |
af7ec138 | 882 | int *ret_btf_id; /* return value btf_id */ |
eae2e83e | 883 | bool (*allowed)(const struct bpf_prog *prog); |
17a52670 AS |
884 | }; |
885 | ||
886 | /* bpf_context is intentionally undefined structure. Pointer to bpf_context is | |
887 | * the first argument to eBPF programs. | |
888 | * For socket filters: 'struct bpf_context *' == 'struct sk_buff *' | |
889 | */ | |
890 | struct bpf_context; | |
891 | ||
892 | enum bpf_access_type { | |
893 | BPF_READ = 1, | |
894 | BPF_WRITE = 2 | |
09756af4 AS |
895 | }; |
896 | ||
19de99f7 | 897 | /* types of values stored in eBPF registers */ |
f1174f77 EC |
898 | /* Pointer types represent: |
899 | * pointer | |
900 | * pointer + imm | |
901 | * pointer + (u16) var | |
902 | * pointer + (u16) var + imm | |
903 | * if (range > 0) then [ptr, ptr + range - off) is safe to access | |
904 | * if (id > 0) means that some 'var' was added | |
905 | * if (off > 0) means that 'imm' was added | |
906 | */ | |
19de99f7 AS |
907 | enum bpf_reg_type { |
908 | NOT_INIT = 0, /* nothing was written into register */ | |
f1174f77 | 909 | SCALAR_VALUE, /* reg doesn't contain a valid pointer */ |
19de99f7 AS |
910 | PTR_TO_CTX, /* reg points to bpf_context */ |
911 | CONST_PTR_TO_MAP, /* reg points to struct bpf_map */ | |
912 | PTR_TO_MAP_VALUE, /* reg points to map element value */ | |
c25b2ae1 | 913 | PTR_TO_MAP_KEY, /* reg points to a map element key */ |
f1174f77 | 914 | PTR_TO_STACK, /* reg == frame_pointer + offset */ |
de8f3a83 | 915 | PTR_TO_PACKET_META, /* skb->data - meta_len */ |
f1174f77 | 916 | PTR_TO_PACKET, /* reg points to skb->data */ |
19de99f7 | 917 | PTR_TO_PACKET_END, /* skb->data + headlen */ |
d58e468b | 918 | PTR_TO_FLOW_KEYS, /* reg points to bpf_flow_keys */ |
c64b7983 | 919 | PTR_TO_SOCKET, /* reg points to struct bpf_sock */ |
46f8bc92 | 920 | PTR_TO_SOCK_COMMON, /* reg points to sock_common */ |
655a51e5 | 921 | PTR_TO_TCP_SOCK, /* reg points to struct tcp_sock */ |
9df1c28b | 922 | PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */ |
fada7fdc | 923 | PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */ |
ba5f4cfe JF |
924 | /* PTR_TO_BTF_ID points to a kernel struct that does not need |
925 | * to be null checked by the BPF program. This does not imply the | |
926 | * pointer is _not_ null and in practice this can easily be a null | |
927 | * pointer when reading pointer chains. The assumption is program | |
928 | * context will handle null pointer dereference typically via fault | |
929 | * handling. The verifier must keep this in mind and can make no | |
930 | * assumptions about null or non-null when doing branch analysis. | |
931 | * Further, when passed into helpers the helpers can not, without | |
932 | * additional context, assume the value is non-null. | |
933 | */ | |
934 | PTR_TO_BTF_ID, | |
457f4436 | 935 | PTR_TO_MEM, /* reg points to valid memory region */ |
6082b6c3 | 936 | PTR_TO_ARENA, |
20b2aff4 | 937 | PTR_TO_BUF, /* reg points to a read/write buffer */ |
69c087ba | 938 | PTR_TO_FUNC, /* reg points to a bpf program function */ |
27060531 | 939 | CONST_PTR_TO_DYNPTR, /* reg points to a const struct bpf_dynptr */ |
e6ac2450 | 940 | __BPF_REG_TYPE_MAX, |
d639b9d1 | 941 | |
c25b2ae1 HL |
942 | /* Extended reg_types. */ |
943 | PTR_TO_MAP_VALUE_OR_NULL = PTR_MAYBE_NULL | PTR_TO_MAP_VALUE, | |
944 | PTR_TO_SOCKET_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCKET, | |
945 | PTR_TO_SOCK_COMMON_OR_NULL = PTR_MAYBE_NULL | PTR_TO_SOCK_COMMON, | |
946 | PTR_TO_TCP_SOCK_OR_NULL = PTR_MAYBE_NULL | PTR_TO_TCP_SOCK, | |
213a6952 MD |
947 | /* PTR_TO_BTF_ID_OR_NULL points to a kernel struct that has not |
948 | * been checked for null. Used primarily to inform the verifier | |
949 | * an explicit null check is required for this struct. | |
950 | */ | |
c25b2ae1 | 951 | PTR_TO_BTF_ID_OR_NULL = PTR_MAYBE_NULL | PTR_TO_BTF_ID, |
c25b2ae1 | 952 | |
d639b9d1 HL |
953 | /* This must be the last entry. Its purpose is to ensure the enum is |
954 | * wide enough to hold the higher bits reserved for bpf_type_flag. | |
955 | */ | |
956 | __BPF_REG_TYPE_LIMIT = BPF_TYPE_LIMIT, | |
19de99f7 | 957 | }; |
d639b9d1 | 958 | static_assert(__BPF_REG_TYPE_MAX <= BPF_BASE_TYPE_LIMIT); |
19de99f7 | 959 | |
23994631 YS |
960 | /* The information passed from prog-specific *_is_valid_access |
961 | * back to the verifier. | |
962 | */ | |
963 | struct bpf_insn_access_aux { | |
964 | enum bpf_reg_type reg_type; | |
92de3608 | 965 | bool is_ldsx; |
9e15db66 AS |
966 | union { |
967 | int ctx_field_size; | |
22dc4a0f AN |
968 | struct { |
969 | struct btf *btf; | |
970 | u32 btf_id; | |
971 | }; | |
9e15db66 AS |
972 | }; |
973 | struct bpf_verifier_log *log; /* for verbose logs */ | |
5d99e198 | 974 | bool is_retval; /* is accessing function return value ? */ |
23994631 YS |
975 | }; |
976 | ||
f96da094 DB |
977 | static inline void |
978 | bpf_ctx_record_field_size(struct bpf_insn_access_aux *aux, u32 size) | |
979 | { | |
980 | aux->ctx_field_size = size; | |
981 | } | |
982 | ||
3feb263b AN |
983 | static bool bpf_is_ldimm64(const struct bpf_insn *insn) |
984 | { | |
985 | return insn->code == (BPF_LD | BPF_IMM | BPF_DW); | |
986 | } | |
987 | ||
3990ed4c MKL |
988 | static inline bool bpf_pseudo_func(const struct bpf_insn *insn) |
989 | { | |
3feb263b | 990 | return bpf_is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC; |
3990ed4c MKL |
991 | } |
992 | ||
7de16e3a JK |
993 | struct bpf_prog_ops { |
994 | int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr, | |
995 | union bpf_attr __user *uattr); | |
996 | }; | |
997 | ||
6728aea7 | 998 | struct bpf_reg_state; |
09756af4 AS |
999 | struct bpf_verifier_ops { |
1000 | /* return eBPF function prototype for verification */ | |
5e43f899 AI |
1001 | const struct bpf_func_proto * |
1002 | (*get_func_proto)(enum bpf_func_id func_id, | |
1003 | const struct bpf_prog *prog); | |
17a52670 AS |
1004 | |
1005 | /* return true if 'size' wide access at offset 'off' within bpf_context | |
1006 | * with 'type' (read or write) is allowed | |
1007 | */ | |
19de99f7 | 1008 | bool (*is_valid_access)(int off, int size, enum bpf_access_type type, |
5e43f899 | 1009 | const struct bpf_prog *prog, |
23994631 | 1010 | struct bpf_insn_access_aux *info); |
36bbef52 DB |
1011 | int (*gen_prologue)(struct bpf_insn *insn, bool direct_write, |
1012 | const struct bpf_prog *prog); | |
169c3176 MKL |
1013 | int (*gen_epilogue)(struct bpf_insn *insn, const struct bpf_prog *prog, |
1014 | s16 ctx_stack_off); | |
e0cea7ce DB |
1015 | int (*gen_ld_abs)(const struct bpf_insn *orig, |
1016 | struct bpf_insn *insn_buf); | |
6b8cc1d1 DB |
1017 | u32 (*convert_ctx_access)(enum bpf_access_type type, |
1018 | const struct bpf_insn *src, | |
1019 | struct bpf_insn *dst, | |
f96da094 | 1020 | struct bpf_prog *prog, u32 *target_size); |
27ae7997 | 1021 | int (*btf_struct_access)(struct bpf_verifier_log *log, |
6728aea7 | 1022 | const struct bpf_reg_state *reg, |
b7e852a9 | 1023 | int off, int size); |
09756af4 AS |
1024 | }; |
1025 | ||
cae1927c | 1026 | struct bpf_prog_offload_ops { |
08ca90af | 1027 | /* verifier basic callbacks */ |
cae1927c JK |
1028 | int (*insn_hook)(struct bpf_verifier_env *env, |
1029 | int insn_idx, int prev_insn_idx); | |
c941ce9c | 1030 | int (*finalize)(struct bpf_verifier_env *env); |
08ca90af JK |
1031 | /* verifier optimization callbacks (called after .finalize) */ |
1032 | int (*replace_insn)(struct bpf_verifier_env *env, u32 off, | |
1033 | struct bpf_insn *insn); | |
1034 | int (*remove_insns)(struct bpf_verifier_env *env, u32 off, u32 cnt); | |
1035 | /* program management callbacks */ | |
16a8cb5c QM |
1036 | int (*prepare)(struct bpf_prog *prog); |
1037 | int (*translate)(struct bpf_prog *prog); | |
eb911947 | 1038 | void (*destroy)(struct bpf_prog *prog); |
cae1927c JK |
1039 | }; |
1040 | ||
0a9c1991 | 1041 | struct bpf_prog_offload { |
ab3f0063 JK |
1042 | struct bpf_prog *prog; |
1043 | struct net_device *netdev; | |
341b3e7b | 1044 | struct bpf_offload_dev *offdev; |
ab3f0063 JK |
1045 | void *dev_priv; |
1046 | struct list_head offloads; | |
1047 | bool dev_state; | |
08ca90af | 1048 | bool opt_failed; |
fcfb126d JW |
1049 | void *jited_image; |
1050 | u32 jited_len; | |
ab3f0063 JK |
1051 | }; |
1052 | ||
8bad74f9 RG |
1053 | enum bpf_cgroup_storage_type { |
1054 | BPF_CGROUP_STORAGE_SHARED, | |
b741f163 | 1055 | BPF_CGROUP_STORAGE_PERCPU, |
8bad74f9 RG |
1056 | __BPF_CGROUP_STORAGE_MAX |
1057 | }; | |
1058 | ||
1059 | #define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX | |
1060 | ||
f1b9509c AS |
1061 | /* The longest tracepoint has 12 args. |
1062 | * See include/trace/bpf_probe.h | |
1063 | */ | |
1064 | #define MAX_BPF_FUNC_ARGS 12 | |
1065 | ||
523a4cf4 DB |
1066 | /* The maximum number of arguments passed through registers |
1067 | * a single function may have. | |
1068 | */ | |
1069 | #define MAX_BPF_FUNC_REG_ARGS 5 | |
1070 | ||
720e6a43 YS |
1071 | /* The argument is a structure. */ |
1072 | #define BTF_FMODEL_STRUCT_ARG BIT(0) | |
1073 | ||
49f67f39 IL |
1074 | /* The argument is signed. */ |
1075 | #define BTF_FMODEL_SIGNED_ARG BIT(1) | |
1076 | ||
fec56f58 AS |
1077 | struct btf_func_model { |
1078 | u8 ret_size; | |
49f67f39 | 1079 | u8 ret_flags; |
fec56f58 AS |
1080 | u8 nr_args; |
1081 | u8 arg_size[MAX_BPF_FUNC_ARGS]; | |
720e6a43 | 1082 | u8 arg_flags[MAX_BPF_FUNC_ARGS]; |
fec56f58 AS |
1083 | }; |
1084 | ||
1085 | /* Restore arguments before returning from trampoline to let original function | |
1086 | * continue executing. This flag is used for fentry progs when there are no | |
1087 | * fexit progs. | |
1088 | */ | |
1089 | #define BPF_TRAMP_F_RESTORE_REGS BIT(0) | |
1090 | /* Call original function after fentry progs, but before fexit progs. | |
1091 | * Makes sense for fentry/fexit, normal calls and indirect calls. | |
1092 | */ | |
1093 | #define BPF_TRAMP_F_CALL_ORIG BIT(1) | |
1094 | /* Skip current frame and return to parent. Makes sense for fentry/fexit | |
1095 | * programs only. Should not be used with normal calls and indirect calls. | |
1096 | */ | |
1097 | #define BPF_TRAMP_F_SKIP_FRAME BIT(2) | |
7e6f3cd8 JO |
1098 | /* Store IP address of the caller on the trampoline stack, |
1099 | * so it's available for trampoline's programs. | |
1100 | */ | |
1101 | #define BPF_TRAMP_F_IP_ARG BIT(3) | |
356ed649 HT |
1102 | /* Return the return value of fentry prog. Only used by bpf_struct_ops. */ |
1103 | #define BPF_TRAMP_F_RET_FENTRY_RET BIT(4) | |
7e6f3cd8 | 1104 | |
316cba62 JO |
1105 | /* Get original function from stack instead of from provided direct address. |
1106 | * Makes sense for trampolines with fexit or fmod_ret programs. | |
1107 | */ | |
1108 | #define BPF_TRAMP_F_ORIG_STACK BIT(5) | |
1109 | ||
00963a2e SL |
1110 | /* This trampoline is on a function with another ftrace_ops with IPMODIFY, |
1111 | * e.g., a live patch. This flag is set and cleared by ftrace call backs, | |
1112 | */ | |
1113 | #define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6) | |
1114 | ||
2b5dcb31 LH |
1115 | /* Indicate that current trampoline is in a tail call context. Then, it has to |
1116 | * cache and restore tail_call_cnt to avoid infinite tail call loop. | |
1117 | */ | |
1118 | #define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7) | |
1119 | ||
2cd3e377 PZ |
1120 | /* |
1121 | * Indicate the trampoline should be suitable to receive indirect calls; | |
1122 | * without this indirectly calling the generated code can result in #UD/#CP, | |
1123 | * depending on the CFI options. | |
1124 | * | |
1125 | * Used by bpf_struct_ops. | |
1126 | * | |
1127 | * Incompatible with FENTRY usage, overloads @func_addr argument. | |
1128 | */ | |
1129 | #define BPF_TRAMP_F_INDIRECT BIT(8) | |
1130 | ||
88fd9e53 | 1131 | /* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50 |
b23316aa | 1132 | * bytes on x86. |
88fd9e53 | 1133 | */ |
390a07a9 | 1134 | enum { |
528eb2cb IL |
1135 | #if defined(__s390x__) |
1136 | BPF_MAX_TRAMP_LINKS = 27, | |
1137 | #else | |
390a07a9 | 1138 | BPF_MAX_TRAMP_LINKS = 38, |
528eb2cb | 1139 | #endif |
390a07a9 | 1140 | }; |
88fd9e53 | 1141 | |
f7e0beaf KFL |
1142 | struct bpf_tramp_links { |
1143 | struct bpf_tramp_link *links[BPF_MAX_TRAMP_LINKS]; | |
1144 | int nr_links; | |
88fd9e53 KS |
1145 | }; |
1146 | ||
e384c7b7 KFL |
1147 | struct bpf_tramp_run_ctx; |
1148 | ||
fec56f58 AS |
1149 | /* Different use cases for BPF trampoline: |
1150 | * 1. replace nop at the function entry (kprobe equivalent) | |
1151 | * flags = BPF_TRAMP_F_RESTORE_REGS | |
1152 | * fentry = a set of programs to run before returning from trampoline | |
1153 | * | |
1154 | * 2. replace nop at the function entry (kprobe + kretprobe equivalent) | |
1155 | * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME | |
1156 | * orig_call = fentry_ip + MCOUNT_INSN_SIZE | |
1157 | * fentry = a set of program to run before calling original function | |
1158 | * fexit = a set of program to run after original function | |
1159 | * | |
1160 | * 3. replace direct call instruction anywhere in the function body | |
1161 | * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid) | |
1162 | * With flags = 0 | |
1163 | * fentry = a set of programs to run before returning from trampoline | |
1164 | * With flags = BPF_TRAMP_F_CALL_ORIG | |
1165 | * orig_call = original callback addr or direct function addr | |
1166 | * fentry = a set of program to run before calling original function | |
1167 | * fexit = a set of program to run after original function | |
1168 | */ | |
e21aa341 | 1169 | struct bpf_tramp_image; |
7a3d9a15 | 1170 | int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end, |
85d33df3 | 1171 | const struct btf_func_model *m, u32 flags, |
f7e0beaf | 1172 | struct bpf_tramp_links *tlinks, |
7a3d9a15 | 1173 | void *func_addr); |
82583daa SL |
1174 | void *arch_alloc_bpf_trampoline(unsigned int size); |
1175 | void arch_free_bpf_trampoline(void *image, unsigned int size); | |
c733239f | 1176 | int __must_check arch_protect_bpf_trampoline(void *image, unsigned int size); |
96d1b7c0 SL |
1177 | int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags, |
1178 | struct bpf_tramp_links *tlinks, void *func_addr); | |
82583daa | 1179 | |
271de525 MKL |
1180 | u64 notrace __bpf_prog_enter_sleepable_recur(struct bpf_prog *prog, |
1181 | struct bpf_tramp_run_ctx *run_ctx); | |
1182 | void notrace __bpf_prog_exit_sleepable_recur(struct bpf_prog *prog, u64 start, | |
1183 | struct bpf_tramp_run_ctx *run_ctx); | |
e21aa341 AS |
1184 | void notrace __bpf_tramp_enter(struct bpf_tramp_image *tr); |
1185 | void notrace __bpf_tramp_exit(struct bpf_tramp_image *tr); | |
271de525 MKL |
1186 | typedef u64 (*bpf_trampoline_enter_t)(struct bpf_prog *prog, |
1187 | struct bpf_tramp_run_ctx *run_ctx); | |
1188 | typedef void (*bpf_trampoline_exit_t)(struct bpf_prog *prog, u64 start, | |
1189 | struct bpf_tramp_run_ctx *run_ctx); | |
1190 | bpf_trampoline_enter_t bpf_trampoline_enter(const struct bpf_prog *prog); | |
1191 | bpf_trampoline_exit_t bpf_trampoline_exit(const struct bpf_prog *prog); | |
fec56f58 | 1192 | |
535911c8 JO |
1193 | struct bpf_ksym { |
1194 | unsigned long start; | |
1195 | unsigned long end; | |
bfea9a85 | 1196 | char name[KSYM_NAME_LEN]; |
ecb60d1c | 1197 | struct list_head lnode; |
ca4424c9 | 1198 | struct latch_tree_node tnode; |
cbd76f8d | 1199 | bool prog; |
535911c8 JO |
1200 | }; |
1201 | ||
fec56f58 AS |
1202 | enum bpf_tramp_prog_type { |
1203 | BPF_TRAMP_FENTRY, | |
1204 | BPF_TRAMP_FEXIT, | |
ae240823 | 1205 | BPF_TRAMP_MODIFY_RETURN, |
be8704ff AS |
1206 | BPF_TRAMP_MAX, |
1207 | BPF_TRAMP_REPLACE, /* more than MAX */ | |
fec56f58 AS |
1208 | }; |
1209 | ||
e21aa341 AS |
1210 | struct bpf_tramp_image { |
1211 | void *image; | |
26ef208c | 1212 | int size; |
e21aa341 AS |
1213 | struct bpf_ksym ksym; |
1214 | struct percpu_ref pcref; | |
1215 | void *ip_after_call; | |
1216 | void *ip_epilogue; | |
1217 | union { | |
1218 | struct rcu_head rcu; | |
1219 | struct work_struct work; | |
1220 | }; | |
1221 | }; | |
1222 | ||
fec56f58 AS |
1223 | struct bpf_trampoline { |
1224 | /* hlist for trampoline_table */ | |
1225 | struct hlist_node hlist; | |
00963a2e | 1226 | struct ftrace_ops *fops; |
fec56f58 AS |
1227 | /* serializes access to fields of this trampoline */ |
1228 | struct mutex mutex; | |
1229 | refcount_t refcnt; | |
00963a2e | 1230 | u32 flags; |
fec56f58 AS |
1231 | u64 key; |
1232 | struct { | |
1233 | struct btf_func_model model; | |
1234 | void *addr; | |
b91e014f | 1235 | bool ftrace_managed; |
fec56f58 | 1236 | } func; |
be8704ff AS |
1237 | /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF |
1238 | * program by replacing one of its functions. func.addr is the address | |
1239 | * of the function it replaced. | |
1240 | */ | |
1241 | struct bpf_prog *extension_prog; | |
fec56f58 AS |
1242 | /* list of BPF programs using this trampoline */ |
1243 | struct hlist_head progs_hlist[BPF_TRAMP_MAX]; | |
1244 | /* Number of attached programs. A counter per kind. */ | |
1245 | int progs_cnt[BPF_TRAMP_MAX]; | |
1246 | /* Executable image of trampoline */ | |
e21aa341 | 1247 | struct bpf_tramp_image *cur_image; |
fec56f58 | 1248 | }; |
75ccbef6 | 1249 | |
f7b12b6f THJ |
1250 | struct bpf_attach_target_info { |
1251 | struct btf_func_model fmodel; | |
1252 | long tgt_addr; | |
31bf1dbc | 1253 | struct module *tgt_mod; |
f7b12b6f THJ |
1254 | const char *tgt_name; |
1255 | const struct btf_type *tgt_type; | |
1256 | }; | |
1257 | ||
116eb788 | 1258 | #define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */ |
75ccbef6 BT |
1259 | |
1260 | struct bpf_dispatcher_prog { | |
1261 | struct bpf_prog *prog; | |
1262 | refcount_t users; | |
1263 | }; | |
1264 | ||
1265 | struct bpf_dispatcher { | |
1266 | /* dispatcher mutex */ | |
1267 | struct mutex mutex; | |
1268 | void *func; | |
1269 | struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX]; | |
1270 | int num_progs; | |
1271 | void *image; | |
19c02415 | 1272 | void *rw_image; |
75ccbef6 | 1273 | u32 image_off; |
517b75e4 | 1274 | struct bpf_ksym ksym; |
c86df29d PZ |
1275 | #ifdef CONFIG_HAVE_STATIC_CALL |
1276 | struct static_call_key *sc_key; | |
1277 | void *sc_tramp; | |
1278 | #endif | |
75ccbef6 BT |
1279 | }; |
1280 | ||
4f9087f1 PZ |
1281 | #ifndef __bpfcall |
1282 | #define __bpfcall __nocfi | |
1283 | #endif | |
1284 | ||
1285 | static __always_inline __bpfcall unsigned int bpf_dispatcher_nop_func( | |
7e6897f9 BT |
1286 | const void *ctx, |
1287 | const struct bpf_insn *insnsi, | |
af3f4134 | 1288 | bpf_func_t bpf_func) |
7e6897f9 BT |
1289 | { |
1290 | return bpf_func(ctx, insnsi); | |
1291 | } | |
f7e0beaf | 1292 | |
8357b366 JK |
1293 | /* the implementation of the opaque uapi struct bpf_dynptr */ |
1294 | struct bpf_dynptr_kern { | |
1295 | void *data; | |
1296 | /* Size represents the number of usable bytes of dynptr data. | |
1297 | * If for example the offset is at 4 for a local dynptr whose data is | |
1298 | * of type u64, the number of usable bytes is 4. | |
1299 | * | |
1300 | * The upper 8 bits are reserved. It is as follows: | |
1301 | * Bits 0 - 23 = size | |
1302 | * Bits 24 - 30 = dynptr type | |
1303 | * Bit 31 = whether dynptr is read-only | |
1304 | */ | |
1305 | u32 size; | |
1306 | u32 offset; | |
1307 | } __aligned(8); | |
1308 | ||
1309 | enum bpf_dynptr_type { | |
1310 | BPF_DYNPTR_TYPE_INVALID, | |
1311 | /* Points to memory that is local to the bpf program */ | |
1312 | BPF_DYNPTR_TYPE_LOCAL, | |
1313 | /* Underlying data is a ringbuf record */ | |
1314 | BPF_DYNPTR_TYPE_RINGBUF, | |
b5964b96 JK |
1315 | /* Underlying data is a sk_buff */ |
1316 | BPF_DYNPTR_TYPE_SKB, | |
05421aec JK |
1317 | /* Underlying data is a xdp_buff */ |
1318 | BPF_DYNPTR_TYPE_XDP, | |
8357b366 JK |
1319 | }; |
1320 | ||
1321 | int bpf_dynptr_check_size(u32 size); | |
26662d73 | 1322 | u32 __bpf_dynptr_size(const struct bpf_dynptr_kern *ptr); |
74523c06 SL |
1323 | const void *__bpf_dynptr_data(const struct bpf_dynptr_kern *ptr, u32 len); |
1324 | void *__bpf_dynptr_data_rw(const struct bpf_dynptr_kern *ptr, u32 len); | |
3e1c6f35 | 1325 | bool __bpf_dynptr_is_rdonly(const struct bpf_dynptr_kern *ptr); |
8357b366 | 1326 | |
fec56f58 | 1327 | #ifdef CONFIG_BPF_JIT |
d6083f04 LH |
1328 | int bpf_trampoline_link_prog(struct bpf_tramp_link *link, |
1329 | struct bpf_trampoline *tr, | |
1330 | struct bpf_prog *tgt_prog); | |
1331 | int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, | |
1332 | struct bpf_trampoline *tr, | |
1333 | struct bpf_prog *tgt_prog); | |
f7b12b6f THJ |
1334 | struct bpf_trampoline *bpf_trampoline_get(u64 key, |
1335 | struct bpf_attach_target_info *tgt_info); | |
fec56f58 | 1336 | void bpf_trampoline_put(struct bpf_trampoline *tr); |
19c02415 | 1337 | int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs); |
c86df29d PZ |
1338 | |
1339 | /* | |
1340 | * When the architecture supports STATIC_CALL replace the bpf_dispatcher_fn | |
1341 | * indirection with a direct call to the bpf program. If the architecture does | |
1342 | * not have STATIC_CALL, avoid a double-indirection. | |
1343 | */ | |
1344 | #ifdef CONFIG_HAVE_STATIC_CALL | |
1345 | ||
1346 | #define __BPF_DISPATCHER_SC_INIT(_name) \ | |
1347 | .sc_key = &STATIC_CALL_KEY(_name), \ | |
1348 | .sc_tramp = STATIC_CALL_TRAMP_ADDR(_name), | |
1349 | ||
1350 | #define __BPF_DISPATCHER_SC(name) \ | |
1351 | DEFINE_STATIC_CALL(bpf_dispatcher_##name##_call, bpf_dispatcher_nop_func) | |
1352 | ||
1353 | #define __BPF_DISPATCHER_CALL(name) \ | |
1354 | static_call(bpf_dispatcher_##name##_call)(ctx, insnsi, bpf_func) | |
1355 | ||
1356 | #define __BPF_DISPATCHER_UPDATE(_d, _new) \ | |
1357 | __static_call_update((_d)->sc_key, (_d)->sc_tramp, (_new)) | |
1358 | ||
1359 | #else | |
1360 | #define __BPF_DISPATCHER_SC_INIT(name) | |
1361 | #define __BPF_DISPATCHER_SC(name) | |
1362 | #define __BPF_DISPATCHER_CALL(name) bpf_func(ctx, insnsi) | |
1363 | #define __BPF_DISPATCHER_UPDATE(_d, _new) | |
1364 | #endif | |
dbe69b29 | 1365 | |
517b75e4 JO |
1366 | #define BPF_DISPATCHER_INIT(_name) { \ |
1367 | .mutex = __MUTEX_INITIALIZER(_name.mutex), \ | |
1368 | .func = &_name##_func, \ | |
1369 | .progs = {}, \ | |
1370 | .num_progs = 0, \ | |
1371 | .image = NULL, \ | |
1372 | .image_off = 0, \ | |
1373 | .ksym = { \ | |
1374 | .name = #_name, \ | |
1375 | .lnode = LIST_HEAD_INIT(_name.ksym.lnode), \ | |
1376 | }, \ | |
c86df29d | 1377 | __BPF_DISPATCHER_SC_INIT(_name##_call) \ |
75ccbef6 BT |
1378 | } |
1379 | ||
1380 | #define DEFINE_BPF_DISPATCHER(name) \ | |
c86df29d | 1381 | __BPF_DISPATCHER_SC(name); \ |
4f9087f1 | 1382 | noinline __bpfcall unsigned int bpf_dispatcher_##name##_func( \ |
75ccbef6 BT |
1383 | const void *ctx, \ |
1384 | const struct bpf_insn *insnsi, \ | |
af3f4134 | 1385 | bpf_func_t bpf_func) \ |
75ccbef6 | 1386 | { \ |
c86df29d | 1387 | return __BPF_DISPATCHER_CALL(name); \ |
75ccbef6 | 1388 | } \ |
6a64037d BT |
1389 | EXPORT_SYMBOL(bpf_dispatcher_##name##_func); \ |
1390 | struct bpf_dispatcher bpf_dispatcher_##name = \ | |
18acb7fa | 1391 | BPF_DISPATCHER_INIT(bpf_dispatcher_##name); |
dbe69b29 | 1392 | |
75ccbef6 | 1393 | #define DECLARE_BPF_DISPATCHER(name) \ |
6a64037d | 1394 | unsigned int bpf_dispatcher_##name##_func( \ |
75ccbef6 BT |
1395 | const void *ctx, \ |
1396 | const struct bpf_insn *insnsi, \ | |
af3f4134 | 1397 | bpf_func_t bpf_func); \ |
6a64037d | 1398 | extern struct bpf_dispatcher bpf_dispatcher_##name; |
c86df29d | 1399 | |
6a64037d BT |
1400 | #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_##name##_func |
1401 | #define BPF_DISPATCHER_PTR(name) (&bpf_dispatcher_##name) | |
75ccbef6 BT |
1402 | void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from, |
1403 | struct bpf_prog *to); | |
dba122fb | 1404 | /* Called only from JIT-enabled code, so there's no need for stubs. */ |
7c8ce4ff XK |
1405 | void bpf_image_ksym_init(void *data, unsigned int size, struct bpf_ksym *ksym); |
1406 | void bpf_image_ksym_add(struct bpf_ksym *ksym); | |
a108f7dc | 1407 | void bpf_image_ksym_del(struct bpf_ksym *ksym); |
dba122fb JO |
1408 | void bpf_ksym_add(struct bpf_ksym *ksym); |
1409 | void bpf_ksym_del(struct bpf_ksym *ksym); | |
3486bedd SL |
1410 | int bpf_jit_charge_modmem(u32 size); |
1411 | void bpf_jit_uncharge_modmem(u32 size); | |
f92c1e18 | 1412 | bool bpf_prog_has_trampoline(const struct bpf_prog *prog); |
fec56f58 | 1413 | #else |
f7e0beaf | 1414 | static inline int bpf_trampoline_link_prog(struct bpf_tramp_link *link, |
d6083f04 LH |
1415 | struct bpf_trampoline *tr, |
1416 | struct bpf_prog *tgt_prog) | |
fec56f58 AS |
1417 | { |
1418 | return -ENOTSUPP; | |
1419 | } | |
f7e0beaf | 1420 | static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link, |
d6083f04 LH |
1421 | struct bpf_trampoline *tr, |
1422 | struct bpf_prog *tgt_prog) | |
fec56f58 AS |
1423 | { |
1424 | return -ENOTSUPP; | |
1425 | } | |
f7b12b6f THJ |
1426 | static inline struct bpf_trampoline *bpf_trampoline_get(u64 key, |
1427 | struct bpf_attach_target_info *tgt_info) | |
1428 | { | |
b724a641 | 1429 | return NULL; |
f7b12b6f | 1430 | } |
fec56f58 | 1431 | static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {} |
75ccbef6 BT |
1432 | #define DEFINE_BPF_DISPATCHER(name) |
1433 | #define DECLARE_BPF_DISPATCHER(name) | |
6a64037d | 1434 | #define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nop_func |
75ccbef6 BT |
1435 | #define BPF_DISPATCHER_PTR(name) NULL |
1436 | static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, | |
1437 | struct bpf_prog *from, | |
1438 | struct bpf_prog *to) {} | |
e9b4e606 JO |
1439 | static inline bool is_bpf_image_address(unsigned long address) |
1440 | { | |
1441 | return false; | |
1442 | } | |
f92c1e18 JO |
1443 | static inline bool bpf_prog_has_trampoline(const struct bpf_prog *prog) |
1444 | { | |
1445 | return false; | |
1446 | } | |
fec56f58 AS |
1447 | #endif |
1448 | ||
8c1b6e69 | 1449 | struct bpf_func_info_aux { |
51c39bb1 | 1450 | u16 linkage; |
8c1b6e69 | 1451 | bool unreliable; |
2afae08c AN |
1452 | bool called : 1; |
1453 | bool verified : 1; | |
8c1b6e69 AS |
1454 | }; |
1455 | ||
a66886fe DB |
1456 | enum bpf_jit_poke_reason { |
1457 | BPF_POKE_REASON_TAIL_CALL, | |
1458 | }; | |
1459 | ||
1460 | /* Descriptor of pokes pointing /into/ the JITed image. */ | |
1461 | struct bpf_jit_poke_descriptor { | |
cf71b174 | 1462 | void *tailcall_target; |
ebf7d1f5 MF |
1463 | void *tailcall_bypass; |
1464 | void *bypass_addr; | |
f263a814 | 1465 | void *aux; |
a66886fe DB |
1466 | union { |
1467 | struct { | |
1468 | struct bpf_map *map; | |
1469 | u32 key; | |
1470 | } tail_call; | |
1471 | }; | |
cf71b174 | 1472 | bool tailcall_target_stable; |
a66886fe DB |
1473 | u8 adj_off; |
1474 | u16 reason; | |
a748c697 | 1475 | u32 insn_idx; |
a66886fe DB |
1476 | }; |
1477 | ||
3c32cc1b YS |
1478 | /* reg_type info for ctx arguments */ |
1479 | struct bpf_ctx_arg_aux { | |
1480 | u32 offset; | |
1481 | enum bpf_reg_type reg_type; | |
77c0208e | 1482 | struct btf *btf; |
951cf368 | 1483 | u32 btf_id; |
3c32cc1b YS |
1484 | }; |
1485 | ||
541c3bad AN |
1486 | struct btf_mod_pair { |
1487 | struct btf *btf; | |
1488 | struct module *module; | |
1489 | }; | |
1490 | ||
e6ac2450 MKL |
1491 | struct bpf_kfunc_desc_tab; |
1492 | ||
09756af4 | 1493 | struct bpf_prog_aux { |
85192dbf | 1494 | atomic64_t refcnt; |
24701ece | 1495 | u32 used_map_cnt; |
541c3bad | 1496 | u32 used_btf_cnt; |
32bbe007 | 1497 | u32 max_ctx_offset; |
e647815a | 1498 | u32 max_pkt_offset; |
9df1c28b | 1499 | u32 max_tp_access; |
8726679a | 1500 | u32 stack_depth; |
dc4bb0e2 | 1501 | u32 id; |
ba64e7d8 | 1502 | u32 func_cnt; /* used by non-func prog as the number of func progs */ |
335d1c5b | 1503 | u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */ |
ba64e7d8 | 1504 | u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */ |
ccfe29eb | 1505 | u32 attach_btf_id; /* in-kernel BTF type id to attach to */ |
3c32cc1b | 1506 | u32 ctx_arg_info_size; |
afbf21dc YS |
1507 | u32 max_rdonly_access; |
1508 | u32 max_rdwr_access; | |
22dc4a0f | 1509 | struct btf *attach_btf; |
3c32cc1b | 1510 | const struct bpf_ctx_arg_aux *ctx_arg_info; |
7d1cd70d | 1511 | void __percpu *priv_stack_ptr; |
3aac1ead THJ |
1512 | struct mutex dst_mutex; /* protects dst_* pointers below, *after* prog becomes visible */ |
1513 | struct bpf_prog *dst_prog; | |
1514 | struct bpf_trampoline *dst_trampoline; | |
4a1e7c0c THJ |
1515 | enum bpf_prog_type saved_dst_prog_type; |
1516 | enum bpf_attach_type saved_dst_attach_type; | |
a4b1d3c1 | 1517 | bool verifier_zext; /* Zero extensions has been inserted by verifier. */ |
2b3486bc SF |
1518 | bool dev_bound; /* Program is bound to the netdev. */ |
1519 | bool offload_requested; /* Program is bound and offloaded to the netdev. */ | |
38207291 | 1520 | bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */ |
19bfcdf9 | 1521 | bool attach_tracing_prog; /* true if tracing another tracing program */ |
8c1b6e69 | 1522 | bool func_proto_unreliable; |
ebf7d1f5 | 1523 | bool tail_call_reachable; |
c2f2cdbe | 1524 | bool xdp_has_frags; |
f18b03fa KKD |
1525 | bool exception_cb; |
1526 | bool exception_boundary; | |
d6083f04 | 1527 | bool is_extended; /* true if extended by freplace program */ |
e00931c0 | 1528 | bool jits_use_priv_stack; |
5bd36da1 | 1529 | bool priv_stack_requested; |
81f6d053 | 1530 | bool changes_pkt_data; |
d6083f04 LH |
1531 | u64 prog_array_member_cnt; /* counts how many times as member of prog_array */ |
1532 | struct mutex ext_mutex; /* mutex for is_extended and prog_array_member_cnt */ | |
2fe99eb0 | 1533 | struct bpf_arena *arena; |
5bd36da1 | 1534 | void (*recursion_detected)(struct bpf_prog *prog); /* callback if recursion is detected */ |
38207291 MKL |
1535 | /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */ |
1536 | const struct btf_type *attach_func_proto; | |
1537 | /* function name for valid attach_btf_id */ | |
1538 | const char *attach_func_name; | |
1c2a088a AS |
1539 | struct bpf_prog **func; |
1540 | void *jit_data; /* JIT specific data. arch dependent */ | |
a66886fe | 1541 | struct bpf_jit_poke_descriptor *poke_tab; |
e6ac2450 | 1542 | struct bpf_kfunc_desc_tab *kfunc_tab; |
2357672c | 1543 | struct bpf_kfunc_btf_tab *kfunc_btf_tab; |
a66886fe | 1544 | u32 size_poke_tab; |
4f9087f1 PZ |
1545 | #ifdef CONFIG_FINEIBT |
1546 | struct bpf_ksym ksym_prefix; | |
1547 | #endif | |
535911c8 | 1548 | struct bpf_ksym ksym; |
7de16e3a | 1549 | const struct bpf_prog_ops *ops; |
09756af4 | 1550 | struct bpf_map **used_maps; |
984fe94f | 1551 | struct mutex used_maps_mutex; /* mutex for used_maps and used_map_cnt */ |
541c3bad | 1552 | struct btf_mod_pair *used_btfs; |
09756af4 | 1553 | struct bpf_prog *prog; |
aaac3ba9 | 1554 | struct user_struct *user; |
cb4d2b3f | 1555 | u64 load_time; /* ns since boottime */ |
aba64c7d | 1556 | u32 verified_insns; |
69fd337a | 1557 | int cgroup_atype; /* enum cgroup_bpf_attach_type */ |
8bad74f9 | 1558 | struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; |
067cae47 | 1559 | char name[BPF_OBJ_NAME_LEN]; |
852486b3 | 1560 | u64 (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp, u64, u64); |
afdb09c7 CF |
1561 | #ifdef CONFIG_SECURITY |
1562 | void *security; | |
1563 | #endif | |
caf8f28e | 1564 | struct bpf_token *token; |
0a9c1991 | 1565 | struct bpf_prog_offload *offload; |
838e9690 | 1566 | struct btf *btf; |
ba64e7d8 | 1567 | struct bpf_func_info *func_info; |
8c1b6e69 | 1568 | struct bpf_func_info_aux *func_info_aux; |
c454a46b MKL |
1569 | /* bpf_line_info loaded from userspace. linfo->insn_off |
1570 | * has the xlated insn offset. | |
1571 | * Both the main and sub prog share the same linfo. | |
1572 | * The subprog can access its first linfo by | |
1573 | * using the linfo_idx. | |
1574 | */ | |
1575 | struct bpf_line_info *linfo; | |
1576 | /* jited_linfo is the jited addr of the linfo. It has a | |
1577 | * one to one mapping to linfo: | |
1578 | * jited_linfo[i] is the jited addr for the linfo[i]->insn_off. | |
1579 | * Both the main and sub prog share the same jited_linfo. | |
1580 | * The subprog can access its first jited_linfo by | |
1581 | * using the linfo_idx. | |
1582 | */ | |
1583 | void **jited_linfo; | |
ba64e7d8 | 1584 | u32 func_info_cnt; |
c454a46b MKL |
1585 | u32 nr_linfo; |
1586 | /* subprog can use linfo_idx to access its first linfo and | |
1587 | * jited_linfo. | |
1588 | * main prog always has linfo_idx == 0 | |
1589 | */ | |
1590 | u32 linfo_idx; | |
31bf1dbc | 1591 | struct module *mod; |
3dec541b AS |
1592 | u32 num_exentries; |
1593 | struct exception_table_entry *extable; | |
abf2e7d6 AS |
1594 | union { |
1595 | struct work_struct work; | |
1596 | struct rcu_head rcu; | |
1597 | }; | |
09756af4 AS |
1598 | }; |
1599 | ||
d687f621 DK |
1600 | struct bpf_prog { |
1601 | u16 pages; /* Number of allocated pages */ | |
1602 | u16 jited:1, /* Is our filter JIT'ed? */ | |
1603 | jit_requested:1,/* archs need to JIT the prog */ | |
1604 | gpl_compatible:1, /* Is filter GPL compatible? */ | |
1605 | cb_access:1, /* Is control block accessed? */ | |
1606 | dst_needed:1, /* Do we need dst entry? */ | |
1607 | blinding_requested:1, /* needs constant blinding */ | |
1608 | blinded:1, /* Was blinded */ | |
1609 | is_func:1, /* program is a bpf function */ | |
1610 | kprobe_override:1, /* Do we override a kprobe? */ | |
1611 | has_callchain_buf:1, /* callchain buffer allocated? */ | |
1612 | enforce_expected_attach_type:1, /* Enforce expected_attach_type checking at attach time */ | |
1613 | call_get_stack:1, /* Do we call bpf_get_stack() or bpf_get_stackid() */ | |
1614 | call_get_func_ip:1, /* Do we call get_func_ip() */ | |
66c84731 AN |
1615 | tstamp_type_access:1, /* Accessed __sk_buff->tstamp_type */ |
1616 | sleepable:1; /* BPF program is sleepable */ | |
d687f621 DK |
1617 | enum bpf_prog_type type; /* Type of BPF program */ |
1618 | enum bpf_attach_type expected_attach_type; /* For some prog types */ | |
1619 | u32 len; /* Number of filter blocks */ | |
1620 | u32 jited_len; /* Size of jited insns in bytes */ | |
1621 | u8 tag[BPF_TAG_SIZE]; | |
1622 | struct bpf_prog_stats __percpu *stats; | |
1623 | int __percpu *active; | |
1624 | unsigned int (*bpf_func)(const void *ctx, | |
1625 | const struct bpf_insn *insn); | |
1626 | struct bpf_prog_aux *aux; /* Auxiliary fields */ | |
1627 | struct sock_fprog_kern *orig_prog; /* Original BPF program */ | |
1628 | /* Instructions for interpreter */ | |
1629 | union { | |
1630 | DECLARE_FLEX_ARRAY(struct sock_filter, insns); | |
1631 | DECLARE_FLEX_ARRAY(struct bpf_insn, insnsi); | |
1632 | }; | |
1633 | }; | |
1634 | ||
2beee5f5 | 1635 | struct bpf_array_aux { |
da765a2f DB |
1636 | /* Programs with direct jumps into programs part of this array. */ |
1637 | struct list_head poke_progs; | |
1638 | struct bpf_map *map; | |
1639 | struct mutex poke_mutex; | |
1640 | struct work_struct work; | |
2beee5f5 DB |
1641 | }; |
1642 | ||
6cc7d1e8 AN |
1643 | struct bpf_link { |
1644 | atomic64_t refcnt; | |
1645 | u32 id; | |
1646 | enum bpf_link_type type; | |
1647 | const struct bpf_link_ops *ops; | |
1648 | struct bpf_prog *prog; | |
61c6fefa AN |
1649 | /* whether BPF link itself has "sleepable" semantics, which can differ |
1650 | * from underlying BPF program having a "sleepable" semantics, as BPF | |
1651 | * link's semantics is determined by target attach hook | |
1652 | */ | |
1653 | bool sleepable; | |
1a80dbcb AN |
1654 | /* rcu is used before freeing, work can be used to schedule that |
1655 | * RCU-based freeing before that, so they never overlap | |
1656 | */ | |
1657 | union { | |
1658 | struct rcu_head rcu; | |
1659 | struct work_struct work; | |
1660 | }; | |
6cc7d1e8 AN |
1661 | }; |
1662 | ||
1663 | struct bpf_link_ops { | |
1664 | void (*release)(struct bpf_link *link); | |
1a80dbcb AN |
1665 | /* deallocate link resources callback, called without RCU grace period |
1666 | * waiting | |
1667 | */ | |
6cc7d1e8 | 1668 | void (*dealloc)(struct bpf_link *link); |
1a80dbcb | 1669 | /* deallocate link resources callback, called after RCU grace period; |
61c6fefa AN |
1670 | * if either the underlying BPF program is sleepable or BPF link's |
1671 | * target hook is sleepable, we'll go through tasks trace RCU GP and | |
1672 | * then "classic" RCU GP; this need for chaining tasks trace and | |
1673 | * classic RCU GPs is designated by setting bpf_link->sleepable flag | |
1a80dbcb AN |
1674 | */ |
1675 | void (*dealloc_deferred)(struct bpf_link *link); | |
73b11c2a | 1676 | int (*detach)(struct bpf_link *link); |
6cc7d1e8 AN |
1677 | int (*update_prog)(struct bpf_link *link, struct bpf_prog *new_prog, |
1678 | struct bpf_prog *old_prog); | |
1679 | void (*show_fdinfo)(const struct bpf_link *link, struct seq_file *seq); | |
1680 | int (*fill_link_info)(const struct bpf_link *link, | |
1681 | struct bpf_link_info *info); | |
aef56f2e KFL |
1682 | int (*update_map)(struct bpf_link *link, struct bpf_map *new_map, |
1683 | struct bpf_map *old_map); | |
1adddc97 | 1684 | __poll_t (*poll)(struct file *file, struct poll_table_struct *pts); |
6cc7d1e8 AN |
1685 | }; |
1686 | ||
f7e0beaf KFL |
1687 | struct bpf_tramp_link { |
1688 | struct bpf_link link; | |
1689 | struct hlist_node tramp_hlist; | |
2fcc8241 | 1690 | u64 cookie; |
f7e0beaf KFL |
1691 | }; |
1692 | ||
69fd337a SF |
1693 | struct bpf_shim_tramp_link { |
1694 | struct bpf_tramp_link link; | |
1695 | struct bpf_trampoline *trampoline; | |
1696 | }; | |
1697 | ||
f7e0beaf KFL |
1698 | struct bpf_tracing_link { |
1699 | struct bpf_tramp_link link; | |
1700 | enum bpf_attach_type attach_type; | |
1701 | struct bpf_trampoline *trampoline; | |
1702 | struct bpf_prog *tgt_prog; | |
1703 | }; | |
1704 | ||
d4dfc570 AN |
1705 | struct bpf_raw_tp_link { |
1706 | struct bpf_link link; | |
1707 | struct bpf_raw_event_map *btp; | |
68ca5d4e | 1708 | u64 cookie; |
d4dfc570 AN |
1709 | }; |
1710 | ||
6cc7d1e8 AN |
1711 | struct bpf_link_primer { |
1712 | struct bpf_link *link; | |
1713 | struct file *file; | |
1714 | int fd; | |
1715 | u32 id; | |
1716 | }; | |
1717 | ||
6fe01d3c AN |
1718 | struct bpf_mount_opts { |
1719 | kuid_t uid; | |
1720 | kgid_t gid; | |
1721 | umode_t mode; | |
1722 | ||
1723 | /* BPF token-related delegation options */ | |
1724 | u64 delegate_cmds; | |
1725 | u64 delegate_maps; | |
1726 | u64 delegate_progs; | |
1727 | u64 delegate_attachs; | |
1728 | }; | |
1729 | ||
35f96de0 AN |
1730 | struct bpf_token { |
1731 | struct work_struct work; | |
1732 | atomic64_t refcnt; | |
1733 | struct user_namespace *userns; | |
1734 | u64 allowed_cmds; | |
a177fc2b | 1735 | u64 allowed_maps; |
caf8f28e AN |
1736 | u64 allowed_progs; |
1737 | u64 allowed_attachs; | |
f568a3d4 AN |
1738 | #ifdef CONFIG_SECURITY |
1739 | void *security; | |
1740 | #endif | |
35f96de0 AN |
1741 | }; |
1742 | ||
85d33df3 | 1743 | struct bpf_struct_ops_value; |
27ae7997 MKL |
1744 | struct btf_member; |
1745 | ||
1746 | #define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64 | |
bb48cf16 DV |
1747 | /** |
1748 | * struct bpf_struct_ops - A structure of callbacks allowing a subsystem to | |
1749 | * define a BPF_MAP_TYPE_STRUCT_OPS map type composed | |
1750 | * of BPF_PROG_TYPE_STRUCT_OPS progs. | |
1751 | * @verifier_ops: A structure of callbacks that are invoked by the verifier | |
1752 | * when determining whether the struct_ops progs in the | |
1753 | * struct_ops map are valid. | |
1754 | * @init: A callback that is invoked a single time, and before any other | |
1755 | * callback, to initialize the structure. A nonzero return value means | |
1756 | * the subsystem could not be initialized. | |
1757 | * @check_member: When defined, a callback invoked by the verifier to allow | |
1758 | * the subsystem to determine if an entry in the struct_ops map | |
1759 | * is valid. A nonzero return value means that the map is | |
1760 | * invalid and should be rejected by the verifier. | |
1761 | * @init_member: A callback that is invoked for each member of the struct_ops | |
1762 | * map to allow the subsystem to initialize the member. A nonzero | |
1763 | * value means the member could not be initialized. This callback | |
1764 | * is exclusive with the @type, @type_id, @value_type, and | |
1765 | * @value_id fields. | |
1766 | * @reg: A callback that is invoked when the struct_ops map has been | |
1767 | * initialized and is being attached to. Zero means the struct_ops map | |
1768 | * has been successfully registered and is live. A nonzero return value | |
1769 | * means the struct_ops map could not be registered. | |
1770 | * @unreg: A callback that is invoked when the struct_ops map should be | |
1771 | * unregistered. | |
1772 | * @update: A callback that is invoked when the live struct_ops map is being | |
1773 | * updated to contain new values. This callback is only invoked when | |
1774 | * the struct_ops map is loaded with BPF_F_LINK. If not defined, the | |
1775 | * it is assumed that the struct_ops map cannot be updated. | |
1776 | * @validate: A callback that is invoked after all of the members have been | |
1777 | * initialized. This callback should perform static checks on the | |
1778 | * map, meaning that it should either fail or succeed | |
1779 | * deterministically. A struct_ops map that has been validated may | |
1780 | * not necessarily succeed in being registered if the call to @reg | |
1781 | * fails. For example, a valid struct_ops map may be loaded, but | |
1782 | * then fail to be registered due to there being another active | |
1783 | * struct_ops map on the system in the subsystem already. For this | |
1784 | * reason, if this callback is not defined, the check is skipped as | |
1785 | * the struct_ops map will have final verification performed in | |
1786 | * @reg. | |
1787 | * @type: BTF type. | |
1788 | * @value_type: Value type. | |
1789 | * @name: The name of the struct bpf_struct_ops object. | |
1790 | * @func_models: Func models | |
1791 | * @type_id: BTF type id. | |
1792 | * @value_id: BTF value id. | |
1793 | */ | |
27ae7997 MKL |
1794 | struct bpf_struct_ops { |
1795 | const struct bpf_verifier_ops *verifier_ops; | |
1796 | int (*init)(struct btf *btf); | |
1797 | int (*check_member)(const struct btf_type *t, | |
51a52a29 DV |
1798 | const struct btf_member *member, |
1799 | const struct bpf_prog *prog); | |
85d33df3 MKL |
1800 | int (*init_member)(const struct btf_type *t, |
1801 | const struct btf_member *member, | |
1802 | void *kdata, const void *udata); | |
73287fe2 KFL |
1803 | int (*reg)(void *kdata, struct bpf_link *link); |
1804 | void (*unreg)(void *kdata, struct bpf_link *link); | |
1805 | int (*update)(void *kdata, void *old_kdata, struct bpf_link *link); | |
68b04864 | 1806 | int (*validate)(void *kdata); |
4c5763ed | 1807 | void *cfi_stubs; |
e3f87fdf | 1808 | struct module *owner; |
27ae7997 MKL |
1809 | const char *name; |
1810 | struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS]; | |
4c5763ed KFL |
1811 | }; |
1812 | ||
16116035 KFL |
1813 | /* Every member of a struct_ops type has an instance even a member is not |
1814 | * an operator (function pointer). The "info" field will be assigned to | |
1815 | * prog->aux->ctx_arg_info of BPF struct_ops programs to provide the | |
1816 | * argument information required by the verifier to verify the program. | |
1817 | * | |
1818 | * btf_ctx_access() will lookup prog->aux->ctx_arg_info to find the | |
1819 | * corresponding entry for an given argument. | |
1820 | */ | |
1821 | struct bpf_struct_ops_arg_info { | |
1822 | struct bpf_ctx_arg_aux *info; | |
1823 | u32 cnt; | |
1824 | }; | |
1825 | ||
4c5763ed KFL |
1826 | struct bpf_struct_ops_desc { |
1827 | struct bpf_struct_ops *st_ops; | |
1828 | ||
1829 | const struct btf_type *type; | |
1830 | const struct btf_type *value_type; | |
27ae7997 | 1831 | u32 type_id; |
85d33df3 | 1832 | u32 value_id; |
16116035 KFL |
1833 | |
1834 | /* Collection of argument information for each member */ | |
1835 | struct bpf_struct_ops_arg_info *arg_info; | |
27ae7997 MKL |
1836 | }; |
1837 | ||
612d087d KFL |
1838 | enum bpf_struct_ops_state { |
1839 | BPF_STRUCT_OPS_STATE_INIT, | |
1840 | BPF_STRUCT_OPS_STATE_INUSE, | |
1841 | BPF_STRUCT_OPS_STATE_TOBEFREE, | |
1842 | BPF_STRUCT_OPS_STATE_READY, | |
1843 | }; | |
1844 | ||
1845 | struct bpf_struct_ops_common_value { | |
1846 | refcount_t refcnt; | |
1847 | enum bpf_struct_ops_state state; | |
1848 | }; | |
1849 | ||
27ae7997 | 1850 | #if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL) |
f6be98d1 KFL |
1851 | /* This macro helps developer to register a struct_ops type and generate |
1852 | * type information correctly. Developers should use this macro to register | |
1853 | * a struct_ops type instead of calling __register_bpf_struct_ops() directly. | |
1854 | */ | |
1855 | #define register_bpf_struct_ops(st_ops, type) \ | |
1856 | ({ \ | |
1857 | struct bpf_struct_ops_##type { \ | |
1858 | struct bpf_struct_ops_common_value common; \ | |
1859 | struct type data ____cacheline_aligned_in_smp; \ | |
1860 | }; \ | |
1861 | BTF_TYPE_EMIT(struct bpf_struct_ops_##type); \ | |
1862 | __register_bpf_struct_ops(st_ops); \ | |
1863 | }) | |
85d33df3 | 1864 | #define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA)) |
85d33df3 MKL |
1865 | bool bpf_struct_ops_get(const void *kdata); |
1866 | void bpf_struct_ops_put(const void *kdata); | |
e42ac141 | 1867 | int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff); |
85d33df3 MKL |
1868 | int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key, |
1869 | void *value); | |
f7e0beaf KFL |
1870 | int bpf_struct_ops_prepare_trampoline(struct bpf_tramp_links *tlinks, |
1871 | struct bpf_tramp_link *link, | |
31a645ae | 1872 | const struct btf_func_model *model, |
2cd3e377 | 1873 | void *stub_func, |
187e2af0 KFL |
1874 | void **image, u32 *image_off, |
1875 | bool allow_alloc); | |
1876 | void bpf_struct_ops_image_free(void *image); | |
85d33df3 MKL |
1877 | static inline bool bpf_try_module_get(const void *data, struct module *owner) |
1878 | { | |
1879 | if (owner == BPF_MODULE_OWNER) | |
1880 | return bpf_struct_ops_get(data); | |
1881 | else | |
1882 | return try_module_get(owner); | |
1883 | } | |
1884 | static inline void bpf_module_put(const void *data, struct module *owner) | |
1885 | { | |
1886 | if (owner == BPF_MODULE_OWNER) | |
1887 | bpf_struct_ops_put(data); | |
1888 | else | |
1889 | module_put(owner); | |
1890 | } | |
68b04864 | 1891 | int bpf_struct_ops_link_create(union bpf_attr *attr); |
c196906d HT |
1892 | |
1893 | #ifdef CONFIG_NET | |
1894 | /* Define it here to avoid the use of forward declaration */ | |
1895 | struct bpf_dummy_ops_state { | |
1896 | int val; | |
1897 | }; | |
1898 | ||
1899 | struct bpf_dummy_ops { | |
1900 | int (*test_1)(struct bpf_dummy_ops_state *cb); | |
1901 | int (*test_2)(struct bpf_dummy_ops_state *cb, int a1, unsigned short a2, | |
1902 | char a3, unsigned long a4); | |
7dd88059 | 1903 | int (*test_sleepable)(struct bpf_dummy_ops_state *cb); |
c196906d HT |
1904 | }; |
1905 | ||
1906 | int bpf_struct_ops_test_run(struct bpf_prog *prog, const union bpf_attr *kattr, | |
1907 | union bpf_attr __user *uattr); | |
1908 | #endif | |
f6be98d1 KFL |
1909 | int bpf_struct_ops_desc_init(struct bpf_struct_ops_desc *st_ops_desc, |
1910 | struct btf *btf, | |
1911 | struct bpf_verifier_log *log); | |
1338b933 | 1912 | void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map); |
16116035 | 1913 | void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc); |
27ae7997 | 1914 | #else |
f6be98d1 | 1915 | #define register_bpf_struct_ops(st_ops, type) ({ (void *)(st_ops); 0; }) |
85d33df3 MKL |
1916 | static inline bool bpf_try_module_get(const void *data, struct module *owner) |
1917 | { | |
1918 | return try_module_get(owner); | |
1919 | } | |
1920 | static inline void bpf_module_put(const void *data, struct module *owner) | |
1921 | { | |
1922 | module_put(owner); | |
1923 | } | |
e42ac141 MKL |
1924 | static inline int bpf_struct_ops_supported(const struct bpf_struct_ops *st_ops, u32 moff) |
1925 | { | |
1926 | return -ENOTSUPP; | |
1927 | } | |
85d33df3 MKL |
1928 | static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, |
1929 | void *key, | |
1930 | void *value) | |
1931 | { | |
1932 | return -EINVAL; | |
1933 | } | |
68b04864 KFL |
1934 | static inline int bpf_struct_ops_link_create(union bpf_attr *attr) |
1935 | { | |
1936 | return -EOPNOTSUPP; | |
1937 | } | |
1338b933 KFL |
1938 | static inline void bpf_map_struct_ops_info_fill(struct bpf_map_info *info, struct bpf_map *map) |
1939 | { | |
1940 | } | |
68b04864 | 1941 | |
16116035 KFL |
1942 | static inline void bpf_struct_ops_desc_release(struct bpf_struct_ops_desc *st_ops_desc) |
1943 | { | |
1944 | } | |
1945 | ||
9cb61fda SF |
1946 | #endif |
1947 | ||
1948 | #if defined(CONFIG_CGROUP_BPF) && defined(CONFIG_BPF_LSM) | |
1949 | int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, | |
1950 | int cgroup_atype); | |
1951 | void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog); | |
1952 | #else | |
69fd337a SF |
1953 | static inline int bpf_trampoline_link_cgroup_shim(struct bpf_prog *prog, |
1954 | int cgroup_atype) | |
1955 | { | |
1956 | return -EOPNOTSUPP; | |
1957 | } | |
1958 | static inline void bpf_trampoline_unlink_cgroup_shim(struct bpf_prog *prog) | |
1959 | { | |
1960 | } | |
27ae7997 MKL |
1961 | #endif |
1962 | ||
04fd61ab AS |
1963 | struct bpf_array { |
1964 | struct bpf_map map; | |
1965 | u32 elem_size; | |
b2157399 | 1966 | u32 index_mask; |
2beee5f5 | 1967 | struct bpf_array_aux *aux; |
04fd61ab | 1968 | union { |
129d868e KC |
1969 | DECLARE_FLEX_ARRAY(char, value) __aligned(8); |
1970 | DECLARE_FLEX_ARRAY(void *, ptrs) __aligned(8); | |
1971 | DECLARE_FLEX_ARRAY(void __percpu *, pptrs) __aligned(8); | |
04fd61ab AS |
1972 | }; |
1973 | }; | |
3b1efb19 | 1974 | |
c04c0d2b | 1975 | #define BPF_COMPLEXITY_LIMIT_INSNS 1000000 /* yes. 1M insns */ |
ebf7f6f0 | 1976 | #define MAX_TAIL_CALL_CNT 33 |
04fd61ab | 1977 | |
6018e1f4 AN |
1978 | /* Maximum number of loops for bpf_loop and bpf_iter_num. |
1979 | * It's enum to expose it (and thus make it discoverable) through BTF. | |
1980 | */ | |
1981 | enum { | |
1982 | BPF_MAX_LOOPS = 8 * 1024 * 1024, | |
1983 | }; | |
1ade2371 | 1984 | |
591fe988 DB |
1985 | #define BPF_F_ACCESS_MASK (BPF_F_RDONLY | \ |
1986 | BPF_F_RDONLY_PROG | \ | |
1987 | BPF_F_WRONLY | \ | |
1988 | BPF_F_WRONLY_PROG) | |
1989 | ||
1990 | #define BPF_MAP_CAN_READ BIT(0) | |
1991 | #define BPF_MAP_CAN_WRITE BIT(1) | |
1992 | ||
20571567 DV |
1993 | /* Maximum number of user-producer ring buffer samples that can be drained in |
1994 | * a call to bpf_user_ringbuf_drain(). | |
1995 | */ | |
1996 | #define BPF_MAX_USER_RINGBUF_SAMPLES (128 * 1024) | |
1997 | ||
591fe988 DB |
1998 | static inline u32 bpf_map_flags_to_cap(struct bpf_map *map) |
1999 | { | |
2000 | u32 access_flags = map->map_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); | |
2001 | ||
2002 | /* Combination of BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG is | |
2003 | * not possible. | |
2004 | */ | |
2005 | if (access_flags & BPF_F_RDONLY_PROG) | |
2006 | return BPF_MAP_CAN_READ; | |
2007 | else if (access_flags & BPF_F_WRONLY_PROG) | |
2008 | return BPF_MAP_CAN_WRITE; | |
2009 | else | |
2010 | return BPF_MAP_CAN_READ | BPF_MAP_CAN_WRITE; | |
2011 | } | |
2012 | ||
2013 | static inline bool bpf_map_flags_access_ok(u32 access_flags) | |
2014 | { | |
2015 | return (access_flags & (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG)) != | |
2016 | (BPF_F_RDONLY_PROG | BPF_F_WRONLY_PROG); | |
2017 | } | |
2018 | ||
3b1efb19 DB |
2019 | struct bpf_event_entry { |
2020 | struct perf_event *event; | |
2021 | struct file *perf_file; | |
2022 | struct file *map_file; | |
2023 | struct rcu_head rcu; | |
2024 | }; | |
2025 | ||
f45d5b6c THJ |
2026 | static inline bool map_type_contains_progs(struct bpf_map *map) |
2027 | { | |
2028 | return map->map_type == BPF_MAP_TYPE_PROG_ARRAY || | |
2029 | map->map_type == BPF_MAP_TYPE_DEVMAP || | |
2030 | map->map_type == BPF_MAP_TYPE_CPUMAP; | |
2031 | } | |
2032 | ||
2033 | bool bpf_prog_map_compatible(struct bpf_map *map, const struct bpf_prog *fp); | |
f1f7714e | 2034 | int bpf_prog_calc_tag(struct bpf_prog *fp); |
bd570ff9 | 2035 | |
0756ea3e | 2036 | const struct bpf_func_proto *bpf_get_trace_printk_proto(void); |
10aceb62 | 2037 | const struct bpf_func_proto *bpf_get_trace_vprintk_proto(void); |
555c8a86 DB |
2038 | |
2039 | typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src, | |
aa7145c1 | 2040 | unsigned long off, unsigned long len); |
c64b7983 JS |
2041 | typedef u32 (*bpf_convert_ctx_access_t)(enum bpf_access_type type, |
2042 | const struct bpf_insn *src, | |
2043 | struct bpf_insn *dst, | |
2044 | struct bpf_prog *prog, | |
2045 | u32 *target_size); | |
555c8a86 DB |
2046 | |
2047 | u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size, | |
2048 | void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy); | |
04fd61ab | 2049 | |
324bda9e AS |
2050 | /* an array of programs to be executed under rcu_lock. |
2051 | * | |
2052 | * Typical usage: | |
055eb955 | 2053 | * ret = bpf_prog_run_array(rcu_dereference(&bpf_prog_array), ctx, bpf_prog_run); |
324bda9e AS |
2054 | * |
2055 | * the structure returned by bpf_prog_array_alloc() should be populated | |
2056 | * with program pointers and the last pointer must be NULL. | |
2057 | * The user has to keep refcnt on the program and make sure the program | |
2058 | * is removed from the array before bpf_prog_put(). | |
2059 | * The 'struct bpf_prog_array *' should only be replaced with xchg() | |
2060 | * since other cpus are walking the array of pointers in parallel. | |
2061 | */ | |
394e40a2 RG |
2062 | struct bpf_prog_array_item { |
2063 | struct bpf_prog *prog; | |
82e6b1ee AN |
2064 | union { |
2065 | struct bpf_cgroup_storage *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE]; | |
2066 | u64 bpf_cookie; | |
2067 | }; | |
394e40a2 RG |
2068 | }; |
2069 | ||
324bda9e AS |
2070 | struct bpf_prog_array { |
2071 | struct rcu_head rcu; | |
d7f10df8 | 2072 | struct bpf_prog_array_item items[]; |
324bda9e AS |
2073 | }; |
2074 | ||
46531a30 PB |
2075 | struct bpf_empty_prog_array { |
2076 | struct bpf_prog_array hdr; | |
2077 | struct bpf_prog *null_prog; | |
2078 | }; | |
2079 | ||
2080 | /* to avoid allocating empty bpf_prog_array for cgroups that | |
2081 | * don't have bpf program attached use one global 'bpf_empty_prog_array' | |
2082 | * It will not be modified the caller of bpf_prog_array_alloc() | |
2083 | * (since caller requested prog_cnt == 0) | |
2084 | * that pointer should be 'freed' by bpf_prog_array_free() | |
2085 | */ | |
2086 | extern struct bpf_empty_prog_array bpf_empty_prog_array; | |
2087 | ||
d29ab6e1 | 2088 | struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags); |
54e9c9d4 | 2089 | void bpf_prog_array_free(struct bpf_prog_array *progs); |
8c7dcb84 DK |
2090 | /* Use when traversal over the bpf_prog_array uses tasks_trace rcu */ |
2091 | void bpf_prog_array_free_sleepable(struct bpf_prog_array *progs); | |
54e9c9d4 | 2092 | int bpf_prog_array_length(struct bpf_prog_array *progs); |
0d01da6a | 2093 | bool bpf_prog_array_is_empty(struct bpf_prog_array *array); |
54e9c9d4 | 2094 | int bpf_prog_array_copy_to_user(struct bpf_prog_array *progs, |
468e2f64 | 2095 | __u32 __user *prog_ids, u32 cnt); |
324bda9e | 2096 | |
54e9c9d4 | 2097 | void bpf_prog_array_delete_safe(struct bpf_prog_array *progs, |
e87c6bc3 | 2098 | struct bpf_prog *old_prog); |
ce3aa9cc JS |
2099 | int bpf_prog_array_delete_safe_at(struct bpf_prog_array *array, int index); |
2100 | int bpf_prog_array_update_at(struct bpf_prog_array *array, int index, | |
2101 | struct bpf_prog *prog); | |
54e9c9d4 | 2102 | int bpf_prog_array_copy_info(struct bpf_prog_array *array, |
3a38bb98 YS |
2103 | u32 *prog_ids, u32 request_cnt, |
2104 | u32 *prog_cnt); | |
54e9c9d4 | 2105 | int bpf_prog_array_copy(struct bpf_prog_array *old_array, |
e87c6bc3 YS |
2106 | struct bpf_prog *exclude_prog, |
2107 | struct bpf_prog *include_prog, | |
82e6b1ee | 2108 | u64 bpf_cookie, |
e87c6bc3 YS |
2109 | struct bpf_prog_array **new_array); |
2110 | ||
c7603cfa AN |
2111 | struct bpf_run_ctx {}; |
2112 | ||
2113 | struct bpf_cg_run_ctx { | |
2114 | struct bpf_run_ctx run_ctx; | |
7d08c2c9 | 2115 | const struct bpf_prog_array_item *prog_item; |
c4dcfdd4 | 2116 | int retval; |
c7603cfa AN |
2117 | }; |
2118 | ||
82e6b1ee AN |
2119 | struct bpf_trace_run_ctx { |
2120 | struct bpf_run_ctx run_ctx; | |
2121 | u64 bpf_cookie; | |
a3c485a5 | 2122 | bool is_uprobe; |
82e6b1ee AN |
2123 | }; |
2124 | ||
e384c7b7 KFL |
2125 | struct bpf_tramp_run_ctx { |
2126 | struct bpf_run_ctx run_ctx; | |
2127 | u64 bpf_cookie; | |
2128 | struct bpf_run_ctx *saved_run_ctx; | |
2129 | }; | |
2130 | ||
7d08c2c9 AN |
2131 | static inline struct bpf_run_ctx *bpf_set_run_ctx(struct bpf_run_ctx *new_ctx) |
2132 | { | |
2133 | struct bpf_run_ctx *old_ctx = NULL; | |
2134 | ||
2135 | #ifdef CONFIG_BPF_SYSCALL | |
2136 | old_ctx = current->bpf_ctx; | |
2137 | current->bpf_ctx = new_ctx; | |
2138 | #endif | |
2139 | return old_ctx; | |
2140 | } | |
2141 | ||
2142 | static inline void bpf_reset_run_ctx(struct bpf_run_ctx *old_ctx) | |
2143 | { | |
2144 | #ifdef CONFIG_BPF_SYSCALL | |
2145 | current->bpf_ctx = old_ctx; | |
2146 | #endif | |
2147 | } | |
2148 | ||
77241217 SF |
2149 | /* BPF program asks to bypass CAP_NET_BIND_SERVICE in bind. */ |
2150 | #define BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE (1 << 0) | |
2151 | /* BPF program asks to set CN on the packet. */ | |
2152 | #define BPF_RET_SET_CN (1 << 0) | |
2153 | ||
7d08c2c9 AN |
2154 | typedef u32 (*bpf_prog_run_fn)(const struct bpf_prog *prog, const void *ctx); |
2155 | ||
7d08c2c9 | 2156 | static __always_inline u32 |
055eb955 | 2157 | bpf_prog_run_array(const struct bpf_prog_array *array, |
7d08c2c9 AN |
2158 | const void *ctx, bpf_prog_run_fn run_prog) |
2159 | { | |
2160 | const struct bpf_prog_array_item *item; | |
2161 | const struct bpf_prog *prog; | |
82e6b1ee AN |
2162 | struct bpf_run_ctx *old_run_ctx; |
2163 | struct bpf_trace_run_ctx run_ctx; | |
7d08c2c9 AN |
2164 | u32 ret = 1; |
2165 | ||
055eb955 SF |
2166 | RCU_LOCKDEP_WARN(!rcu_read_lock_held(), "no rcu lock held"); |
2167 | ||
7d08c2c9 | 2168 | if (unlikely(!array)) |
055eb955 SF |
2169 | return ret; |
2170 | ||
a3c485a5 JO |
2171 | run_ctx.is_uprobe = false; |
2172 | ||
055eb955 | 2173 | migrate_disable(); |
82e6b1ee | 2174 | old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); |
7d08c2c9 AN |
2175 | item = &array->items[0]; |
2176 | while ((prog = READ_ONCE(item->prog))) { | |
82e6b1ee | 2177 | run_ctx.bpf_cookie = item->bpf_cookie; |
7d08c2c9 AN |
2178 | ret &= run_prog(prog, ctx); |
2179 | item++; | |
2180 | } | |
82e6b1ee | 2181 | bpf_reset_run_ctx(old_run_ctx); |
7d08c2c9 AN |
2182 | migrate_enable(); |
2183 | return ret; | |
2184 | } | |
324bda9e | 2185 | |
8c7dcb84 DK |
2186 | /* Notes on RCU design for bpf_prog_arrays containing sleepable programs: |
2187 | * | |
2188 | * We use the tasks_trace rcu flavor read section to protect the bpf_prog_array | |
2189 | * overall. As a result, we must use the bpf_prog_array_free_sleepable | |
2190 | * in order to use the tasks_trace rcu grace period. | |
2191 | * | |
2192 | * When a non-sleepable program is inside the array, we take the rcu read | |
2193 | * section and disable preemption for that program alone, so it can access | |
2194 | * rcu-protected dynamically sized maps. | |
2195 | */ | |
2196 | static __always_inline u32 | |
7d0d6736 | 2197 | bpf_prog_run_array_uprobe(const struct bpf_prog_array *array, |
a3c485a5 | 2198 | const void *ctx, bpf_prog_run_fn run_prog) |
8c7dcb84 DK |
2199 | { |
2200 | const struct bpf_prog_array_item *item; | |
2201 | const struct bpf_prog *prog; | |
8c7dcb84 DK |
2202 | struct bpf_run_ctx *old_run_ctx; |
2203 | struct bpf_trace_run_ctx run_ctx; | |
2204 | u32 ret = 1; | |
2205 | ||
2206 | might_fault(); | |
7d0d6736 JH |
2207 | RCU_LOCKDEP_WARN(!rcu_read_lock_trace_held(), "no rcu lock held"); |
2208 | ||
2209 | if (unlikely(!array)) | |
2210 | return ret; | |
8c7dcb84 | 2211 | |
8c7dcb84 DK |
2212 | migrate_disable(); |
2213 | ||
a3c485a5 JO |
2214 | run_ctx.is_uprobe = true; |
2215 | ||
8c7dcb84 DK |
2216 | old_run_ctx = bpf_set_run_ctx(&run_ctx.run_ctx); |
2217 | item = &array->items[0]; | |
2218 | while ((prog = READ_ONCE(item->prog))) { | |
66c84731 | 2219 | if (!prog->sleepable) |
8c7dcb84 DK |
2220 | rcu_read_lock(); |
2221 | ||
2222 | run_ctx.bpf_cookie = item->bpf_cookie; | |
2223 | ret &= run_prog(prog, ctx); | |
2224 | item++; | |
2225 | ||
66c84731 | 2226 | if (!prog->sleepable) |
8c7dcb84 DK |
2227 | rcu_read_unlock(); |
2228 | } | |
2229 | bpf_reset_run_ctx(old_run_ctx); | |
8c7dcb84 | 2230 | migrate_enable(); |
8c7dcb84 DK |
2231 | return ret; |
2232 | } | |
2233 | ||
89aa0758 | 2234 | #ifdef CONFIG_BPF_SYSCALL |
b121d1e7 | 2235 | DECLARE_PER_CPU(int, bpf_prog_active); |
d46edd67 | 2236 | extern struct mutex bpf_stats_enabled_mutex; |
b121d1e7 | 2237 | |
c518cfa0 TG |
2238 | /* |
2239 | * Block execution of BPF programs attached to instrumentation (perf, | |
2240 | * kprobes, tracepoints) to prevent deadlocks on map operations as any of | |
2241 | * these events can happen inside a region which holds a map bucket lock | |
2242 | * and can deadlock on it. | |
c518cfa0 TG |
2243 | */ |
2244 | static inline void bpf_disable_instrumentation(void) | |
2245 | { | |
2246 | migrate_disable(); | |
79364031 | 2247 | this_cpu_inc(bpf_prog_active); |
c518cfa0 TG |
2248 | } |
2249 | ||
2250 | static inline void bpf_enable_instrumentation(void) | |
2251 | { | |
79364031 | 2252 | this_cpu_dec(bpf_prog_active); |
c518cfa0 TG |
2253 | migrate_enable(); |
2254 | } | |
2255 | ||
35f96de0 | 2256 | extern const struct super_operations bpf_super_ops; |
f66e448c CF |
2257 | extern const struct file_operations bpf_map_fops; |
2258 | extern const struct file_operations bpf_prog_fops; | |
367ec3e4 | 2259 | extern const struct file_operations bpf_iter_fops; |
f66e448c | 2260 | |
91cc1a99 | 2261 | #define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \ |
7de16e3a JK |
2262 | extern const struct bpf_prog_ops _name ## _prog_ops; \ |
2263 | extern const struct bpf_verifier_ops _name ## _verifier_ops; | |
40077e0c JB |
2264 | #define BPF_MAP_TYPE(_id, _ops) \ |
2265 | extern const struct bpf_map_ops _ops; | |
f2e10bff | 2266 | #define BPF_LINK_TYPE(_id, _name) |
be9370a7 JB |
2267 | #include <linux/bpf_types.h> |
2268 | #undef BPF_PROG_TYPE | |
40077e0c | 2269 | #undef BPF_MAP_TYPE |
f2e10bff | 2270 | #undef BPF_LINK_TYPE |
0fc174de | 2271 | |
ab3f0063 | 2272 | extern const struct bpf_prog_ops bpf_offload_prog_ops; |
4f9218aa JK |
2273 | extern const struct bpf_verifier_ops tc_cls_act_analyzer_ops; |
2274 | extern const struct bpf_verifier_ops xdp_analyzer_ops; | |
2275 | ||
0fc174de | 2276 | struct bpf_prog *bpf_prog_get(u32 ufd); |
248f346f | 2277 | struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type, |
288b3de5 | 2278 | bool attach_drv); |
85192dbf | 2279 | void bpf_prog_add(struct bpf_prog *prog, int i); |
c540594f | 2280 | void bpf_prog_sub(struct bpf_prog *prog, int i); |
85192dbf | 2281 | void bpf_prog_inc(struct bpf_prog *prog); |
a6f6df69 | 2282 | struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog); |
61e021f3 DB |
2283 | void bpf_prog_put(struct bpf_prog *prog); |
2284 | ||
e7895f01 | 2285 | void bpf_prog_free_id(struct bpf_prog *prog); |
158e5e9e | 2286 | void bpf_map_free_id(struct bpf_map *map); |
ad8ad79f | 2287 | |
aa3496ac | 2288 | struct btf_field *btf_record_find(const struct btf_record *rec, |
74843b57 | 2289 | u32 offset, u32 field_mask); |
aa3496ac KKD |
2290 | void btf_record_free(struct btf_record *rec); |
2291 | void bpf_map_free_record(struct bpf_map *map); | |
2292 | struct btf_record *btf_record_dup(const struct btf_record *rec); | |
2293 | bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b); | |
db559117 | 2294 | void bpf_obj_free_timer(const struct btf_record *rec, void *obj); |
246331e3 | 2295 | void bpf_obj_free_workqueue(const struct btf_record *rec, void *obj); |
aa3496ac | 2296 | void bpf_obj_free_fields(const struct btf_record *rec, void *obj); |
e383a459 | 2297 | void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu); |
61df10c7 | 2298 | |
1ed4d924 | 2299 | struct bpf_map *bpf_map_get(u32 ufd); |
c9da161c | 2300 | struct bpf_map *bpf_map_get_with_uref(u32 ufd); |
55f32595 | 2301 | |
4e885fab AP |
2302 | /* |
2303 | * The __bpf_map_get() and __btf_get_by_fd() functions parse a file | |
2304 | * descriptor and return a corresponding map or btf object. | |
2305 | * Their names are double underscored to emphasize the fact that they | |
2306 | * do not increase refcnt. To also increase refcnt use corresponding | |
2307 | * bpf_map_get() and btf_get_by_fd() functions. | |
2308 | */ | |
2309 | ||
55f32595 AV |
2310 | static inline struct bpf_map *__bpf_map_get(struct fd f) |
2311 | { | |
2312 | if (fd_empty(f)) | |
2313 | return ERR_PTR(-EBADF); | |
2314 | if (unlikely(fd_file(f)->f_op != &bpf_map_fops)) | |
2315 | return ERR_PTR(-EINVAL); | |
2316 | return fd_file(f)->private_data; | |
2317 | } | |
2318 | ||
4e885fab AP |
2319 | static inline struct btf *__btf_get_by_fd(struct fd f) |
2320 | { | |
2321 | if (fd_empty(f)) | |
2322 | return ERR_PTR(-EBADF); | |
2323 | if (unlikely(fd_file(f)->f_op != &btf_fops)) | |
2324 | return ERR_PTR(-EINVAL); | |
2325 | return fd_file(f)->private_data; | |
2326 | } | |
2327 | ||
1e0bd5a0 AN |
2328 | void bpf_map_inc(struct bpf_map *map); |
2329 | void bpf_map_inc_with_uref(struct bpf_map *map); | |
b671c206 | 2330 | struct bpf_map *__bpf_map_inc_not_zero(struct bpf_map *map, bool uref); |
1e0bd5a0 | 2331 | struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map); |
c9da161c | 2332 | void bpf_map_put_with_uref(struct bpf_map *map); |
61e021f3 | 2333 | void bpf_map_put(struct bpf_map *map); |
196e8ca7 DB |
2334 | void *bpf_map_area_alloc(u64 size, int numa_node); |
2335 | void *bpf_map_area_mmapable_alloc(u64 size, int numa_node); | |
d407bd25 | 2336 | void bpf_map_area_free(void *base); |
353050be | 2337 | bool bpf_map_write_active(const struct bpf_map *map); |
bd475643 | 2338 | void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr); |
cb4d03ab BV |
2339 | int generic_map_lookup_batch(struct bpf_map *map, |
2340 | const union bpf_attr *attr, | |
aa2e93b8 | 2341 | union bpf_attr __user *uattr); |
3af43ba4 | 2342 | int generic_map_update_batch(struct bpf_map *map, struct file *map_file, |
aa2e93b8 BV |
2343 | const union bpf_attr *attr, |
2344 | union bpf_attr __user *uattr); | |
2345 | int generic_map_delete_batch(struct bpf_map *map, | |
2346 | const union bpf_attr *attr, | |
cb4d03ab | 2347 | union bpf_attr __user *uattr); |
6086d29d | 2348 | struct bpf_map *bpf_map_get_curr_or_next(u32 *id); |
a228a64f | 2349 | struct bpf_prog *bpf_prog_get_curr_or_next(u32 *id); |
61e021f3 | 2350 | |
31746031 AS |
2351 | int bpf_map_alloc_pages(const struct bpf_map *map, gfp_t gfp, int nid, |
2352 | unsigned long nr_pages, struct page **page_array); | |
3a3b7fec | 2353 | #ifdef CONFIG_MEMCG |
48edc1f7 RG |
2354 | void *bpf_map_kmalloc_node(const struct bpf_map *map, size_t size, gfp_t flags, |
2355 | int node); | |
2356 | void *bpf_map_kzalloc(const struct bpf_map *map, size_t size, gfp_t flags); | |
ddef81b5 YS |
2357 | void *bpf_map_kvcalloc(struct bpf_map *map, size_t n, size_t size, |
2358 | gfp_t flags); | |
48edc1f7 RG |
2359 | void __percpu *bpf_map_alloc_percpu(const struct bpf_map *map, size_t size, |
2360 | size_t align, gfp_t flags); | |
2361 | #else | |
3b0ba54d SB |
2362 | /* |
2363 | * These specialized allocators have to be macros for their allocations to be | |
2364 | * accounted separately (to have separate alloc_tag). | |
2365 | */ | |
2c321f3f SB |
2366 | #define bpf_map_kmalloc_node(_map, _size, _flags, _node) \ |
2367 | kmalloc_node(_size, _flags, _node) | |
2368 | #define bpf_map_kzalloc(_map, _size, _flags) \ | |
2369 | kzalloc(_size, _flags) | |
2370 | #define bpf_map_kvcalloc(_map, _n, _size, _flags) \ | |
2371 | kvcalloc(_n, _size, _flags) | |
2372 | #define bpf_map_alloc_percpu(_map, _size, _align, _flags) \ | |
2373 | __alloc_percpu_gfp(_size, _align, _flags) | |
48edc1f7 RG |
2374 | #endif |
2375 | ||
25954730 AP |
2376 | static inline int |
2377 | bpf_map_init_elem_count(struct bpf_map *map) | |
2378 | { | |
2379 | size_t size = sizeof(*map->elem_count), align = size; | |
2380 | gfp_t flags = GFP_USER | __GFP_NOWARN; | |
2381 | ||
2382 | map->elem_count = bpf_map_alloc_percpu(map, size, align, flags); | |
2383 | if (!map->elem_count) | |
2384 | return -ENOMEM; | |
2385 | ||
2386 | return 0; | |
2387 | } | |
2388 | ||
2389 | static inline void | |
2390 | bpf_map_free_elem_count(struct bpf_map *map) | |
2391 | { | |
2392 | free_percpu(map->elem_count); | |
2393 | } | |
2394 | ||
2395 | static inline void bpf_map_inc_elem_count(struct bpf_map *map) | |
2396 | { | |
2397 | this_cpu_inc(*map->elem_count); | |
2398 | } | |
2399 | ||
2400 | static inline void bpf_map_dec_elem_count(struct bpf_map *map) | |
2401 | { | |
2402 | this_cpu_dec(*map->elem_count); | |
2403 | } | |
2404 | ||
1be7f75d AS |
2405 | extern int sysctl_unprivileged_bpf_disabled; |
2406 | ||
35f96de0 AN |
2407 | bool bpf_token_capable(const struct bpf_token *token, int cap); |
2408 | ||
d79a3549 | 2409 | static inline bool bpf_allow_ptr_leaks(const struct bpf_token *token) |
2c78ee89 | 2410 | { |
d79a3549 | 2411 | return bpf_token_capable(token, CAP_PERFMON); |
2c78ee89 AS |
2412 | } |
2413 | ||
d79a3549 | 2414 | static inline bool bpf_allow_uninit_stack(const struct bpf_token *token) |
01f810ac | 2415 | { |
d79a3549 | 2416 | return bpf_token_capable(token, CAP_PERFMON); |
01f810ac AM |
2417 | } |
2418 | ||
d79a3549 | 2419 | static inline bool bpf_bypass_spec_v1(const struct bpf_token *token) |
2c78ee89 | 2420 | { |
d79a3549 | 2421 | return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON); |
2c78ee89 AS |
2422 | } |
2423 | ||
d79a3549 | 2424 | static inline bool bpf_bypass_spec_v4(const struct bpf_token *token) |
2c78ee89 | 2425 | { |
d79a3549 | 2426 | return cpu_mitigations_off() || bpf_token_capable(token, CAP_PERFMON); |
2c78ee89 AS |
2427 | } |
2428 | ||
6e71b04a | 2429 | int bpf_map_new_fd(struct bpf_map *map, int flags); |
b2197755 DB |
2430 | int bpf_prog_new_fd(struct bpf_prog *prog); |
2431 | ||
f2e10bff | 2432 | void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, |
a3b80e10 | 2433 | const struct bpf_link_ops *ops, struct bpf_prog *prog); |
61c6fefa AN |
2434 | void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, |
2435 | const struct bpf_link_ops *ops, struct bpf_prog *prog, | |
2436 | bool sleepable); | |
a3b80e10 AN |
2437 | int bpf_link_prime(struct bpf_link *link, struct bpf_link_primer *primer); |
2438 | int bpf_link_settle(struct bpf_link_primer *primer); | |
2439 | void bpf_link_cleanup(struct bpf_link_primer *primer); | |
70ed506c | 2440 | void bpf_link_inc(struct bpf_link *link); |
67c3e835 | 2441 | struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link); |
70ed506c AN |
2442 | void bpf_link_put(struct bpf_link *link); |
2443 | int bpf_link_new_fd(struct bpf_link *link); | |
2444 | struct bpf_link *bpf_link_get_from_fd(u32 ufd); | |
9f883612 | 2445 | struct bpf_link *bpf_link_get_curr_or_next(u32 *id); |
70ed506c | 2446 | |
35f96de0 AN |
2447 | void bpf_token_inc(struct bpf_token *token); |
2448 | void bpf_token_put(struct bpf_token *token); | |
2449 | int bpf_token_create(union bpf_attr *attr); | |
2450 | struct bpf_token *bpf_token_get_from_fd(u32 ufd); | |
2451 | ||
2452 | bool bpf_token_allow_cmd(const struct bpf_token *token, enum bpf_cmd cmd); | |
a177fc2b | 2453 | bool bpf_token_allow_map_type(const struct bpf_token *token, enum bpf_map_type type); |
caf8f28e AN |
2454 | bool bpf_token_allow_prog_type(const struct bpf_token *token, |
2455 | enum bpf_prog_type prog_type, | |
2456 | enum bpf_attach_type attach_type); | |
35f96de0 | 2457 | |
cb8edce2 AN |
2458 | int bpf_obj_pin_user(u32 ufd, int path_fd, const char __user *pathname); |
2459 | int bpf_obj_get_user(int path_fd, const char __user *pathname, int flags); | |
35f96de0 AN |
2460 | struct inode *bpf_get_inode(struct super_block *sb, const struct inode *dir, |
2461 | umode_t mode); | |
b2197755 | 2462 | |
21aef70e | 2463 | #define BPF_ITER_FUNC_PREFIX "bpf_iter_" |
e5158d98 | 2464 | #define DEFINE_BPF_ITER_FUNC(target, args...) \ |
21aef70e YS |
2465 | extern int bpf_iter_ ## target(args); \ |
2466 | int __init bpf_iter_ ## target(args) { return 0; } | |
15d83c4d | 2467 | |
f0d74c4d KFL |
2468 | /* |
2469 | * The task type of iterators. | |
2470 | * | |
2471 | * For BPF task iterators, they can be parameterized with various | |
2472 | * parameters to visit only some of tasks. | |
2473 | * | |
2474 | * BPF_TASK_ITER_ALL (default) | |
2475 | * Iterate over resources of every task. | |
2476 | * | |
2477 | * BPF_TASK_ITER_TID | |
2478 | * Iterate over resources of a task/tid. | |
2479 | * | |
2480 | * BPF_TASK_ITER_TGID | |
2481 | * Iterate over resources of every task of a process / task group. | |
2482 | */ | |
2483 | enum bpf_iter_task_type { | |
2484 | BPF_TASK_ITER_ALL = 0, | |
2485 | BPF_TASK_ITER_TID, | |
2486 | BPF_TASK_ITER_TGID, | |
2487 | }; | |
2488 | ||
f9c79272 | 2489 | struct bpf_iter_aux_info { |
d4ccaf58 | 2490 | /* for map_elem iter */ |
a5cbe05a | 2491 | struct bpf_map *map; |
d4ccaf58 HL |
2492 | |
2493 | /* for cgroup iter */ | |
2494 | struct { | |
2495 | struct cgroup *start; /* starting cgroup */ | |
2496 | enum bpf_cgroup_iter_order order; | |
2497 | } cgroup; | |
f0d74c4d KFL |
2498 | struct { |
2499 | enum bpf_iter_task_type type; | |
2500 | u32 pid; | |
2501 | } task; | |
f9c79272 YS |
2502 | }; |
2503 | ||
5e7b3020 YS |
2504 | typedef int (*bpf_iter_attach_target_t)(struct bpf_prog *prog, |
2505 | union bpf_iter_link_info *linfo, | |
2506 | struct bpf_iter_aux_info *aux); | |
2507 | typedef void (*bpf_iter_detach_target_t)(struct bpf_iter_aux_info *aux); | |
6b0a249a YS |
2508 | typedef void (*bpf_iter_show_fdinfo_t) (const struct bpf_iter_aux_info *aux, |
2509 | struct seq_file *seq); | |
2510 | typedef int (*bpf_iter_fill_link_info_t)(const struct bpf_iter_aux_info *aux, | |
2511 | struct bpf_link_info *info); | |
3cee6fb8 MKL |
2512 | typedef const struct bpf_func_proto * |
2513 | (*bpf_iter_get_func_proto_t)(enum bpf_func_id func_id, | |
2514 | const struct bpf_prog *prog); | |
a5cbe05a | 2515 | |
cf83b2d2 YS |
2516 | enum bpf_iter_feature { |
2517 | BPF_ITER_RESCHED = BIT(0), | |
2518 | }; | |
2519 | ||
3c32cc1b | 2520 | #define BPF_ITER_CTX_ARG_MAX 2 |
ae24345d YS |
2521 | struct bpf_iter_reg { |
2522 | const char *target; | |
5e7b3020 YS |
2523 | bpf_iter_attach_target_t attach_target; |
2524 | bpf_iter_detach_target_t detach_target; | |
6b0a249a YS |
2525 | bpf_iter_show_fdinfo_t show_fdinfo; |
2526 | bpf_iter_fill_link_info_t fill_link_info; | |
3cee6fb8 | 2527 | bpf_iter_get_func_proto_t get_func_proto; |
3c32cc1b | 2528 | u32 ctx_arg_info_size; |
cf83b2d2 | 2529 | u32 feature; |
3c32cc1b | 2530 | struct bpf_ctx_arg_aux ctx_arg_info[BPF_ITER_CTX_ARG_MAX]; |
14fc6bd6 | 2531 | const struct bpf_iter_seq_info *seq_info; |
ae24345d YS |
2532 | }; |
2533 | ||
e5158d98 YS |
2534 | struct bpf_iter_meta { |
2535 | __bpf_md_ptr(struct seq_file *, seq); | |
2536 | u64 session_id; | |
2537 | u64 seq_num; | |
2538 | }; | |
2539 | ||
a5cbe05a YS |
2540 | struct bpf_iter__bpf_map_elem { |
2541 | __bpf_md_ptr(struct bpf_iter_meta *, meta); | |
2542 | __bpf_md_ptr(struct bpf_map *, map); | |
2543 | __bpf_md_ptr(void *, key); | |
2544 | __bpf_md_ptr(void *, value); | |
2545 | }; | |
2546 | ||
15172a46 | 2547 | int bpf_iter_reg_target(const struct bpf_iter_reg *reg_info); |
ab2ee4fc | 2548 | void bpf_iter_unreg_target(const struct bpf_iter_reg *reg_info); |
15d83c4d | 2549 | bool bpf_iter_prog_supported(struct bpf_prog *prog); |
3cee6fb8 MKL |
2550 | const struct bpf_func_proto * |
2551 | bpf_iter_get_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog); | |
af2ac3e1 | 2552 | int bpf_iter_link_attach(const union bpf_attr *attr, bpfptr_t uattr, struct bpf_prog *prog); |
ac51d99b | 2553 | int bpf_iter_new_fd(struct bpf_link *link); |
367ec3e4 | 2554 | bool bpf_link_is_iter(struct bpf_link *link); |
e5158d98 YS |
2555 | struct bpf_prog *bpf_iter_get_info(struct bpf_iter_meta *meta, bool in_stop); |
2556 | int bpf_iter_run_prog(struct bpf_prog *prog, void *ctx); | |
b76f2226 YS |
2557 | void bpf_iter_map_show_fdinfo(const struct bpf_iter_aux_info *aux, |
2558 | struct seq_file *seq); | |
2559 | int bpf_iter_map_fill_link_info(const struct bpf_iter_aux_info *aux, | |
2560 | struct bpf_link_info *info); | |
ae24345d | 2561 | |
314ee05e YS |
2562 | int map_set_for_each_callback_args(struct bpf_verifier_env *env, |
2563 | struct bpf_func_state *caller, | |
2564 | struct bpf_func_state *callee); | |
2565 | ||
15a07b33 AS |
2566 | int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value); |
2567 | int bpf_percpu_array_copy(struct bpf_map *map, void *key, void *value); | |
2568 | int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value, | |
2569 | u64 flags); | |
2570 | int bpf_percpu_array_update(struct bpf_map *map, void *key, void *value, | |
2571 | u64 flags); | |
d056a788 | 2572 | |
557c0c6e | 2573 | int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value); |
15a07b33 | 2574 | |
d056a788 DB |
2575 | int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file, |
2576 | void *key, void *value, u64 map_flags); | |
14dc6f04 | 2577 | int bpf_fd_array_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); |
bcc6b1b7 MKL |
2578 | int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file, |
2579 | void *key, void *value, u64 map_flags); | |
14dc6f04 | 2580 | int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value); |
d056a788 | 2581 | |
6e71b04a | 2582 | int bpf_get_file_flag(int flags); |
af2ac3e1 | 2583 | int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size, |
dcab51f1 | 2584 | size_t actual_size); |
6e71b04a | 2585 | |
61e021f3 | 2586 | /* verify correctness of eBPF program */ |
47a71c1f | 2587 | int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size); |
a643bff7 AN |
2588 | |
2589 | #ifndef CONFIG_BPF_JIT_ALWAYS_ON | |
1ea47e01 | 2590 | void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth); |
a643bff7 | 2591 | #endif |
46f55cff | 2592 | |
76654e67 AM |
2593 | struct btf *bpf_get_btf_vmlinux(void); |
2594 | ||
46f55cff | 2595 | /* Map specifics */ |
d53ad5d8 | 2596 | struct xdp_frame; |
6d5fc195 | 2597 | struct sk_buff; |
e6a4750f BT |
2598 | struct bpf_dtab_netdev; |
2599 | struct bpf_cpu_map_entry; | |
67f29e07 | 2600 | |
d839a731 | 2601 | void __dev_flush(struct list_head *flush_list); |
d53ad5d8 | 2602 | int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, |
1d233886 | 2603 | struct net_device *dev_rx); |
d53ad5d8 | 2604 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, |
38edddb8 | 2605 | struct net_device *dev_rx); |
d53ad5d8 | 2606 | int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, |
e624d4ed | 2607 | struct bpf_map *map, bool exclude_ingress); |
6d5fc195 | 2608 | int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb, |
7cd1107f | 2609 | const struct bpf_prog *xdp_prog); |
e624d4ed | 2610 | int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, |
7cd1107f AL |
2611 | const struct bpf_prog *xdp_prog, |
2612 | struct bpf_map *map, bool exclude_ingress); | |
46f55cff | 2613 | |
d839a731 | 2614 | void __cpu_map_flush(struct list_head *flush_list); |
d53ad5d8 | 2615 | int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf, |
9c270af3 | 2616 | struct net_device *dev_rx); |
11941f8a KKD |
2617 | int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, |
2618 | struct sk_buff *skb); | |
9c270af3 | 2619 | |
96eabe7a MKL |
2620 | /* Return map's numa specified by userspace */ |
2621 | static inline int bpf_map_attr_numa_node(const union bpf_attr *attr) | |
2622 | { | |
2623 | return (attr->map_flags & BPF_F_NUMA_NODE) ? | |
2624 | attr->numa_node : NUMA_NO_NODE; | |
2625 | } | |
2626 | ||
040ee692 | 2627 | struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type); |
5dc4c4b7 | 2628 | int array_map_alloc_check(union bpf_attr *attr); |
040ee692 | 2629 | |
c695865c SF |
2630 | int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr, |
2631 | union bpf_attr __user *uattr); | |
2632 | int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr, | |
2633 | union bpf_attr __user *uattr); | |
da00d2f1 KS |
2634 | int bpf_prog_test_run_tracing(struct bpf_prog *prog, |
2635 | const union bpf_attr *kattr, | |
2636 | union bpf_attr __user *uattr); | |
c695865c SF |
2637 | int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, |
2638 | const union bpf_attr *kattr, | |
2639 | union bpf_attr __user *uattr); | |
1b4d60ec SL |
2640 | int bpf_prog_test_run_raw_tp(struct bpf_prog *prog, |
2641 | const union bpf_attr *kattr, | |
2642 | union bpf_attr __user *uattr); | |
7c32e8f8 LB |
2643 | int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, |
2644 | const union bpf_attr *kattr, | |
2645 | union bpf_attr __user *uattr); | |
2b99ef22 FW |
2646 | int bpf_prog_test_run_nf(struct bpf_prog *prog, |
2647 | const union bpf_attr *kattr, | |
2648 | union bpf_attr __user *uattr); | |
9e15db66 AS |
2649 | bool btf_ctx_access(int off, int size, enum bpf_access_type type, |
2650 | const struct bpf_prog *prog, | |
2651 | struct bpf_insn_access_aux *info); | |
35346ab6 HT |
2652 | |
2653 | static inline bool bpf_tracing_ctx_access(int off, int size, | |
2654 | enum bpf_access_type type) | |
2655 | { | |
2656 | if (off < 0 || off >= sizeof(__u64) * MAX_BPF_FUNC_ARGS) | |
2657 | return false; | |
2658 | if (type != BPF_READ) | |
2659 | return false; | |
2660 | if (off % size != 0) | |
2661 | return false; | |
2662 | return true; | |
2663 | } | |
2664 | ||
2665 | static inline bool bpf_tracing_btf_ctx_access(int off, int size, | |
2666 | enum bpf_access_type type, | |
2667 | const struct bpf_prog *prog, | |
2668 | struct bpf_insn_access_aux *info) | |
2669 | { | |
2670 | if (!bpf_tracing_ctx_access(off, size, type)) | |
2671 | return false; | |
2672 | return btf_ctx_access(off, size, type, prog, info); | |
2673 | } | |
2674 | ||
6728aea7 KKD |
2675 | int btf_struct_access(struct bpf_verifier_log *log, |
2676 | const struct bpf_reg_state *reg, | |
2677 | int off, int size, enum bpf_access_type atype, | |
63260df1 | 2678 | u32 *next_btf_id, enum bpf_type_flag *flag, const char **field_name); |
faaf4a79 | 2679 | bool btf_struct_ids_match(struct bpf_verifier_log *log, |
22dc4a0f | 2680 | const struct btf *btf, u32 id, int off, |
2ab3b380 KKD |
2681 | const struct btf *need_btf, u32 need_type_id, |
2682 | bool strict); | |
9e15db66 | 2683 | |
fec56f58 AS |
2684 | int btf_distill_func_proto(struct bpf_verifier_log *log, |
2685 | struct btf *btf, | |
2686 | const struct btf_type *func_proto, | |
2687 | const char *func_name, | |
2688 | struct btf_func_model *m); | |
2689 | ||
51c39bb1 | 2690 | struct bpf_reg_state; |
4ba1d0f2 | 2691 | int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog); |
efc68158 | 2692 | int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog, |
be8704ff | 2693 | struct btf *btf, const struct btf_type *t); |
b9ae0c9d KKD |
2694 | const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt, |
2695 | int comp_idx, const char *tag_key); | |
522bb2c1 AN |
2696 | int btf_find_next_decl_tag(const struct btf *btf, const struct btf_type *pt, |
2697 | int comp_idx, const char *tag_key, int last_id); | |
8c1b6e69 | 2698 | |
7e6897f9 | 2699 | struct bpf_prog *bpf_prog_by_id(u32 id); |
005142b8 | 2700 | struct bpf_link *bpf_link_by_id(u32 id); |
7e6897f9 | 2701 | |
bbc1d247 AN |
2702 | const struct bpf_func_proto *bpf_base_func_proto(enum bpf_func_id func_id, |
2703 | const struct bpf_prog *prog); | |
a10787e6 | 2704 | void bpf_task_storage_free(struct task_struct *task); |
c4bcfb38 | 2705 | void bpf_cgrp_storage_free(struct cgroup *cgroup); |
e6ac2450 MKL |
2706 | bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog); |
2707 | const struct btf_func_model * | |
2708 | bpf_jit_find_kfunc_model(const struct bpf_prog *prog, | |
2709 | const struct bpf_insn *insn); | |
1cf3bfc6 IL |
2710 | int bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, |
2711 | u16 btf_fd_idx, u8 **func_addr); | |
2712 | ||
fbd94c7a AS |
2713 | struct bpf_core_ctx { |
2714 | struct bpf_verifier_log *log; | |
2715 | const struct btf *btf; | |
2716 | }; | |
2717 | ||
57539b1c DV |
2718 | bool btf_nested_type_is_trusted(struct bpf_verifier_log *log, |
2719 | const struct bpf_reg_state *reg, | |
63260df1 | 2720 | const char *field_name, u32 btf_id, const char *suffix); |
57539b1c | 2721 | |
b613d335 DV |
2722 | bool btf_type_ids_nocast_alias(struct bpf_verifier_log *log, |
2723 | const struct btf *reg_btf, u32 reg_id, | |
2724 | const struct btf *arg_btf, u32 arg_id); | |
2725 | ||
fbd94c7a AS |
2726 | int bpf_core_apply(struct bpf_core_ctx *ctx, const struct bpf_core_relo *relo, |
2727 | int relo_idx, void *insn); | |
2728 | ||
44a3918c JP |
2729 | static inline bool unprivileged_ebpf_enabled(void) |
2730 | { | |
2731 | return !sysctl_unprivileged_bpf_disabled; | |
2732 | } | |
2733 | ||
24426654 MKL |
2734 | /* Not all bpf prog type has the bpf_ctx. |
2735 | * For the bpf prog type that has initialized the bpf_ctx, | |
2736 | * this function can be used to decide if a kernel function | |
2737 | * is called by a bpf program. | |
2738 | */ | |
2739 | static inline bool has_current_bpf_ctx(void) | |
2740 | { | |
2741 | return !!current->bpf_ctx; | |
2742 | } | |
05b24ff9 JO |
2743 | |
2744 | void notrace bpf_prog_inc_misses_counter(struct bpf_prog *prog); | |
8357b366 JK |
2745 | |
2746 | void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, | |
2747 | enum bpf_dynptr_type type, u32 offset, u32 size); | |
2748 | void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr); | |
2749 | void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr); | |
9a675ba5 | 2750 | |
9c270af3 | 2751 | #else /* !CONFIG_BPF_SYSCALL */ |
0fc174de DB |
2752 | static inline struct bpf_prog *bpf_prog_get(u32 ufd) |
2753 | { | |
2754 | return ERR_PTR(-EOPNOTSUPP); | |
2755 | } | |
2756 | ||
248f346f JK |
2757 | static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, |
2758 | enum bpf_prog_type type, | |
288b3de5 | 2759 | bool attach_drv) |
248f346f JK |
2760 | { |
2761 | return ERR_PTR(-EOPNOTSUPP); | |
2762 | } | |
2763 | ||
85192dbf | 2764 | static inline void bpf_prog_add(struct bpf_prog *prog, int i) |
cc2e0b3f | 2765 | { |
cc2e0b3f | 2766 | } |
113214be | 2767 | |
c540594f DB |
2768 | static inline void bpf_prog_sub(struct bpf_prog *prog, int i) |
2769 | { | |
2770 | } | |
2771 | ||
0fc174de DB |
2772 | static inline void bpf_prog_put(struct bpf_prog *prog) |
2773 | { | |
2774 | } | |
6d67942d | 2775 | |
85192dbf | 2776 | static inline void bpf_prog_inc(struct bpf_prog *prog) |
aa6a5f3c | 2777 | { |
aa6a5f3c | 2778 | } |
5ccb071e | 2779 | |
a6f6df69 JF |
2780 | static inline struct bpf_prog *__must_check |
2781 | bpf_prog_inc_not_zero(struct bpf_prog *prog) | |
2782 | { | |
2783 | return ERR_PTR(-EOPNOTSUPP); | |
2784 | } | |
2785 | ||
6cc7d1e8 AN |
2786 | static inline void bpf_link_init(struct bpf_link *link, enum bpf_link_type type, |
2787 | const struct bpf_link_ops *ops, | |
2788 | struct bpf_prog *prog) | |
2789 | { | |
2790 | } | |
2791 | ||
61c6fefa AN |
2792 | static inline void bpf_link_init_sleepable(struct bpf_link *link, enum bpf_link_type type, |
2793 | const struct bpf_link_ops *ops, struct bpf_prog *prog, | |
2794 | bool sleepable) | |
2795 | { | |
2796 | } | |
2797 | ||
6cc7d1e8 AN |
2798 | static inline int bpf_link_prime(struct bpf_link *link, |
2799 | struct bpf_link_primer *primer) | |
2800 | { | |
2801 | return -EOPNOTSUPP; | |
2802 | } | |
2803 | ||
2804 | static inline int bpf_link_settle(struct bpf_link_primer *primer) | |
2805 | { | |
2806 | return -EOPNOTSUPP; | |
2807 | } | |
2808 | ||
2809 | static inline void bpf_link_cleanup(struct bpf_link_primer *primer) | |
2810 | { | |
2811 | } | |
2812 | ||
2813 | static inline void bpf_link_inc(struct bpf_link *link) | |
2814 | { | |
2815 | } | |
2816 | ||
67c3e835 KFL |
2817 | static inline struct bpf_link *bpf_link_inc_not_zero(struct bpf_link *link) |
2818 | { | |
2819 | return NULL; | |
2820 | } | |
2821 | ||
6cc7d1e8 AN |
2822 | static inline void bpf_link_put(struct bpf_link *link) |
2823 | { | |
2824 | } | |
2825 | ||
6e71b04a | 2826 | static inline int bpf_obj_get_user(const char __user *pathname, int flags) |
98589a09 SL |
2827 | { |
2828 | return -EOPNOTSUPP; | |
2829 | } | |
2830 | ||
35f96de0 AN |
2831 | static inline bool bpf_token_capable(const struct bpf_token *token, int cap) |
2832 | { | |
2833 | return capable(cap) || (cap != CAP_SYS_ADMIN && capable(CAP_SYS_ADMIN)); | |
2834 | } | |
2835 | ||
2836 | static inline void bpf_token_inc(struct bpf_token *token) | |
2837 | { | |
2838 | } | |
2839 | ||
2840 | static inline void bpf_token_put(struct bpf_token *token) | |
2841 | { | |
2842 | } | |
2843 | ||
2844 | static inline struct bpf_token *bpf_token_get_from_fd(u32 ufd) | |
2845 | { | |
2846 | return ERR_PTR(-EOPNOTSUPP); | |
2847 | } | |
2848 | ||
d839a731 | 2849 | static inline void __dev_flush(struct list_head *flush_list) |
46f55cff JF |
2850 | { |
2851 | } | |
9c270af3 | 2852 | |
d53ad5d8 | 2853 | struct xdp_frame; |
67f29e07 | 2854 | struct bpf_dtab_netdev; |
e6a4750f | 2855 | struct bpf_cpu_map_entry; |
67f29e07 | 2856 | |
1d233886 | 2857 | static inline |
d53ad5d8 | 2858 | int dev_xdp_enqueue(struct net_device *dev, struct xdp_frame *xdpf, |
1d233886 THJ |
2859 | struct net_device *dev_rx) |
2860 | { | |
2861 | return 0; | |
2862 | } | |
2863 | ||
67f29e07 | 2864 | static inline |
d53ad5d8 | 2865 | int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_frame *xdpf, |
38edddb8 | 2866 | struct net_device *dev_rx) |
67f29e07 JDB |
2867 | { |
2868 | return 0; | |
2869 | } | |
2870 | ||
e624d4ed | 2871 | static inline |
d53ad5d8 | 2872 | int dev_map_enqueue_multi(struct xdp_frame *xdpf, struct net_device *dev_rx, |
e624d4ed HL |
2873 | struct bpf_map *map, bool exclude_ingress) |
2874 | { | |
2875 | return 0; | |
2876 | } | |
2877 | ||
6d5fc195 TM |
2878 | struct sk_buff; |
2879 | ||
2880 | static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, | |
2881 | struct sk_buff *skb, | |
7cd1107f | 2882 | const struct bpf_prog *xdp_prog) |
6d5fc195 TM |
2883 | { |
2884 | return 0; | |
2885 | } | |
2886 | ||
e624d4ed HL |
2887 | static inline |
2888 | int dev_map_redirect_multi(struct net_device *dev, struct sk_buff *skb, | |
7cd1107f AL |
2889 | const struct bpf_prog *xdp_prog, |
2890 | struct bpf_map *map, bool exclude_ingress) | |
e624d4ed HL |
2891 | { |
2892 | return 0; | |
2893 | } | |
2894 | ||
d839a731 | 2895 | static inline void __cpu_map_flush(struct list_head *flush_list) |
9c270af3 JDB |
2896 | { |
2897 | } | |
2898 | ||
9c270af3 | 2899 | static inline int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, |
d53ad5d8 | 2900 | struct xdp_frame *xdpf, |
9c270af3 JDB |
2901 | struct net_device *dev_rx) |
2902 | { | |
2903 | return 0; | |
2904 | } | |
040ee692 | 2905 | |
11941f8a KKD |
2906 | static inline int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu, |
2907 | struct sk_buff *skb) | |
2908 | { | |
2909 | return -EOPNOTSUPP; | |
2910 | } | |
2911 | ||
040ee692 AV |
2912 | static inline struct bpf_prog *bpf_prog_get_type_path(const char *name, |
2913 | enum bpf_prog_type type) | |
2914 | { | |
2915 | return ERR_PTR(-EOPNOTSUPP); | |
2916 | } | |
c695865c SF |
2917 | |
2918 | static inline int bpf_prog_test_run_xdp(struct bpf_prog *prog, | |
2919 | const union bpf_attr *kattr, | |
2920 | union bpf_attr __user *uattr) | |
2921 | { | |
2922 | return -ENOTSUPP; | |
2923 | } | |
2924 | ||
2925 | static inline int bpf_prog_test_run_skb(struct bpf_prog *prog, | |
2926 | const union bpf_attr *kattr, | |
2927 | union bpf_attr __user *uattr) | |
2928 | { | |
2929 | return -ENOTSUPP; | |
2930 | } | |
2931 | ||
da00d2f1 KS |
2932 | static inline int bpf_prog_test_run_tracing(struct bpf_prog *prog, |
2933 | const union bpf_attr *kattr, | |
2934 | union bpf_attr __user *uattr) | |
2935 | { | |
2936 | return -ENOTSUPP; | |
2937 | } | |
2938 | ||
c695865c SF |
2939 | static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog, |
2940 | const union bpf_attr *kattr, | |
2941 | union bpf_attr __user *uattr) | |
2942 | { | |
2943 | return -ENOTSUPP; | |
2944 | } | |
6332be04 | 2945 | |
7c32e8f8 LB |
2946 | static inline int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, |
2947 | const union bpf_attr *kattr, | |
2948 | union bpf_attr __user *uattr) | |
2949 | { | |
2950 | return -ENOTSUPP; | |
2951 | } | |
2952 | ||
6332be04 DB |
2953 | static inline void bpf_map_put(struct bpf_map *map) |
2954 | { | |
2955 | } | |
7e6897f9 BT |
2956 | |
2957 | static inline struct bpf_prog *bpf_prog_by_id(u32 id) | |
2958 | { | |
2959 | return ERR_PTR(-ENOTSUPP); | |
2960 | } | |
6890896b | 2961 | |
d4f7bdb2 | 2962 | static inline int btf_struct_access(struct bpf_verifier_log *log, |
6728aea7 KKD |
2963 | const struct bpf_reg_state *reg, |
2964 | int off, int size, enum bpf_access_type atype, | |
63260df1 AS |
2965 | u32 *next_btf_id, enum bpf_type_flag *flag, |
2966 | const char **field_name) | |
d4f7bdb2 DX |
2967 | { |
2968 | return -EACCES; | |
2969 | } | |
2970 | ||
6890896b | 2971 | static inline const struct bpf_func_proto * |
bbc1d247 | 2972 | bpf_base_func_proto(enum bpf_func_id func_id, const struct bpf_prog *prog) |
6890896b SF |
2973 | { |
2974 | return NULL; | |
2975 | } | |
a10787e6 SL |
2976 | |
2977 | static inline void bpf_task_storage_free(struct task_struct *task) | |
2978 | { | |
2979 | } | |
e6ac2450 MKL |
2980 | |
2981 | static inline bool bpf_prog_has_kfunc_call(const struct bpf_prog *prog) | |
2982 | { | |
2983 | return false; | |
2984 | } | |
2985 | ||
2986 | static inline const struct btf_func_model * | |
2987 | bpf_jit_find_kfunc_model(const struct bpf_prog *prog, | |
2988 | const struct bpf_insn *insn) | |
2989 | { | |
2990 | return NULL; | |
2991 | } | |
44a3918c | 2992 | |
1cf3bfc6 IL |
2993 | static inline int |
2994 | bpf_get_kfunc_addr(const struct bpf_prog *prog, u32 func_id, | |
2995 | u16 btf_fd_idx, u8 **func_addr) | |
2996 | { | |
2997 | return -ENOTSUPP; | |
2998 | } | |
2999 | ||
44a3918c JP |
3000 | static inline bool unprivileged_ebpf_enabled(void) |
3001 | { | |
3002 | return false; | |
3003 | } | |
3004 | ||
24426654 MKL |
3005 | static inline bool has_current_bpf_ctx(void) |
3006 | { | |
3007 | return false; | |
3008 | } | |
05b24ff9 JO |
3009 | |
3010 | static inline void bpf_prog_inc_misses_counter(struct bpf_prog *prog) | |
3011 | { | |
3012 | } | |
c4bcfb38 YS |
3013 | |
3014 | static inline void bpf_cgrp_storage_free(struct cgroup *cgroup) | |
3015 | { | |
3016 | } | |
8357b366 JK |
3017 | |
3018 | static inline void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data, | |
3019 | enum bpf_dynptr_type type, u32 offset, u32 size) | |
3020 | { | |
3021 | } | |
3022 | ||
3023 | static inline void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr) | |
3024 | { | |
3025 | } | |
3026 | ||
3027 | static inline void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr) | |
3028 | { | |
3029 | } | |
61e021f3 | 3030 | #endif /* CONFIG_BPF_SYSCALL */ |
09756af4 | 3031 | |
6a5a148a AB |
3032 | static __always_inline int |
3033 | bpf_probe_read_kernel_common(void *dst, u32 size, const void *unsafe_ptr) | |
3034 | { | |
3035 | int ret = -EFAULT; | |
3036 | ||
3037 | if (IS_ENABLED(CONFIG_BPF_EVENTS)) | |
3038 | ret = copy_from_kernel_nofault(dst, unsafe_ptr, size); | |
3039 | if (unlikely(ret < 0)) | |
3040 | memset(dst, 0, size); | |
3041 | return ret; | |
3042 | } | |
3043 | ||
ab224b9e | 3044 | void __bpf_free_used_btfs(struct btf_mod_pair *used_btfs, u32 len); |
541c3bad | 3045 | |
479321e9 JK |
3046 | static inline struct bpf_prog *bpf_prog_get_type(u32 ufd, |
3047 | enum bpf_prog_type type) | |
3048 | { | |
3049 | return bpf_prog_get_type_dev(ufd, type, false); | |
3050 | } | |
3051 | ||
936f8946 AN |
3052 | void __bpf_free_used_maps(struct bpf_prog_aux *aux, |
3053 | struct bpf_map **used_maps, u32 len); | |
3054 | ||
040ee692 AV |
3055 | bool bpf_prog_get_ok(struct bpf_prog *, enum bpf_prog_type *, bool); |
3056 | ||
ab3f0063 | 3057 | int bpf_prog_offload_compile(struct bpf_prog *prog); |
2b3486bc | 3058 | void bpf_prog_dev_bound_destroy(struct bpf_prog *prog); |
675fc275 JK |
3059 | int bpf_prog_offload_info_fill(struct bpf_prog_info *info, |
3060 | struct bpf_prog *prog); | |
ab3f0063 | 3061 | |
52775b33 JK |
3062 | int bpf_map_offload_info_fill(struct bpf_map_info *info, struct bpf_map *map); |
3063 | ||
a3884572 JK |
3064 | int bpf_map_offload_lookup_elem(struct bpf_map *map, void *key, void *value); |
3065 | int bpf_map_offload_update_elem(struct bpf_map *map, | |
3066 | void *key, void *value, u64 flags); | |
3067 | int bpf_map_offload_delete_elem(struct bpf_map *map, void *key); | |
3068 | int bpf_map_offload_get_next_key(struct bpf_map *map, | |
3069 | void *key, void *next_key); | |
3070 | ||
09728266 | 3071 | bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map); |
a3884572 | 3072 | |
1385d755 | 3073 | struct bpf_offload_dev * |
dd27c2e3 | 3074 | bpf_offload_dev_create(const struct bpf_prog_offload_ops *ops, void *priv); |
602144c2 | 3075 | void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev); |
dd27c2e3 | 3076 | void *bpf_offload_dev_priv(struct bpf_offload_dev *offdev); |
602144c2 JK |
3077 | int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev, |
3078 | struct net_device *netdev); | |
3079 | void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev, | |
3080 | struct net_device *netdev); | |
fd4f227d | 3081 | bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev); |
9fd7c555 | 3082 | |
2147c438 JP |
3083 | void unpriv_ebpf_notify(int new_state); |
3084 | ||
ab3f0063 | 3085 | #if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL) |
3d76a4d3 SF |
3086 | int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, |
3087 | struct bpf_prog_aux *prog_aux); | |
3088 | void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, u32 func_id); | |
2b3486bc | 3089 | int bpf_prog_dev_bound_init(struct bpf_prog *prog, union bpf_attr *attr); |
fd7c211d | 3090 | int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, struct bpf_prog *old_prog); |
2b3486bc | 3091 | void bpf_dev_bound_netdev_unregister(struct net_device *dev); |
ab3f0063 | 3092 | |
0d830032 | 3093 | static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) |
2b3486bc SF |
3094 | { |
3095 | return aux->dev_bound; | |
3096 | } | |
ab3f0063 | 3097 | |
9d03ebc7 | 3098 | static inline bool bpf_prog_is_offloaded(const struct bpf_prog_aux *aux) |
ab3f0063 | 3099 | { |
9a18eedb | 3100 | return aux->offload_requested; |
ab3f0063 | 3101 | } |
a3884572 | 3102 | |
fd7c211d THJ |
3103 | bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs); |
3104 | ||
9d03ebc7 | 3105 | static inline bool bpf_map_is_offloaded(struct bpf_map *map) |
a3884572 JK |
3106 | { |
3107 | return unlikely(map->ops == &bpf_map_offload_ops); | |
3108 | } | |
3109 | ||
3110 | struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr); | |
3111 | void bpf_map_offload_map_free(struct bpf_map *map); | |
9629363c | 3112 | u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map); |
79a7f8bd AS |
3113 | int bpf_prog_test_run_syscall(struct bpf_prog *prog, |
3114 | const union bpf_attr *kattr, | |
3115 | union bpf_attr __user *uattr); | |
17edea21 CW |
3116 | |
3117 | int sock_map_get_from_fd(const union bpf_attr *attr, struct bpf_prog *prog); | |
3118 | int sock_map_prog_detach(const union bpf_attr *attr, enum bpf_prog_type ptype); | |
3119 | int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, u64 flags); | |
748cd572 DZ |
3120 | int sock_map_bpf_prog_query(const union bpf_attr *attr, |
3121 | union bpf_attr __user *uattr); | |
699c23f0 | 3122 | int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog); |
748cd572 | 3123 | |
17edea21 | 3124 | void sock_map_unhash(struct sock *sk); |
d8616ee2 | 3125 | void sock_map_destroy(struct sock *sk); |
17edea21 | 3126 | void sock_map_close(struct sock *sk, long timeout); |
ab3f0063 | 3127 | #else |
3d76a4d3 SF |
3128 | static inline int bpf_dev_bound_kfunc_check(struct bpf_verifier_log *log, |
3129 | struct bpf_prog_aux *prog_aux) | |
3130 | { | |
3131 | return -EOPNOTSUPP; | |
3132 | } | |
3133 | ||
3134 | static inline void *bpf_dev_bound_resolve_kfunc(struct bpf_prog *prog, | |
3135 | u32 func_id) | |
3136 | { | |
3137 | return NULL; | |
3138 | } | |
3139 | ||
2b3486bc | 3140 | static inline int bpf_prog_dev_bound_init(struct bpf_prog *prog, |
3d76a4d3 | 3141 | union bpf_attr *attr) |
ab3f0063 JK |
3142 | { |
3143 | return -EOPNOTSUPP; | |
3144 | } | |
3145 | ||
fd7c211d THJ |
3146 | static inline int bpf_prog_dev_bound_inherit(struct bpf_prog *new_prog, |
3147 | struct bpf_prog *old_prog) | |
3148 | { | |
3149 | return -EOPNOTSUPP; | |
3150 | } | |
3151 | ||
2b3486bc SF |
3152 | static inline void bpf_dev_bound_netdev_unregister(struct net_device *dev) |
3153 | { | |
3154 | } | |
3155 | ||
3156 | static inline bool bpf_prog_is_dev_bound(const struct bpf_prog_aux *aux) | |
3157 | { | |
3158 | return false; | |
3159 | } | |
3160 | ||
9d03ebc7 | 3161 | static inline bool bpf_prog_is_offloaded(struct bpf_prog_aux *aux) |
ab3f0063 JK |
3162 | { |
3163 | return false; | |
3164 | } | |
a3884572 | 3165 | |
fd7c211d | 3166 | static inline bool bpf_prog_dev_bound_match(const struct bpf_prog *lhs, const struct bpf_prog *rhs) |
ab3f0063 JK |
3167 | { |
3168 | return false; | |
3169 | } | |
a3884572 | 3170 | |
9d03ebc7 | 3171 | static inline bool bpf_map_is_offloaded(struct bpf_map *map) |
a3884572 JK |
3172 | { |
3173 | return false; | |
3174 | } | |
3175 | ||
3176 | static inline struct bpf_map *bpf_map_offload_map_alloc(union bpf_attr *attr) | |
3177 | { | |
3178 | return ERR_PTR(-EOPNOTSUPP); | |
3179 | } | |
3180 | ||
3181 | static inline void bpf_map_offload_map_free(struct bpf_map *map) | |
3182 | { | |
3183 | } | |
79a7f8bd | 3184 | |
9629363c YS |
3185 | static inline u64 bpf_map_offload_map_mem_usage(const struct bpf_map *map) |
3186 | { | |
3187 | return 0; | |
3188 | } | |
3189 | ||
79a7f8bd AS |
3190 | static inline int bpf_prog_test_run_syscall(struct bpf_prog *prog, |
3191 | const union bpf_attr *kattr, | |
3192 | union bpf_attr __user *uattr) | |
3193 | { | |
3194 | return -ENOTSUPP; | |
3195 | } | |
fdb5c453 | 3196 | |
88759609 | 3197 | #ifdef CONFIG_BPF_SYSCALL |
604326b4 DB |
3198 | static inline int sock_map_get_from_fd(const union bpf_attr *attr, |
3199 | struct bpf_prog *prog) | |
fdb5c453 SY |
3200 | { |
3201 | return -EINVAL; | |
3202 | } | |
bb0de313 LB |
3203 | |
3204 | static inline int sock_map_prog_detach(const union bpf_attr *attr, | |
3205 | enum bpf_prog_type ptype) | |
3206 | { | |
3207 | return -EOPNOTSUPP; | |
3208 | } | |
13b79d3f LB |
3209 | |
3210 | static inline int sock_map_update_elem_sys(struct bpf_map *map, void *key, void *value, | |
3211 | u64 flags) | |
3212 | { | |
3213 | return -EOPNOTSUPP; | |
3214 | } | |
748cd572 DZ |
3215 | |
3216 | static inline int sock_map_bpf_prog_query(const union bpf_attr *attr, | |
3217 | union bpf_attr __user *uattr) | |
3218 | { | |
3219 | return -EINVAL; | |
3220 | } | |
699c23f0 YS |
3221 | |
3222 | static inline int sock_map_link_create(const union bpf_attr *attr, struct bpf_prog *prog) | |
3223 | { | |
3224 | return -EOPNOTSUPP; | |
3225 | } | |
17edea21 CW |
3226 | #endif /* CONFIG_BPF_SYSCALL */ |
3227 | #endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */ | |
5dc4c4b7 | 3228 | |
dd865789 JO |
3229 | static __always_inline void |
3230 | bpf_prog_inc_misses_counters(const struct bpf_prog_array *array) | |
3231 | { | |
3232 | const struct bpf_prog_array_item *item; | |
3233 | struct bpf_prog *prog; | |
3234 | ||
3235 | if (unlikely(!array)) | |
3236 | return; | |
3237 | ||
3238 | item = &array->items[0]; | |
3239 | while ((prog = READ_ONCE(item->prog))) { | |
3240 | bpf_prog_inc_misses_counter(prog); | |
3241 | item++; | |
3242 | } | |
3243 | } | |
3244 | ||
17edea21 CW |
3245 | #if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) |
3246 | void bpf_sk_reuseport_detach(struct sock *sk); | |
3247 | int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key, | |
3248 | void *value); | |
3249 | int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key, | |
3250 | void *value, u64 map_flags); | |
3251 | #else | |
3252 | static inline void bpf_sk_reuseport_detach(struct sock *sk) | |
3253 | { | |
3254 | } | |
5dc4c4b7 | 3255 | |
17edea21 | 3256 | #ifdef CONFIG_BPF_SYSCALL |
5dc4c4b7 MKL |
3257 | static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, |
3258 | void *key, void *value) | |
3259 | { | |
3260 | return -EOPNOTSUPP; | |
3261 | } | |
3262 | ||
3263 | static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, | |
3264 | void *key, void *value, | |
3265 | u64 map_flags) | |
3266 | { | |
3267 | return -EOPNOTSUPP; | |
3268 | } | |
3269 | #endif /* CONFIG_BPF_SYSCALL */ | |
3270 | #endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */ | |
3271 | ||
d0003ec0 | 3272 | /* verifier prototypes for helper functions called from eBPF programs */ |
a2c83fff DB |
3273 | extern const struct bpf_func_proto bpf_map_lookup_elem_proto; |
3274 | extern const struct bpf_func_proto bpf_map_update_elem_proto; | |
3275 | extern const struct bpf_func_proto bpf_map_delete_elem_proto; | |
f1a2e44a MV |
3276 | extern const struct bpf_func_proto bpf_map_push_elem_proto; |
3277 | extern const struct bpf_func_proto bpf_map_pop_elem_proto; | |
3278 | extern const struct bpf_func_proto bpf_map_peek_elem_proto; | |
07343110 | 3279 | extern const struct bpf_func_proto bpf_map_lookup_percpu_elem_proto; |
d0003ec0 | 3280 | |
03e69b50 | 3281 | extern const struct bpf_func_proto bpf_get_prandom_u32_proto; |
c04167ce | 3282 | extern const struct bpf_func_proto bpf_get_smp_processor_id_proto; |
2d0e30c3 | 3283 | extern const struct bpf_func_proto bpf_get_numa_node_id_proto; |
04fd61ab | 3284 | extern const struct bpf_func_proto bpf_tail_call_proto; |
17ca8cbf | 3285 | extern const struct bpf_func_proto bpf_ktime_get_ns_proto; |
71d19214 | 3286 | extern const struct bpf_func_proto bpf_ktime_get_boot_ns_proto; |
c8996c98 | 3287 | extern const struct bpf_func_proto bpf_ktime_get_tai_ns_proto; |
ffeedafb AS |
3288 | extern const struct bpf_func_proto bpf_get_current_pid_tgid_proto; |
3289 | extern const struct bpf_func_proto bpf_get_current_uid_gid_proto; | |
3290 | extern const struct bpf_func_proto bpf_get_current_comm_proto; | |
d5a3b1f6 | 3291 | extern const struct bpf_func_proto bpf_get_stackid_proto; |
c195651e | 3292 | extern const struct bpf_func_proto bpf_get_stack_proto; |
d4dd9775 | 3293 | extern const struct bpf_func_proto bpf_get_stack_sleepable_proto; |
fa28dcb8 | 3294 | extern const struct bpf_func_proto bpf_get_task_stack_proto; |
d4dd9775 | 3295 | extern const struct bpf_func_proto bpf_get_task_stack_sleepable_proto; |
7b04d6d6 SL |
3296 | extern const struct bpf_func_proto bpf_get_stackid_proto_pe; |
3297 | extern const struct bpf_func_proto bpf_get_stack_proto_pe; | |
174a79ff | 3298 | extern const struct bpf_func_proto bpf_sock_map_update_proto; |
81110384 | 3299 | extern const struct bpf_func_proto bpf_sock_hash_update_proto; |
bf6fa2c8 | 3300 | extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto; |
0f09abd1 | 3301 | extern const struct bpf_func_proto bpf_get_current_ancestor_cgroup_id_proto; |
bed89185 | 3302 | extern const struct bpf_func_proto bpf_get_cgroup_classid_curr_proto; |
7f628741 | 3303 | extern const struct bpf_func_proto bpf_current_task_under_cgroup_proto; |
604326b4 DB |
3304 | extern const struct bpf_func_proto bpf_msg_redirect_hash_proto; |
3305 | extern const struct bpf_func_proto bpf_msg_redirect_map_proto; | |
3306 | extern const struct bpf_func_proto bpf_sk_redirect_hash_proto; | |
3307 | extern const struct bpf_func_proto bpf_sk_redirect_map_proto; | |
d83525ca AS |
3308 | extern const struct bpf_func_proto bpf_spin_lock_proto; |
3309 | extern const struct bpf_func_proto bpf_spin_unlock_proto; | |
cd339431 | 3310 | extern const struct bpf_func_proto bpf_get_local_storage_proto; |
d7a4cb9b AI |
3311 | extern const struct bpf_func_proto bpf_strtol_proto; |
3312 | extern const struct bpf_func_proto bpf_strtoul_proto; | |
0d01da6a | 3313 | extern const struct bpf_func_proto bpf_tcp_sock_proto; |
5576b991 | 3314 | extern const struct bpf_func_proto bpf_jiffies64_proto; |
b4490c5c | 3315 | extern const struct bpf_func_proto bpf_get_ns_current_pid_tgid_proto; |
0456ea17 | 3316 | extern const struct bpf_func_proto bpf_event_output_data_proto; |
457f4436 AN |
3317 | extern const struct bpf_func_proto bpf_ringbuf_output_proto; |
3318 | extern const struct bpf_func_proto bpf_ringbuf_reserve_proto; | |
3319 | extern const struct bpf_func_proto bpf_ringbuf_submit_proto; | |
3320 | extern const struct bpf_func_proto bpf_ringbuf_discard_proto; | |
3321 | extern const struct bpf_func_proto bpf_ringbuf_query_proto; | |
bc34dee6 JK |
3322 | extern const struct bpf_func_proto bpf_ringbuf_reserve_dynptr_proto; |
3323 | extern const struct bpf_func_proto bpf_ringbuf_submit_dynptr_proto; | |
3324 | extern const struct bpf_func_proto bpf_ringbuf_discard_dynptr_proto; | |
af7ec138 | 3325 | extern const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto; |
478cfbdf YS |
3326 | extern const struct bpf_func_proto bpf_skc_to_tcp_sock_proto; |
3327 | extern const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto; | |
3328 | extern const struct bpf_func_proto bpf_skc_to_tcp_request_sock_proto; | |
0d4fad3e | 3329 | extern const struct bpf_func_proto bpf_skc_to_udp6_sock_proto; |
9eeb3aa3 | 3330 | extern const struct bpf_func_proto bpf_skc_to_unix_sock_proto; |
3bc253c2 | 3331 | extern const struct bpf_func_proto bpf_skc_to_mptcp_sock_proto; |
07be4c4a | 3332 | extern const struct bpf_func_proto bpf_copy_from_user_proto; |
c4d0bfb4 | 3333 | extern const struct bpf_func_proto bpf_snprintf_btf_proto; |
7b15523a | 3334 | extern const struct bpf_func_proto bpf_snprintf_proto; |
eaa6bcb7 | 3335 | extern const struct bpf_func_proto bpf_per_cpu_ptr_proto; |
63d9b80d | 3336 | extern const struct bpf_func_proto bpf_this_cpu_ptr_proto; |
d0551261 | 3337 | extern const struct bpf_func_proto bpf_ktime_get_coarse_ns_proto; |
b60da495 | 3338 | extern const struct bpf_func_proto bpf_sock_from_file_proto; |
c5dbb89f | 3339 | extern const struct bpf_func_proto bpf_get_socket_ptr_cookie_proto; |
0593dd34 | 3340 | extern const struct bpf_func_proto bpf_task_storage_get_recur_proto; |
a10787e6 | 3341 | extern const struct bpf_func_proto bpf_task_storage_get_proto; |
0593dd34 | 3342 | extern const struct bpf_func_proto bpf_task_storage_delete_recur_proto; |
a10787e6 | 3343 | extern const struct bpf_func_proto bpf_task_storage_delete_proto; |
69c087ba | 3344 | extern const struct bpf_func_proto bpf_for_each_map_elem_proto; |
3d78417b | 3345 | extern const struct bpf_func_proto bpf_btf_find_by_name_kind_proto; |
3cee6fb8 MKL |
3346 | extern const struct bpf_func_proto bpf_sk_setsockopt_proto; |
3347 | extern const struct bpf_func_proto bpf_sk_getsockopt_proto; | |
9113d7e4 SF |
3348 | extern const struct bpf_func_proto bpf_unlocked_sk_setsockopt_proto; |
3349 | extern const struct bpf_func_proto bpf_unlocked_sk_getsockopt_proto; | |
7c7e3d31 | 3350 | extern const struct bpf_func_proto bpf_find_vma_proto; |
e6f2dd0f | 3351 | extern const struct bpf_func_proto bpf_loop_proto; |
376040e4 | 3352 | extern const struct bpf_func_proto bpf_copy_from_user_task_proto; |
69fd337a SF |
3353 | extern const struct bpf_func_proto bpf_set_retval_proto; |
3354 | extern const struct bpf_func_proto bpf_get_retval_proto; | |
20571567 | 3355 | extern const struct bpf_func_proto bpf_user_ringbuf_drain_proto; |
c4bcfb38 YS |
3356 | extern const struct bpf_func_proto bpf_cgrp_storage_get_proto; |
3357 | extern const struct bpf_func_proto bpf_cgrp_storage_delete_proto; | |
cd339431 | 3358 | |
958a3f2d JO |
3359 | const struct bpf_func_proto *tracing_prog_func_proto( |
3360 | enum bpf_func_id func_id, const struct bpf_prog *prog); | |
3361 | ||
3ad00405 DB |
3362 | /* Shared helpers among cBPF and eBPF. */ |
3363 | void bpf_user_rnd_init_once(void); | |
3364 | u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); | |
6890896b | 3365 | u64 bpf_get_raw_cpu_id(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5); |
3ad00405 | 3366 | |
c64b7983 | 3367 | #if defined(CONFIG_NET) |
46f8bc92 MKL |
3368 | bool bpf_sock_common_is_valid_access(int off, int size, |
3369 | enum bpf_access_type type, | |
3370 | struct bpf_insn_access_aux *info); | |
c64b7983 JS |
3371 | bool bpf_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
3372 | struct bpf_insn_access_aux *info); | |
3373 | u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, | |
3374 | const struct bpf_insn *si, | |
3375 | struct bpf_insn *insn_buf, | |
3376 | struct bpf_prog *prog, | |
3377 | u32 *target_size); | |
cce4c40b DX |
3378 | int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, |
3379 | struct bpf_dynptr *ptr); | |
c64b7983 | 3380 | #else |
46f8bc92 MKL |
3381 | static inline bool bpf_sock_common_is_valid_access(int off, int size, |
3382 | enum bpf_access_type type, | |
3383 | struct bpf_insn_access_aux *info) | |
3384 | { | |
3385 | return false; | |
3386 | } | |
c64b7983 JS |
3387 | static inline bool bpf_sock_is_valid_access(int off, int size, |
3388 | enum bpf_access_type type, | |
3389 | struct bpf_insn_access_aux *info) | |
3390 | { | |
3391 | return false; | |
3392 | } | |
3393 | static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type, | |
3394 | const struct bpf_insn *si, | |
3395 | struct bpf_insn *insn_buf, | |
3396 | struct bpf_prog *prog, | |
3397 | u32 *target_size) | |
3398 | { | |
3399 | return 0; | |
3400 | } | |
cce4c40b DX |
3401 | static inline int bpf_dynptr_from_skb_rdonly(struct __sk_buff *skb, u64 flags, |
3402 | struct bpf_dynptr *ptr) | |
b5964b96 JK |
3403 | { |
3404 | return -EOPNOTSUPP; | |
3405 | } | |
c64b7983 JS |
3406 | #endif |
3407 | ||
655a51e5 | 3408 | #ifdef CONFIG_INET |
91cc1a99 AS |
3409 | struct sk_reuseport_kern { |
3410 | struct sk_buff *skb; | |
3411 | struct sock *sk; | |
3412 | struct sock *selected_sk; | |
d5e4ddae | 3413 | struct sock *migrating_sk; |
91cc1a99 AS |
3414 | void *data_end; |
3415 | u32 hash; | |
3416 | u32 reuseport_id; | |
3417 | bool bind_inany; | |
3418 | }; | |
655a51e5 MKL |
3419 | bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type, |
3420 | struct bpf_insn_access_aux *info); | |
3421 | ||
3422 | u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, | |
3423 | const struct bpf_insn *si, | |
3424 | struct bpf_insn *insn_buf, | |
3425 | struct bpf_prog *prog, | |
3426 | u32 *target_size); | |
7f94208c Y |
3427 | |
3428 | bool bpf_xdp_sock_is_valid_access(int off, int size, enum bpf_access_type type, | |
3429 | struct bpf_insn_access_aux *info); | |
3430 | ||
3431 | u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, | |
3432 | const struct bpf_insn *si, | |
3433 | struct bpf_insn *insn_buf, | |
3434 | struct bpf_prog *prog, | |
3435 | u32 *target_size); | |
655a51e5 MKL |
3436 | #else |
3437 | static inline bool bpf_tcp_sock_is_valid_access(int off, int size, | |
3438 | enum bpf_access_type type, | |
3439 | struct bpf_insn_access_aux *info) | |
3440 | { | |
3441 | return false; | |
3442 | } | |
3443 | ||
3444 | static inline u32 bpf_tcp_sock_convert_ctx_access(enum bpf_access_type type, | |
3445 | const struct bpf_insn *si, | |
3446 | struct bpf_insn *insn_buf, | |
3447 | struct bpf_prog *prog, | |
3448 | u32 *target_size) | |
3449 | { | |
3450 | return 0; | |
3451 | } | |
7f94208c Y |
3452 | static inline bool bpf_xdp_sock_is_valid_access(int off, int size, |
3453 | enum bpf_access_type type, | |
3454 | struct bpf_insn_access_aux *info) | |
3455 | { | |
3456 | return false; | |
3457 | } | |
3458 | ||
3459 | static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type, | |
3460 | const struct bpf_insn *si, | |
3461 | struct bpf_insn *insn_buf, | |
3462 | struct bpf_prog *prog, | |
3463 | u32 *target_size) | |
3464 | { | |
3465 | return 0; | |
3466 | } | |
655a51e5 MKL |
3467 | #endif /* CONFIG_INET */ |
3468 | ||
5964b200 | 3469 | enum bpf_text_poke_type { |
b553a6ec DB |
3470 | BPF_MOD_CALL, |
3471 | BPF_MOD_JUMP, | |
5964b200 | 3472 | }; |
4b3da77b | 3473 | |
5964b200 AS |
3474 | int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t, |
3475 | void *addr1, void *addr2); | |
3476 | ||
4b7de801 JO |
3477 | void bpf_arch_poke_desc_update(struct bpf_jit_poke_descriptor *poke, |
3478 | struct bpf_prog *new, struct bpf_prog *old); | |
3479 | ||
ebc1415d | 3480 | void *bpf_arch_text_copy(void *dst, void *src, size_t len); |
fe736565 | 3481 | int bpf_arch_text_invalidate(void *dst, size_t len); |
ebc1415d | 3482 | |
eae2e83e | 3483 | struct btf_id_set; |
2af30f11 | 3484 | bool btf_id_set_contains(const struct btf_id_set *set, u32 id); |
eae2e83e | 3485 | |
335ff499 | 3486 | #define MAX_BPRINTF_VARARGS 12 |
e2bb9e01 | 3487 | #define MAX_BPRINTF_BUF 1024 |
335ff499 | 3488 | |
78aa1cc9 JO |
3489 | struct bpf_bprintf_data { |
3490 | u32 *bin_args; | |
e2bb9e01 | 3491 | char *buf; |
78aa1cc9 | 3492 | bool get_bin_args; |
e2bb9e01 | 3493 | bool get_buf; |
78aa1cc9 JO |
3494 | }; |
3495 | ||
48cac3f4 | 3496 | int bpf_bprintf_prepare(char *fmt, u32 fmt_size, const u64 *raw_args, |
78aa1cc9 | 3497 | u32 num_args, struct bpf_bprintf_data *data); |
f19a4050 | 3498 | void bpf_bprintf_cleanup(struct bpf_bprintf_data *data); |
d9c9e4db | 3499 | |
c0e19f2c SF |
3500 | #ifdef CONFIG_BPF_LSM |
3501 | void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype); | |
3502 | void bpf_cgroup_atype_put(int cgroup_atype); | |
3503 | #else | |
3504 | static inline void bpf_cgroup_atype_get(u32 attach_btf_id, int cgroup_atype) {} | |
3505 | static inline void bpf_cgroup_atype_put(int cgroup_atype) {} | |
3506 | #endif /* CONFIG_BPF_LSM */ | |
3507 | ||
f3cf4134 RS |
3508 | struct key; |
3509 | ||
3510 | #ifdef CONFIG_KEYS | |
3511 | struct bpf_key { | |
3512 | struct key *key; | |
3513 | bool has_ref; | |
3514 | }; | |
3515 | #endif /* CONFIG_KEYS */ | |
282de143 KKD |
3516 | |
3517 | static inline bool type_is_alloc(u32 type) | |
3518 | { | |
3519 | return type & MEM_ALLOC; | |
3520 | } | |
3521 | ||
ee53cbfb YS |
3522 | static inline gfp_t bpf_memcg_flags(gfp_t flags) |
3523 | { | |
3524 | if (memcg_bpf_enabled()) | |
3525 | return flags | __GFP_ACCOUNT; | |
3526 | return flags; | |
3527 | } | |
3528 | ||
9af27da6 KKD |
3529 | static inline bool bpf_is_subprog(const struct bpf_prog *prog) |
3530 | { | |
3531 | return prog->aux->func_idx != 0; | |
3532 | } | |
3533 | ||
99c55f7d | 3534 | #endif /* _LINUX_BPF_H */ |