/* SPDX-License-Identifier: GPL-2.0-only */
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 */
#ifndef _LINUX_BPF_VERIFIER_H
#define _LINUX_BPF_VERIFIER_H 1

#include <linux/bpf.h> /* for enum bpf_reg_type */
#include <linux/btf.h> /* for struct btf and btf_id() */
#include <linux/filter.h> /* for MAX_BPF_STACK */
#include <linux/tnum.h>

/* Maximum variable offset umax_value permitted when resolving memory accesses.
 * In practice this is far bigger than any realistic pointer offset; this limit
 * ensures that umax_value + (int)off + (int)size cannot overflow a u64.
 */
#define BPF_MAX_VAR_OFF	(1 << 29)
/* Maximum variable size permitted for ARG_CONST_SIZE[_OR_ZERO]. This ensures
 * that converting umax_value to int cannot overflow.
 */
#define BPF_MAX_VAR_SIZ	(1 << 29)
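/* Worked bound check (editor's sketch, not part of the original header):
 * with umax_value capped below 1 << 29 and 32-bit off/size, the access
 * bound umax_value + (int)off + (int)size stays below
 * 2^29 + 2^31 + 2^31 < 2^33, nowhere near the u64 limit of 2^64 - 1.
 */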
/* size of type_str_buf in bpf_verifier. */
#define TYPE_STR_BUF_LEN	64

/* Liveness marks, used for registers and spilled-regs (in stack slots).
 * Read marks propagate upwards until they find a write mark; they record that
 * "one of this state's descendants read this reg" (and therefore the reg is
 * relevant for states_equal() checks).
 * Write marks collect downwards and do not propagate; they record that "the
 * straight-line code that reached this state (from its parent) wrote this reg"
 * (and therefore that reads propagated from this state or its descendants
 * should not propagate to its parent).
 * A state with a write mark can receive read marks; it just won't propagate
 * them to its parent, since the write mark is a property, not of the state,
 * but of the link between it and its parent. See mark_reg_read() and
 * mark_stack_slot_read() in kernel/bpf/verifier.c.
 */
enum bpf_reg_liveness {
	REG_LIVE_NONE = 0, /* reg hasn't been read or written this branch */
	REG_LIVE_READ32 = 0x1, /* reg was read, so we're sensitive to initial value */
	REG_LIVE_READ64 = 0x2, /* likewise, but full 64-bit content matters */
	REG_LIVE_READ = REG_LIVE_READ32 | REG_LIVE_READ64,
	REG_LIVE_WRITTEN = 0x4, /* reg was written first, screening off later reads */
	REG_LIVE_DONE = 0x8, /* liveness won't be updating this register anymore */
};

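/* Illustrative sketch (editor's example, not part of the original header):
 * in the fragment below, the child state's read of r1 propagates a
 * REG_LIVE_READ mark up to the parent state, while its write to r2 sets
 * REG_LIVE_WRITTEN and screens r2's later reads off from the parent:
 *
 *	r1 = 1;			// parent state: r1 written here
 *	if (r1 > 0) {		// child state begins
 *		r2 = r1;	// read of r1 -> READ mark propagates up;
 *	}			// write of r2 -> WRITTEN, r2 reads stop here
 */
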
struct bpf_reg_state {
	/* Ordering of fields matters. See states_equal() */
	enum bpf_reg_type type;
	/* Fixed part of pointer offset, pointer types only */
	s32 off;
	union {
		/* valid when type == PTR_TO_PACKET */
		int range;

		/* valid when type == CONST_PTR_TO_MAP | PTR_TO_MAP_VALUE |
		 *   PTR_TO_MAP_VALUE_OR_NULL
		 */
		struct {
			struct bpf_map *map_ptr;
			/* To distinguish map lookups from outer map
			 * the map_uid is non-zero for registers
			 * pointing to inner maps.
			 */
			u32 map_uid;
		};

		/* for PTR_TO_BTF_ID */
		struct {
			struct btf *btf;
			u32 btf_id;
		};

		u32 mem_size; /* for PTR_TO_MEM | PTR_TO_MEM_OR_NULL */

		/* Max size from any of the above. */
		struct {
			unsigned long raw1;
			unsigned long raw2;
		} raw;

		u32 subprogno; /* for PTR_TO_FUNC */
	};
	/* For PTR_TO_PACKET, used to find other pointers with the same variable
	 * offset, so they can share range knowledge.
	 * For PTR_TO_MAP_VALUE_OR_NULL this is used to share which map value we
	 * came from, when one is tested for != NULL.
	 * For PTR_TO_MEM_OR_NULL this is used to identify memory allocation
	 * for the purpose of tracking that it's freed.
	 * For PTR_TO_SOCKET this is used to share which pointers retain the
	 * same reference to the socket, to determine proper reference freeing.
	 */
	u32 id;
	/* PTR_TO_SOCKET and PTR_TO_TCP_SOCK could be a ptr returned
	 * from a pointer-cast helper, bpf_sk_fullsock() and
	 * bpf_tcp_sock().
	 *
	 * Consider the following where "sk" is a reference counted
	 * pointer returned from "sk = bpf_sk_lookup_tcp();":
	 *
	 * 1: sk = bpf_sk_lookup_tcp();
	 * 2: if (!sk) { return 0; }
	 * 3: fullsock = bpf_sk_fullsock(sk);
	 * 4: if (!fullsock) { bpf_sk_release(sk); return 0; }
	 * 5: tp = bpf_tcp_sock(fullsock);
	 * 6: if (!tp) { bpf_sk_release(sk); return 0; }
	 * 7: bpf_sk_release(sk);
	 * 8: snd_cwnd = tp->snd_cwnd;  // verifier will complain
	 *
	 * After bpf_sk_release(sk) at line 7, both the "fullsock" ptr and
	 * the "tp" ptr should be invalidated as well. To do that, the regs
	 * holding "fullsock" and "sk" need to remember the original
	 * refcounted ptr id (i.e. sk_reg->id) in ref_obj_id, so that the
	 * verifier can reset all regs whose ref_obj_id matches sk_reg->id.
	 *
	 * sk_reg->ref_obj_id is set to sk_reg->id at line 1.
	 * sk_reg->id is kept for NULL-marking purposes only.
	 * After NULL-marking is done, sk_reg->id can be reset to 0.
	 *
	 * After "fullsock = bpf_sk_fullsock(sk);" at line 3,
	 * fullsock_reg->ref_obj_id is set to sk_reg->ref_obj_id.
	 *
	 * After "tp = bpf_tcp_sock(fullsock);" at line 5,
	 * tp_reg->ref_obj_id is set to fullsock_reg->ref_obj_id,
	 * which is the same as sk_reg->ref_obj_id.
	 *
	 * From the verifier's perspective, if sk, fullsock and tp
	 * are not NULL, they are the same ptr with different
	 * reg->type. In particular, bpf_sk_release(tp) is also
	 * allowed and has the same effect as bpf_sk_release(sk).
	 */
	u32 ref_obj_id;
	/* For scalar types (SCALAR_VALUE), this represents our knowledge of
	 * the actual value.
	 * For pointer types, this represents the variable part of the offset
	 * from the pointed-to object, and is shared with all bpf_reg_states
	 * with the same id as us.
	 */
	struct tnum var_off;
	/* Used to determine if any memory access using this register will
	 * result in a bad access.
	 * These refer to the same value as var_off, not necessarily the actual
	 * contents of the register.
	 */
	s64 smin_value; /* minimum possible (s64)value */
	s64 smax_value; /* maximum possible (s64)value */
	u64 umin_value; /* minimum possible (u64)value */
	u64 umax_value; /* maximum possible (u64)value */
	s32 s32_min_value; /* minimum possible (s32)value */
	s32 s32_max_value; /* maximum possible (s32)value */
	u32 u32_min_value; /* minimum possible (u32)value */
	u32 u32_max_value; /* maximum possible (u32)value */
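	/* Illustrative sketch (editor's example): after "r1 &= 0xff; r1 <<= 2;"
	 * var_off would be tnum(value=0, mask=0x3fc), i.e. only bits 2-9 are
	 * unknown, with umin_value == 0 and umax_value == 0x3fc; the bounds
	 * and var_off describe the same value and are kept consistent.
	 */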
	/* parentage chain for liveness checking */
	struct bpf_reg_state *parent;
	/* Inside the callee two registers can be both PTR_TO_STACK like
	 * R1=fp-8 and R2=fp-8, but one of them points to this function's
	 * stack while the other points to the caller's stack. To
	 * differentiate them, 'frameno' is used: an index into the
	 * bpf_verifier_state->frame[] array pointing to a bpf_func_state.
	 */
	u32 frameno;
	/* Tracks subreg definition. The stored value is the insn_idx of the
	 * writing insn. This is safe because subreg_def is used before any insn
	 * patching, which only happens after main verification finished.
	 */
	s32 subreg_def;
	enum bpf_reg_liveness live;
	/* if (!precise && SCALAR_VALUE) min/max/tnum don't affect safety */
	bool precise;
};

enum bpf_stack_slot_type {
	STACK_INVALID,    /* nothing was stored in this stack slot */
	STACK_SPILL,      /* register spilled into stack */
	STACK_MISC,	  /* BPF program wrote some data into this slot */
	STACK_ZERO,	  /* BPF program wrote constant zero */
};

#define BPF_REG_SIZE 8	/* size of eBPF register in bytes */

struct bpf_stack_state {
	struct bpf_reg_state spilled_ptr;
	u8 slot_type[BPF_REG_SIZE];
};

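/* Illustrative sketch (editor's example, not from the original header):
 * slot_type tracks one mark per byte of the 8-byte slot. A full register
 * spill marks all eight bytes STACK_SPILL and records the register's state
 * in spilled_ptr; a 4-byte store of constant zero marks those four bytes
 * STACK_ZERO while the untouched bytes keep their previous marks
 * (e.g. STACK_INVALID).
 */
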
struct bpf_reference_state {
	/* Track each reference created with a unique id, even if the same
	 * instruction creates the reference multiple times (e.g., via CALL).
	 */
	int id;
	/* Instruction where the allocation of this reference occurred. This
	 * is used purely to inform the user of a reference leak.
	 */
	int insn_idx;
};

/* state of the program:
 * type of all registers and stack info
 */
struct bpf_func_state {
	struct bpf_reg_state regs[MAX_BPF_REG];
	/* index of call instruction that called into this func */
	int callsite;
	/* stack frame number of this function state from pov of
	 * enclosing bpf_verifier_state.
	 * 0 = main function, 1 = first callee.
	 */
	u32 frameno;
	/* subprog number == index within subprog_info
	 * zero == main subprog
	 */
	u32 subprogno;
	/* Every bpf_timer_start will increment async_entry_cnt.
	 * It's used to distinguish:
	 * void foo(void) { for(;;); }
	 * void foo(void) { bpf_timer_set_callback(..., foo); }
	 */
	u32 async_entry_cnt;
	bool in_callback_fn;
	bool in_async_callback_fn;

	/* The following fields should be last. See copy_func_state() */
	int acquired_refs;
	struct bpf_reference_state *refs;
	int allocated_stack;
	struct bpf_stack_state *stack;
};

struct bpf_idx_pair {
	u32 prev_idx;
	u32 idx;
};

struct bpf_id_pair {
	u32 old;
	u32 cur;
};

/* Maximum number of register states that can exist at once */
#define BPF_ID_MAP_SIZE (MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE)
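/* Worked example (editor's sketch, assuming MAX_BPF_REG == 11 and
 * MAX_BPF_STACK == 512): BPF_ID_MAP_SIZE == 11 + 512 / 8 == 75 entries,
 * one per register plus one per 8-byte stack slot.
 */
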
#define MAX_CALL_FRAMES 8
struct bpf_verifier_state {
	/* call stack tracking */
	struct bpf_func_state *frame[MAX_CALL_FRAMES];
	struct bpf_verifier_state *parent;
	/*
	 * 'branches' field is the number of branches left to explore:
	 * 0 - all possible paths from this state reached bpf_exit or
	 *     were safely pruned
	 * 1 - at least one path is being explored.
	 *     This state hasn't reached bpf_exit
	 * 2 - at least two paths are being explored.
	 *     This state is an immediate parent of two children.
	 * One is a fallthrough branch with branches==1 and another
	 * state is pushed into the stack (to be explored later) also with
	 * branches==1. The parent of this state has branches==1.
	 * The verifier state tree connected via 'parent' pointer looks like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 2 -> 1 (second 'if' pushed into stack)
	 * 1
	 * 1
	 * 1 bpf_exit.
	 *
	 * Once do_check() reaches bpf_exit, it calls update_branch_counts()
	 * and the verifier state tree will look like:
	 * 1
	 * 1
	 * 2 -> 1 (first 'if' pushed into stack)
	 * 1
	 * 1 -> 1 (second 'if' pushed into stack)
	 * 0
	 * 0
	 * 0 bpf_exit.
	 * After pop_stack(), do_check() will resume at the second 'if'.
	 *
	 * If is_state_visited() sees a state with branches > 0 it means
	 * there is a loop. If such a state is exactly equal to the current
	 * state it's an infinite loop. Note that states_equal() checks for
	 * state equivalency, so two states being 'states_equal' does not
	 * mean an infinite loop. The exact comparison is provided by the
	 * states_maybe_looping() function. It's a stronger pre-check and
	 * much faster than states_equal().
	 *
	 * This algorithm may not find all possible infinite loops, or the
	 * loop iteration count may be too high. In such cases the
	 * BPF_COMPLEXITY_LIMIT_INSNS limit kicks in.
	 */
	u32 branches;
	u32 insn_idx;
	u32 curframe;
	u32 active_spin_lock;
	bool speculative;

	/* first and last insn idx of this verifier state */
	u32 first_insn_idx;
	u32 last_insn_idx;
	/* jmp history recorded from first to last.
	 * backtracking is using it to go from last to first.
	 * For most states jmp_history_cnt is [0-3].
	 * For loops it can go up to ~40.
	 */
	struct bpf_idx_pair *jmp_history;
	u32 jmp_history_cnt;
};

#define bpf_get_spilled_reg(slot, frame)				\
	(((slot < frame->allocated_stack / BPF_REG_SIZE) &&		\
	  (frame->stack[slot].slot_type[0] == STACK_SPILL))		\
	 ? &frame->stack[slot].spilled_ptr : NULL)

/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
#define bpf_for_each_spilled_reg(iter, frame, reg)			\
	for (iter = 0, reg = bpf_get_spilled_reg(iter, frame);		\
	     iter < frame->allocated_stack / BPF_REG_SIZE;		\
	     iter++, reg = bpf_get_spilled_reg(iter, frame))

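/* Illustrative usage sketch (editor's example, not from the original header):
 *
 *	struct bpf_reg_state *reg;
 *	int i;
 *
 *	bpf_for_each_spilled_reg(i, frame, reg) {
 *		if (!reg)
 *			continue;	// slot i holds no spilled register
 *		// otherwise inspect reg->type, reg->id, ...
 *	}
 */
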
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
	struct bpf_verifier_state state;
	struct bpf_verifier_state_list *next;
	int miss_cnt, hit_cnt;
};

/* Possible states for alu_state member. */
#define BPF_ALU_SANITIZE_SRC	(1U << 0)
#define BPF_ALU_SANITIZE_DST	(1U << 1)
#define BPF_ALU_NEG_VALUE	(1U << 2)
#define BPF_ALU_NON_POINTER	(1U << 3)
#define BPF_ALU_IMMEDIATE	(1U << 4)
#define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

struct bpf_insn_aux_data {
	union {
		enum bpf_reg_type ptr_type;	/* pointer type for load/store insns */
		unsigned long map_ptr_state;	/* pointer/poison value for maps */
		s32 call_imm;			/* saved imm field of call insn */
		u32 alu_limit;			/* limit for add/sub register with pointer */
		struct {
			u32 map_index;		/* index into used_maps[] */
			u32 map_off;		/* offset from value base address */
		};
		struct {
			enum bpf_reg_type reg_type;	/* type of pseudo_btf_id */
			union {
				struct {
					struct btf *btf;
					u32 btf_id;	/* btf_id for struct typed var */
				};
				u32 mem_size;	/* mem_size for non-struct typed var */
			};
		} btf_var;
	};
	u64 map_key_state; /* constant (32 bit) key tracking for maps */
	int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
	u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
	bool sanitize_stack_spill; /* subject to Spectre v4 sanitation */
	bool zext_dst; /* this insn zero extends dst reg */
	u8 alu_state; /* used in combination with alu_limit */

	/* below fields are initialized once */
	unsigned int orig_idx; /* original instruction index */
	bool prune_point;
};

#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
#define MAX_USED_BTFS 64 /* max number of BTFs accessed by one BPF program */

#define BPF_VERIFIER_TMP_LOG_SIZE	1024

struct bpf_verifier_log {
	u32 level;
	char kbuf[BPF_VERIFIER_TMP_LOG_SIZE];
	char __user *ubuf;
	u32 len_used;
	u32 len_total;
};

static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
{
	return log->len_used >= log->len_total - 1;
}

#define BPF_LOG_LEVEL1	1
#define BPF_LOG_LEVEL2	2
#define BPF_LOG_STATS	4
#define BPF_LOG_LEVEL	(BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK	(BPF_LOG_LEVEL | BPF_LOG_STATS)
#define BPF_LOG_KERNEL	(BPF_LOG_MASK + 1) /* kernel internal flag */
#define BPF_LOG_MIN_ALIGNMENT 8U
#define BPF_LOG_ALIGNMENT 40U

static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
	return log &&
		((log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
		 log->level == BPF_LOG_KERNEL);
}

static inline bool
bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	return log->len_total >= 128 && log->len_total <= UINT_MAX >> 2 &&
	       log->level && log->ubuf && !(log->level & ~BPF_LOG_MASK);
}

#define BPF_MAX_SUBPROGS 256

struct bpf_subprog_info {
	/* 'start' has to be the first field otherwise find_subprog() won't work */
	u32 start; /* insn idx of function entry point */
	u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
	u16 stack_depth; /* max. stack depth used by this function */
	bool has_tail_call;
	bool tail_call_reachable;
	bool has_ld_abs;
	bool is_async_cb;
};

/* single container for all structs
 * one verifier_env per bpf_check() call
 */
struct bpf_verifier_env {
	u32 insn_idx;
	u32 prev_insn_idx;
	struct bpf_prog *prog;		/* eBPF program being verified */
	const struct bpf_verifier_ops *ops;
	struct bpf_verifier_stack_elem *head; /* stack of verifier states to be processed */
	int stack_size;			/* number of states to be processed */
	bool strict_alignment;		/* perform strict pointer alignment checks */
	bool test_state_freq;		/* test verifier with different pruning frequency */
	struct bpf_verifier_state *cur_state; /* current verifier state */
	struct bpf_verifier_state_list **explored_states; /* search pruning optimization */
	struct bpf_verifier_state_list *free_list;
	struct bpf_map *used_maps[MAX_USED_MAPS]; /* array of maps used by eBPF program */
	struct btf_mod_pair used_btfs[MAX_USED_BTFS]; /* array of BTFs used by BPF program */
	u32 used_map_cnt;		/* number of used maps */
	u32 used_btf_cnt;		/* number of used BTF objects */
	u32 id_gen;			/* used to generate unique reg IDs */
	bool explore_alu_limits;
	bool allow_ptr_leaks;
	bool allow_uninit_stack;
	bool allow_ptr_to_map_access;
	bool bpf_capable;
	bool bypass_spec_v1;
	bool bypass_spec_v4;
	bool seen_direct_write;
	struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
	const struct bpf_line_info *prev_linfo;
	struct bpf_verifier_log log;
	struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
	struct bpf_id_pair idmap_scratch[BPF_ID_MAP_SIZE];
	struct {
		int *insn_state;
		int *insn_stack;
		int cur_stack;
	} cfg;
	u32 pass_cnt; /* number of times do_check() was called */
	u32 subprog_cnt;
	/* number of instructions analyzed by the verifier */
	u32 prev_insn_processed, insn_processed;
	/* number of jmps, calls, exits analyzed so far */
	u32 prev_jmps_processed, jmps_processed;
	/* total verification time */
	u64 verification_time;
	/* maximum number of verifier states kept in 'branching' instructions */
	u32 max_states_per_insn;
	/* total number of allocated verifier states */
	u32 total_states;
	/* some states are freed during program analysis.
	 * this is the peak number of states. this number dominates kernel
	 * memory consumption during verification
	 */
	u32 peak_states;
	/* longest register parentage chain walked for liveness marking */
	u32 longest_mark_read_walk;
	bpfptr_t fd_array;

	/* bit mask to keep track of whether a register has been accessed
	 * since the last time the function state was printed
	 */
	u32 scratched_regs;
	/* Same as scratched_regs but for stack slots */
	u64 scratched_stack_slots;
	u32 prev_log_len, prev_insn_print_len;
	/* buffer used in reg_type_str() to generate reg_type string */
	char type_str_buf[TYPE_STR_BUF_LEN];
};

__printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
				      const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...);

static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
	struct bpf_verifier_state *cur = env->cur_state;

	return cur->frame[cur->curframe];
}

static inline struct bpf_reg_state *cur_regs(struct bpf_verifier_env *env)
{
	return cur_func(env)->regs;
}

int bpf_prog_offload_verifier_prep(struct bpf_prog *prog);
int bpf_prog_offload_verify_insn(struct bpf_verifier_env *env,
				 int insn_idx, int prev_insn_idx);
int bpf_prog_offload_finalize(struct bpf_verifier_env *env);
void
bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
			      struct bpf_insn *insn);
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);

int check_ptr_off_reg(struct bpf_verifier_env *env,
		      const struct bpf_reg_state *reg, int regno);
int check_mem_reg(struct bpf_verifier_env *env, struct bpf_reg_state *reg,
		  u32 regno, u32 mem_size);

/* this lives here instead of in bpf.h because it needs to dereference tgt_prog */
static inline u64 bpf_trampoline_compute_key(const struct bpf_prog *tgt_prog,
					     struct btf *btf, u32 btf_id)
{
	if (tgt_prog)
		return ((u64)tgt_prog->aux->id << 32) | btf_id;
	else
		return ((u64)btf_obj_id(btf) << 32) | 0x80000000 | btf_id;
}

/* unpack the IDs from the key as constructed above */
static inline void bpf_trampoline_unpack_key(u64 key, u32 *obj_id, u32 *btf_id)
{
	if (obj_id)
		*obj_id = key >> 32;
	if (btf_id)
		*btf_id = key & 0x7FFFFFFF;
}
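
/* Illustrative round-trip (editor's example): with tgt_prog->aux->id == 5
 * and btf_id == 42, bpf_trampoline_compute_key() returns
 * (5ULL << 32) | 42 == 0x50000002a; bpf_trampoline_unpack_key() then
 * recovers obj_id == 5 and btf_id == 42. Bit 31 marks keys built from a
 * BTF object (no target prog), hence the 0x7FFFFFFF mask on unpack.
 */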

int bpf_check_attach_target(struct bpf_verifier_log *log,
			    const struct bpf_prog *prog,
			    const struct bpf_prog *tgt_prog,
			    u32 btf_id,
			    struct bpf_attach_target_info *tgt_info);
void bpf_free_kfunc_btf_tab(struct bpf_kfunc_btf_tab *tab);

#define BPF_BASE_TYPE_MASK	GENMASK(BPF_BASE_TYPE_BITS - 1, 0)

/* extract base type from bpf_{arg, return, reg}_type. */
static inline u32 base_type(u32 type)
{
	return type & BPF_BASE_TYPE_MASK;
}

/* extract flags from an extended type. See bpf_type_flag in bpf.h. */
static inline u32 type_flag(u32 type)
{
	return type & ~BPF_BASE_TYPE_MASK;
}
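
/* Illustrative decomposition (editor's example, assuming the PTR_MAYBE_NULL
 * flag from bpf.h): for type == (PTR_TO_MAP_VALUE | PTR_MAYBE_NULL),
 * base_type() returns PTR_TO_MAP_VALUE and type_flag() returns
 * PTR_MAYBE_NULL; the low BPF_BASE_TYPE_BITS bits carry the base type and
 * the remaining bits carry modifier flags.
 */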

#endif /* _LINUX_BPF_VERIFIER_H */