// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
 */
#include <uapi/linux/btf.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bpf.h>
#include <linux/bpf_verifier.h>
#include <linux/math64.h>
#include <linux/string.h>

#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)
static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
{
	/* ubuf and len_total should both be specified (or not) together */
	if (!!log->ubuf != !!log->len_total)
		return false;
	/* log buf without log_level is meaningless */
	if (log->ubuf && log->level == 0)
		return false;
	if (log->level & ~BPF_LOG_MASK)
		return false;
	if (log->len_total > UINT_MAX >> 2)
		return false;

	return true;
}
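/* Illustrative examples of the checks above (not exhaustive):
 * level=0, ubuf=NULL, len_total=0  -> valid (logging disabled);
 * level=1, ubuf!=NULL, len_total>0 -> valid;
 * level=0, ubuf!=NULL              -> invalid (buffer without a level);
 * level=1, ubuf!=NULL, len_total=0 -> invalid (buffer/length mismatch).
 */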
int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
		  char __user *log_buf, u32 log_size)
{
	log->level = log_level;
	log->ubuf = log_buf;
	log->len_total = log_size;

	/* log attributes have to be sane */
	if (!bpf_verifier_log_attr_valid(log))
		return -EINVAL;

	return 0;
}
static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
{
	/* add_len includes terminal \0, so no need for +1. */
	u64 len = log->end_pos + add_len;

	/* log->len_max could be larger than our current len due to
	 * bpf_vlog_reset() calls, so we maintain the max of any length at any
	 * previous point
	 */
	if (len > UINT_MAX)
		log->len_max = UINT_MAX;
	else if (len > log->len_max)
		log->len_max = len;
}
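/* Worked example (illustrative): with len_total=16, two messages of 10
 * bytes each (including '\0') advance end_pos to 9 and then 18, so
 * len_max becomes 19 (end_pos + add_len of the second write). Only 16
 * bytes fit into ubuf, so bpf_vlog_finalize() reports -ENOSPC while
 * still returning the full required size via log_size_actual.
 */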
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	u64 cur_pos;
	u32 new_n, n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
		return;
	}

	n += 1; /* include terminating zero */
	bpf_vlog_update_len_max(log, n);

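	/* Example (illustrative): in BPF_LOG_FIXED mode with len_total=8 and
	 * end_pos=5, an incoming n=6 (5 chars + '\0') leaves room for only
	 * new_n=3 bytes: 2 characters plus a forced '\0'; end_pos still
	 * advances by the full 5 chars so the required size keeps growing.
	 */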
	if (log->level & BPF_LOG_FIXED) {
		/* check if we have at least something to put into user buf */
		new_n = 0;
		if (log->end_pos < log->len_total) {
			new_n = min_t(u32, log->len_total - log->end_pos, n);
			log->kbuf[new_n - 1] = '\0';
		}

		cur_pos = log->end_pos;
		log->end_pos += n - 1; /* don't count terminating '\0' */

		if (log->ubuf && new_n &&
		    copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n))
			goto fail;
	} else {
		u64 new_end, new_start;
		u32 buf_start, buf_end;

		new_end = log->end_pos + n;
		if (new_end - log->start_pos >= log->len_total)
			new_start = new_end - log->len_total;
		else
			new_start = log->start_pos;

		log->start_pos = new_start;
		log->end_pos = new_end - 1; /* don't count terminating '\0' */

		if (!log->ubuf)
			return;

		new_n = min(n, log->len_total);
		cur_pos = new_end - new_n;
		div_u64_rem(cur_pos, log->len_total, &buf_start);
		div_u64_rem(new_end, log->len_total, &buf_end);
		/* new_end and buf_end are exclusive indices, so if buf_end is
		 * exactly zero, then it actually points right to the end of
		 * ubuf and there is no wrap around
		 */
		if (buf_end == 0)
			buf_end = log->len_total;

		/* if buf_start > buf_end, we wrapped around;
		 * if buf_start == buf_end, then we fill ubuf completely; we
		 * can't have buf_start == buf_end to mean that there is
		 * nothing to write, because we always write at least
		 * something, even if terminal '\0'
		 */
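		/* Worked example (illustrative): len_total=8, start_pos=0,
		 * end_pos=6, incoming n=5 (4 chars + '\0'): new_end=11, so
		 * new_start=3; cur_pos=6 gives buf_start=6, buf_end=11%8=3;
		 * buf_start > buf_end, so we copy 2 bytes into [6,8) and the
		 * remaining 3 into [0,3).
		 */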
		if (buf_start < buf_end) {
			/* message fits within contiguous chunk of ubuf */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 buf_end - buf_start))
				goto fail;
		} else {
			/* message wraps around the end of ubuf, copy in two chunks */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 log->len_total - buf_start))
				goto fail;
			if (copy_to_user(log->ubuf,
					 log->kbuf + n - buf_end,
					 buf_end))
				goto fail;
		}
	}

	return;
fail:
	log->ubuf = NULL;
}
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos)
{
	char zero = 0;
	u32 pos;

	if (WARN_ON_ONCE(new_pos > log->end_pos))
		return;

	if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL)
		return;

	/* if position to which we reset is beyond current log window,
	 * then we didn't preserve any useful content and should adjust
	 * start_pos to end up with an empty log (start_pos == end_pos)
	 */
	log->end_pos = new_pos;
	if (log->end_pos < log->start_pos)
		log->start_pos = log->end_pos;

	if (!log->ubuf)
		return;

	if (log->level & BPF_LOG_FIXED)
		pos = log->end_pos + 1;
	else
		div_u64_rem(new_pos, log->len_total, &pos);

	if (pos < log->len_total && put_user(zero, log->ubuf + pos))
		log->ubuf = NULL;
}
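/* The verifier typically snapshots log->end_pos before emitting output
 * for an instruction and, if that output turns out to be redundant,
 * calls bpf_vlog_reset() to roll the log back to the snapshot (see
 * print_insn_state() below for one such caller).
 */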
static void bpf_vlog_reverse_kbuf(char *buf, int len)
{
	int i, j;

	for (i = 0, j = len - 1; i < j; i++, j--)
		swap(buf[i], buf[j]);
}
static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end)
{
	/* we split log->kbuf into two equal parts for both ends of array */
	int n = sizeof(log->kbuf) / 2, nn;
	char *lbuf = log->kbuf, *rbuf = log->kbuf + n;

	/* Read ubuf's section [start, end) two chunks at a time, from left
	 * and right side; within each chunk, swap all the bytes; after that
	 * reverse the order of lbuf and rbuf and write result back to ubuf.
	 * This way we'll end up with swapped contents of specified
	 * [start, end) ubuf segment.
	 */
	while (end - start > 1) {
		nn = min(n, (end - start) / 2);

		if (copy_from_user(lbuf, log->ubuf + start, nn))
			return -EFAULT;
		if (copy_from_user(rbuf, log->ubuf + end - nn, nn))
			return -EFAULT;

		bpf_vlog_reverse_kbuf(lbuf, nn);
		bpf_vlog_reverse_kbuf(rbuf, nn);

		/* we write lbuf to the right end of ubuf, while rbuf to the
		 * left one to end up with properly reversed overall ubuf
		 */
		if (copy_to_user(log->ubuf + start, rbuf, nn))
			return -EFAULT;
		if (copy_to_user(log->ubuf + end - nn, lbuf, nn))
			return -EFAULT;

		start += nn;
		end -= nn;
	}

	return 0;
}
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual)
{
	u32 sublen;
	int err;

	*log_size_actual = 0;
	if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL)
		return 0;

	if (!log->ubuf)
		goto skip_log_rotate;
	/* If we never truncated log, there is nothing to move around. */
	if (log->start_pos == 0)
		goto skip_log_rotate;

	/* Otherwise we need to rotate log contents to make it start from the
	 * buffer beginning and be a continuous zero-terminated string. Note
	 * that if log->start_pos != 0 then we definitely filled up entire log
	 * buffer with no gaps, and we just need to shift buffer contents to
	 * the left by (log->start_pos % log->len_total) bytes.
	 *
	 * Unfortunately, user buffer could be huge and we don't want to
	 * allocate temporary kernel memory of the same size just to shift
	 * contents in a straightforward fashion. Instead, we'll be clever and
	 * do in-place array rotation. This is a leetcode-style problem, which
	 * could be solved by three reversals.
	 *
	 * Let's say we have log buffer that has to be shifted left by 7 bytes
	 * (spaces and vertical bar is just for demonstrative purposes):
	 *   E F G H I J K | A B C D
	 *
	 * First, we reverse entire array:
	 *   D C B A | K J I H G F E
	 *
	 * Then we reverse first 4 bytes (DCBA) and separately last 7 bytes
	 * (KJIHGFE), resulting in a properly rotated array:
	 *   A B C D | E F G H I J K
	 *
	 * We'll utilize log->kbuf to read user memory chunk by chunk, swap
	 * bytes, and write them back. Doing it byte-by-byte would be
	 * unnecessarily inefficient. Altogether we are going to read and
	 * write each byte twice, for total 4 memory copies between kernel and
	 * user space.
	 */

	/* length of the chopped off part that will be the beginning;
	 * len(ABCD) in the example above
	 */
	div_u64_rem(log->start_pos, log->len_total, &sublen);
	sublen = log->len_total - sublen;
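	/* E.g., for the example above: len_total=11, start_pos=7, so
	 * sublen = 11 - (7 % 11) = 4 == len("ABCD").
	 */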
	err = bpf_vlog_reverse_ubuf(log, 0, log->len_total);
	err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen);
	err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total);
	if (err)
		log->ubuf = NULL;

skip_log_rotate:
	*log_size_actual = log->len_max;

	/* properly initialized log has either both ubuf!=NULL and len_total>0
	 * or ubuf==NULL and len_total==0, so if this condition doesn't hold,
	 * we got a fault somewhere along the way, so report it back
	 */
	if (!!log->ubuf != !!log->len_total)
		return -EFAULT;

	/* did truncation actually happen? */
	if (log->ubuf && log->len_max > log->len_total)
		return -ENOSPC;

	return 0;
}
/* log_level controls verbosity level of eBPF verifier.
 * bpf_verifier_log_write() is used to dump the verification trace to the log,
 * so the user can figure out what's wrong with the program.
 */
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
					   const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(&env->log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
			    const char *fmt, ...)
{
	va_list args;

	if (!bpf_verifier_log_needed(log))
		return;

	va_start(args, fmt);
	bpf_verifier_vlog(log, fmt, args);
	va_end(args);
}
EXPORT_SYMBOL_GPL(bpf_log);
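/* Typical usage (illustrative):
 *   bpf_log(log, "arg#%d is expected to be non-NULL\n", i);
 * The call is a no-op when the log is not needed, so callers don't have
 * to guard every invocation.
 */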
static const struct bpf_line_info *
find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
{
	const struct bpf_line_info *linfo;
	const struct bpf_prog *prog;
	u32 nr_linfo;
	int l, r, m;

	prog = env->prog;
	nr_linfo = prog->aux->nr_linfo;

	if (!nr_linfo || insn_off >= prog->len)
		return NULL;

	linfo = prog->aux->linfo;
	/* Loop invariant: linfo[l].insn_off <= insn_off.
	 * linfo[0].insn_off == 0 which always satisfies above condition.
	 * Binary search is searching for rightmost linfo entry that satisfies
	 * the above invariant, giving us the desired record that covers given
	 * instruction offset.
	 */
	l = 0;
	r = nr_linfo - 1;
	while (l < r) {
		/* (r - l + 1) / 2 means we break a tie to the right, so if:
		 * l=1, r=2, linfo[l].insn_off <= insn_off, linfo[r].insn_off > insn_off,
		 * then m=2, we see that linfo[m].insn_off > insn_off, and so
		 * r becomes 1 and we exit the loop with correct l==1.
		 * If the tie was broken to the left, m=1 would end us up in
		 * an endless loop where l and m stay at 1 and r stays at 2.
		 */
		m = l + (r - l + 1) / 2;
		if (linfo[m].insn_off <= insn_off)
			l = m;
		else
			r = m - 1;
	}

	return &linfo[l];
}
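/* Example (illustrative): for linfo insn_offs {0, 4, 4, 9} and
 * insn_off=6, the search settles on the rightmost entry with
 * insn_off <= 6, i.e. index 2 (insn_off == 4).
 */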
static const char *ltrim(const char *s)
{
	while (isspace(*s))
		s++;

	return s;
}
__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
				  u32 insn_off,
				  const char *prefix_fmt, ...)
{
	const struct bpf_line_info *linfo, *prev_linfo;
	const struct btf *btf;
	const char *s, *fname;

	if (!bpf_verifier_log_needed(&env->log))
		return;

	prev_linfo = env->prev_linfo;
	linfo = find_linfo(env, insn_off);
	if (!linfo || linfo == prev_linfo)
		return;

	/* It often happens that two separate linfo records point to the same
	 * source code line, but have differing column numbers. Given verifier
	 * log doesn't emit column information, from user perspective we just
	 * end up emitting the same source code line twice unnecessarily.
	 * So instead check that previous and current linfo record point to
	 * the same file (file_name_offs match) and the same line number, and
	 * avoid emitting duplicated source code line in such case.
	 */
	if (prev_linfo && linfo->file_name_off == prev_linfo->file_name_off &&
	    BPF_LINE_INFO_LINE_NUM(linfo->line_col) == BPF_LINE_INFO_LINE_NUM(prev_linfo->line_col))
		return;

	if (prefix_fmt) {
		va_list args;

		va_start(args, prefix_fmt);
		bpf_verifier_vlog(&env->log, prefix_fmt, args);
		va_end(args);
	}

	btf = env->prog->aux->btf;
	s = ltrim(btf_name_by_offset(btf, linfo->line_off));
	verbose(env, "%s", s); /* source code line */

	s = btf_name_by_offset(btf, linfo->file_name_off);
	/* leave only file name */
	fname = strrchr(s, '/');
	fname = fname ? fname + 1 : s;
	verbose(env, " @ %s:%u\n", fname, BPF_LINE_INFO_LINE_NUM(linfo->line_col));

	env->prev_linfo = linfo;
}
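/* Example of emitted output (illustrative), for a "; " prefix:
 *   ; n = bpf_map_lookup_elem(map, &key); @ my_prog.c:42
 */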
static const char *btf_type_name(const struct btf *btf, u32 id)
{
	return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
}
/* string representation of 'enum bpf_reg_type'
 *
 * Note that reg_type_str() can not appear more than once in a single verbose()
 * statement.
 */
const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type)
{
	char postfix[16] = {0}, prefix[64] = {0};
	static const char * const str[] = {
		[NOT_INIT]		= "?",
		[SCALAR_VALUE]		= "scalar",
		[PTR_TO_CTX]		= "ctx",
		[CONST_PTR_TO_MAP]	= "map_ptr",
		[PTR_TO_MAP_VALUE]	= "map_value",
		[PTR_TO_STACK]		= "fp",
		[PTR_TO_PACKET]		= "pkt",
		[PTR_TO_PACKET_META]	= "pkt_meta",
		[PTR_TO_PACKET_END]	= "pkt_end",
		[PTR_TO_FLOW_KEYS]	= "flow_keys",
		[PTR_TO_SOCKET]		= "sock",
		[PTR_TO_SOCK_COMMON]	= "sock_common",
		[PTR_TO_TCP_SOCK]	= "tcp_sock",
		[PTR_TO_TP_BUFFER]	= "tp_buffer",
		[PTR_TO_XDP_SOCK]	= "xdp_sock",
		[PTR_TO_BTF_ID]		= "ptr_",
		[PTR_TO_MEM]		= "mem",
		[PTR_TO_ARENA]		= "arena",
		[PTR_TO_BUF]		= "buf",
		[PTR_TO_FUNC]		= "func",
		[PTR_TO_MAP_KEY]	= "map_key",
		[CONST_PTR_TO_DYNPTR]	= "dynptr_ptr",
	};

	if (type & PTR_MAYBE_NULL) {
		if (base_type(type) == PTR_TO_BTF_ID)
			strscpy(postfix, "or_null_");
		else
			strscpy(postfix, "_or_null");
	}

	snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
		 type & MEM_RDONLY ? "rdonly_" : "",
		 type & MEM_RINGBUF ? "ringbuf_" : "",
		 type & MEM_USER ? "user_" : "",
		 type & MEM_PERCPU ? "percpu_" : "",
		 type & MEM_RCU ? "rcu_" : "",
		 type & PTR_UNTRUSTED ? "untrusted_" : "",
		 type & PTR_TRUSTED ? "trusted_" : "");

	snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
		 prefix, str[base_type(type)], postfix);
	return env->tmp_str_buf;
}
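/* Examples of composed strings (illustrative):
 *   PTR_TO_MAP_VALUE | PTR_MAYBE_NULL -> "map_value_or_null"
 *   PTR_TO_MEM | MEM_RDONLY           -> "rdonly_mem"
 *   PTR_TO_BTF_ID | PTR_TRUSTED       -> "trusted_ptr_" (the BTF type
 *                                        name is appended by the caller)
 */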
const char *dynptr_type_str(enum bpf_dynptr_type type)
{
	switch (type) {
	case BPF_DYNPTR_TYPE_LOCAL:
		return "local";
	case BPF_DYNPTR_TYPE_RINGBUF:
		return "ringbuf";
	case BPF_DYNPTR_TYPE_SKB:
		return "skb";
	case BPF_DYNPTR_TYPE_XDP:
		return "xdp";
	case BPF_DYNPTR_TYPE_INVALID:
		return "<invalid>";
	default:
		WARN_ONCE(1, "unknown dynptr type %d\n", type);
		return "<unknown>";
	}
}
const char *iter_type_str(const struct btf *btf, u32 btf_id)
{
	if (!btf || btf_id == 0)
		return "<invalid>";

	/* we already validated that type is valid and has conforming name */
	return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
}
const char *iter_state_str(enum bpf_iter_state state)
{
	switch (state) {
	case BPF_ITER_STATE_ACTIVE:
		return "active";
	case BPF_ITER_STATE_DRAINED:
		return "drained";
	case BPF_ITER_STATE_INVALID:
		return "<invalid>";
	default:
		WARN_ONCE(1, "unknown iter state %d\n", state);
		return "<unknown>";
	}
}
static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
	[STACK_DYNPTR]	= 'd',
	[STACK_ITER]	= 'i',
	[STACK_IRQ_FLAG] = 'f'
};
static void print_liveness(struct bpf_verifier_env *env,
			   enum bpf_reg_liveness live)
{
	if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
		verbose(env, "_");
	if (live & REG_LIVE_READ)
		verbose(env, "r");
	if (live & REG_LIVE_WRITTEN)
		verbose(env, "w");
	if (live & REG_LIVE_DONE)
		verbose(env, "D");
}
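/* E.g., a freshly written register shows up as "R0_w=..." in the state
 * dump, a read-and-written one as "R1_rw=...", and "D" marks
 * REG_LIVE_DONE.
 */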
#define UNUM_MAX_DECIMAL U16_MAX
#define SNUM_MAX_DECIMAL S16_MAX
#define SNUM_MIN_DECIMAL S16_MIN
static bool is_unum_decimal(u64 num)
{
	return num <= UNUM_MAX_DECIMAL;
}

static bool is_snum_decimal(s64 num)
{
	return num >= SNUM_MIN_DECIMAL && num <= SNUM_MAX_DECIMAL;
}

static void verbose_unum(struct bpf_verifier_env *env, u64 num)
{
	if (is_unum_decimal(num))
		verbose(env, "%llu", num);
	else
		verbose(env, "%#llx", num);
}

static void verbose_snum(struct bpf_verifier_env *env, s64 num)
{
	if (is_snum_decimal(num))
		verbose(env, "%lld", num);
	else
		verbose(env, "%#llx", num);
}
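/* E.g., verbose_unum() prints 65535 as "65535" but 65536 as "0x10000",
 * since only values up to U16_MAX stay readable in decimal.
 */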
int tnum_strn(char *str, size_t size, struct tnum a)
{
	/* print as a constant, if tnum is fully known */
	if (a.mask == 0) {
		if (is_unum_decimal(a.value))
			return snprintf(str, size, "%llu", a.value);
		else
			return snprintf(str, size, "%#llx", a.value);
	}
	return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
}
EXPORT_SYMBOL_GPL(tnum_strn);
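/* Examples (illustrative): a fully-known tnum {.value=42, .mask=0}
 * prints as "42"; a tnum with unknown low byte {.value=0x100, .mask=0xff}
 * prints as "(0x100; 0xff)" -- known bits first, unknown mask second.
 */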
static void print_scalar_ranges(struct bpf_verifier_env *env,
				const struct bpf_reg_state *reg,
				const char **sep)
{
	/* For signed ranges, we want to unify 64-bit and 32-bit values in the
	 * output as much as possible, but there is a bit of a complication.
	 * If we choose to print values as decimals, this is natural to do,
	 * because negative 64-bit and 32-bit values >= -S32_MIN have the same
	 * representation due to sign extension. But if we choose to print
	 * them in hex format (see is_snum_decimal()), then sign extension is
	 * lost and equal values would be rendered differently.
	 * E.g., smin=-2 and smin32=-2 are exactly the same in decimal, but in
	 * hex they will be smin=0xfffffffffffffffe and smin32=0xfffffffe, two
	 * very different numbers.
	 * So we avoid sign extension if we choose to print values in hex.
	 */
	struct {
		const char *name;
		u64 val;
		bool omit;
	} minmaxs[] = {
		{"smin", reg->smin_value, reg->smin_value == S64_MIN},
		{"smax", reg->smax_value, reg->smax_value == S64_MAX},
		{"umin", reg->umin_value, reg->umin_value == 0},
		{"umax", reg->umax_value, reg->umax_value == U64_MAX},
		{"smin32",
		 is_snum_decimal((s64)reg->s32_min_value)
		 ? (s64)reg->s32_min_value
		 : (u32)reg->s32_min_value, reg->s32_min_value == S32_MIN},
		{"smax32",
		 is_snum_decimal((s64)reg->s32_max_value)
		 ? (s64)reg->s32_max_value
		 : (u32)reg->s32_max_value, reg->s32_max_value == S32_MAX},
		{"umin32", reg->u32_min_value, reg->u32_min_value == 0},
		{"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX},
	}, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)];
	bool neg1, neg2;

	for (m1 = &minmaxs[0]; m1 < mend; m1++) {
		if (m1->omit)
			continue;

		neg1 = m1->name[0] == 's' && (s64)m1->val < 0;

		verbose(env, "%s%s=", *sep, m1->name);
		*sep = ",";

		for (m2 = m1 + 2; m2 < mend; m2 += 2) {
			if (m2->omit || m2->val != m1->val)
				continue;
			/* don't mix negatives with positives */
			neg2 = m2->name[0] == 's' && (s64)m2->val < 0;
			if (neg1 != neg2)
				continue;

			m2->omit = true;
			verbose(env, "%s=", m2->name);
		}

		if (m1->name[0] == 's')
			verbose_snum(env, m1->val);
		else
			verbose_unum(env, m1->val);
	}
}
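/* Example (illustrative): for a scalar known to be in [0, 255] in both
 * 32-bit and 64-bit domains, equal bounds are folded into
 * "smin=smin32=0,smax=umax=smax32=umax32=255" (umin/umin32 are omitted
 * as trivial zeros).
 */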
static bool type_is_map_ptr(enum bpf_reg_type t)
{
	switch (base_type(t)) {
	case CONST_PTR_TO_MAP:
	case PTR_TO_MAP_KEY:
	case PTR_TO_MAP_VALUE:
		return true;
	default:
		return false;
	}
}
/*
 * _a stands for append, was shortened to avoid multiline statements below.
 * This macro is used to output a comma separated list of attributes.
 */
#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, ##__VA_ARGS__); sep = ","; })
static void print_reg_state(struct bpf_verifier_env *env,
			    const struct bpf_func_state *state,
			    const struct bpf_reg_state *reg)
{
	enum bpf_reg_type t;
	const char *sep = "";

	t = reg->type;
	if (t == SCALAR_VALUE && reg->precise)
		verbose(env, "P");
	if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
		verbose_snum(env, reg->var_off.value);
		return;
	}

	verbose(env, "%s", reg_type_str(env, t));
	if (t == PTR_TO_ARENA)
		return;
	if (t == PTR_TO_STACK) {
		if (state->frameno != reg->frameno)
			verbose(env, "[%d]", reg->frameno);
		if (tnum_is_const(reg->var_off)) {
			verbose_snum(env, reg->var_off.value + reg->off);
			return;
		}
	}
	if (base_type(t) == PTR_TO_BTF_ID)
		verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
	verbose(env, "(");
	if (reg->id)
		verbose_a("id=%d", reg->id & ~BPF_ADD_CONST);
	if (reg->id & BPF_ADD_CONST)
		verbose(env, "%+d", reg->off);
	if (reg->ref_obj_id)
		verbose_a("ref_obj_id=%d", reg->ref_obj_id);
	if (type_is_non_owning_ref(reg->type))
		verbose_a("%s", "non_own_ref");
	if (type_is_map_ptr(t)) {
		if (reg->map_ptr->name[0])
			verbose_a("map=%s", reg->map_ptr->name);
		verbose_a("ks=%d,vs=%d",
			  reg->map_ptr->key_size,
			  reg->map_ptr->value_size);
	}
	if (t != SCALAR_VALUE && reg->off) {
		verbose_a("off=");
		verbose_snum(env, reg->off);
	}
	if (type_is_pkt_pointer(t)) {
		verbose_a("r=");
		verbose_unum(env, reg->range);
	}
	if (base_type(t) == PTR_TO_MEM) {
		verbose_a("sz=");
		verbose_unum(env, reg->mem_size);
	}
	if (t == CONST_PTR_TO_DYNPTR)
		verbose_a("type=%s", dynptr_type_str(reg->dynptr.type));
	if (tnum_is_const(reg->var_off)) {
		/* a pointer register with fixed offset */
		if (reg->var_off.value) {
			verbose_a("var_off=");
			verbose_snum(env, reg->var_off.value);
		}
	} else {
		print_scalar_ranges(env, reg, &sep);
		if (!tnum_is_unknown(reg->var_off)) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose_a("var_off=%s", tn_buf);
		}
	}
	verbose(env, ")");
}
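/* Examples of rendered registers (illustrative):
 *   "42"                              - known scalar constant
 *   "map_value(map=my_map,ks=4,vs=8)" - map value pointer
 *   "fp-16"                           - stack pointer with known offset
 */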
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
			  u32 frameno, bool print_all)
{
	const struct bpf_func_state *state = vstate->frame[frameno];
	const struct bpf_reg_state *reg;
	int i;

	verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		if (reg->type == NOT_INIT)
			continue;
		if (!print_all && !reg_scratched(env, i))
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=");
		print_reg_state(env, state, reg);
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		const char *sep = "";
		bool valid = false;
		u8 slot_type;
		int j;

		if (!print_all && !stack_slot_scratched(env, i))
			continue;

		for (j = 0; j < BPF_REG_SIZE; j++) {
			slot_type = state->stack[i].slot_type[j];
			if (slot_type != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[slot_type];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;

		reg = &state->stack[i].spilled_ptr;
		switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
		case STACK_SPILL:
			/* print MISC/ZERO/INVALID slots above subreg spill */
			for (j = 0; j < BPF_REG_SIZE; j++)
				if (state->stack[i].slot_type[j] == STACK_SPILL)
					break;
			types_buf[j] = '\0';

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", types_buf);
			print_reg_state(env, state, reg);
			break;
		case STACK_DYNPTR:
			/* skip to main dynptr slot */
			i += BPF_DYNPTR_NR_SLOTS - 1;
			reg = &state->stack[i].spilled_ptr;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=dynptr_%s(", dynptr_type_str(reg->dynptr.type));
			if (reg->id)
				verbose_a("id=%d", reg->id);
			if (reg->ref_obj_id)
				verbose_a("ref_id=%d", reg->ref_obj_id);
			if (reg->dynptr_id)
				verbose_a("dynptr_id=%d", reg->dynptr_id);
			verbose(env, ")");
			break;
		case STACK_ITER:
			/* only main slot has ref_obj_id set; skip others */
			if (!reg->ref_obj_id)
				continue;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
				iter_type_str(reg->iter.btf, reg->iter.btf_id),
				reg->ref_obj_id, iter_state_str(reg->iter.state),
				reg->iter.depth);
			break;
		case STACK_MISC:
		case STACK_ZERO:
		default:
			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", types_buf);
			break;
		}
	}
	if (vstate->acquired_refs && vstate->refs[0].id) {
		verbose(env, " refs=%d", vstate->refs[0].id);
		for (i = 1; i < vstate->acquired_refs; i++)
			if (vstate->refs[i].id)
				verbose(env, ",%d", vstate->refs[i].id);
	}
	if (state->in_callback_fn)
		verbose(env, " cb");
	if (state->in_async_callback_fn)
		verbose(env, " async_cb");
	verbose(env, "\n");
	if (!print_all)
		mark_verifier_state_clean(env);
}
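/* Example of a full state line (illustrative):
 *   frame0: R1=ctx() R6=map_value(map=my_map,ks=4,vs=8) R10=fp0 fp-8=????mmmm
 */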
static inline u32 vlog_alignment(u32 pos)
{
	return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
			BPF_LOG_MIN_ALIGNMENT) - pos - 1;
}
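/* Worked example, assuming BPF_LOG_ALIGNMENT=40 and
 * BPF_LOG_MIN_ALIGNMENT=8 (as defined in bpf_verifier.h): for pos=13,
 * round_up(max(17, 40), 8) - 13 - 1 = 26, i.e. enough padding so that
 * the appended ';' lands at the alignment column.
 */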
void print_insn_state(struct bpf_verifier_env *env, const struct bpf_verifier_state *vstate,
		      u32 frameno)
{
	if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
		/* remove new line character */
		bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
		verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
	} else {
		verbose(env, "%d:", env->insn_idx);
	}
	print_verifier_state(env, vstate, frameno, false);
}