1 // SPDX-License-Identifier: GPL-2.0
3 #include "bpf_experimental.h"
/* NOTE(review): SOURCE is a sampled excerpt of a BPF verifier selftest;
 * struct openers/closers and many body lines are not visible, so comments
 * describe only what the visible lines establish.
 * Maps: an 8-entry ARRAY map and an 8-entry USER_RINGBUF map ("ringbuf")
 * feed the bpf_for_each_map_elem()/bpf_user_ringbuf_drain() tests below.
 * choice_arr is a 2-byte array; several tests index it with a context
 * field to provoke "invalid access to map value, value_size=2 ..." errors.
 */
6 __uint(type, BPF_MAP_TYPE_ARRAY);
7 __uint(max_entries, 8);
13 __uint(type, BPF_MAP_TYPE_USER_RINGBUF);
14 __uint(max_entries, 8);
15 } ringbuf SEC(".maps");
17 struct vm_area_struct;
29 __u8 choice_arr[2] = { 0, 1 };
/* Callback overwrites ctx->buf with a scalar (0xDEAD) and then passes it
 * to bpf_probe_read_user(); per the __failure annotation the verifier is
 * expected to reject the callback's later iteration with
 * "R1 type=scalar expected=fp". (Some body lines elided in this excerpt.)
 */
31 static int unsafe_on_2nd_iter_cb(__u32 idx, struct buf_context *ctx)
34 ctx->buf = (char *)(0xDEAD);
38 if (bpf_probe_read_user(ctx->buf, 8, (void *)(0xBADC0FFEE)))
45 __failure __msg("R1 type=scalar expected=fp")
46 int unsafe_on_2nd_iter(void *unused)
49 struct buf_context loop_ctx = { .buf = buf };
51 bpf_loop(100, unsafe_on_2nd_iter_cb, &loop_ctx, 0);
/* loop_ctx.i starts at 32; if bpf_loop() performs zero callback
 * iterations, choice_arr[loop_ctx.i] reads offset 32 of the 2-byte
 * choice_arr — matching the expected off=32 failure message.
 */
55 static int unsafe_on_zero_iter_cb(__u32 idx, struct num_context *ctx)
62 __failure __msg("invalid access to map value, value_size=2 off=32 size=1")
63 int unsafe_on_zero_iter(void *unused)
65 struct num_context loop_ctx = { .i = 32 };
67 bpf_loop(100, unsafe_on_zero_iter_cb, &loop_ctx, 0);
68 return choice_arr[loop_ctx.i];
/* Widening test: per the original comment below, loop_ctx.j is never
 * written by widening_cb, so the verifier must not widen (lose precision
 * on) it — choice_arr[loop_ctx.j] must still verify as in-bounds.
 * (Callback body and the __success annotation are elided in this excerpt.)
 */
71 static int widening_cb(__u32 idx, struct num_context *ctx)
79 int widening(void *unused)
81 struct num_context loop_ctx = { .i = 0, .j = 1 };
83 bpf_loop(100, widening_cb, &loop_ctx, 0);
84 /* loop_ctx.j is not changed during callback iteration,
85 * verifier should not apply widening to it.
87 return choice_arr[loop_ctx.j];
/* Expected to be rejected with "infinite loop detected";
 * loop_detection_cb presumably prevents loop-state convergence —
 * its body is elided in this excerpt, so this is inferred from the
 * __failure message only.
 */
90 static int loop_detection_cb(__u32 idx, struct num_context *ctx)
97 __failure __msg("infinite loop detected")
98 int loop_detection(void *unused)
100 struct num_context loop_ctx = { .i = 0 };
102 bpf_loop(100, loop_detection_cb, &loop_ctx, 0);
/* oob_state_machine() is shared by the for_each_map_elem, ringbuf_drain
 * and find_vma callbacks below; its body is elided here, but each caller
 * expects it to drive ctx->i out of choice_arr's [0, 1] range (the off=32
 * failure message). This variant exercises bpf_for_each_map_elem().
 */
106 static __always_inline __u64 oob_state_machine(struct num_context *ctx)
119 static __u64 for_each_map_elem_cb(struct bpf_map *map, __u32 *key, __u64 *val, void *data)
121 return oob_state_machine(data);
125 __failure __msg("invalid access to map value, value_size=2 off=32 size=1")
126 int unsafe_for_each_map_elem(void *unused)
128 struct num_context loop_ctx = { .i = 0 };
130 bpf_for_each_map_elem(&map, for_each_map_elem_cb, &loop_ctx, 0);
131 return choice_arr[loop_ctx.i];
/* Same OOB-index pattern as unsafe_for_each_map_elem, driven through
 * bpf_user_ringbuf_drain() on the "ringbuf" map.
 */
134 static __u64 ringbuf_drain_cb(struct bpf_dynptr *dynptr, void *data)
136 return oob_state_machine(data);
140 __failure __msg("invalid access to map value, value_size=2 off=32 size=1")
141 int unsafe_ringbuf_drain(void *unused)
143 struct num_context loop_ctx = { .i = 0 };
145 bpf_user_ringbuf_drain(&ringbuf, ringbuf_drain_cb, &loop_ctx, 0);
146 return choice_arr[loop_ctx.i];
/* Same OOB-index pattern, driven through bpf_find_vma() on the current
 * task (hence the bpf_get_current_task_btf() call).
 */
149 static __u64 find_vma_cb(struct task_struct *task, struct vm_area_struct *vma, void *data)
151 return oob_state_machine(data);
155 __failure __msg("invalid access to map value, value_size=2 off=32 size=1")
156 int unsafe_find_vma(void *unused)
158 struct task_struct *task = bpf_get_current_task_btf();
159 struct num_context loop_ctx = { .i = 0 };
161 bpf_find_vma(task, 0, find_vma_cb, &loop_ctx, 0);
162 return choice_arr[loop_ctx.i];
/* One bpf_loop() iteration of iter_limit_cb; per the "_ok" suffix this
 * variant is expected to pass verification (choice_arr[ctx.i] in bounds).
 * NOTE(review): iter_limit_cb's body is elided here — presumably it
 * increments ctx->i, given the off=2 failure in the 2-iteration variant.
 */
165 static int iter_limit_cb(__u32 idx, struct num_context *ctx)
173 int bpf_loop_iter_limit_ok(void *unused)
175 struct num_context ctx = { .i = 0 };
177 bpf_loop(1, iter_limit_cb, &ctx, 0);
178 return choice_arr[ctx.i];
/* Two iterations of iter_limit_cb leave ctx.i == 2, one past the last
 * valid choice_arr index — matching the expected off=2 failure message.
 */
182 __failure __msg("invalid access to map value, value_size=2 off=2 size=1")
183 int bpf_loop_iter_limit_overflow(void *unused)
185 struct num_context ctx = { .i = 0 };
187 bpf_loop(2, iter_limit_cb, &ctx, 0);
188 return choice_arr[ctx.i];
/* Nested bpf_loop() callback-visit accounting: level1_cb invokes
 * level2a_cb and level2b_cb once each; ctx{1,2}.i act as decimal visit
 * masks (see original comment below) and the program must return 111111
 * per the __retval annotation. The final guarded "r0 /= 0" forces the
 * verifier to prove that impossible visit-mask combinations are never
 * reached. NOTE(review): several body lines (callback bodies, a/b/c
 * loads) are elided in this excerpt.
 */
191 static int iter_limit_level2a_cb(__u32 idx, struct num_context *ctx)
197 static int iter_limit_level2b_cb(__u32 idx, struct num_context *ctx)
203 static int iter_limit_level1_cb(__u32 idx, struct num_context *ctx)
206 bpf_loop(1, iter_limit_level2a_cb, ctx, 0);
207 bpf_loop(1, iter_limit_level2b_cb, ctx, 0);
211 /* Check that path visiting every callback function once had been
212 * reached by verifier. Variables 'ctx{1,2}i' below serve as flags,
213 * with each decimal digit corresponding to a callback visit marker.
216 __success __retval(111111)
217 int bpf_loop_iter_limit_nested(void *unused)
219 struct num_context ctx1 = { .i = 0 };
220 struct num_context ctx2 = { .i = 0 };
223 bpf_loop(1, iter_limit_level1_cb, &ctx1, 0);
224 bpf_loop(1, iter_limit_level1_cb, &ctx2, 0);
227 /* Force 'ctx1.i' and 'ctx2.i' precise. */
228 c = choice_arr[(a + b) % 2];
229 /* This makes 'c' zero, but neither clang nor verifier know it. */
231 /* Make sure that verifier does not visit 'impossible' states:
232 * enumerate all possible callback visit masks.
234 if (a != 0 && a != 1 && a != 11 && a != 101 && a != 111 &&
235 b != 0 && b != 1 && b != 11 && b != 101 && b != 111)
236 asm volatile ("r0 /= 0;" ::: "r0");
237 return 1000 * a + b + c;
/* Naked callback for iter_limit_bug below: depending on
 * bpf_get_prandom_u32() it stores 42 into exactly one of ctx->a/b/c
 * (offsets 0/8/16 via r9), with fall-through order fixed by hand-written
 * assembly — see the embedded switch() pseudo-code.
 */
240 struct iter_limit_bug_ctx {
246 static __naked void iter_limit_bug_cb(void)
248 /* This is the same as C code below, but written
249 * in assembly to control which branches are fall-through.
251 * switch (bpf_get_prandom_u32()) {
252 * case 1: ctx->a = 42; break;
253 * case 2: ctx->b = 42; break;
254 * default: ctx->c = 42; break;
259 "call %[bpf_get_prandom_u32];"
263 "if r1 == 0x1 goto 1f;"
264 "if r1 == 0x2 goto 2f;"
265 "*(u64 *)(r9 + 16) = r2;"
267 "1: *(u64 *)(r9 + 0) = r2;"
269 "2: *(u64 *)(r9 + 8) = r2;"
272 : __imm(bpf_get_prandom_u32)
/* Hand-encoded may_goto instruction (opcode byte 0xe5) with offset -3,
 * forming a backwards loop with the jgt check; expected to be rejected
 * as "infinite loop detected at insn 2".
 */
279 __failure __msg("infinite loop detected at insn 2")
280 __naked void jgt_imm64_and_may_goto(void)
283 r0 = %[tmp_var] ll; \
284 l0_%=: .byte 0xe5; /* may_goto */ \
285 .byte 0; /* regs */ \
286 .short -3; /* off -3 */ \
288 if r0 > 10 goto l0_%=; \
291 " :: __imm_addr(tmp_var)
/* may_goto whose offset (-1) targets the may_goto instruction itself;
 * expected rejection: "infinite loop detected at insn 1".
 */
296 __failure __msg("infinite loop detected at insn 1")
297 __naked void may_goto_self(void)
300 r0 = *(u32 *)(r10 - 4); \
301 l0_%=: .byte 0xe5; /* may_goto */ \
302 .byte 0; /* regs */ \
303 .short -1; /* off -1 */ \
305 if r0 > 10 goto l0_%=; \
308 " ::: __clobber_all);
/* may_goto with a negative offset (-2) that still forms a terminating
 * construct — accepted by the verifier (__success, retval 0).
 */
312 __success __retval(0)
313 __naked void may_goto_neg_off(void)
316 r0 = *(u32 *)(r10 - 4); \
319 l0_%=: .byte 0xe5; /* may_goto */ \
320 .byte 0; /* regs */ \
321 .short -2; /* off -2 */ \
323 if r0 > 10 goto l0_%=; \
326 " ::: __clobber_all);
/* Regression test (BPF_F_TEST_STATE_FREQ forces frequent state checks):
 * after bpf_loop(2, iter_limit_bug_cb, ...) over ctx = {7, 7, 7}, the
 * verifier must prove the state ctx.a==42 && ctx.b==42 && ctx.c==7 is
 * unreachable — reaching it would execute a div-by-zero guard (the
 * "r1 /= 0" shown in the embedded pseudo-code; elided in this excerpt).
 * Checks are ordered explicitly via assembly.
 */
331 __flag(BPF_F_TEST_STATE_FREQ)
332 int iter_limit_bug(struct __sk_buff *skb)
334 struct iter_limit_bug_ctx ctx = { 7, 7, 7 };
336 bpf_loop(2, iter_limit_bug_cb, &ctx, 0);
338 /* This is the same as C code below,
339 * written in assembly to guarantee checks order.
341 * if (ctx.a == 42 && ctx.b == 42 && ctx.c == 7)
342 * asm volatile("r1 /= 0;":::"r1");
345 "r1 = *(u64 *)%[ctx_a];"
346 "if r1 != 42 goto 1f;"
347 "r1 = *(u64 *)%[ctx_b];"
348 "if r1 != 42 goto 1f;"
349 "r1 = *(u64 *)%[ctx_c];"
350 "if r1 != 7 goto 1f;"
/* Forward may_goto (off 1) mixed with an unconditional jump; both
 * variants are expected to verify and return 0.
 */
363 __success __retval(0)
364 __naked void ja_and_may_goto(void)
367 l0_%=: .byte 0xe5; /* may_goto */ \
368 .byte 0; /* regs */ \
369 .short 1; /* off 1 */ \
374 " ::: __clobber_common);
/* Second variant: may_goto not at the loop-back label. */
378 __success __retval(0)
379 __naked void ja_and_may_goto2(void)
383 .byte 0xe5; /* may_goto */ \
384 .byte 0; /* regs */ \
385 .short 1; /* off 1 */ \
390 " ::: __clobber_common);
/* may_goto inside a loop bounded by a jlt on bpf_jiffies64()'s result;
 * expected to verify (retval 0).
 */
394 __success __retval(0)
395 __naked void jlt_and_may_goto(void)
398 l0_%=: call %[bpf_jiffies64]; \
399 .byte 0xe5; /* may_goto */ \
400 .byte 0; /* regs */ \
401 .short 1; /* off 1 */ \
403 if r0 < 10 goto l0_%=; \
406 " :: __imm(bpf_jiffies64)
/* gotol (32-bit-offset jump) + may_goto variant, gated to architectures
 * with a usable BPF JIT for this encoding and clang >= 18 (gotol needs
 * newer LLVM support); expected to verify (retval 0).
 */
410 #if (defined(__TARGET_ARCH_arm64) || defined(__TARGET_ARCH_x86) || \
411 (defined(__TARGET_ARCH_riscv) && __riscv_xlen == 64) || \
412 defined(__TARGET_ARCH_arm) || defined(__TARGET_ARCH_s390) || \
413 defined(__TARGET_ARCH_loongarch)) && \
414 __clang_major__ >= 18
416 __success __retval(0)
417 __naked void gotol_and_may_goto(void)
421 .byte 0xe5; /* may_goto */ \
422 .byte 0; /* regs */ \
423 .short 1; /* off 1 */ \
428 " ::: __clobber_common);
/* may_goto placed inside a called subprog rather than the main body;
 * expected to verify (retval 0).
 */
433 __success __retval(0)
434 __naked void ja_and_may_goto_subprog(void)
437 call subprog_with_may_goto; \
439 " ::: __clobber_all);
/* Subprog containing the hand-encoded may_goto (off 1). */
442 static __naked __noinline __used
443 void subprog_with_may_goto(void)
446 l0_%=: .byte 0xe5; /* may_goto */ \
447 .byte 0; /* regs */ \
448 .short 1; /* off 1 */ \
453 " ::: __clobber_all);
/* cond_break/can_loop tests: large loops bounded by the can_loop macro
 * instead of explicit iteration limits; __retval annotations pin the
 * expected computed sums. NOTE(review): 'zero' is presumably a global
 * used to defeat compile-time constant folding — its declaration is not
 * visible in this excerpt. Loop bodies are partially elided.
 */
456 #define ARR_SZ 1000000
461 __success __retval(0xd495cdc0)
462 int cond_break1(const void *ctx)
465 unsigned int sum = 0;
467 for (i = zero; i < ARR_SZ && can_loop; i++)
469 for (i = zero; i < ARR_SZ; i++) {
479 __success __retval(999000000)
480 int cond_break2(const void *ctx)
485 for (i = zero; i < 1000 && can_loop; i++)
486 for (j = zero; j < 1000; j++) {
493 static __noinline int loop(void)
497 for (i = zero; i <= 1000000 && can_loop; i++)
504 __success __retval(0x6a5a2920)
505 int cond_break3(const void *ctx)
/* cond_break4/5: verify that can_loop eventually breaks an otherwise
 * unbounded loop, but only after actually iterating — success requires
 * the counters to exceed 1 (cond_break5 additionally requires the
 * static subprog's own loop to have iterated).
 */
511 __success __retval(1)
512 int cond_break4(const void *ctx)
517 /* should eventually break out of the loop */
521 /* if we looped a bit, it's a success */
522 return cnt > 1 ? 1 : 0;
525 static __noinline int static_subprog(void)
538 __success __retval(1)
539 int cond_break5(const void *ctx)
541 int cnt1 = zero, cnt2;
548 cnt2 = static_subprog();
550 /* main and subprog have to loop a bit */
551 return cnt1 > 1 && cnt2 > 1 ? 1 : 0;
/* Open-coded bpf_iter_num iterator (new/next/destroy) with an inner loop
 * in the body; three variants — plain, with a signed index bound check,
 * and with a volatile limit the verifier cannot constant-fold. All use
 * BPF_F_TEST_STATE_FREQ to force frequent state comparisons.
 * NOTE(review): inner-loop bodies and ARR2_SZ's definition are elided
 * in this excerpt.
 */
559 __success __flag(BPF_F_TEST_STATE_FREQ)
560 int loop_inside_iter(const void *ctx)
562 struct bpf_iter_num it;
566 bpf_iter_num_new(&it, 0, ARR2_SZ);
567 while ((v = bpf_iter_num_next(&it))) {
571 bpf_iter_num_destroy(&it);
576 __success __flag(BPF_F_TEST_STATE_FREQ)
577 int loop_inside_iter_signed(const void *ctx)
579 struct bpf_iter_num it;
583 bpf_iter_num_new(&it, 0, ARR2_SZ);
584 while ((v = bpf_iter_num_next(&it))) {
585 if (i < ARR2_SZ && i >= 0)
588 bpf_iter_num_destroy(&it);
592 volatile const int limit = ARR2_SZ;
595 __success __flag(BPF_F_TEST_STATE_FREQ)
596 int loop_inside_iter_volatile_limit(const void *ctx)
598 struct bpf_iter_num it;
602 bpf_iter_num_new(&it, 0, ARR2_SZ);
603 while ((v = bpf_iter_num_next(&it))) {
607 bpf_iter_num_destroy(&it);
/* can_loop-bounded array walks over arr_long / arr_foo: test1 uses a
 * constant start index, test2/test3 start from 'zero', and test4 walks
 * downward from ARR_LONG_SZ-1 with a combined signed/unsigned bound.
 * NOTE(review): loop bodies and the arr_foo struct definition are elided
 * in this excerpt.
 */
611 #define ARR_LONG_SZ 1000
613 SEC(".data.arr_long")
614 long arr_long[ARR_LONG_SZ];
618 int test1(const void *ctx)
622 for (i = 0; i < ARR_LONG_SZ && can_loop; i++)
629 int test2(const void *ctx)
633 for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
644 } arr_foo[ARR_LONG_SZ];
648 int test3(const void *ctx)
652 for (i = zero; i < ARR_LONG_SZ && can_loop; i++) {
662 int test4(const void *ctx)
666 for (i = zero + ARR_LONG_SZ - 1; i < ARR_LONG_SZ && i >= 0 && can_loop; i--) {
/* "check add const": 10-byte buf plus a typical LLVM-generated loop
 * using may_goto; exercises verifier tracking of a pointer with a
 * constant offset added across loop iterations.
 */
674 char buf[10] SEC(".data.buf");
677 __description("check add const")
679 __naked void check_add_const(void)
681 /* typical LLVM generated loop with may_goto */
683 call %[bpf_ktime_get_ns]; \
684 if r0 > 9 goto l1_%=; \
685 l0_%=: r1 = %[buf]; \
688 r3 = *(u8 *)(r1 +0); \
689 .byte 0xe5; /* may_goto */ \
690 .byte 0; /* regs */ \
691 .short 4; /* off of l1_%=: */ \
695 if r2 < 9 goto l0_%=; \
700 : __imm(bpf_ktime_get_ns),
/* Three registers share r0's id (r1 = r0+1, r2 = r0+2); bounding r0 to
 * [0, 8] must propagate through the constant offsets, so the store via
 * r6+r1 (offset <= 9) is safe while the store via r7+r2 can reach
 * offset 10 of the 10-byte buf — the expected off=10 failure.
 */
707 __msg("*(u8 *)(r7 +0) = r0")
708 __msg("invalid access to map value, value_size=10 off=10 size=1")
709 __naked void check_add_const_3regs(void)
714 "call %[bpf_ktime_get_ns];"
715 "r1 = r0;" /* link r0.id == r1.id == r2.id */
717 "r1 += 1;" /* r1 == r0+1 */
718 "r2 += 2;" /* r2 == r0+2 */
719 "if r0 > 8 goto 1f;" /* r0 range [0, 8] */
720 "r6 += r1;" /* r1 range [1, 9] */
721 "r7 += r2;" /* r2 range [2, 10] */
722 "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
723 "*(u8 *)(r7 +0) = r0;" /* unsafe, out of bounds */
726 : __imm(bpf_ktime_get_ns),
/* Same id-linked r0/r1/r2 pattern, but bounded by two conditionals
 * (upper bound on r2, signed lower bound on r0). Stores at r6+0 and
 * r7-1 stay within the 10-byte buf; r8-1 can reach offset 10 — the
 * expected off=10 failure.
 */
733 __msg("*(u8 *)(r8 -1) = r0")
734 __msg("invalid access to map value, value_size=10 off=10 size=1")
735 __naked void check_add_const_3regs_2if(void)
741 "call %[bpf_ktime_get_ns];"
743 "r1 = r0;" /* link r0.id == r1.id == r2.id */
745 "r1 += 1;" /* r1 == r0+1 */
746 "r2 += 2;" /* r2 == r0+2 */
747 "if r2 > 11 goto 1f;" /* r2 range [0, 11] -> r0 range [-2, 9]; r1 range [-1, 10] */
748 "if r0 s< 0 goto 1f;" /* r0 range [0, 9] -> r1 range [1, 10]; r2 range [2, 11]; */
749 "r6 += r0;" /* r0 range [0, 9] */
750 "r7 += r1;" /* r1 range [1, 10] */
751 "r8 += r2;" /* r2 range [2, 11] */
752 "*(u8 *)(r6 +0) = r0;" /* safe, within bounds */
753 "*(u8 *)(r7 -1) = r0;" /* safe */
754 "*(u8 *)(r8 -1) = r0;" /* unsafe */
757 : __imm(bpf_ktime_get_ns),
/* regsafe() must distinguish states where r1 carries a different
 * constant offset from r0 (+1 vs +100) when comparing at the "goto +0"
 * checkpoint: after an unpredictable branch on two bpf_ktime_get_ns()
 * results, conflating the offsets would wrongly admit the potentially
 * out-of-bounds store into the 10-byte buf via r8.
 */
764 __flag(BPF_F_TEST_STATE_FREQ)
765 __naked void check_add_const_regsafe_off(void)
769 "call %[bpf_ktime_get_ns];"
771 "call %[bpf_ktime_get_ns];"
773 "call %[bpf_ktime_get_ns];"
774 "r1 = r0;" /* same ids for r1 and r0 */
775 "if r6 > r7 goto 1f;" /* this jump can't be predicted */
776 "r1 += 1;" /* r1.off == +1 */
778 "1: r1 += 100;" /* r1.off == +100 */
779 "goto +0;" /* verify r1.off in regsafe() after this insn */
780 "2: if r0 > 8 goto 3f;" /* r0 range [0,8], r1 range either [1,9] or [100,108]*/
782 "*(u8 *)(r8 +0) = r0;" /* potentially unsafe, buf size is 10 */
785 : __imm(bpf_ktime_get_ns),
790 char _license[] SEC("license") = "GPL";