// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "bpf_misc.h"
#include "bpf_compiler.h"

static volatile int zero = 0;

int my_pid;
int arr[256]; /* size implied by the "should be 255 * 256" check in iter_array_fill() */
int small_arr[16] SEC(".data.small_arr");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 10);
	__type(key, int);   /* key/value types inferred from the lookups below */
	__type(value, int);
} amap SEC(".maps");

#ifdef REAL_TEST
#define MY_PID_GUARD() if (my_pid != (bpf_get_current_pid_tgid() >> 32)) return 0
#else
#define MY_PID_GUARD() ({ })
#endif

__failure __msg("math between map_value pointer and register with unbounded min value is not allowed")
int iter_err_unsafe_c_loop(const void *ctx)
	struct bpf_iter_num it;
	int *v, i = zero; /* obscure initial value of i */

	bpf_iter_num_new(&it, 0, 1000);
	while ((v = bpf_iter_num_next(&it))) {
	bpf_iter_num_destroy(&it);

	small_arr[i] = 123; /* invalid */
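
	/* Note: after the loop the verifier has no usable bound on 'i' (its
	 * initial value is the unknown volatile 'zero'), so small_arr[i] is
	 * map-value pointer arithmetic with an unbounded register, which is
	 * exactly what the __msg annotation above expects to be rejected.
	 */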

__failure __msg("unbounded memory access")
int iter_err_unsafe_asm_loop(const void *ctx)
	struct bpf_iter_num it;

	"r6 = %[zero];" /* iteration counter */
	"r1 = %[it];" /* iterator state */
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto out;"
	"call %[bpf_iter_num_destroy];"
	"*(u32 *)(r1 + 0) = r6;" /* invalid */
	  [small_arr]"r"(small_arr),
	  __imm(bpf_iter_num_new),
	  __imm(bpf_iter_num_next),
	  __imm(bpf_iter_num_destroy)
	: __clobber_common, "r6"
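
	/* Note: asm counterpart of the C test above; the iteration counter in
	 * r6 has no bound after the loop, so the store marked "invalid" is
	 * rejected with the "unbounded memory access" message that the __msg
	 * annotation expects.
	 */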

int iter_while_loop(const void *ctx)
	struct bpf_iter_num it;

	bpf_iter_num_new(&it, 0, 3);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
	bpf_iter_num_destroy(&it);

int iter_while_loop_auto_cleanup(const void *ctx)
	__attribute__((cleanup(bpf_iter_num_destroy))) struct bpf_iter_num it;

	bpf_iter_num_new(&it, 0, 3);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
	/* (!) no explicit bpf_iter_num_destroy() */
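
	/* Note: the cleanup attribute above makes the compiler emit a call to
	 * bpf_iter_num_destroy(&it) automatically when 'it' goes out of scope,
	 * which is why no explicit destroy call is needed here.
	 */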

int iter_for_loop(const void *ctx)
	struct bpf_iter_num it;

	bpf_iter_num_new(&it, 5, 10);
	for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
	bpf_iter_num_destroy(&it);

int iter_bpf_for_each_macro(const void *ctx)
	bpf_for_each(num, v, 5, 10) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);

int iter_bpf_for_macro(const void *ctx)
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", i);
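
	/* Note: bpf_for(i, 5, 10) and bpf_for_each(num, v, 5, 10) are the
	 * selftests' convenience macros; they expand to the same explicit
	 * bpf_iter_num_new()/bpf_iter_num_next()/bpf_iter_num_destroy()
	 * sequence that the open-coded tests above use.
	 */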

int iter_pragma_unroll_loop(const void *ctx)
	struct bpf_iter_num it;

	bpf_iter_num_new(&it, 0, 2);
	__pragma_loop_no_unroll
	for (i = 0; i < 3; i++) {
		v = bpf_iter_num_next(&it);
		bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
	bpf_iter_num_destroy(&it);
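
	/* Note: the [0, 2) range yields only two values, so the third next()
	 * call returns NULL; the "v ? *v : -1" above prints -1 for that
	 * exhausted-iterator case.
	 */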

int iter_manual_unroll_loop(const void *ctx)
	struct bpf_iter_num it;

	bpf_iter_num_new(&it, 100, 200);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
	bpf_iter_num_destroy(&it);

int iter_multiple_sequential_loops(const void *ctx)
	struct bpf_iter_num it;

	bpf_iter_num_new(&it, 0, 3);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_BASIC: E1 VAL: v=%d", *v);
	bpf_iter_num_destroy(&it);

	bpf_iter_num_new(&it, 5, 10);
	for (v = bpf_iter_num_next(&it); v; v = bpf_iter_num_next(&it)) {
		bpf_printk("ITER_BASIC: E2 VAL: v=%d", *v);
	bpf_iter_num_destroy(&it);

	bpf_iter_num_new(&it, 0, 2);
	__pragma_loop_no_unroll
	for (i = 0; i < 3; i++) {
		v = bpf_iter_num_next(&it);
		bpf_printk("ITER_BASIC: E3 VAL: i=%d v=%d", i, v ? *v : -1);
	bpf_iter_num_destroy(&it);

	bpf_iter_num_new(&it, 100, 200);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d", v ? *v : -1);
	v = bpf_iter_num_next(&it);
	bpf_printk("ITER_BASIC: E4 VAL: v=%d\n", v ? *v : -1);
	bpf_iter_num_destroy(&it);
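
	/* Note: all four loop styles above reuse the same on-stack iterator
	 * slot back to back; each bpf_iter_num_new() is paired with a
	 * bpf_iter_num_destroy() before the slot is reused, which keeps the
	 * iterator state valid from the verifier's point of view.
	 */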

int iter_limit_cond_break_loop(const void *ctx)
	struct bpf_iter_num it;
	int *v, i = 0, sum = 0;

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_SIMPLE: i=%d v=%d", i, *v);
	bpf_iter_num_destroy(&it);

	bpf_printk("ITER_SIMPLE: sum=%d\n", sum);

int iter_obfuscate_counter(const void *ctx)
	struct bpf_iter_num it;

	/* Make i's initial value unknowable to the verifier to prevent it from
	 * pruning the if/else branch inside the loop body and marking i as precise.

	bpf_iter_num_new(&it, 0, 10);
	while ((v = bpf_iter_num_next(&it))) {

		/* If we initialized i as `int i = 0;` above, the verifier would
		 * track that i becomes 1 on the first iteration after the increment
		 * above, and here it would eagerly prune the else branch
		 * and mark i as precise, ruining the open-coded iterator logic
		 * completely, as each subsequent iteration would have a different
		 * *precise* value of i, and thus there would be no
		 * convergence of states. This would result in reaching the maximum
		 * instruction limit, no matter what the limit is.

		bpf_printk("ITER_OBFUSCATE_COUNTER: i=%d v=%d x=%d", i, *v, x);
	bpf_iter_num_destroy(&it);

	bpf_printk("ITER_OBFUSCATE_COUNTER: sum=%d\n", sum);

int iter_search_loop(const void *ctx)
	struct bpf_iter_num it;
	int *v, *elem = NULL;

	bpf_iter_num_new(&it, 0, 10);

	while ((v = bpf_iter_num_next(&it))) {
		bpf_printk("ITER_SEARCH_LOOP: v=%d", *v);

	/* should fail to verify if bpf_iter_num_destroy() is here */

		/* the found element will be wrong here; we should have copied
		 * the value to a variable, but we want to make sure we can
		 * access the memory after the loop anyway
		bpf_printk("ITER_SEARCH_LOOP: FOUND IT = %d!\n", *elem);
		bpf_printk("ITER_SEARCH_LOOP: NOT FOUND IT!\n");
	bpf_iter_num_destroy(&it);

int iter_array_fill(const void *ctx)
	bpf_for(i, 0, ARRAY_SIZE(arr)) {

	bpf_for(i, 0, ARRAY_SIZE(arr)) {

	bpf_printk("ITER_ARRAY_FILL: sum=%d (should be %d)\n", sum, 255 * 256);

static int arr2d[4][5];
static int arr2d_row_sums[4];
static int arr2d_col_sums[5];

int iter_nested_iters(const void *ctx)
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
			arr2d[row][col] = row * col;

	/* zero-initialize sums */
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		arr2d_row_sums[row] = 0;
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		arr2d_col_sums[col] = 0;

	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
			sum += arr2d[row][col];
			arr2d_row_sums[row] += arr2d[row][col];
			arr2d_col_sums[col] += arr2d[row][col];

	bpf_printk("ITER_NESTED_ITERS: total sum=%d", sum);
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_printk("ITER_NESTED_ITERS: row #%d sum=%d", row, arr2d_row_sums[row]);
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		bpf_printk("ITER_NESTED_ITERS: col #%d sum=%d%s",
			   col, arr2d_col_sums[col],
			   col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");

int iter_nested_deeply_iters(const void *ctx)
	/* validate that we can break from inside bpf_repeat() */

static __noinline void fill_inner_dimension(int row)
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		arr2d[row][col] = row * col;

static __noinline int sum_inner_dimension(int row)
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		sum += arr2d[row][col];
		arr2d_row_sums[row] += arr2d[row][col];
		arr2d_col_sums[col] += arr2d[row][col];

int iter_subprog_iters(const void *ctx)
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		fill_inner_dimension(row);

	/* zero-initialize sums */
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		arr2d_row_sums[row] = 0;
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		arr2d_col_sums[col] = 0;

	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		sum += sum_inner_dimension(row);

	bpf_printk("ITER_SUBPROG_ITERS: total sum=%d", sum);
	bpf_for(row, 0, ARRAY_SIZE(arr2d)) {
		bpf_printk("ITER_SUBPROG_ITERS: row #%d sum=%d",
			   row, arr2d_row_sums[row]);
	bpf_for(col, 0, ARRAY_SIZE(arr2d[0])) {
		bpf_printk("ITER_SUBPROG_ITERS: col #%d sum=%d%s",
			   col, arr2d_col_sums[col],
			   col == ARRAY_SIZE(arr2d[0]) - 1 ? "\n" : "");

struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 1000);
	__type(key, int);   /* key/value types inferred from the lookups below */
	__type(value, int);
} hash_map SEC(".maps");

__failure __msg("invalid mem access 'scalar'")
int iter_err_too_permissive1(const void *ctx)
	map_val = bpf_map_lookup_elem(&hash_map, &key);

	bpf_repeat(1000000) {

__failure __msg("invalid mem access 'map_value_or_null'")
int iter_err_too_permissive2(const void *ctx)
	map_val = bpf_map_lookup_elem(&hash_map, &key);

	bpf_repeat(1000000) {
		map_val = bpf_map_lookup_elem(&hash_map, &key);

__failure __msg("invalid mem access 'map_value_or_null'")
int iter_err_too_permissive3(const void *ctx)
	bpf_repeat(1000000) {
		map_val = bpf_map_lookup_elem(&hash_map, &key);

int iter_tricky_but_fine(const void *ctx)
	bpf_repeat(1000000) {
		map_val = bpf_map_lookup_elem(&hash_map, &key);

#define __bpf_memzero(p, sz) bpf_probe_read_kernel((p), (sz), 0)
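
/* Note: __bpf_memzero() above relies on bpf_probe_read_kernel() failing for
 * kernel address 0 and zero-filling the destination buffer on failure: the
 * buffer ends up zeroed at runtime while the verifier still treats its
 * contents as unknown, which is what the stack-array test below needs.
 */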

int iter_stack_array_loop(const void *ctx)
	long arr1[16], arr2[16], sum = 0;

	/* zero-init arr1 and arr2 in such a way that the verifier doesn't know
	 * they are all zeros; if we don't do that, we'll make the BPF verifier track
	 * all combinations of zero/non-zero stack slots for arr1/arr2, which
	 * will lead to O(2^(ARRAY_SIZE(arr1)+ARRAY_SIZE(arr2))) different
	__bpf_memzero(arr1, sizeof(arr1));
	__bpf_memzero(arr2, sizeof(arr2));

	/* validate that we can break and continue when using bpf_for() */
	bpf_for(i, 0, ARRAY_SIZE(arr1)) {

	bpf_for(i, 0, ARRAY_SIZE(arr1)) {
		sum += arr1[i] + arr2[i];

static __noinline void fill(struct bpf_iter_num *it, int *arr, __u32 n, int mul)
	while ((t = bpf_iter_num_next(it))) {

static __noinline int sum(struct bpf_iter_num *it, int *arr, __u32 n)
	while ((t = bpf_iter_num_next(it))) {

int iter_pass_iter_ptr_to_subprog(const void *ctx)
	int arr1[16], arr2[32];
	struct bpf_iter_num it;

	n = ARRAY_SIZE(arr1);
	bpf_iter_num_new(&it, 0, n);
	fill(&it, arr1, n, 2);
	bpf_iter_num_destroy(&it);

	n = ARRAY_SIZE(arr2);
	bpf_iter_num_new(&it, 0, n);
	fill(&it, arr2, n, 10);
	bpf_iter_num_destroy(&it);

	n = ARRAY_SIZE(arr1);
	bpf_iter_num_new(&it, 0, n);
	sum1 = sum(&it, arr1, n);
	bpf_iter_num_destroy(&it);

	n = ARRAY_SIZE(arr2);
	bpf_iter_num_new(&it, 0, n);
	sum2 = sum(&it, arr2, n);
	bpf_iter_num_destroy(&it);

	bpf_printk("sum1=%d, sum2=%d", sum1, sum2);
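
	/* Note: the caller owns the iterator lifetime (new/destroy pairs),
	 * while the static __noinline fill() and sum() subprogs only call
	 * bpf_iter_num_next() on the pointer passed in; this exercises passing
	 * iterator state into subprograms.
	 */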

__msg("R1 type=scalar expected=fp")
__naked int delayed_read_mark(void)
	/* This is equivalent to the C program below.
	 * The call to bpf_iter_num_next() is reachable with r7 values &fp[-16] and 0xdead.
	 * The state with r7=&fp[-16] is visited first and follows the r6 != 42 ... continue branch.
	 * At this point the iterator next() call is reached with r7 that has no read mark.
	 * The loop body with r7=0xdead would only be visited if the verifier decided to continue
	 * with a second loop iteration. Absence of a read mark on r7 might affect the state
	 * equivalence logic used for iterator convergence tracking.
	 *
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 * bpf_probe_read_user(r7, 8, 0xdeadbeef); // this is not safe
	 * bpf_iter_num_destroy(&fp[-8])
	"*(u64 *)(r7 + 0) = r0;"
	"call %[bpf_get_prandom_u32];"
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto 2f;"
	"if r6 != 42 goto 3f;"
	"call %[bpf_probe_read_user];"
	"call %[bpf_iter_num_destroy];"
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_iter_num_new),
	  __imm(bpf_iter_num_next),
	  __imm(bpf_iter_num_destroy),
	  __imm(bpf_probe_read_user)

__msg("math between fp pointer and register with unbounded")
__naked int delayed_precision_mark(void)
	/* This is equivalent to the C program below.
	 * The test is similar to delayed_read_mark but verifies that incomplete
	 * precision marks don't fool the verifier.
	 * The call to bpf_iter_num_next() is reachable with r7 values -16 and -32.
	 * The state with r7=-16 is visited first and follows the r6 != 42 ... continue branch.
	 * At this point the iterator next() call is reached with r7 that has no read
	 * and no precision marks.
	 * The loop body with r7=-32 would only be visited if the verifier decided to continue
	 * with a second loop iteration. Absence of a precision mark on r7 might affect the state
	 * equivalence logic used for iterator convergence tracking.
	 *
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 * r6 = bpf_get_prandom_u32()
	 * r8 = *(u64 *)(r0 + 0) // this is not safe
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_destroy(&fp[-8])
	"*(u64 *)(r10 - 16) = r8;"
	"call %[bpf_get_prandom_u32];"
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto 2f;"
	"if r6 != 42 goto 3f;"
	"call %[bpf_get_prandom_u32];"
	"r8 = *(u64 *)(r0 + 0);"
	"call %[bpf_get_prandom_u32];"
	"call %[bpf_iter_num_destroy];"
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_iter_num_new),
	  __imm(bpf_iter_num_next),
	  __imm(bpf_iter_num_destroy),
	  __imm(bpf_probe_read_user)

__msg("math between fp pointer and register with unbounded")
__flag(BPF_F_TEST_STATE_FREQ)
__naked int loop_state_deps1(void)
	/* This is equivalent to the C program below.
	 *
	 * The case turns out to be tricky in the sense that:
	 * - states with c=-25 are explored only on a second iteration
	 * - states with a read+precise mark on c are explored only on a
	 *   second iteration of the inner loop and in a state which
	 *   is pushed to the states stack first.
	 *
	 * Depending on the details of the iterator convergence logic
	 * the verifier might stop states traversal too early and miss
	 * the unsafe c=-25 memory access.
	 *
	 * j = iter_new(); // fp[-16]
	 * while (iter_next(j)) {
	 * i = iter_new(); // fp[-8]
	 * while (iter_next(i)) {
	 * } else if (a == 0) {
	 * if (random() == 42)
	 * *(r10 + c) = 7; // this is not safe
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto j_loop_end_%=;"
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto i_loop_end_%=;"
	"if r6 != 1 goto check_zero_r6_%=;"
	"if r6 != 0 goto i_loop_%=;"
	"call %[bpf_get_prandom_u32];"
	"if r0 != 42 goto check_one_r7_%=;"
	"if r7 != 1 goto i_loop_%=;"
	"*(u64 *)(r0 + 0) = r1;"
	"call %[bpf_iter_num_destroy];"
	"call %[bpf_iter_num_destroy];"
	"call %[bpf_iter_num_destroy];"
	"call %[bpf_iter_num_destroy];"
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_iter_num_new),
	  __imm(bpf_iter_num_next),
	  __imm(bpf_iter_num_destroy)

__msg("math between fp pointer and register with unbounded")
__flag(BPF_F_TEST_STATE_FREQ)
__naked int loop_state_deps2(void)
	/* This is equivalent to the C program below.
	 *
	 * The case turns out to be tricky in the sense that:
	 * - states with a read+precise mark on c are explored only on a second
	 *   iteration of the first inner loop and in a state which is pushed to
	 *   the states stack first.
	 * - states with c=-25 are explored only on a second iteration of the
	 *   second inner loop and in a state which is pushed to the states stack
	 *
	 * Depending on the details of the iterator convergence logic
	 * the verifier might stop states traversal too early and miss
	 * the unsafe c=-25 memory access.
	 *
	 * j = iter_new(); // fp[-16]
	 * while (iter_next(j)) {
	 * i = iter_new(); // fp[-8]
	 * while (iter_next(i)) {
	 * } else if (a == 0) {
	 * if (random() == 42)
	 * *(r10 + c) = 7; // this is not safe
	 * i = iter_new(); // fp[-8]
	 * while (iter_next(i)) {
	 * } else if (a == 0) {
	 * if (random() == 42)
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto j_loop_end_%=;"
	/* first inner loop */
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto i_loop_end_%=;"
	"if r6 != 1 goto check_zero_r6_%=;"
	"if r6 != 0 goto i_loop_%=;"
	"call %[bpf_get_prandom_u32];"
	"if r0 != 42 goto check_one_r7_%=;"
	"if r7 != 1 goto i_loop_%=;"
	"*(u64 *)(r0 + 0) = r1;"
	"call %[bpf_iter_num_destroy];"
	"call %[bpf_iter_num_destroy];"
	"call %[bpf_iter_num_destroy];"
	/* second inner loop */
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto i2_loop_end_%=;"
	"if r6 != 1 goto check2_zero_r6_%=;"
	"check2_zero_r6_%=:"
	"if r6 != 0 goto i2_loop_%=;"
	"call %[bpf_get_prandom_u32];"
	"if r0 != 42 goto check2_one_r7_%=;"
	"if r7 != 1 goto i2_loop_%=;"
	"call %[bpf_iter_num_destroy];"
	"call %[bpf_iter_num_destroy];"
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_iter_num_new),
	  __imm(bpf_iter_num_next),
	  __imm(bpf_iter_num_destroy)

__naked int triple_continue(void)
	/* This is equivalent to the C program below.
	 * The high branching factor of the loop body turned out to be
	 * problematic for one of the iterator convergence tracking
	 * algorithms explored.
	 *
	 * r6 = bpf_get_prandom_u32()
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 * if (bpf_get_prandom_u32() != 42)
	 * if (bpf_get_prandom_u32() != 42)
	 * if (bpf_get_prandom_u32() != 42)
	 * bpf_iter_num_destroy(&fp[-8])
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto loop_end_%=;"
	"call %[bpf_get_prandom_u32];"
	"if r0 != 42 goto loop_%=;"
	"call %[bpf_get_prandom_u32];"
	"if r0 != 42 goto loop_%=;"
	"call %[bpf_get_prandom_u32];"
	"if r0 != 42 goto loop_%=;"
	"call %[bpf_iter_num_destroy];"
	: __imm(bpf_get_prandom_u32),
	  __imm(bpf_iter_num_new),
	  __imm(bpf_iter_num_next),
	  __imm(bpf_iter_num_destroy)

__naked int widen_spill(void)
	/* This is equivalent to the C program below.
	 * The counter is stored in fp[-16]; if this counter is not widened,
	 * the verifier states representing loop iterations would never converge.
	 *
	 * bpf_iter_num_new(&fp[-8], 0, 10)
	 * while (bpf_iter_num_next(&fp[-8])) {
	 * bpf_iter_num_destroy(&fp[-8])
	"*(u64 *)(r10 - 16) = r0;"
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto loop_end_%=;"
	"r0 = *(u64 *)(r10 - 16);"
	"*(u64 *)(r10 - 16) = r0;"
	"call %[bpf_iter_num_destroy];"
	: __imm(bpf_iter_num_new),
	  __imm(bpf_iter_num_next),
	  __imm(bpf_iter_num_destroy)

__naked int checkpoint_states_deletion(void)
	/* This is equivalent to the C program below.
	 *
	 * int *a, *b, *c, *d, *e, *f;
	 *
	 * bpf_for(i, 0, 10) {
	 * a = bpf_map_lookup_elem(&amap, &i);
	 * b = bpf_map_lookup_elem(&amap, &i);
	 * c = bpf_map_lookup_elem(&amap, &i);
	 * d = bpf_map_lookup_elem(&amap, &i);
	 * e = bpf_map_lookup_elem(&amap, &i);
	 * f = bpf_map_lookup_elem(&amap, &i);
	 *
	 * The body of the loop spawns multiple simulation paths
	 * with different combinations of NULL/non-NULL information for a/b/c/d/e/f.
	 * Each combination is unique from the states_equal() point of view.
	 * An explored-states checkpoint is created after each iterator next call.
	 * The iterator convergence logic expects that eventually the current state
	 * will become equal to one of the explored states and thus loop
	 * exploration will be finished (at least for a specific path).
	 * The verifier evicts explored states with a high miss-to-hit ratio
	 * to avoid comparing the current state with too many explored
	 * states per instruction.
	 * This test is designed to "stress test" the eviction policy defined by the formula:
	 *
	 *    sl->miss_cnt > sl->hit_cnt * N + N // if true sl->state is evicted
	 *
	 * Currently N is set to 64, which allows for 6 variables in this test.
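	 * For reference: with 6 maybe-NULL pointers the loop body can yield up
	 * to 2^6 = 64 distinct NULL/non-NULL combinations, which matches the
	 * N = 64 used in the eviction formula above.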
	"*(u64 *)(r10 - 24) = r6;" /* d */
	"*(u64 *)(r10 - 32) = r6;" /* e */
	"*(u64 *)(r10 - 40) = r6;" /* f */
	"call %[bpf_iter_num_new];"
	"call %[bpf_iter_num_next];"
	"if r0 == 0 goto loop_end_%=;"
	"*(u64 *)(r10 - 16) = r0;"
	"call %[bpf_map_lookup_elem];"
	"call %[bpf_map_lookup_elem];"
	"call %[bpf_map_lookup_elem];"
	"call %[bpf_map_lookup_elem];"
	"*(u64 *)(r10 - 24) = r0;"
	"call %[bpf_map_lookup_elem];"
	"*(u64 *)(r10 - 32) = r0;"
	"call %[bpf_map_lookup_elem];"
	"*(u64 *)(r10 - 40) = r0;"
	"if r6 == 0 goto +1;"
	"if r7 == 0 goto +1;"
	"if r8 == 0 goto +1;"
	"r0 = *(u64 *)(r10 - 24);"
	"if r0 == 0 goto +1;"
	"r0 = *(u64 *)(r10 - 32);"
	"if r0 == 0 goto +1;"
	"r0 = *(u64 *)(r10 - 40);"
	"if r0 == 0 goto +1;"
	"call %[bpf_iter_num_destroy];"
	: __imm(bpf_map_lookup_elem),
	  __imm(bpf_iter_num_new),
	  __imm(bpf_iter_num_next),
	  __imm(bpf_iter_num_destroy),

int iter_arr_with_actual_elem_count(const void *ctx)
	int i, n = loop_data.n, sum = 0;

	if (n > ARRAY_SIZE(loop_data.data))

		/* no rechecking of i against ARRAY_SIZE(loop_data.data) */
		sum += loop_data.data[i];
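
	/* Note: this is safe because n was checked against
	 * ARRAY_SIZE(loop_data.data) above, so i (bounded by n) can never run
	 * past the end of the array even though it is not re-checked inside
	 * the loop.
	 */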

__u32 upper, select_n, result;

static __noinline bool nest_2(char *str)
	/* some insns (including branch insns) to ensure stacksafe() is triggered
	 * in nest_2(). This way, stacksafe() can compare the frame associated with nest_1().

static __noinline bool nest_1(int n)
	/* case 0: allocate stack, case 1: do not allocate stack */
		if (bpf_get_current_comm(comm, 16))
		return nest_2(comm);
		return nest_2((char *)&global);
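
	/* Note: depending on n, nest_1() either builds an on-stack comm buffer
	 * or passes a pointer to a global, so the two paths reach nest_2()
	 * with different stack layouts in nest_1()'s frame; that is what lets
	 * the stacksafe() comparison of the nest_1() frame be exercised.
	 */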

int iter_subprog_check_stacksafe(const void *ctx)
	bpf_for(i, 0, upper) {
		if (!nest_1(select_n)) {

struct bpf_iter_num global_it;

__failure __msg("arg#0 expected pointer to an iterator on stack")
int iter_new_bad_arg(const void *ctx)
	bpf_iter_num_new(&global_it, 0, 1);

__failure __msg("arg#0 expected pointer to an iterator on stack")
int iter_next_bad_arg(const void *ctx)
	bpf_iter_num_next(&global_it);

__failure __msg("arg#0 expected pointer to an iterator on stack")
int iter_destroy_bad_arg(const void *ctx)
	bpf_iter_num_destroy(&global_it);
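
/* Note: all three bpf_iter_num_*() kfuncs require the iterator object to live
 * on the BPF program stack, so passing the global 'global_it' above is
 * rejected at load time with the "expected pointer to an iterator on stack"
 * message each of these tests checks for.
 */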

char _license[] SEC("license") = "GPL";