1 // SPDX-License-Identifier: GPL-2.0
2 /* Converted from tools/testing/selftests/bpf/verifier/array_access.c */
5 #include <bpf/bpf_helpers.h>
/* Read-only (from BPF program side) 1-entry array of struct test_val.
 * NOTE(review): fragmentary view — the opening "struct {" and the
 * __type(key, ...) line are missing; leading numbers are stale line
 * numbers from an extraction pass, not code.
 */
16 __uint(type, BPF_MAP_TYPE_ARRAY);
17 __uint(max_entries, 1);
19 __type(value, struct test_val);
20 __uint(map_flags, BPF_F_RDONLY_PROG);
21 } map_array_ro SEC(".maps");
/* Write-only (from BPF program side) 1-entry array of struct test_val.
 * NOTE(review): fragmentary view — opening "struct {" missing.
 */
24 __uint(type, BPF_MAP_TYPE_ARRAY);
25 __uint(max_entries, 1);
27 __type(value, struct test_val);
28 __uint(map_flags, BPF_F_WRONLY_PROG);
29 } map_array_wo SEC(".maps");
/* Per-CPU array, 2 entries, used by the pcpu elided-nullness test below.
 * NOTE(review): fragmentary view — opening "struct {" missing.
 */
32 __uint(type, BPF_MAP_TYPE_PERCPU_ARRAY);
33 __uint(max_entries, 2);
35 __type(value, struct test_val);
36 } map_array_pcpu SEC(".maps");
/* Plain 2-entry array used by the constant-key (elided null check) tests.
 * NOTE(review): fragmentary view — opening "struct {" missing.
 */
39 __uint(type, BPF_MAP_TYPE_ARRAY);
40 __uint(max_entries, 2);
42 __type(value, struct test_val);
43 } map_array SEC(".maps");
/* Hash map with 48-byte values (struct test_val), long long keys; the
 * classic verifier array-access tests look elements up here.
 * NOTE(review): fragmentary view — opening "struct {" missing.
 */
46 __uint(type, BPF_MAP_TYPE_HASH);
47 __uint(max_entries, 1);
48 __type(key, long long);
49 __type(value, struct test_val);
50 } map_hash_48b SEC(".maps");
/* Null-checked lookup, then a store at constant offset 0 into the value.
 * Privileged: accepted; unprivileged: rejected with "R0 leaks addr".
 * NOTE(review): asm template is incomplete in this view (opener, key
 * setup, label l0 and closing lines elided).
 */
53 __description("valid map access into an array with a constant")
54 __success __failure_unpriv __msg_unpriv("R0 leaks addr")
56 __naked void an_array_with_a_constant_1(void)
60 *(u64*)(r10 - 8) = r1; \
63 r1 = %[map_hash_48b] ll; \
64 call %[bpf_map_lookup_elem]; \
65 if r0 == 0 goto l0_%=; \
66 r1 = %[test_val_foo]; \
67 *(u64*)(r0 + 0) = r1; \
70 : __imm(bpf_map_lookup_elem),
71 __imm_addr(map_hash_48b),
72 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* In-bounds access where the value offset comes through a register.
 * Privileged: accepted (retval 0); unprivileged: "R0 leaks addr".
 * NOTE(review): asm template incomplete in this view — the register
 * index setup lines between the null check and the store are elided.
 */
77 __description("valid map access into an array with a register")
78 __success __failure_unpriv __msg_unpriv("R0 leaks addr")
79 __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
80 __naked void an_array_with_a_register_1(void)
84 *(u64*)(r10 - 8) = r1; \
87 r1 = %[map_hash_48b] ll; \
88 call %[bpf_map_lookup_elem]; \
89 if r0 == 0 goto l0_%=; \
93 r1 = %[test_val_foo]; \
94 *(u64*)(r0 + 0) = r1; \
97 : __imm(bpf_map_lookup_elem),
98 __imm_addr(map_hash_48b),
99 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* Variable index loaded from the map value, bounded by an unsigned
 * "r1 >= MAX_ENTRIES" check before use — accepted for priv; unpriv
 * fails with "R0 leaks addr".
 * NOTE(review): asm template incomplete in this view (opener/closer and
 * the pointer-arithmetic lines after the bound check are elided).
 */
104 __description("valid map access into an array with a variable")
105 __success __failure_unpriv __msg_unpriv("R0 leaks addr")
106 __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
107 __naked void an_array_with_a_variable_1(void)
111 *(u64*)(r10 - 8) = r1; \
114 r1 = %[map_hash_48b] ll; \
115 call %[bpf_map_lookup_elem]; \
116 if r0 == 0 goto l0_%=; \
117 r1 = *(u32*)(r0 + 0); \
118 if r1 >= %[max_entries] goto l0_%=; \
121 r1 = %[test_val_foo]; \
122 *(u64*)(r0 + 0) = r1; \
125 : __imm(bpf_map_lookup_elem),
126 __imm_addr(map_hash_48b),
127 __imm_const(max_entries, MAX_ENTRIES),
128 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* Signed bounds: clamps a 32-bit value with "w1 s> 0xffffffff" then
 * checks "MAX_ENTRIES s> r1" before the access — accepted for priv;
 * unpriv fails with "R0 leaks addr".
 * NOTE(review): asm template incomplete in this view (label l2 body and
 * closing lines elided).
 */
133 __description("valid map access into an array with a signed variable")
134 __success __failure_unpriv __msg_unpriv("R0 leaks addr")
135 __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
136 __naked void array_with_a_signed_variable(void)
140 *(u64*)(r10 - 8) = r1; \
143 r1 = %[map_hash_48b] ll; \
144 call %[bpf_map_lookup_elem]; \
145 if r0 == 0 goto l0_%=; \
146 r1 = *(u32*)(r0 + 0); \
147 if w1 s> 0xffffffff goto l1_%=; \
149 l1_%=: w2 = %[max_entries]; \
150 if r2 s> r1 goto l2_%=; \
154 r1 = %[test_val_foo]; \
155 *(u64*)(r0 + 0) = r1; \
158 : __imm(bpf_map_lookup_elem),
159 __imm_addr(map_hash_48b),
160 __imm_const(max_entries, MAX_ENTRIES),
161 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* Out-of-bounds constant offset: stores at (MAX_ENTRIES + 1) << 2 = 48,
 * one element past the 48-byte value — verifier must reject with
 * "invalid access to map value, value_size=48 off=48 size=8".
 * NOTE(review): asm template incomplete in this view.
 */
166 __description("invalid map access into an array with a constant")
167 __failure __msg("invalid access to map value, value_size=48 off=48 size=8")
169 __naked void an_array_with_a_constant_2(void)
173 *(u64*)(r10 - 8) = r1; \
176 r1 = %[map_hash_48b] ll; \
177 call %[bpf_map_lookup_elem]; \
178 if r0 == 0 goto l0_%=; \
179 r1 = %[test_val_foo]; \
180 *(u64*)(r0 + %[__imm_0]) = r1; \
183 : __imm(bpf_map_lookup_elem),
184 __imm_addr(map_hash_48b),
185 __imm_const(__imm_0, (MAX_ENTRIES + 1) << 2),
186 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* Register offset set to MAX_ENTRIES + 1 (out of range) — must be
 * rejected: "R0 min value is outside of the allowed memory range".
 * NOTE(review): asm template incomplete in this view — the lines
 * loading __imm_0 into the index register are elided.
 */
191 __description("invalid map access into an array with a register")
192 __failure __msg("R0 min value is outside of the allowed memory range")
194 __flag(BPF_F_ANY_ALIGNMENT)
195 __naked void an_array_with_a_register_2(void)
199 *(u64*)(r10 - 8) = r1; \
202 r1 = %[map_hash_48b] ll; \
203 call %[bpf_map_lookup_elem]; \
204 if r0 == 0 goto l0_%=; \
208 r1 = %[test_val_foo]; \
209 *(u64*)(r0 + 0) = r1; \
212 : __imm(bpf_map_lookup_elem),
213 __imm_addr(map_hash_48b),
214 __imm_const(__imm_0, MAX_ENTRIES + 1),
215 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* Variable index used with NO bounds check at all — must be rejected:
 * "R0 unbounded memory access, make sure to bounds check any such access".
 * NOTE(review): asm template incomplete in this view.
 */
220 __description("invalid map access into an array with a variable")
222 __msg("R0 unbounded memory access, make sure to bounds check any such access")
224 __flag(BPF_F_ANY_ALIGNMENT)
225 __naked void an_array_with_a_variable_2(void)
229 *(u64*)(r10 - 8) = r1; \
232 r1 = %[map_hash_48b] ll; \
233 call %[bpf_map_lookup_elem]; \
234 if r0 == 0 goto l0_%=; \
235 r1 = *(u32*)(r0 + 0); \
238 r1 = %[test_val_foo]; \
239 *(u64*)(r0 + 0) = r1; \
242 : __imm(bpf_map_lookup_elem),
243 __imm_addr(map_hash_48b),
244 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* Only a signed UPPER bound is checked on a 64-bit value ("r2 s> r1");
 * no lower (floor) check, so a negative index slips through — must be
 * rejected ("R0 unbounded memory access"); unpriv also leaks addr.
 * NOTE(review): asm template incomplete in this view.
 */
249 __description("invalid map access into an array with no floor check")
250 __failure __msg("R0 unbounded memory access")
251 __failure_unpriv __msg_unpriv("R0 leaks addr")
252 __flag(BPF_F_ANY_ALIGNMENT)
253 __naked void array_with_no_floor_check(void)
257 *(u64*)(r10 - 8) = r1; \
260 r1 = %[map_hash_48b] ll; \
261 call %[bpf_map_lookup_elem]; \
262 if r0 == 0 goto l0_%=; \
263 r1 = *(u64*)(r0 + 0); \
264 w2 = %[max_entries]; \
265 if r2 s> r1 goto l1_%=; \
269 r1 = %[test_val_foo]; \
270 *(u64*)(r0 + 0) = r1; \
273 : __imm(bpf_map_lookup_elem),
274 __imm_addr(map_hash_48b),
275 __imm_const(max_entries, MAX_ENTRIES),
276 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* Off-by-one bound: the limit register is MAX_ENTRIES + 1, so index ==
 * MAX_ENTRIES passes the check and the access reaches off=44 size=8
 * past value_size=48 — must be rejected with that exact message.
 * NOTE(review): asm template incomplete in this view — the line loading
 * __imm_0 into r2 is elided.
 */
281 __description("invalid map access into an array with a invalid max check")
282 __failure __msg("invalid access to map value, value_size=48 off=44 size=8")
283 __failure_unpriv __msg_unpriv("R0 leaks addr")
284 __flag(BPF_F_ANY_ALIGNMENT)
285 __naked void with_a_invalid_max_check_1(void)
289 *(u64*)(r10 - 8) = r1; \
292 r1 = %[map_hash_48b] ll; \
293 call %[bpf_map_lookup_elem]; \
294 if r0 == 0 goto l0_%=; \
295 r1 = *(u32*)(r0 + 0); \
297 if r2 > r1 goto l1_%=; \
301 r1 = %[test_val_foo]; \
302 *(u64*)(r0 + 0) = r1; \
305 : __imm(bpf_map_lookup_elem),
306 __imm_addr(map_hash_48b),
307 __imm_const(__imm_0, MAX_ENTRIES + 1),
308 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* Performs two lookups and (in elided lines) adds one map-value pointer
 * to another — must be rejected with "R0 pointer += pointer".
 * NOTE(review): asm template incomplete in this view — the pointer-add
 * line itself is among the elided lines; expected message is the anchor.
 */
313 __description("invalid map access into an array with a invalid max check")
314 __failure __msg("R0 pointer += pointer")
316 __flag(BPF_F_ANY_ALIGNMENT)
317 __naked void with_a_invalid_max_check_2(void)
321 *(u64*)(r10 - 8) = r1; \
324 r1 = %[map_hash_48b] ll; \
325 call %[bpf_map_lookup_elem]; \
326 if r0 == 0 goto l0_%=; \
329 *(u64*)(r10 - 8) = r1; \
332 r1 = %[map_hash_48b] ll; \
333 call %[bpf_map_lookup_elem]; \
334 if r0 == 0 goto l0_%=; \
336 r0 = *(u32*)(r0 + %[test_val_foo]); \
339 : __imm(bpf_map_lookup_elem),
340 __imm_addr(map_hash_48b),
341 __imm_const(test_val_foo, offsetof(struct test_val, foo))
/* Direct 4-byte read from a BPF_F_RDONLY_PROG array value — allowed;
 * expected return value 28.
 * NOTE(review): asm template incomplete in this view.
 */
346 __description("valid read map access into a read-only array 1")
347 __success __success_unpriv __retval(28)
348 __naked void a_read_only_array_1_1(void)
352 *(u64*)(r10 - 8) = r1; \
355 r1 = %[map_array_ro] ll; \
356 call %[bpf_map_lookup_elem]; \
357 if r0 == 0 goto l0_%=; \
358 r0 = *(u32*)(r0 + 0); \
361 : __imm(bpf_map_lookup_elem),
362 __imm_addr(map_array_ro)
/* Helper-mediated read: passes the read-only map value to
 * bpf_csum_diff (reads its buffer argument) — allowed; retval 65507.
 * NOTE(review): asm template incomplete in this view — the helper
 * argument setup lines are elided.
 */
367 __description("valid read map access into a read-only array 2")
368 __success __retval(65507)
369 __naked void a_read_only_array_2_1(void)
373 *(u64*)(r10 - 8) = r1; \
376 r1 = %[map_array_ro] ll; \
377 call %[bpf_map_lookup_elem]; \
378 if r0 == 0 goto l0_%=; \
384 call %[bpf_csum_diff]; \
387 : __imm(bpf_csum_diff),
388 __imm(bpf_map_lookup_elem),
389 __imm_addr(map_array_ro)
/* Direct store into a BPF_F_RDONLY_PROG array value — must be rejected
 * with "write into map forbidden".
 * NOTE(review): asm template incomplete in this view.
 */
394 __description("invalid write map access into a read-only array 1")
395 __failure __msg("write into map forbidden")
397 __naked void a_read_only_array_1_2(void)
401 *(u64*)(r10 - 8) = r1; \
404 r1 = %[map_array_ro] ll; \
405 call %[bpf_map_lookup_elem]; \
406 if r0 == 0 goto l0_%=; \
408 *(u64*)(r0 + 0) = r1; \
411 : __imm(bpf_map_lookup_elem),
412 __imm_addr(map_array_ro)
/* Helper-mediated write: bpf_skb_load_bytes writes into its destination
 * buffer, here the read-only map value — must be rejected with
 * "write into map forbidden".
 * NOTE(review): asm template incomplete in this view — helper argument
 * setup lines are elided.
 */
417 __description("invalid write map access into a read-only array 2")
418 __failure __msg("write into map forbidden")
419 __naked void a_read_only_array_2_2(void)
424 *(u64*)(r10 - 8) = r1; \
427 r1 = %[map_array_ro] ll; \
428 call %[bpf_map_lookup_elem]; \
429 if r0 == 0 goto l0_%=; \
434 call %[bpf_skb_load_bytes]; \
437 : __imm(bpf_map_lookup_elem),
438 __imm(bpf_skb_load_bytes),
439 __imm_addr(map_array_ro)
/* Direct store into a BPF_F_WRONLY_PROG array value — allowed; retval 1.
 * NOTE(review): asm template incomplete in this view.
 */
444 __description("valid write map access into a write-only array 1")
445 __success __success_unpriv __retval(1)
446 __naked void a_write_only_array_1_1(void)
450 *(u64*)(r10 - 8) = r1; \
453 r1 = %[map_array_wo] ll; \
454 call %[bpf_map_lookup_elem]; \
455 if r0 == 0 goto l0_%=; \
457 *(u64*)(r0 + 0) = r1; \
461 : __imm(bpf_map_lookup_elem),
462 __imm_addr(map_array_wo)
/* Helper-mediated write: bpf_skb_load_bytes writing into a write-only
 * map value is permitted — expected retval 0.
 * NOTE(review): asm template incomplete in this view — helper argument
 * setup lines are elided.
 */
467 __description("valid write map access into a write-only array 2")
468 __success __retval(0)
469 __naked void a_write_only_array_2_1(void)
474 *(u64*)(r10 - 8) = r1; \
477 r1 = %[map_array_wo] ll; \
478 call %[bpf_map_lookup_elem]; \
479 if r0 == 0 goto l0_%=; \
484 call %[bpf_skb_load_bytes]; \
487 : __imm(bpf_map_lookup_elem),
488 __imm(bpf_skb_load_bytes),
489 __imm_addr(map_array_wo)
/* Direct load from a BPF_F_WRONLY_PROG array value — must be rejected
 * with "read from map forbidden".
 * NOTE(review): asm template incomplete in this view.
 */
494 __description("invalid read map access into a write-only array 1")
495 __failure __msg("read from map forbidden")
497 __naked void a_write_only_array_1_2(void)
501 *(u64*)(r10 - 8) = r1; \
504 r1 = %[map_array_wo] ll; \
505 call %[bpf_map_lookup_elem]; \
506 if r0 == 0 goto l0_%=; \
507 r0 = *(u64*)(r0 + 0); \
510 : __imm(bpf_map_lookup_elem),
511 __imm_addr(map_array_wo)
/* Helper-mediated read: bpf_csum_diff reading from a write-only map
 * value — must be rejected with "read from map forbidden".
 * NOTE(review): asm template incomplete in this view — helper argument
 * setup lines are elided.
 */
516 __description("invalid read map access into a write-only array 2")
517 __failure __msg("read from map forbidden")
518 __naked void a_write_only_array_2_2(void)
522 *(u64*)(r10 - 8) = r1; \
525 r1 = %[map_array_wo] ll; \
526 call %[bpf_map_lookup_elem]; \
527 if r0 == 0 goto l0_%=; \
533 call %[bpf_csum_diff]; \
536 : __imm(bpf_csum_diff),
537 __imm(bpf_map_lookup_elem),
538 __imm_addr(map_array_wo)
/* Elided-nullness: an 8-byte-aligned constant key (1) lets the verifier
 * prove the array lookup cannot fail, so val is dereferenced without a
 * null check; expected retval 4 (offsetof(test_val, foo)). The __msg
 * pins the precision-tracking log for the key spill at r10-8.
 * NOTE(review): function braces/return statement are elided in this view.
 */
543 __description("valid map access into an array using constant without nullness")
544 __success __retval(4) __log_level(2)
545 __msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
546 unsigned int an_array_with_a_constant_no_nullness(void)
548 /* Need 8-byte alignment for spill tracking */
549 __u32 __attribute__((aligned(8))) key = 1;
550 struct test_val *val;
552 val = bpf_map_lookup_elem(&map_array, &key);
553 val->index = offsetof(struct test_val, foo);
/* Two elided-nullness lookups with distinct constant keys (1 and 0);
 * both values dereferenced without null checks; expected retval 8
 * (4 + 4). The two __msg lines pin precision tracking for both spills.
 * NOTE(review): function braces are elided in this view.
 */
559 __description("valid multiple map access into an array using constant without nullness")
560 __success __retval(8) __log_level(2)
561 __msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -16) = {{(0|r[0-9])}}")
562 __msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
563 unsigned int multiple_array_with_a_constant_no_nullness(void)
565 __u32 __attribute__((aligned(8))) key = 1;
566 __u32 __attribute__((aligned(8))) key2 = 0;
567 struct test_val *val, *val2;
569 val = bpf_map_lookup_elem(&map_array, &key);
570 val->index = offsetof(struct test_val, foo);
572 val2 = bpf_map_lookup_elem(&map_array, &key2);
573 val2->index = offsetof(struct test_val, foo);
575 return val->index + val2->index;
/* 32-bit zero key: STACK_ZERO tracking handles zeroing precisely even
 * without BPF_REG_SIZE-aligned spills, so the null check may still be
 * elided; expected retval 4.
 * NOTE(review): the key declaration line and function braces are elided
 * in this view — only the lookup and store remain visible.
 */
579 __description("valid map access into an array using natural aligned 32-bit constant 0 without nullness")
580 __success __retval(4)
581 unsigned int an_array_with_a_32bit_constant_0_no_nullness(void)
583 /* Unlike the above tests, 32-bit zeroing is precisely tracked even
584 * if writes are not aligned to BPF_REG_SIZE. This tests that our
585 * STACK_ZERO handling functions.
587 struct test_val *val;
590 val = bpf_map_lookup_elem(&map_array, &key);
591 val->index = offsetof(struct test_val, foo);
/* Same elided-nullness pattern against the per-CPU array map; constant
 * key 1, no null check, expected retval 4.
 * NOTE(review): function braces/return are elided in this view.
 */
597 __description("valid map access into a pcpu array using constant without nullness")
598 __success __retval(4) __log_level(2)
599 __msg("mark_precise: frame0: regs= stack=-8 before {{[0-9]}}: ({{[a-f0-9]+}}) *(u32 *)(r10 -8) = {{(1|r[0-9])}}")
600 unsigned int a_pcpu_array_with_a_constant_no_nullness(void)
602 __u32 __attribute__((aligned(8))) key = 1;
603 struct test_val *val;
605 val = bpf_map_lookup_elem(&map_array_pcpu, &key);
606 val->index = offsetof(struct test_val, foo);
/* Constant key 3 exceeds max_entries (2), so the lookup result cannot
 * be proven non-null; the unchecked dereference must be rejected with
 * "R0 invalid mem access 'map_value_or_null'".
 * NOTE(review): function braces are elided in this view.
 */
612 __description("invalid map access into an array using constant without nullness")
613 __failure __msg("R0 invalid mem access 'map_value_or_null'")
614 unsigned int an_array_with_a_constant_no_nullness_out_of_bounds(void)
617 __u32 __attribute__((aligned(8))) key = 3;
618 struct test_val *val;
620 val = bpf_map_lookup_elem(&map_array, &key);
621 val->index = offsetof(struct test_val, foo);
/* Key marked STACK_MISC, then (per the inline comment) only the bottom
 * byte is spilled as a constant — spill smaller than the 4-byte key
 * means the verifier cannot prove an in-bounds lookup; dereference must
 * fail with "R0 invalid mem access 'map_value_or_null'".
 * NOTE(review): the sub-key-size store itself is among the lines elided
 * from this view.
 */
627 __description("invalid map access into an array using constant smaller than key_size")
628 __failure __msg("R0 invalid mem access 'map_value_or_null'")
629 unsigned int an_array_with_a_constant_too_small(void)
631 __u32 __attribute__((aligned(8))) key;
632 struct test_val *val;
634 /* Mark entire key as STACK_MISC */
635 bpf_probe_read_user(&key, sizeof(key), NULL);
637 /* Spilling only the bottom byte results in a tnum const of 1.
638 * We want to check that the verifier rejects it, as the spill is < 4B.
641 val = bpf_map_lookup_elem(&map_array, &key);
643 /* Should fail, as verifier cannot prove in-bound lookup */
644 val->index = offsetof(struct test_val, foo);
/* Spill larger than key_size: per the inline comment, the set bits may
 * land at different offsets on big- vs little-endian, so the lookup
 * cannot be elided; dereference must fail with
 * "R0 invalid mem access 'map_value_or_null'".
 * NOTE(review): the key declaration and oversized store are among the
 * lines elided from this view.
 */
650 __description("invalid map access into an array using constant larger than key_size")
651 __failure __msg("R0 invalid mem access 'map_value_or_null'")
652 unsigned int an_array_with_a_constant_too_big(void)
654 struct test_val *val;
657 /* Even if the constant value is < max_entries, if the spill size is
658 * larger than the key size, the set bits may not be where we expect them
659 * to be on different endian architectures.
661 val = bpf_map_lookup_elem(&map_array, &key);
662 val->index = offsetof(struct test_val, foo);
/* Key is constant on one path (rand > 42 -> 1) and unbounded-random on
 * the other, so nullness cannot be elided; the (elided) dereference must
 * fail with "R0 invalid mem access 'map_value_or_null'".
 * NOTE(review): the rand declaration, dereference and return are elided
 * in this view.
 */
668 __description("invalid elided lookup using const and non-const key")
669 __failure __msg("R0 invalid mem access 'map_value_or_null'")
670 unsigned int mixed_const_and_non_const_key_lookup(void)
672 __u32 __attribute__((aligned(8))) key;
673 struct test_val *val;
676 rand = bpf_get_prandom_u32();
677 key = rand > 42 ? 1 : rand;
678 val = bpf_map_lookup_elem(&map_array, &key);
/* Key pointer built at an invalid frame offset: the expected message
 * pins "invalid read from stack R2 off=4096 size=4".
 * NOTE(review): the __description line and the r2 setup are among the
 * lines elided from this view.
 */
684 __failure __msg("invalid read from stack R2 off=4096 size=4")
685 __naked void key_lookup_at_invalid_fp(void)
688 r1 = %[map_array] ll; \
691 call %[bpf_map_lookup_elem]; \
692 r0 = *(u64*)(r0 + 0); \
695 : __imm(bpf_map_lookup_elem),
696 __imm_addr(map_array)
/* global_key lives in a data section, not on the stack, so the
 * constant-key nullness elision does not apply: the lookup result stays
 * map_value_or_null and the unchecked dereference must be rejected with
 * "R0 invalid mem access 'map_value_or_null'".
 * NOTE(review): function braces/return are elided in this view.
 */
700 volatile __u32 __attribute__((aligned(8))) global_key;
703 __description("invalid elided lookup using non-stack key")
704 __failure __msg("R0 invalid mem access 'map_value_or_null'")
705 unsigned int non_stack_key_lookup(void)
707 struct test_val *val;
710 val = bpf_map_lookup_elem(&map_array, (void *)&global_key);
711 val->index = offsetof(struct test_val, foo);
/* License tag read by libbpf/kernel at load time; GPL is required for
 * GPL-only helpers. */
716 char _license[] SEC("license") = "GPL";