// SPDX-License-Identifier: GPL-2.0
/* Converted from tools/testing/selftests/bpf/verifier/ref_tracking.c */

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>
#include "../../../include/linux/filter.h"
#include "bpf_misc.h"

#define BPF_SK_LOOKUP(func) \
	/* struct bpf_sock_tuple tuple = {} */ \
	"r2 = 0;"			\
	"*(u32*)(r10 - 8) = r2;"	\
	"*(u64*)(r10 - 16) = r2;"	\
	"*(u64*)(r10 - 24) = r2;"	\
	"*(u64*)(r10 - 32) = r2;"	\
	"*(u64*)(r10 - 40) = r2;"	\
	"*(u64*)(r10 - 48) = r2;"	\
	/* sk = func(ctx, &tuple, sizeof tuple, 0, 0) */ \
	"r2 = r10;"			\
	"r2 += -48;"			\
	"r3 = %[sizeof_bpf_sock_tuple];"\
	"r4 = 0;"			\
	"r5 = 0;"			\
	"call %[" #func "];"

struct bpf_key {} __attribute__((preserve_access_index));

extern void bpf_key_put(struct bpf_key *key) __ksym;
extern struct bpf_key *bpf_lookup_system_key(__u64 id) __ksym;
extern struct bpf_key *bpf_lookup_user_key(__u32 serial, __u64 flags) __ksym;

/* BTF FUNC records are not generated for kfuncs referenced
 * from inline assembly. These records are necessary for
 * libbpf to link the program. The function below is a hack
 * to ensure that BTF FUNC records are generated.
 */
void __kfunc_btf_root(void)
{
	bpf_key_put(0);
	bpf_lookup_system_key(0);
	bpf_lookup_user_key(0, 0);
}
#define MAX_ENTRIES 11

struct test_val {
	unsigned int index;
	int foo[MAX_ENTRIES];
};

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, struct test_val);
} map_array_48b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_RINGBUF);
	__uint(max_entries, 4096);
} map_ringbuf SEC(".maps");
void dummy_prog_42_tc(void);
void dummy_prog_24_tc(void);
void dummy_prog_loop1_tc(void);

struct {
	__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
	__uint(max_entries, 4);
	__uint(key_size, sizeof(int));
	__array(values, void (void));
} map_prog1_tc SEC(".maps") = {
	.values = {
		[0] = (void *)&dummy_prog_42_tc,
		[1] = (void *)&dummy_prog_loop1_tc,
		[2] = (void *)&dummy_prog_24_tc,
	},
};

SEC("tc")
__auxiliary
__naked void dummy_prog_42_tc(void)
{
	asm volatile ("r0 = 42; exit;");
}

SEC("tc")
__auxiliary
__naked void dummy_prog_24_tc(void)
{
	asm volatile ("r0 = 24; exit;");
}

SEC("tc")
__auxiliary
__naked void dummy_prog_loop1_tc(void)
{
	asm volatile ("				\
	r3 = 0;					\
	r2 = %[map_prog1_tc] ll;		\
	call %[bpf_tail_call];			\
	r0 = 1;					\
	exit;					\
"	:
	: __imm(bpf_tail_call),
	  __imm_addr(map_prog1_tc)
	: __clobber_all);
}
__description("reference tracking: leak potential reference")
__failure __msg("Unreleased reference")
__naked void reference_tracking_leak_potential_reference(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r6 = r0; /* leak reference */ \
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: leak potential reference to sock_common")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_sock_common_1(void)
BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
" r6 = r0; /* leak reference */ \
: __imm(bpf_skc_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: leak potential reference on stack")
__failure __msg("Unreleased reference")
__naked void leak_potential_reference_on_stack(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
*(u64*)(r4 + 0) = r0; \
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: leak potential reference on stack 2")
__failure __msg("Unreleased reference")
__naked void potential_reference_on_stack_2(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
*(u64*)(r4 + 0) = r0; \
*(u64*)(r4 + 0) = r1; \
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: zero potential reference")
__failure __msg("Unreleased reference")
__naked void reference_tracking_zero_potential_reference(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r0 = 0; /* leak reference */ \
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: zero potential reference to sock_common")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_sock_common_2(void)
BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
" r0 = 0; /* leak reference */ \
: __imm(bpf_skc_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: copy and zero potential references")
__failure __msg("Unreleased reference")
__naked void copy_and_zero_potential_references(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
r7 = 0; /* leak reference */ \
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: acquire/release user key reference")
__naked void acquire_release_user_key_reference(void)
call %[bpf_lookup_user_key]; \
if r0 == 0 goto l0_%=; \
call %[bpf_key_put]; \
: __imm(bpf_key_put),
__imm(bpf_lookup_user_key)
__description("reference tracking: acquire/release system key reference")
__naked void acquire_release_system_key_reference(void)
call %[bpf_lookup_system_key]; \
if r0 == 0 goto l0_%=; \
call %[bpf_key_put]; \
: __imm(bpf_key_put),
__imm(bpf_lookup_system_key)
__description("reference tracking: release user key reference without check")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
__naked void user_key_reference_without_check(void)
call %[bpf_lookup_user_key]; \
call %[bpf_key_put]; \
: __imm(bpf_key_put),
__imm(bpf_lookup_user_key)
__description("reference tracking: release system key reference without check")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
__naked void system_key_reference_without_check(void)
call %[bpf_lookup_system_key]; \
call %[bpf_key_put]; \
: __imm(bpf_key_put),
__imm(bpf_lookup_system_key)
__description("reference tracking: release with NULL key pointer")
__failure __msg("Possibly NULL pointer passed to trusted arg0")
__naked void release_with_null_key_pointer(void)
call %[bpf_key_put]; \
__description("reference tracking: leak potential reference to user key")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_user_key(void)
call %[bpf_lookup_user_key]; \
: __imm(bpf_lookup_user_key)
__description("reference tracking: leak potential reference to system key")
__failure __msg("Unreleased reference")
__naked void potential_reference_to_system_key(void)
call %[bpf_lookup_system_key]; \
: __imm(bpf_lookup_system_key)
__description("reference tracking: release reference without check")
__failure __msg("type=sock_or_null expected=sock")
__naked void tracking_release_reference_without_check(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* reference in r0 may be NULL */ \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: release reference to sock_common without check")
__failure __msg("type=sock_common_or_null expected=sock")
__naked void to_sock_common_without_check(void)
BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
" /* reference in r0 may be NULL */ \
call %[bpf_sk_release]; \
: __imm(bpf_sk_release),
__imm(bpf_skc_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: release reference")
__success __retval(0)
__naked void reference_tracking_release_reference(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: release reference to sock_common")
__success __retval(0)
__naked void release_reference_to_sock_common(void)
BPF_SK_LOOKUP(bpf_skc_lookup_tcp)
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_release),
__imm(bpf_skc_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: release reference 2")
__success __retval(0)
__naked void reference_tracking_release_reference_2(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 != 0 goto l0_%=; \
l0_%=: call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: release reference twice")
__failure __msg("type=scalar expected=sock")
__naked void reference_tracking_release_reference_twice(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: release reference twice inside branch")
__failure __msg("type=scalar expected=sock")
__naked void release_reference_twice_inside_branch(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; /* goto end */ \
call %[bpf_sk_release]; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: alloc, check, free in one subbranch")
__failure __msg("Unreleased reference")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void check_free_in_one_subbranch(void)
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
/* if (offsetof(skb, mark) > data_len) exit; */ \
if r0 <= r3 goto l0_%=; \
l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r6 == 0 goto l1_%=; /* mark == 0? */\
/* Leak reference in R0 */ \
l1_%=: if r0 == 0 goto l2_%=; /* sk NULL? */ \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: alloc, check, free in both subbranches")
__success __retval(0) __flag(BPF_F_ANY_ALIGNMENT)
__naked void check_free_in_both_subbranches(void)
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
/* if (offsetof(skb, mark) > data_len) exit; */ \
if r0 <= r3 goto l0_%=; \
l0_%=: r6 = *(u32*)(r2 + %[__sk_buff_mark]); \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r6 == 0 goto l1_%=; /* mark == 0? */\
if r0 == 0 goto l2_%=; /* sk NULL? */ \
call %[bpf_sk_release]; \
l1_%=: if r0 == 0 goto l3_%=; /* sk NULL? */ \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking in call: free reference in subprog")
__success __retval(0)
__naked void call_free_reference_in_subprog(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; /* unchecked reference */ \
call call_free_reference_in_subprog__1; \
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
static __naked __noinline __attribute__((used))
void call_free_reference_in_subprog__1(void)
if r2 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_release)
__description("reference tracking in call: free reference in subprog and outside")
__failure __msg("type=scalar expected=sock")
__naked void reference_in_subprog_and_outside(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r1 = r0; /* unchecked reference */ \
call reference_in_subprog_and_outside__1; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
static __naked __noinline __attribute__((used))
void reference_in_subprog_and_outside__1(void)
if r2 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_release)
__description("reference tracking in call: alloc & leak reference in subprog")
__failure __msg("Unreleased reference")
__naked void alloc_leak_reference_in_subprog(void)
call alloc_leak_reference_in_subprog__1; \
" ::: __clobber_all);
static __naked __noinline __attribute__((used))
void alloc_leak_reference_in_subprog__1(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* spill unchecked sk_ptr into stack of caller */\
*(u64*)(r6 + 0) = r0; \
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking in call: alloc in subprog, release outside")
__success __retval(POINTER_VALUE)
__naked void alloc_in_subprog_release_outside(void)
call alloc_in_subprog_release_outside__1; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_release)
static __naked __noinline __attribute__((used))
void alloc_in_subprog_release_outside__1(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" exit; /* return sk */ \
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking in call: sk_ptr leak into caller stack")
__failure __msg("Unreleased reference")
__naked void ptr_leak_into_caller_stack(void)
call ptr_leak_into_caller_stack__1; \
" ::: __clobber_all);
static __naked __noinline __attribute__((used))
void ptr_leak_into_caller_stack__1(void)
*(u64*)(r5 + 0) = r4; \
call ptr_leak_into_caller_stack__2; \
/* spill unchecked sk_ptr into stack of caller */\
r4 = *(u64*)(r5 + 0); \
*(u64*)(r4 + 0) = r0; \
" ::: __clobber_all);
static __naked __noinline __attribute__((used))
void ptr_leak_into_caller_stack__2(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking in call: sk_ptr spill into caller stack")
__success __retval(0)
__naked void ptr_spill_into_caller_stack(void)
call ptr_spill_into_caller_stack__1; \
" ::: __clobber_all);
static __naked __noinline __attribute__((used))
void ptr_spill_into_caller_stack__1(void)
*(u64*)(r5 + 0) = r4; \
call ptr_spill_into_caller_stack__2; \
/* spill unchecked sk_ptr into stack of caller */\
r4 = *(u64*)(r5 + 0); \
*(u64*)(r4 + 0) = r0; \
if r0 == 0 goto l0_%=; \
/* now the sk_ptr is verified, free the reference */\
r1 = *(u64*)(r4 + 0); \
call %[bpf_sk_release]; \
: __imm(bpf_sk_release)
static __naked __noinline __attribute__((used))
void ptr_spill_into_caller_stack__2(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
: __imm(bpf_sk_lookup_tcp),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: allow LD_ABS")
__success __retval(0)
__naked void reference_tracking_allow_ld_abs(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: r0 = *(u8*)skb[0]; \
r0 = *(u16*)skb[0]; \
r0 = *(u32*)skb[0]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: forbid LD_ABS while holding reference")
__failure __msg("BPF_LD_[ABS|IND] would lead to reference leak")
__naked void ld_abs_while_holding_reference(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" r0 = *(u8*)skb[0]; \
r0 = *(u16*)skb[0]; \
r0 = *(u32*)skb[0]; \
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: allow LD_IND")
__success __retval(1)
__naked void reference_tracking_allow_ld_ind(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
__imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
__description("reference tracking: forbid LD_IND while holding reference")
__failure __msg("BPF_LD_[ABS|IND] would lead to reference leak")
__naked void ld_ind_while_holding_reference(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r1 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple)),
__imm_insn(ld_ind, BPF_LD_IND(BPF_W, BPF_REG_7, -0x200000))
__description("reference tracking: check reference or tail call")
__success __retval(0)
__naked void check_reference_or_tail_call(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* if (sk) bpf_sk_release() */ \
if r1 != 0 goto l0_%=; \
/* bpf_tail_call() */ \
r2 = %[map_prog1_tc] ll; \
call %[bpf_tail_call]; \
l0_%=: call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tail_call),
__imm_addr(map_prog1_tc),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: release reference then tail call")
__success __retval(0)
__naked void release_reference_then_tail_call(void)
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* if (sk) bpf_sk_release() */ \
if r1 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
l0_%=: /* bpf_tail_call() */ \
r2 = %[map_prog1_tc] ll; \
call %[bpf_tail_call]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tail_call),
__imm_addr(map_prog1_tc),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: leak possible reference over tail call")
__failure __msg("tail_call would lead to reference leak")
__naked void possible_reference_over_tail_call(void)
/* Look up socket and store in REG_6 */ \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" /* bpf_tail_call() */ \
r2 = %[map_prog1_tc] ll; \
call %[bpf_tail_call]; \
/* if (sk) bpf_sk_release() */ \
if r1 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tail_call),
__imm_addr(map_prog1_tc),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: leak checked reference over tail call")
__failure __msg("tail_call would lead to reference leak")
__naked void checked_reference_over_tail_call(void)
/* Look up socket and store in REG_6 */ \
" BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
/* if (!sk) goto end */ \
if r0 == 0 goto l0_%=; \
/* bpf_tail_call() */ \
r2 = %[map_prog1_tc] ll; \
call %[bpf_tail_call]; \
l0_%=: call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tail_call),
__imm_addr(map_prog1_tc),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: mangle and release sock_or_null")
__failure __msg("R1 pointer arithmetic on sock_or_null prohibited")
__naked void and_release_sock_or_null(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: mangle and release sock")
__failure __msg("R1 pointer arithmetic on sock prohibited")
__naked void tracking_mangle_and_release_sock(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: access member")
__success __retval(0)
__naked void reference_tracking_access_member(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
r2 = *(u32*)(r0 + 4); \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: write to member")
__failure __msg("cannot write into sock")
__naked void reference_tracking_write_to_member(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
*(u32*)(r1 + %[bpf_sock_mark]) = r2; \
l0_%=: call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_mark, offsetof(struct bpf_sock, mark)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: invalid 64-bit access of member")
__failure __msg("invalid sock access off=0 size=8")
__naked void _64_bit_access_of_member(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
r2 = *(u64*)(r0 + 0); \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: access after release")
__failure __msg("!read_ok")
__naked void reference_tracking_access_after_release(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r0 == 0 goto l0_%=; \
call %[bpf_sk_release]; \
r2 = *(u32*)(r1 + 0); \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: direct access for lookup")
__success __retval(0)
__naked void tracking_direct_access_for_lookup(void)
/* Check that the packet is at least 64B long */\
r2 = *(u32*)(r1 + %[__sk_buff_data]); \
r3 = *(u32*)(r1 + %[__sk_buff_data_end]); \
if r0 > r3 goto l0_%=; \
/* sk = sk_lookup_tcp(ctx, skb->data, ...) */ \
r3 = %[sizeof_bpf_sock_tuple]; \
call %[bpf_sk_lookup_tcp]; \
if r0 == 0 goto l0_%=; \
r2 = *(u32*)(r0 + 4); \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(__sk_buff_data, offsetof(struct __sk_buff, data)),
__imm_const(__sk_buff_data_end, offsetof(struct __sk_buff, data_end)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: use ptr from bpf_tcp_sock() after release")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bpf_tcp_sock_after_release(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
call %[bpf_sk_release]; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r7 + %[bpf_tcp_sock_snd_cwnd]); \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tcp_sock),
__imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: use ptr from bpf_sk_fullsock() after release")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void bpf_sk_fullsock_after_release(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
call %[bpf_sk_fullsock]; \
if r0 != 0 goto l1_%=; \
call %[bpf_sk_release]; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r7 + %[bpf_sock_type]); \
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: use ptr from bpf_sk_fullsock(tp) after release")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void sk_fullsock_tp_after_release(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
call %[bpf_sk_release]; \
call %[bpf_sk_fullsock]; \
call %[bpf_sk_release]; \
if r6 != 0 goto l2_%=; \
l2_%=: r0 = *(u32*)(r6 + %[bpf_sock_type]); \
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tcp_sock),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: use sk after bpf_sk_release(tp)")
__failure __msg("invalid mem access")
__flag(BPF_F_ANY_ALIGNMENT)
__naked void after_bpf_sk_release_tp(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
call %[bpf_tcp_sock]; \
if r0 != 0 goto l1_%=; \
call %[bpf_sk_release]; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r6 + %[bpf_sock_type]); \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tcp_sock),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: use ptr from bpf_get_listener_sock() after bpf_sk_release(sk)")
__success __retval(0)
__naked void after_bpf_sk_release_sk(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
call %[bpf_get_listener_sock]; \
if r0 != 0 goto l1_%=; \
call %[bpf_sk_release]; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r6 + %[bpf_sock_src_port]); \
: __imm(bpf_get_listener_sock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_src_port, offsetof(struct bpf_sock, src_port)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: bpf_sk_release(listen_sk)")
__failure __msg("R1 must be referenced when passed to release function")
__naked void bpf_sk_release_listen_sk(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
call %[bpf_get_listener_sock]; \
if r0 != 0 goto l1_%=; \
call %[bpf_sk_release]; \
call %[bpf_sk_release]; \
r0 = *(u32*)(r6 + %[bpf_sock_type]); \
call %[bpf_sk_release]; \
: __imm(bpf_get_listener_sock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(bpf_sock_type, offsetof(struct bpf_sock, type)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
/* !bpf_sk_fullsock(sk) is checked but !bpf_tcp_sock(sk) is not checked */
__description("reference tracking: tp->snd_cwnd after bpf_sk_fullsock(sk) and bpf_tcp_sock(sk)")
__failure __msg("invalid mem access")
__naked void and_bpf_tcp_sock_sk(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
call %[bpf_sk_fullsock]; \
call %[bpf_tcp_sock]; \
if r7 != 0 goto l1_%=; \
call %[bpf_sk_release]; \
l1_%=: r0 = *(u32*)(r8 + %[bpf_tcp_sock_snd_cwnd]); \
call %[bpf_sk_release]; \
: __imm(bpf_sk_fullsock),
__imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_tcp_sock),
__imm_const(bpf_tcp_sock_snd_cwnd, offsetof(struct bpf_tcp_sock, snd_cwnd)),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: branch tracking valid pointer null comparison")
__success __retval(0)
__naked void tracking_valid_pointer_null_comparison(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r6 != 0 goto l0_%=; \
l0_%=: if r6 == 0 goto l1_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: branch tracking valid pointer value comparison")
__failure __msg("Unreleased reference")
__naked void tracking_valid_pointer_value_comparison(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
if r6 == 0 goto l0_%=; \
if r6 == 1234 goto l0_%=; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: bpf_sk_release(btf_tcp_sock)")
__naked void sk_release_btf_tcp_sock(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
call %[bpf_skc_to_tcp_sock]; \
if r0 != 0 goto l1_%=; \
call %[bpf_sk_release]; \
call %[bpf_sk_release]; \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_skc_to_tcp_sock),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: use ptr from bpf_skc_to_tcp_sock() after release")
__failure __msg("invalid mem access")
__naked void to_tcp_sock_after_release(void)
BPF_SK_LOOKUP(bpf_sk_lookup_tcp)
" if r0 != 0 goto l0_%=; \
call %[bpf_skc_to_tcp_sock]; \
if r0 != 0 goto l1_%=; \
call %[bpf_sk_release]; \
call %[bpf_sk_release]; \
r0 = *(u8*)(r7 + 0); \
: __imm(bpf_sk_lookup_tcp),
__imm(bpf_sk_release),
__imm(bpf_skc_to_tcp_sock),
__imm_const(sizeof_bpf_sock_tuple, sizeof(struct bpf_sock_tuple))
__description("reference tracking: try to leak released ptr reg")
__success __failure_unpriv __msg_unpriv("R8 !read_ok")
__naked void to_leak_released_ptr_reg(void)
*(u32*)(r10 - 4) = r0; \
r1 = %[map_array_48b] ll; \
call %[bpf_map_lookup_elem]; \
if r0 != 0 goto l0_%=; \
r1 = %[map_ringbuf] ll; \
call %[bpf_ringbuf_reserve]; \
if r0 != 0 goto l1_%=; \
call %[bpf_ringbuf_discard]; \
*(u64*)(r9 + 0) = r8; \
: __imm(bpf_map_lookup_elem),
__imm(bpf_ringbuf_discard),
__imm(bpf_ringbuf_reserve),
__imm_addr(map_array_48b),
__imm_addr(map_ringbuf)
char _license[] SEC("license") = "GPL";