1 // SPDX-License-Identifier: GPL-2.0
2 /* Converted from tools/testing/selftests/bpf/verifier/raw_stack.c */
5 #include <bpf/bpf_helpers.h>
9 __description("raw_stack: no skb_load_bytes")
11 __failure_unpriv __msg_unpriv("invalid read from stack R6 off=-8 size=8")
12 __naked void stack_no_skb_load_bytes(void)
20 /* Call to skb_load_bytes() omitted. */ \
21 r0 = *(u64*)(r6 + 0); \
27 __description("raw_stack: skb_load_bytes, negative len")
28 __failure __msg("R4 min value is negative")
29 __naked void skb_load_bytes_negative_len(void)
37 call %[bpf_skb_load_bytes]; \
38 r0 = *(u64*)(r6 + 0); \
41 : __imm(bpf_skb_load_bytes)
46 __description("raw_stack: skb_load_bytes, negative len 2")
47 __failure __msg("R4 min value is negative")
48 __naked void load_bytes_negative_len_2(void)
56 call %[bpf_skb_load_bytes]; \
57 r0 = *(u64*)(r6 + 0); \
60 : __imm(bpf_skb_load_bytes),
61 __imm_const(__imm_0, ~0)
66 __description("raw_stack: skb_load_bytes, zero len")
67 __failure __msg("R4 invalid zero-sized read: u64=[0,0]")
68 __naked void skb_load_bytes_zero_len(void)
76 call %[bpf_skb_load_bytes]; \
77 r0 = *(u64*)(r6 + 0); \
80 : __imm(bpf_skb_load_bytes)
85 __description("raw_stack: skb_load_bytes, no init")
87 __naked void skb_load_bytes_no_init(void)
95 call %[bpf_skb_load_bytes]; \
96 r0 = *(u64*)(r6 + 0); \
99 : __imm(bpf_skb_load_bytes)
104 __description("raw_stack: skb_load_bytes, init")
105 __success __retval(0)
106 __naked void stack_skb_load_bytes_init(void)
113 *(u64*)(r6 + 0) = r3; \
116 call %[bpf_skb_load_bytes]; \
117 r0 = *(u64*)(r6 + 0); \
120 : __imm(bpf_skb_load_bytes)
125 __description("raw_stack: skb_load_bytes, spilled regs around bounds")
126 __success __retval(0)
127 __naked void bytes_spilled_regs_around_bounds(void)
133 *(u64*)(r6 - 8) = r1; \
134 *(u64*)(r6 + 8) = r1; \
137 call %[bpf_skb_load_bytes]; \
138 r0 = *(u64*)(r6 - 8); \
139 r2 = *(u64*)(r6 + 8); \
140 r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
141 r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
145 : __imm(bpf_skb_load_bytes),
146 __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
147 __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
152 __description("raw_stack: skb_load_bytes, spilled regs corruption")
153 __failure __msg("R0 invalid mem access 'scalar'")
154 __flag(BPF_F_ANY_ALIGNMENT)
155 __naked void load_bytes_spilled_regs_corruption(void)
161 *(u64*)(r6 + 0) = r1; \
164 call %[bpf_skb_load_bytes]; \
165 r0 = *(u64*)(r6 + 0); \
166 r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
169 : __imm(bpf_skb_load_bytes),
170 __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark))
175 __description("raw_stack: skb_load_bytes, spilled regs corruption 2")
176 __failure __msg("R3 invalid mem access 'scalar'")
177 __flag(BPF_F_ANY_ALIGNMENT)
178 __naked void bytes_spilled_regs_corruption_2(void)
184 *(u64*)(r6 - 8) = r1; \
185 *(u64*)(r6 + 0) = r1; \
186 *(u64*)(r6 + 8) = r1; \
189 call %[bpf_skb_load_bytes]; \
190 r0 = *(u64*)(r6 - 8); \
191 r2 = *(u64*)(r6 + 8); \
192 r3 = *(u64*)(r6 + 0); \
193 r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
194 r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
196 r3 = *(u32*)(r3 + %[__sk_buff_pkt_type]); \
200 : __imm(bpf_skb_load_bytes),
201 __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
202 __imm_const(__sk_buff_pkt_type, offsetof(struct __sk_buff, pkt_type)),
203 __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
208 __description("raw_stack: skb_load_bytes, spilled regs + data")
209 __success __retval(0)
210 __naked void load_bytes_spilled_regs_data(void)
216 *(u64*)(r6 - 8) = r1; \
217 *(u64*)(r6 + 0) = r1; \
218 *(u64*)(r6 + 8) = r1; \
221 call %[bpf_skb_load_bytes]; \
222 r0 = *(u64*)(r6 - 8); \
223 r2 = *(u64*)(r6 + 8); \
224 r3 = *(u64*)(r6 + 0); \
225 r0 = *(u32*)(r0 + %[__sk_buff_mark]); \
226 r2 = *(u32*)(r2 + %[__sk_buff_priority]); \
231 : __imm(bpf_skb_load_bytes),
232 __imm_const(__sk_buff_mark, offsetof(struct __sk_buff, mark)),
233 __imm_const(__sk_buff_priority, offsetof(struct __sk_buff, priority))
238 __description("raw_stack: skb_load_bytes, invalid access 1")
239 __failure __msg("invalid write to stack R3 off=-513 size=8")
240 __naked void load_bytes_invalid_access_1(void)
248 call %[bpf_skb_load_bytes]; \
249 r0 = *(u64*)(r6 + 0); \
252 : __imm(bpf_skb_load_bytes)
257 __description("raw_stack: skb_load_bytes, invalid access 2")
258 __failure __msg("invalid write to stack R3 off=-1 size=8")
259 __naked void load_bytes_invalid_access_2(void)
267 call %[bpf_skb_load_bytes]; \
268 r0 = *(u64*)(r6 + 0); \
271 : __imm(bpf_skb_load_bytes)
276 __description("raw_stack: skb_load_bytes, invalid access 3")
277 __failure __msg("R4 min value is negative")
278 __naked void load_bytes_invalid_access_3(void)
286 call %[bpf_skb_load_bytes]; \
287 r0 = *(u64*)(r6 + 0); \
290 : __imm(bpf_skb_load_bytes)
295 __description("raw_stack: skb_load_bytes, invalid access 4")
297 __msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
298 __naked void load_bytes_invalid_access_4(void)
306 call %[bpf_skb_load_bytes]; \
307 r0 = *(u64*)(r6 + 0); \
310 : __imm(bpf_skb_load_bytes)
315 __description("raw_stack: skb_load_bytes, invalid access 5")
317 __msg("R4 unbounded memory access, use 'var &= const' or 'if (var < const)'")
318 __naked void load_bytes_invalid_access_5(void)
326 call %[bpf_skb_load_bytes]; \
327 r0 = *(u64*)(r6 + 0); \
330 : __imm(bpf_skb_load_bytes)
335 __description("raw_stack: skb_load_bytes, invalid access 6")
336 __failure __msg("invalid zero-sized read")
337 __naked void load_bytes_invalid_access_6(void)
345 call %[bpf_skb_load_bytes]; \
346 r0 = *(u64*)(r6 + 0); \
349 : __imm(bpf_skb_load_bytes)
354 __description("raw_stack: skb_load_bytes, large access")
355 __success __retval(0)
356 __naked void skb_load_bytes_large_access(void)
364 call %[bpf_skb_load_bytes]; \
365 r0 = *(u64*)(r6 + 0); \
368 : __imm(bpf_skb_load_bytes)
/* BPF programs must declare a GPL-compatible license; the verifier rejects
 * use of GPL-only helpers (such as bpf_skb_load_bytes) without it.
 */
char _license[] SEC("license") = "GPL";