]>
Commit | Line | Data |
---|---|---|
25763b3c | 1 | // SPDX-License-Identifier: GPL-2.0-only |
1cf1cae9 | 2 | /* Copyright (c) 2017 Facebook |
1cf1cae9 AS |
3 | */ |
4 | #include <linux/bpf.h> | |
7bd1590d | 5 | #include <linux/btf_ids.h> |
1cf1cae9 AS |
6 | #include <linux/slab.h> |
7 | #include <linux/vmalloc.h> | |
8 | #include <linux/etherdevice.h> | |
9 | #include <linux/filter.h> | |
10 | #include <linux/sched/signal.h> | |
6ac99e8f | 11 | #include <net/bpf_sk_storage.h> |
2cb494a3 SL |
12 | #include <net/sock.h> |
13 | #include <net/tcp.h> | |
7c32e8f8 | 14 | #include <net/net_namespace.h> |
3d08b6f2 | 15 | #include <linux/error-injection.h> |
1b4d60ec | 16 | #include <linux/smp.h> |
7c32e8f8 | 17 | #include <linux/sock_diag.h> |
1cf1cae9 | 18 | |
e950e843 MM |
19 | #define CREATE_TRACE_POINTS |
20 | #include <trace/events/bpf_test_run.h> | |
21 | ||
607b9cc9 LB |
/* Per-invocation state for timing repeated test runs of a BPF program.
 * @mode:       whether the run loop disables preemption (NO_PREEMPT) or
 *              only task migration (NO_MIGRATE) around the measured section
 * @i:          number of iterations completed so far
 * @time_start: ktime_get_ns() timestamp of the current measured section
 * @time_spent: accumulated run time in ns (excludes reschedule gaps)
 */
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};
27 | ||
/* Enter a timed test section: take the RCU read lock, pin execution
 * according to t->mode, then start the clock.  The timestamp is taken
 * last so lock/pin overhead is not charged to the program under test.
 */
static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}
39 | ||
/* Leave a timed test section: clear the start timestamp and undo
 * bpf_test_timer_enter() in reverse order (unpin, then drop RCU).
 */
static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}
51 | ||
/* Decide whether the test loop should run another iteration.
 *
 * Returns true while fewer than @repeat iterations have run.  When done,
 * stores the mean per-iteration time in ns (capped at U32_MAX) into
 * @duration, sets *@err to 0, resets the counter and returns false.
 * A pending signal aborts early with *@err = -EINTR.  If a reschedule
 * is needed mid-run, the timer section is dropped and re-entered so the
 * time spent off-CPU is excluded from the measurement.
 */
static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}
86 | ||
/* Run @prog over @ctx @repeat times (at least once) under the test timer.
 * @xdp selects the XDP run helper over the generic BPF_PROG_RUN.
 * The last iteration's program return value is stored in *@retval and the
 * mean per-run duration in *@time.  Temporary cgroup storage is allocated
 * for every storage type beforehand, made current around each run via
 * bpf_cgroup_storage_set()/unset(), and freed afterwards.
 * Returns 0 on success or a negative errno.
 */
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(storage[stype])) {
			/* NULL-out the failed slot, then free everything
			 * allocated so far (freeing NULL slots is fine).
			 */
			storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	do {
		ret = bpf_cgroup_storage_set(storage);
		if (ret)
			break;

		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = BPF_PROG_RUN(prog, ctx);

		bpf_cgroup_storage_unset();
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(storage[stype]);

	return ret;
}
128 | ||
78e52272 DM |
/* Copy test results back to userspace: output data (if a buffer was
 * supplied), the actual data size, the program's return value and the
 * measured duration.  Returns 0 on success, -ENOSPC when the output was
 * truncated to the user's size hint, -EFAULT on any copy failure.
 */
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	/* Report the untruncated size so the caller can detect clamping. */
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	/* Preserve -ENOSPC from the clamp above; otherwise success. */
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
160 | ||
faeb2dce AS |
161 | /* Integer types of various sizes and pointer combinations cover variety of |
162 | * architecture dependent calling conventions. 7+ can be supported in the | |
163 | * future. | |
164 | */ | |
e9ff9d52 JPM |
165 | __diag_push(); |
166 | __diag_ignore(GCC, 8, "-Wmissing-prototypes", | |
167 | "Global functions as their definitions will be in vmlinux BTF"); | |
faeb2dce AS |
168 | int noinline bpf_fentry_test1(int a) |
169 | { | |
170 | return a + 1; | |
171 | } | |
172 | ||
173 | int noinline bpf_fentry_test2(int a, u64 b) | |
174 | { | |
175 | return a + b; | |
176 | } | |
177 | ||
178 | int noinline bpf_fentry_test3(char a, int b, u64 c) | |
179 | { | |
180 | return a + b + c; | |
181 | } | |
182 | ||
183 | int noinline bpf_fentry_test4(void *a, char b, int c, u64 d) | |
184 | { | |
185 | return (long)a + b + c + d; | |
186 | } | |
187 | ||
188 | int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e) | |
189 | { | |
190 | return a + (long)b + c + d + e; | |
191 | } | |
192 | ||
193 | int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f) | |
194 | { | |
195 | return a + (long)b + c + d + (long)e + f; | |
196 | } | |
197 | ||
d923021c YS |
/* Self-referential struct used to exercise struct-pointer arguments in
 * bpf_fentry_test7/test8.
 */
struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};
201 | ||
202 | int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg) | |
203 | { | |
204 | return (long)arg; | |
205 | } | |
206 | ||
207 | int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg) | |
208 | { | |
209 | return (long)arg->a; | |
210 | } | |
211 | ||
3d08b6f2 KS |
212 | int noinline bpf_modify_return_test(int a, int *b) |
213 | { | |
214 | *b += 1; | |
215 | return a + *b; | |
216 | } | |
7bd1590d MKL |
217 | |
218 | u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d) | |
219 | { | |
220 | return a + b + c + d; | |
221 | } | |
222 | ||
223 | int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b) | |
224 | { | |
225 | return a + b; | |
226 | } | |
227 | ||
/* kfunc test target: identity function, returns the socket unchanged
 * (exercises pointer return values from kfuncs).
 */
struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}
232 | ||
e9ff9d52 | 233 | __diag_pop(); |
3d08b6f2 KS |
234 | |
235 | ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO); | |
236 | ||
7bd1590d MKL |
/* BTF ID set of the kernel functions that test programs may call. */
BTF_SET_START(test_sk_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_SET_END(test_sk_kfunc_ids)
242 | ||
/* Verifier hook: allow a kfunc call iff its BTF ID is in the test set. */
bool bpf_prog_test_check_kfunc_call(u32 kfunc_id)
{
	return btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id);
}
247 | ||
1cf1cae9 AS |
/* Allocate a zeroed packet buffer of @size plus @headroom/@tailroom and
 * fill the data area from the user's data_in pointer.
 * @size is the kernel-chosen buffer size; the user's data_size_in may be
 * smaller but never larger.  Returns the buffer (caller frees with
 * kfree()) or ERR_PTR: -EINVAL for out-of-range sizes, -EMSGSIZE when
 * user data exceeds @size, -ENOMEM/-EFAULT on alloc/copy failure.
 */
static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}
272 | ||
da00d2f1 KS |
/* BPF_PROG_TEST_RUN handler for tracing programs (fentry/fexit/fmod_ret).
 * Instead of running the program on synthetic data, this calls the
 * bpf_fentry_test*()/bpf_modify_return_test() targets the program is
 * attached to and checks their plain-C results.  The user-visible retval
 * packs fmod_ret's side effect in the high 16 bits and its return value
 * in the low 16.  Returns 0 on success, -EINVAL for unsupported
 * attributes, -EFAULT on self-check or copyout failure.
 */
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		/* Exercise every test target; expected values are the
		 * plain sums of the arguments passed below.
		 */
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		/* b unchanged means the attached program skipped the body. */
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}
316 | ||
1b4d60ec SL |
/* Argument bundle for __bpf_prog_test_run_raw_tp(), needed because the
 * runner may execute on another CPU via smp_call_function_single().
 */
struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};
322 | ||
/* Run the raw-tracepoint test program once under the RCU read lock and
 * record its return value.  @data is a struct bpf_raw_tp_test_run_info;
 * the void* signature matches smp_call_function_single()'s callback.
 */
static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = BPF_PROG_RUN(info->prog, info->ctx);
	rcu_read_unlock();
}
332 | ||
/* BPF_PROG_TEST_RUN handler for raw tracepoint programs.  Copies a
 * user-supplied context (an array of up to MAX_BPF_FUNC_ARGS u64s) and
 * runs the program exactly once, optionally pinned to the CPU given in
 * kattr->test.cpu when BPF_F_TEST_RUN_ON_CPU is set.  Returns 0 and the
 * program's retval on success; -EINVAL for unsupported attributes,
 * -ENXIO for an offline/out-of-range CPU, -ENOMEM/-EFAULT otherwise.
 */
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	/* A cpu number is only meaningful with BPF_F_TEST_RUN_ON_CPU. */
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!info.ctx)
			return -ENOMEM;
		if (copy_from_user(info.ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		/* No pinning requested, or already on the target CPU. */
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

out:
	kfree(info.ctx);
	return err;
}
395 | ||
b0b9395d SF |
/* Allocate and populate a kernel copy of the user-supplied test context.
 * Returns NULL when the user passed neither ctx_in nor ctx_out (no
 * context requested), a kzalloc'd buffer of @max_size otherwise, or an
 * ERR_PTR on failure.  Input larger than @max_size is accepted only if
 * the tail beyond @max_size is all zero (forward-compat convention
 * enforced by bpf_check_uarg_tail_zero()).  Caller frees with kfree().
 */
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}
426 | ||
/* Copy the (possibly modified) kernel context back to the user's ctx_out
 * buffer.  A missing buffer or NULL data is a silent no-op (returns 0).
 * Returns -ENOSPC when the user's ctx_size_out was smaller than @size
 * (truncated copy still performed), -EFAULT on copy failure.
 */
static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	/* Report the full size so the caller can detect truncation. */
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}
452 | ||
/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if every byte in the buf in the range
 * [from, to) is zero, i.e. the caller did not set any field in that
 * range.  (memchr_inv() returns NULL when no byte differs from 0.)
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}
466 | ||
/* Apply the user-visible __sk_buff fields to the real skb under test.
 * Only a whitelisted set of fields (mark, priority, ifindex, cb, tstamp,
 * wire_len, gso_segs, gso_size) may be non-zero; any other non-zero byte
 * in the struct means the user set an unsupported field -> -EINVAL.
 * A NULL @__skb (no context supplied) is a no-op.  Returns 0 or -EINVAL.
 */
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, priority),
			   offsetof(struct __sk_buff, ifindex)))
		return -EINVAL;

	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		/* No override: wire length is the actual skb length. */
		cb->pkt_len = skb->len;
	} else {
		/* wire_len may only pad beyond the real data, up to
		 * GSO_MAX_SIZE.
		 */
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;

	return 0;
}
537 | ||
/* Reflect the skb's state back into the user-visible __sk_buff after the
 * test run, so the caller can observe fields the program modified.
 * A NULL @__skb (no context requested) is a no-op.
 */
static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
}
553 | ||
1cf1cae9 AS |
/* BPF_PROG_TEST_RUN handler for skb-based program types (SCHED_CLS/ACT,
 * LWT_*).  Builds a real sk_buff around the user's packet data, attaches
 * a throwaway sock, applies the optional __sk_buff context, runs the
 * program @repeat times, then copies the resulting packet and context
 * back to userspace.  Returns 0 or a negative errno.
 */
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		/* TC programs see an L2 frame and get direct packet access. */
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	/* Minimal throwaway socket so programs can use sk-based helpers. */
	sk = kzalloc(sizeof(struct sock), GFP_USER);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_net_set(sk, net);
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		kfree(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	/* ifindex > 1 in the context selects a device other than loopback. */
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	/* Populate the fake socket's addresses from the packet headers so
	 * sk-based lookups inside the program see plausible values.
	 */
	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		/* Re-add the L2 header (zeroed) so the output data always
		 * starts at the MAC header; grow headroom if the program
		 * consumed it.
		 */
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	/* Drop the reference taken by dev_get_by_index() above. */
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	bpf_sk_storage_free(sk);
	kfree(sk);
	kfree(ctx);
	return ret;
}
689 | ||
/* BPF_PROG_TEST_RUN handler for XDP programs.  Builds an xdp_buff over a
 * page-sized buffer (standard XDP headroom + skb_shared_info tailroom),
 * binds it to loopback's rx queue 0, and runs the program @repeat times.
 * Devmap/cpumap-attached programs and user contexts are not supported.
 * Returns 0 or a negative errno.
 */
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	u32 max_data_sz;
	void *data;
	int ret;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;
	if (kattr->test.ctx_in || kattr->test.ctx_out)
		return -EINVAL;

	/* XDP have extra tailroom as (most) drivers use full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data))
		return PTR_ERR(data);

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
		      &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);

	/* Announce the program so e.g. bpf_xdp_adjust_* helpers behave as
	 * if it were attached; undone on the way out.
	 */
	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	if (ret)
		goto out;
	/* Program may have grown/shrunk the packet; report the new size. */
	if (xdp.data != data + headroom || xdp.data_end != xdp.data + size)
		size = xdp.data_end - xdp.data;
	ret = bpf_test_finish(kattr, uattr, xdp.data, size, retval, duration);
out:
	bpf_prog_change_xdp(prog, NULL);
	kfree(data);
	return ret;
}
b7a1848e | 734 | |
b2ca4e1c SF |
/* Validate a user-supplied bpf_flow_keys context: only the flags field
 * may be non-zero; everything else must be zero-initialized.
 * Returns 0 when valid, -EINVAL otherwise.
 */
static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}
749 | ||
b7a1848e SF |
/* BPF_PROG_TEST_RUN handler for flow-dissector programs.  Dissects the
 * user's packet (which must start with an Ethernet header) @repeat times
 * under the NO_PREEMPT test timer and returns the resulting flow_keys as
 * the output data.  An optional bpf_flow_keys context may carry dissector
 * flags.  Returns 0 or a negative errno.
 */
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	/* The dissected flow_keys are the test's output data. */
	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}
7c32e8f8 LB |
821 | |
/* BPF_PROG_TEST_RUN handler for sk_lookup programs.  The mandatory
 * bpf_sk_lookup context supplies the lookup tuple (family, protocol,
 * addresses, ports); no packet data is accepted.  The program runs
 * @repeat times via a one-element prog array, and on success the cookie
 * of the socket it selected (if any) is written back into the context.
 * Returns 0 or a negative errno (-ERANGE for out-of-range ports,
 * -EAFNOSUPPORT for unknown families, -EOPNOTSUPP if the selected
 * socket would require reuseport handling).
 */
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	/* The context is mandatory for this program type. */
	if (!user_ctx)
		return -EINVAL;

	/* sk is output-only; everything after local_port must be unset. */
	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = (__force __be16)user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, BPF_PROG_RUN);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}
79a7f8bd AS |
924 | |
/* BPF_PROG_TEST_RUN handler for syscall programs.  Copies the user
 * context in, runs the program exactly once pinned to the current CPU,
 * then copies the retval and the (possibly modified) context back into
 * the user's ctx_in buffer.  Returns 0 or a negative errno.
 */
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, or repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = kzalloc(ctx_size_in, GFP_USER);
		if (!ctx)
			return -ENOMEM;
		if (copy_from_user(ctx, ctx_in, ctx_size_in)) {
			err = -EFAULT;
			goto out;
		}
	}
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	/* Context is written back in place (no separate ctx_out). */
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}