// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

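/* This file backs the BPF_PROG_TEST_RUN command: it runs a BPF program
 * repeatedly against caller-supplied packet/context data and reports the
 * return value and average duration through the bpf() syscall. A minimal
 * userspace sketch (prog_fd and pkt[] are assumed to exist):
 *
 *	union bpf_attr attr = {};
 *
 *	attr.test.prog_fd = prog_fd;
 *	attr.test.data_in = (unsigned long)pkt;
 *	attr.test.data_size_in = sizeof(pkt);
 *	attr.test.repeat = 1000;
 *	if (!syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)))
 *		;	// attr.test.retval and attr.test.duration hold the results
 */

/* Timing helper for repeated test runs. bpf_test_timer_enter() takes the RCU
 * read lock and disables preemption (NO_PREEMPT) or migration (NO_MIGRATE)
 * before recording the start timestamp; bpf_test_timer_leave() undoes this.
 */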
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

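/* Returns true while another iteration should run. Accounts 'iterations'
 * completed runs; once 'repeat' runs are reached it reports the average run
 * time in nanoseconds through 'duration' and stops. It aborts with -EINTR on
 * a pending signal and briefly leaves the timed section to reschedule when
 * needed.
 */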
static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	struct xdp_frame frm;
	u8 data[];
};

struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	u32 batch_size;
	u32 frame_cnt;
};

#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

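/* page_pool init callback: seed each freshly allocated page with a copy of
 * the original context and packet data, and keep a pristine copy in orig_ctx
 * so reset_ctx() can cheaply undo any changes the program makes.
 */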
static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = &head->frm;
	data = &head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

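/* Allocate the frame/skb arrays and a page_pool whose pages are
 * pre-initialised by xdp_test_run_init_page(), register the pool as an XDP
 * memory model, and point a fake RXQ at it.
 */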
static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct xdp_mem_info mem = {};
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	page_pool_destroy(xdp->pp);
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
	       head->orig_ctx.data_meta != head->ctx.data_meta ||
	       head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, &head->frm);
}

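/* Turn the XDP frames the program PASSed into skbs (bulk-allocating the skb
 * heads) and inject them into the local network stack.
 */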
static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(skbuff_head_cache, gfp, nframes, (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

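/* Run one batch of the live XDP test: pull pre-initialised pages from the
 * page_pool, run the program on each, and act on the verdict. XDP_TX is
 * emulated as a redirect back to the same interface, XDP_PASS frames are
 * collected and handed to xdp_recv_frames(), everything else is dropped.
 */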
static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = &head->frm;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

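/* Entry point for BPF_F_TEST_XDP_LIVE_FRAMES: set up the page_pool, then keep
 * running batches until the timer says 'repeat' runs have completed, counting
 * each processed frame as one run.
 */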
static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)
{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}

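/* Generic repeat loop for skb and XDP test runs: allocate per-type cgroup
 * storage, run the program 'repeat' times under a cgroup run context, and
 * report the last return value plus the average duration.
 */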
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

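/* Copy the (possibly multi-buffer) result packet, retval and duration back to
 * userspace. The copy is clamped to the user's data_size_out hint, in which
 * case -ENOSPC is reported.
 */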
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover variety of
 * architecture dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);
ALLOW_ERROR_INJECTION(bpf_fentry_test1, ERRNO);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

struct prog_test_member {
	u64 c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
};

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
};

noinline struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	/* randomly return NULL */
	if (get_jiffies_64() % 2)
		return NULL;
	return &prog_test_struct;
}

noinline void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
}

noinline void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

struct prog_test_pass1 {
	int x0;
	struct {
		int x1;
		struct {
			int x2;
			struct {
				int x3;
			};
		};
	};
};

struct prog_test_pass2 {
	int len;
	short arr1[4];
	struct {
		char arr2[4];
		unsigned long arr3[8];
	} x;
};

struct prog_test_fail1 {
	void *p;
	int x;
};

struct prog_test_fail2 {
	int x8;
	struct prog_test_pass1 x;
};

struct prog_test_fail3 {
	int len;
	char arr1[2];
	char arr2[];
};

noinline void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

noinline void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

noinline void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

noinline void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

noinline void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

noinline void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

noinline void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

noinline void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

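/* BTF ID sets describing the test kfuncs above: which may be called, which
 * acquire/release references, and which may return NULL. They are registered
 * for BPF_PROG_TYPE_SCHED_CLS in bpf_prog_test_run_init() below.
 */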
BTF_SET_START(test_sk_check_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_memb_release)
BTF_ID(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID(func, bpf_kfunc_call_test_pass1)
BTF_ID(func, bpf_kfunc_call_test_pass2)
BTF_ID(func, bpf_kfunc_call_test_fail1)
BTF_ID(func, bpf_kfunc_call_test_fail2)
BTF_ID(func, bpf_kfunc_call_test_fail3)
BTF_ID(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_SET_END(test_sk_check_kfunc_ids)

BTF_SET_START(test_sk_acquire_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_SET_END(test_sk_acquire_kfunc_ids)

BTF_SET_START(test_sk_release_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(func, bpf_kfunc_call_memb_release)
BTF_SET_END(test_sk_release_kfunc_ids)

BTF_SET_START(test_sk_ret_null_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test_acquire)
BTF_SET_END(test_sk_ret_null_kfunc_ids)

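/* Allocate a kernel buffer with the requested head/tailroom and copy in the
 * user-supplied packet data. 'user_size' may be smaller than the 'size' of
 * the linear area, but never larger.
 */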
static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

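/* Test runner for fentry/fexit and fmod_ret programs: call the
 * bpf_fentry_test*() functions (so any attached programs fire) and verify
 * their return values. For BPF_MODIFY_RETURN, whether the traced function
 * body actually ran (the side effect on 'b') is reported in the upper 16
 * bits of retval.
 */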
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

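/* Raw tracepoint programs are run once with the user-supplied context,
 * either on the current CPU or, with BPF_F_TEST_RUN_ON_CPU, on the requested
 * CPU via smp_call_function_single(). No data buffer or repeat loop.
 */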
struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

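/* Copy in the optional user-supplied context (ctx_in), zero-padded to
 * 'max_size'. Any trailing user bytes beyond max_size must be zero.
 */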
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from,to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

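/* Test runner for skb-based program types (TC, LWT, ...): build a real
 * sk_buff around the user data, attach a dummy socket, run the program, and
 * copy the (possibly modified) packet and __sk_buff context back out.
 */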
int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

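/* Apply the user-supplied xdp_md to the xdp_buff: validate it, resolve the
 * ingress device/RX queue (taking a device reference that is dropped again
 * in xdp_convert_buff_to_md()) and set the metadata length.
 */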
static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

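/* Test runner for XDP programs: build an xdp_buff (plus page frags for
 * multi-buffer input) around the user data, then either time the program
 * with bpf_test_run() or, with BPF_F_TEST_XDP_LIVE_FRAMES, process the
 * frames through the live path in bpf_test_run_xdp_live().
 */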
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];
			__skb_frag_set_page(frag, page);

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_size_set(frag, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

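/* Test runner for flow dissector programs: run bpf_flow_dissect() over the
 * user-supplied packet 'repeat' times and return the resulting
 * bpf_flow_keys. Only 'flags' may be set in the optional context.
 */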
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

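/* Test runner for sk_lookup programs: populate a bpf_sk_lookup_kern context
 * from the user-supplied bpf_sk_lookup and run the program through the
 * sk_lookup run array. The cookie of the selected socket is returned in the
 * context.
 */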
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

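/* Test runner for BPF_PROG_TYPE_SYSCALL programs: run once with the
 * user-supplied context under rcu_read_lock_trace() and copy the (possibly
 * modified) context back to userspace.
 */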
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner        = THIS_MODULE,
	.check_set    = &test_sk_check_kfunc_ids,
	.acquire_set  = &test_sk_acquire_kfunc_ids,
	.release_set  = &test_sk_release_kfunc_ids,
	.ret_null_set = &test_sk_ret_null_kfunc_ids,
};

static int __init bpf_prog_test_run_init(void)
{
	return register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
}
late_initcall(bpf_prog_test_run_init);