// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <net/page_pool.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();
	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, int iterations,
				    u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i += iterations;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

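/* Typical use of the timer helpers above; this is the pattern followed by
 * bpf_test_run() and bpf_test_run_xdp_live() further down (sketch only):
 *
 *	struct bpf_test_timer t = { .mode = NO_MIGRATE };
 *	int err;
 *	u32 duration;
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		... run the program once ...
 *	} while (bpf_test_timer_continue(&t, 1, repeat, &err, &duration));
 *	bpf_test_timer_leave(&t);
 */
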
/* We put this struct at the head of each page with a context and frame
 * initialised when the page is allocated, so we don't have to do this on each
 * repetition of the test run.
 */
struct xdp_page_head {
	struct xdp_buff orig_ctx;
	struct xdp_buff ctx;
	union {
		/* ::data_hard_start starts here */
		DECLARE_FLEX_ARRAY(struct xdp_frame, frame);
		DECLARE_FLEX_ARRAY(u8, data);
	};
};

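/* Resulting in-page layout, as filled in by xdp_test_run_init_page() below
 * (rough sketch, not to scale):
 *
 *	+-------------------------------+ <- page start
 *	| struct xdp_page_head          |
 *	|   .orig_ctx, .ctx             |
 *	+-------------------------------+ <- .frame / .data (data_hard_start)
 *	| headroom (XDP_PACKET_HEADROOM |
 *	| minus any metadata length)    |
 *	| metadata + packet data        |
 *	| tailroom up to                |
 *	| TEST_XDP_FRAME_SIZE           |
 *	+-------------------------------+ <- page end
 */
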
struct xdp_test_data {
	struct xdp_buff *orig_ctx;
	struct xdp_rxq_info rxq;
	struct net_device *dev;
	struct page_pool *pp;
	struct xdp_frame **frames;
	struct sk_buff **skbs;
	struct xdp_mem_info mem;
	u32 batch_size;
	u32 frame_cnt;
};

/* tools/testing/selftests/bpf/prog_tests/xdp_do_redirect.c:%MAX_PKT_SIZE
 * must be updated accordingly if this gets changed, otherwise BPF selftests
 * will fail.
 */
#define TEST_XDP_FRAME_SIZE (PAGE_SIZE - sizeof(struct xdp_page_head))
#define TEST_XDP_MAX_BATCH 256

static void xdp_test_run_init_page(struct page *page, void *arg)
{
	struct xdp_page_head *head = phys_to_virt(page_to_phys(page));
	struct xdp_buff *new_ctx, *orig_ctx;
	u32 headroom = XDP_PACKET_HEADROOM;
	struct xdp_test_data *xdp = arg;
	size_t frm_len, meta_len;
	struct xdp_frame *frm;
	void *data;

	orig_ctx = xdp->orig_ctx;
	frm_len = orig_ctx->data_end - orig_ctx->data_meta;
	meta_len = orig_ctx->data - orig_ctx->data_meta;
	headroom -= meta_len;

	new_ctx = &head->ctx;
	frm = head->frame;
	data = head->data;
	memcpy(data + headroom, orig_ctx->data_meta, frm_len);

	xdp_init_buff(new_ctx, TEST_XDP_FRAME_SIZE, &xdp->rxq);
	xdp_prepare_buff(new_ctx, data, headroom, frm_len, true);
	new_ctx->data = new_ctx->data_meta + meta_len;

	xdp_update_frame_from_buff(new_ctx, frm);
	frm->mem = new_ctx->rxq->mem;

	memcpy(&head->orig_ctx, new_ctx, sizeof(head->orig_ctx));
}

static int xdp_test_run_setup(struct xdp_test_data *xdp, struct xdp_buff *orig_ctx)
{
	struct page_pool *pp;
	int err = -ENOMEM;
	struct page_pool_params pp_params = {
		.order = 0,
		.flags = 0,
		.pool_size = xdp->batch_size,
		.nid = NUMA_NO_NODE,
		.init_callback = xdp_test_run_init_page,
		.init_arg = xdp,
	};

	xdp->frames = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->frames)
		return -ENOMEM;

	xdp->skbs = kvmalloc_array(xdp->batch_size, sizeof(void *), GFP_KERNEL);
	if (!xdp->skbs)
		goto err_skbs;

	pp = page_pool_create(&pp_params);
	if (IS_ERR(pp)) {
		err = PTR_ERR(pp);
		goto err_pp;
	}

	/* will copy 'mem.id' into pp->xdp_mem_id */
	err = xdp_reg_mem_model(&xdp->mem, MEM_TYPE_PAGE_POOL, pp);
	if (err)
		goto err_mmodel;

	xdp->pp = pp;

	/* We create a 'fake' RXQ referencing the original dev, but with an
	 * xdp_mem_info pointing to our page_pool
	 */
	xdp_rxq_info_reg(&xdp->rxq, orig_ctx->rxq->dev, 0, 0);
	xdp->rxq.mem.type = MEM_TYPE_PAGE_POOL;
	xdp->rxq.mem.id = pp->xdp_mem_id;
	xdp->dev = orig_ctx->rxq->dev;
	xdp->orig_ctx = orig_ctx;

	return 0;

err_mmodel:
	page_pool_destroy(pp);
err_pp:
	kvfree(xdp->skbs);
err_skbs:
	kvfree(xdp->frames);
	return err;
}

static void xdp_test_run_teardown(struct xdp_test_data *xdp)
{
	xdp_unreg_mem_model(&xdp->mem);
	page_pool_destroy(xdp->pp);
	kvfree(xdp->frames);
	kvfree(xdp->skbs);
}

static bool ctx_was_changed(struct xdp_page_head *head)
{
	return head->orig_ctx.data != head->ctx.data ||
		head->orig_ctx.data_meta != head->ctx.data_meta ||
		head->orig_ctx.data_end != head->ctx.data_end;
}

static void reset_ctx(struct xdp_page_head *head)
{
	if (likely(!ctx_was_changed(head)))
		return;

	head->ctx.data = head->orig_ctx.data;
	head->ctx.data_meta = head->orig_ctx.data_meta;
	head->ctx.data_end = head->orig_ctx.data_end;
	xdp_update_frame_from_buff(&head->ctx, head->frame);
}

static int xdp_recv_frames(struct xdp_frame **frames, int nframes,
			   struct sk_buff **skbs,
			   struct net_device *dev)
{
	gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
	int i, n;
	LIST_HEAD(list);

	n = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, (void **)skbs);
	if (unlikely(n == 0)) {
		for (i = 0; i < nframes; i++)
			xdp_return_frame(frames[i]);
		return -ENOMEM;
	}

	for (i = 0; i < nframes; i++) {
		struct xdp_frame *xdpf = frames[i];
		struct sk_buff *skb = skbs[i];

		skb = __xdp_build_skb_from_frame(xdpf, skb, dev);
		if (!skb) {
			xdp_return_frame(xdpf);
			continue;
		}

		list_add_tail(&skb->list, &list);
	}
	netif_receive_skb_list(&list);

	return 0;
}

static int xdp_test_run_batch(struct xdp_test_data *xdp, struct bpf_prog *prog,
			      u32 repeat)
{
	struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
	int err = 0, act, ret, i, nframes = 0, batch_sz;
	struct xdp_frame **frames = xdp->frames;
	struct xdp_page_head *head;
	struct xdp_frame *frm;
	bool redirect = false;
	struct xdp_buff *ctx;
	struct page *page;

	batch_sz = min_t(u32, repeat, xdp->batch_size);

	local_bh_disable();
	xdp_set_return_frame_no_direct();

	for (i = 0; i < batch_sz; i++) {
		page = page_pool_dev_alloc_pages(xdp->pp);
		if (!page) {
			err = -ENOMEM;
			goto out;
		}

		head = phys_to_virt(page_to_phys(page));
		reset_ctx(head);
		ctx = &head->ctx;
		frm = head->frame;
		xdp->frame_cnt++;

		act = bpf_prog_run_xdp(prog, ctx);

		/* if program changed pkt bounds we need to update the xdp_frame */
		if (unlikely(ctx_was_changed(head))) {
			ret = xdp_update_frame_from_buff(ctx, frm);
			if (ret) {
				xdp_return_buff(ctx);
				continue;
			}
		}

		switch (act) {
		case XDP_TX:
			/* we can't do a real XDP_TX since we're not in the
			 * driver, so turn it into a REDIRECT back to the same
			 * index
			 */
			ri->tgt_index = xdp->dev->ifindex;
			ri->map_id = INT_MAX;
			ri->map_type = BPF_MAP_TYPE_UNSPEC;
			fallthrough;
		case XDP_REDIRECT:
			redirect = true;
			ret = xdp_do_redirect_frame(xdp->dev, ctx, frm, prog);
			if (ret)
				xdp_return_buff(ctx);
			break;
		case XDP_PASS:
			frames[nframes++] = frm;
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_buff(ctx);
			break;
		}
	}

out:
	if (redirect)
		xdp_do_flush();
	if (nframes) {
		ret = xdp_recv_frames(frames, nframes, xdp->skbs, xdp->dev);
		if (ret)
			err = ret;
	}

	xdp_clear_return_frame_no_direct();
	local_bh_enable();
	return err;
}

static int bpf_test_run_xdp_live(struct bpf_prog *prog, struct xdp_buff *ctx,
				 u32 repeat, u32 batch_size, u32 *time)
{
	struct xdp_test_data xdp = { .batch_size = batch_size };
	struct bpf_test_timer t = { .mode = NO_MIGRATE };
	int ret;

	if (!repeat)
		repeat = 1;

	ret = xdp_test_run_setup(&xdp, ctx);
	if (ret)
		return ret;

	bpf_test_timer_enter(&t);
	do {
		xdp.frame_cnt = 0;
		ret = xdp_test_run_batch(&xdp, prog, repeat - t.i);
		if (unlikely(ret < 0))
			break;
	} while (bpf_test_timer_continue(&t, xdp.frame_cnt, repeat, &ret, time));
	bpf_test_timer_leave(&t);

	xdp_test_run_teardown(&xdp);
	return ret;
}

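/* User space reaches the live-frames mode above via the BPF_PROG_TEST_RUN
 * command. A minimal sketch using libbpf (illustration only; 'prog_fd' and
 * the packet buffer 'pkt' are assumed to exist):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = &pkt,
 *		    .data_size_in = sizeof(pkt),
 *		    .repeat = 64,
 *		    .flags = BPF_F_TEST_XDP_LIVE_FRAMES,
 *		    .batch_size = 0,
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *
 * A batch_size of zero selects NAPI_POLL_WEIGHT, mirroring the check in
 * bpf_prog_test_run_xdp() below.
 */
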
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		local_bh_disable();
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
		local_bh_enable();
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   struct skb_shared_info *sinfo, u32 size,
			   u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out) {
		int len = sinfo ? copy_size - sinfo->xdp_frags_size : copy_size;

		if (len < 0) {
			err = -ENOSPC;
			goto out;
		}

		if (copy_to_user(data_out, data, len))
			goto out;

		if (sinfo) {
			int i, offset = len;
			u32 data_len;

			for (i = 0; i < sinfo->nr_frags; i++) {
				skb_frag_t *frag = &sinfo->frags[i];

				if (offset >= copy_size) {
					err = -ENOSPC;
					break;
				}

				data_len = min_t(u32, copy_size - offset,
						 skb_frag_size(frag));

				if (copy_to_user(data_out + offset,
						 skb_frag_address(frag),
						 data_len))
					goto out;

				offset += data_len;
			}
		}
	}

	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore_all("-Wmissing-prototypes",
		  "Global functions as their definitions will be in vmlinux BTF");
__bpf_kfunc int bpf_fentry_test1(int a)
{
	return a + 1;
}
EXPORT_SYMBOL_GPL(bpf_fentry_test1);

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

__bpf_kfunc int bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

__bpf_kfunc u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

__bpf_kfunc int bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

__bpf_kfunc struct sock *bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

long noinline bpf_kfunc_call_test4(signed char a, short b, int c, long d)
{
	/* Provoke the compiler to assume that the caller has sign-extended a,
	 * b and c on platforms where this is required (e.g. s390x).
	 */
	return (long)a + (long)b + (long)c + d;
}

struct prog_test_member1 {
	int a;
};

struct prog_test_member {
	struct prog_test_member1 m;
	int c;
};

struct prog_test_ref_kfunc {
	int a;
	int b;
	struct prog_test_member memb;
	struct prog_test_ref_kfunc *next;
	refcount_t cnt;
};

static struct prog_test_ref_kfunc prog_test_struct = {
	.a = 42,
	.b = 108,
	.next = &prog_test_struct,
	.cnt = REFCOUNT_INIT(1),
};

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_acquire(unsigned long *scalar_ptr)
{
	refcount_inc(&prog_test_struct.cnt);
	return &prog_test_struct;
}

__bpf_kfunc struct prog_test_member *
bpf_kfunc_call_memb_acquire(void)
{
	WARN_ON_ONCE(1);
	return NULL;
}

__bpf_kfunc void bpf_kfunc_call_test_release(struct prog_test_ref_kfunc *p)
{
	if (!p)
		return;

	refcount_dec(&p->cnt);
}

__bpf_kfunc void bpf_kfunc_call_memb_release(struct prog_test_member *p)
{
}

__bpf_kfunc void bpf_kfunc_call_memb1_release(struct prog_test_member1 *p)
{
	WARN_ON_ONCE(1);
}

static int *__bpf_kfunc_call_test_get_mem(struct prog_test_ref_kfunc *p, const int size)
{
	if (size > 2 * sizeof(int))
		return NULL;

	return (int *)p;
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdwr_mem(struct prog_test_ref_kfunc *p,
						  const int rdwr_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdwr_buf_size);
}

__bpf_kfunc int *bpf_kfunc_call_test_get_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

/* the next 2 ones can't really be used for testing except to ensure
 * that the verifier rejects the call.
 * Acquire functions must return struct pointers, so these ones are
 * failing.
 */
__bpf_kfunc int *bpf_kfunc_call_test_acq_rdonly_mem(struct prog_test_ref_kfunc *p,
						    const int rdonly_buf_size)
{
	return __bpf_kfunc_call_test_get_mem(p, rdonly_buf_size);
}

__bpf_kfunc void bpf_kfunc_call_int_mem_release(int *p)
{
}

__bpf_kfunc struct prog_test_ref_kfunc *
bpf_kfunc_call_test_kptr_get(struct prog_test_ref_kfunc **pp, int a, int b)
{
	struct prog_test_ref_kfunc *p = READ_ONCE(*pp);

	if (!p)
		return NULL;
	refcount_inc(&p->cnt);
	return p;
}

struct prog_test_pass1 {
	int x0;
	struct {
		int x1;
		struct {
			int x2;
			struct {
				int x3;
			};
		};
	};
};

struct prog_test_pass2 {
	int len;
	short arr1[4];
	struct {
		char arr2[4];
		unsigned long arr3[8];
	} x;
};

struct prog_test_fail1 {
	void *p;
	int x;
};

struct prog_test_fail2 {
	int x8;
	struct prog_test_pass1 x;
};

struct prog_test_fail3 {
	int len;
	char arr1[2];
	char arr2[];
};

__bpf_kfunc void bpf_kfunc_call_test_pass_ctx(struct __sk_buff *skb)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass1(struct prog_test_pass1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_pass2(struct prog_test_pass2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail1(struct prog_test_fail1 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail2(struct prog_test_fail2 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_fail3(struct prog_test_fail3 *p)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_pass1(void *mem, int mem__sz)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail1(void *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_mem_len_fail2(u64 *mem, int len)
{
}

__bpf_kfunc void bpf_kfunc_call_test_ref(struct prog_test_ref_kfunc *p)
{
	/* p != NULL, but p->cnt could be 0 */
}

__bpf_kfunc void bpf_kfunc_call_test_destructive(void)
{
}

__bpf_kfunc static u32 bpf_kfunc_call_test_static_unused_arg(u32 arg, u32 unused)
{
	return arg;
}

__diag_pop();

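/* On the BPF side, selftest programs reach the kfuncs above through __ksym
 * declarations. A minimal sketch of such a caller (illustration only, not
 * part of this file; error handling elided):
 *
 *	extern u64 bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b,
 *					u32 c, u64 d) __ksym;
 *
 *	SEC("tc")
 *	int kfunc_call_test(struct __sk_buff *skb)
 *	{
 *		struct bpf_sock *sk = skb->sk;
 *
 *		if (!sk)
 *			return -1;
 *		return bpf_kfunc_call_test1((struct sock *)sk, 1, 2, 3, 4);
 *	}
 */
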
BTF_SET8_START(bpf_test_modify_return_ids)
BTF_ID_FLAGS(func, bpf_modify_return_test)
BTF_ID_FLAGS(func, bpf_fentry_test1, KF_SLEEPABLE)
BTF_SET8_END(bpf_test_modify_return_ids)

static const struct btf_kfunc_id_set bpf_test_modify_return_set = {
	.owner = THIS_MODULE,
	.set = &bpf_test_modify_return_ids,
};

BTF_SET8_START(test_sk_check_kfunc_ids)
BTF_ID_FLAGS(func, bpf_kfunc_call_test1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test4)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_acquire, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_memb1_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdwr_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_get_rdonly_mem, KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_acq_rdonly_mem, KF_ACQUIRE | KF_RET_NULL)
BTF_ID_FLAGS(func, bpf_kfunc_call_int_mem_release, KF_RELEASE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_kptr_get, KF_ACQUIRE | KF_RET_NULL | KF_KPTR_GET)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass_ctx)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_pass2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_fail3)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_pass1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail1)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_mem_len_fail2)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_ref, KF_TRUSTED_ARGS | KF_RCU)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_destructive, KF_DESTRUCTIVE)
BTF_ID_FLAGS(func, bpf_kfunc_call_test_static_unused_arg)
BTF_SET8_END(test_sk_check_kfunc_ids)

static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
			   u32 size, u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	size = SKB_DATA_ALIGN(size);
	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

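/* The BPF_F_TEST_RUN_ON_CPU handling above can be driven from user space
 * roughly like this libbpf sketch (illustration only; 'prog_fd' and the
 * 'args' context buffer are assumed to exist):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .ctx_in = &args,
 *		    .ctx_size_in = sizeof(args),
 *		    .flags = BPF_F_TEST_RUN_ON_CPU,
 *		    .cpu = 2,
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 */
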
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in the buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_LEGACY_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

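/* The two converters above let a skb test run seed and read back __sk_buff
 * fields through ctx_in/ctx_out. A libbpf sketch (illustration only;
 * 'prog_fd' and the packet buffer 'pkt' are assumed to exist):
 *
 *	struct __sk_buff skb_ctx = { .mark = 42, .priority = 1 };
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		    .data_in = &pkt,
 *		    .data_size_in = sizeof(pkt),
 *		    .ctx_in = &skb_ctx,
 *		    .ctx_size_in = sizeof(skb_ctx),
 *		    .ctx_out = &skb_ctx,
 *		    .ctx_size_out = sizeof(skb_ctx),
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 */
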
static struct proto bpf_dummy_proto = {
	.name = "bpf_dummy",
	.owner = THIS_MODULE,
	.obj_size = sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in,
			     size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = slab_build_skb(data);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, NULL, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool do_live = (kattr->test.flags & BPF_F_TEST_XDP_LIVE_FRAMES);
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 batch_size = kattr->test.batch_size;
	u32 retval = 0, duration, max_data_sz;
	u32 size = kattr->test.data_size_in;
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct skb_shared_info *sinfo;
	struct xdp_buff xdp = {};
	int i, ret = -EINVAL;
	struct xdp_md *ctx;
	void *data;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	if (kattr->test.flags & ~BPF_F_TEST_XDP_LIVE_FRAMES)
		return -EINVAL;

	if (bpf_prog_is_dev_bound(prog->aux))
		return -EINVAL;

	if (do_live) {
		if (!batch_size)
			batch_size = NAPI_POLL_WEIGHT;
		else if (batch_size > TEST_XDP_MAX_BATCH)
			return -E2BIG;

		headroom += sizeof(struct xdp_page_head);
	} else if (batch_size) {
		return -EINVAL;
	}

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)) ||
		    (do_live && (kattr->test.data_out || kattr->test.ctx_out)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	max_data_sz = 4096 - headroom - tailroom;
	if (size > max_data_sz) {
		/* disallow live data mode for jumbo frames */
		if (do_live)
			goto free_ctx;
		size = max_data_sz;
	}

	data = bpf_test_init(kattr, size, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	rxqueue->xdp_rxq.frag_size = headroom + max_data_sz + tailroom;
	xdp_init_buff(&xdp, rxqueue->xdp_rxq.frag_size, &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);
	sinfo = xdp_get_shared_info_from_buff(&xdp);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	if (unlikely(kattr->test.data_size_in > size)) {
		void __user *data_in = u64_to_user_ptr(kattr->test.data_in);

		while (size < kattr->test.data_size_in) {
			struct page *page;
			skb_frag_t *frag;
			u32 data_len;

			if (sinfo->nr_frags == MAX_SKB_FRAGS) {
				ret = -ENOMEM;
				goto out;
			}

			page = alloc_page(GFP_KERNEL);
			if (!page) {
				ret = -ENOMEM;
				goto out;
			}

			frag = &sinfo->frags[sinfo->nr_frags++];
			__skb_frag_set_page(frag, page);

			data_len = min_t(u32, kattr->test.data_size_in - size,
					 PAGE_SIZE);
			skb_frag_size_set(frag, data_len);

			if (copy_from_user(page_address(page), data_in + size,
					   data_len)) {
				ret = -EFAULT;
				goto out;
			}
			sinfo->xdp_frags_size += data_len;
			size += data_len;
		}
		xdp_buff_set_frags_flag(&xdp);
	}

	if (repeat > 1)
		bpf_prog_change_xdp(NULL, prog);

	if (do_live)
		ret = bpf_test_run_xdp_live(prog, &xdp, repeat, batch_size, &duration);
	else
		ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	size = xdp.data_end - xdp.data_meta + sinfo->xdp_frags_size;
	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, sinfo, size,
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	if (repeat > 1)
		bpf_prog_change_xdp(prog, NULL);
free_data:
	for (i = 0; i < sinfo->nr_frags; i++)
		__free_page(skb_frag_page(&sinfo->frags[i]));
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, kattr->test.data_size_in, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}
	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, NULL,
			      sizeof(flow_keys), retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (kattr->test.flags || kattr->test.cpu || kattr->test.batch_size)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, 1, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags ||
	    kattr->test.batch_size)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}

static const struct btf_kfunc_id_set bpf_prog_test_kfunc_set = {
	.owner = THIS_MODULE,
	.set = &test_sk_check_kfunc_ids,
};

BTF_ID_LIST(bpf_prog_test_dtor_kfunc_ids)
BTF_ID(struct, prog_test_ref_kfunc)
BTF_ID(func, bpf_kfunc_call_test_release)
BTF_ID(struct, prog_test_member)
BTF_ID(func, bpf_kfunc_call_memb_release)

static int __init bpf_prog_test_run_init(void)
{
	const struct btf_id_dtor_kfunc bpf_prog_test_dtor_kfunc[] = {
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[0],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[1]
		},
		{
			.btf_id = bpf_prog_test_dtor_kfunc_ids[2],
			.kfunc_btf_id = bpf_prog_test_dtor_kfunc_ids[3],
		},
	};
	int ret;

	ret = register_btf_fmodret_id_set(&bpf_test_modify_return_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SCHED_CLS, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_TRACING, &bpf_prog_test_kfunc_set);
	ret = ret ?: register_btf_kfunc_id_set(BPF_PROG_TYPE_SYSCALL, &bpf_prog_test_kfunc_set);
	return ret ?: register_btf_id_dtor_kfuncs(bpf_prog_test_dtor_kfunc,
						  ARRAY_SIZE(bpf_prog_test_dtor_kfunc),
						  THIS_MODULE);
}
late_initcall(bpf_prog_test_run_init);