// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/etherdevice.h>
#include <linux/filter.h>
#include <linux/rcupdate_trace.h>
#include <linux/sched/signal.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <net/net_namespace.h>
#include <linux/error-injection.h>
#include <linux/smp.h>
#include <linux/sock_diag.h>
#include <net/xdp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/bpf_test_run.h>

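/* This file implements the kernel side of the BPF_PROG_TEST_RUN command:
 * each bpf_prog_test_run_*() handler below builds a fake context and/or
 * packet from user-supplied data, runs the program under test (optionally
 * repeatedly, timing the runs), and copies data, return value and duration
 * back to user space.
 */
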
struct bpf_test_timer {
	enum { NO_PREEMPT, NO_MIGRATE } mode;
	u32 i;
	u64 time_start, time_spent;
};

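/* The three timer helpers below are always used in this pattern (see e.g.
 * bpf_test_run() further down):
 *
 *	bpf_test_timer_enter(&t);
 *	do {
 *		ret = ...run the program once...;
 *	} while (bpf_test_timer_continue(&t, repeat, &err, &duration));
 *	bpf_test_timer_leave(&t);
 */
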
static void bpf_test_timer_enter(struct bpf_test_timer *t)
	__acquires(rcu)
{
	rcu_read_lock();
	if (t->mode == NO_PREEMPT)
		preempt_disable();
	else
		migrate_disable();

	t->time_start = ktime_get_ns();
}

static void bpf_test_timer_leave(struct bpf_test_timer *t)
	__releases(rcu)
{
	t->time_start = 0;

	if (t->mode == NO_PREEMPT)
		preempt_enable();
	else
		migrate_enable();

	rcu_read_unlock();
}

static bool bpf_test_timer_continue(struct bpf_test_timer *t, u32 repeat, int *err, u32 *duration)
	__must_hold(rcu)
{
	t->i++;
	if (t->i >= repeat) {
		/* We're done. */
		t->time_spent += ktime_get_ns() - t->time_start;
		do_div(t->time_spent, t->i);
		*duration = t->time_spent > U32_MAX ? U32_MAX : (u32)t->time_spent;
		*err = 0;
		goto reset;
	}

	if (signal_pending(current)) {
		/* During iteration: we've been cancelled, abort. */
		*err = -EINTR;
		goto reset;
	}

	if (need_resched()) {
		/* During iteration: we need to reschedule between runs. */
		t->time_spent += ktime_get_ns() - t->time_start;
		bpf_test_timer_leave(t);
		cond_resched();
		bpf_test_timer_enter(t);
	}

	/* Do another round. */
	return true;

reset:
	t->i = 0;
	return false;
}

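/* Run @prog on @ctx @repeat times with per-run cgroup storage allocated up
 * front; the average run time in ns is reported through @time. The @xdp flag
 * selects bpf_prog_run_xdp() since XDP contexts need the dedicated runner.
 */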
static int bpf_test_run(struct bpf_prog *prog, void *ctx, u32 repeat,
			u32 *retval, u32 *time, bool xdp)
{
	struct bpf_prog_array_item item = {.prog = prog};
	struct bpf_run_ctx *old_ctx;
	struct bpf_cg_run_ctx run_ctx;
	struct bpf_test_timer t = { NO_MIGRATE };
	enum bpf_cgroup_storage_type stype;
	int ret;

	for_each_cgroup_storage_type(stype) {
		item.cgroup_storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
		if (IS_ERR(item.cgroup_storage[stype])) {
			item.cgroup_storage[stype] = NULL;
			for_each_cgroup_storage_type(stype)
				bpf_cgroup_storage_free(item.cgroup_storage[stype]);
			return -ENOMEM;
		}
	}

	if (!repeat)
		repeat = 1;

	bpf_test_timer_enter(&t);
	old_ctx = bpf_set_run_ctx(&run_ctx.run_ctx);
	do {
		run_ctx.prog_item = &item;
		if (xdp)
			*retval = bpf_prog_run_xdp(prog, ctx);
		else
			*retval = bpf_prog_run(prog, ctx);
	} while (bpf_test_timer_continue(&t, repeat, &ret, time));
	bpf_reset_run_ctx(old_ctx);
	bpf_test_timer_leave(&t);

	for_each_cgroup_storage_type(stype)
		bpf_cgroup_storage_free(item.cgroup_storage[stype]);

	return ret;
}

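/* Copy the output packet (if any), its size, the program's return value and
 * the measured duration back into the user's bpf_attr.
 */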
static int bpf_test_finish(const union bpf_attr *kattr,
			   union bpf_attr __user *uattr, const void *data,
			   u32 size, u32 retval, u32 duration)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.data_out);
	int err = -EFAULT;
	u32 copy_size = size;

	/* Clamp copy if the user has provided a size hint, but copy the full
	 * buffer if not to retain old behaviour.
	 */
	if (kattr->test.data_size_out &&
	    copy_size > kattr->test.data_size_out) {
		copy_size = kattr->test.data_size_out;
		err = -ENOSPC;
	}

	if (data_out && copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.data_size_out, &size, sizeof(size)))
		goto out;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;
	if (copy_to_user(&uattr->test.duration, &duration, sizeof(duration)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

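/* From user space all of this surfaces through bpf(BPF_PROG_TEST_RUN, ...).
 * A minimal libbpf sketch (prog_fd and pkt are hypothetical, not part of
 * this file):
 *
 *	LIBBPF_OPTS(bpf_test_run_opts, opts,
 *		.data_in = pkt,
 *		.data_size_in = sizeof(pkt),
 *		.repeat = 1000,
 *	);
 *	err = bpf_prog_test_run_opts(prog_fd, &opts);
 *	// on success, opts.retval and opts.duration were filled in by
 *	// bpf_test_finish() above
 */
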
/* Integer types of various sizes and pointer combinations cover a variety
 * of architecture-dependent calling conventions. 7+ can be supported in the
 * future.
 */
__diag_push();
__diag_ignore(GCC, 8, "-Wmissing-prototypes",
	      "Global functions as their definitions will be in vmlinux BTF");
int noinline bpf_fentry_test1(int a)
{
	return a + 1;
}

int noinline bpf_fentry_test2(int a, u64 b)
{
	return a + b;
}

int noinline bpf_fentry_test3(char a, int b, u64 c)
{
	return a + b + c;
}

int noinline bpf_fentry_test4(void *a, char b, int c, u64 d)
{
	return (long)a + b + c + d;
}

int noinline bpf_fentry_test5(u64 a, void *b, short c, int d, u64 e)
{
	return a + (long)b + c + d + e;
}

int noinline bpf_fentry_test6(u64 a, void *b, short c, int d, void *e, u64 f)
{
	return a + (long)b + c + d + (long)e + f;
}

struct bpf_fentry_test_t {
	struct bpf_fentry_test_t *a;
};

int noinline bpf_fentry_test7(struct bpf_fentry_test_t *arg)
{
	return (long)arg;
}

int noinline bpf_fentry_test8(struct bpf_fentry_test_t *arg)
{
	return (long)arg->a;
}

int noinline bpf_modify_return_test(int a, int *b)
{
	*b += 1;
	return a + *b;
}

u64 noinline bpf_kfunc_call_test1(struct sock *sk, u32 a, u64 b, u32 c, u64 d)
{
	return a + b + c + d;
}

int noinline bpf_kfunc_call_test2(struct sock *sk, u32 a, u32 b)
{
	return a + b;
}

struct sock * noinline bpf_kfunc_call_test3(struct sock *sk)
{
	return sk;
}

__diag_pop();

ALLOW_ERROR_INJECTION(bpf_modify_return_test, ERRNO);

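/* BTF IDs of the kfuncs above; the verifier consults this allowlist (and any
 * module-registered one) when a test program makes a kernel function call.
 */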
BTF_SET_START(test_sk_kfunc_ids)
BTF_ID(func, bpf_kfunc_call_test1)
BTF_ID(func, bpf_kfunc_call_test2)
BTF_ID(func, bpf_kfunc_call_test3)
BTF_SET_END(test_sk_kfunc_ids)

bool bpf_prog_test_check_kfunc_call(u32 kfunc_id, struct module *owner)
{
	if (btf_id_set_contains(&test_sk_kfunc_ids, kfunc_id))
		return true;
	return bpf_check_mod_kfunc_call(&prog_test_kfunc_list, kfunc_id, owner);
}

static void *bpf_test_init(const union bpf_attr *kattr, u32 size,
			   u32 headroom, u32 tailroom)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
	u32 user_size = kattr->test.data_size_in;
	void *data;

	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
		return ERR_PTR(-EINVAL);

	if (user_size > size)
		return ERR_PTR(-EMSGSIZE);

	data = kzalloc(size + headroom + tailroom, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (copy_from_user(data + headroom, data_in, user_size)) {
		kfree(data);
		return ERR_PTR(-EFAULT);
	}

	return data;
}

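/* Test runner for fentry/fexit/fmod_ret programs: the program under test is
 * expected to be attached to one of the bpf_fentry_test*() helpers above, so
 * calling them here triggers it. The reported 32-bit retval packs a
 * side-effect flag in the upper 16 bits and the return value in the lower 16.
 */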
int bpf_prog_test_run_tracing(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	struct bpf_fentry_test_t arg = {};
	u16 side_effect = 0, ret = 0;
	int b = 2, err = -EFAULT;
	u32 retval = 0;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	switch (prog->expected_attach_type) {
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		if (bpf_fentry_test1(1) != 2 ||
		    bpf_fentry_test2(2, 3) != 5 ||
		    bpf_fentry_test3(4, 5, 6) != 15 ||
		    bpf_fentry_test4((void *)7, 8, 9, 10) != 34 ||
		    bpf_fentry_test5(11, (void *)12, 13, 14, 15) != 65 ||
		    bpf_fentry_test6(16, (void *)17, 18, 19, (void *)20, 21) != 111 ||
		    bpf_fentry_test7((struct bpf_fentry_test_t *)0) != 0 ||
		    bpf_fentry_test8(&arg) != 0)
			goto out;
		break;
	case BPF_MODIFY_RETURN:
		ret = bpf_modify_return_test(1, &b);
		if (b != 2)
			side_effect = 1;
		break;
	default:
		goto out;
	}

	retval = ((u32)side_effect << 16) | ret;
	if (copy_to_user(&uattr->test.retval, &retval, sizeof(retval)))
		goto out;

	err = 0;
out:
	trace_bpf_test_finish(&err);
	return err;
}

struct bpf_raw_tp_test_run_info {
	struct bpf_prog *prog;
	void *ctx;
	u32 retval;
};

static void
__bpf_prog_test_run_raw_tp(void *data)
{
	struct bpf_raw_tp_test_run_info *info = data;

	rcu_read_lock();
	info->retval = bpf_prog_run(info->prog, info->ctx);
	rcu_read_unlock();
}

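/* Raw tracepoint test runs support BPF_F_TEST_RUN_ON_CPU: when set, the run
 * is funnelled through smp_call_function_single() so the program executes on
 * the CPU the user asked for rather than on the current one.
 */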
int bpf_prog_test_run_raw_tp(struct bpf_prog *prog,
			     const union bpf_attr *kattr,
			     union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	struct bpf_raw_tp_test_run_info info;
	int cpu = kattr->test.cpu, err = 0;
	int current_cpu;

	/* doesn't support data_in/out, ctx_out, duration, or repeat */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > MAX_BPF_FUNC_ARGS * sizeof(u64))
		return -EINVAL;

	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 && cpu != 0)
		return -EINVAL;

	if (ctx_size_in) {
		info.ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(info.ctx))
			return PTR_ERR(info.ctx);
	} else {
		info.ctx = NULL;
	}

	info.prog = prog;

	current_cpu = get_cpu();
	if ((kattr->test.flags & BPF_F_TEST_RUN_ON_CPU) == 0 ||
	    cpu == current_cpu) {
		__bpf_prog_test_run_raw_tp(&info);
	} else if (cpu >= nr_cpu_ids || !cpu_online(cpu)) {
		/* smp_call_function_single() also checks cpu_online()
		 * after csd_lock(). However, since cpu is from user
		 * space, let's do an extra quick check to filter out
		 * invalid value before smp_call_function_single().
		 */
		err = -ENXIO;
	} else {
		err = smp_call_function_single(cpu, __bpf_prog_test_run_raw_tp,
					       &info, 1);
	}
	put_cpu();

	if (!err &&
	    copy_to_user(&uattr->test.retval, &info.retval, sizeof(u32)))
		err = -EFAULT;

	kfree(info.ctx);
	return err;
}

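/* Copy a user-supplied context into a zeroed kernel buffer of @max_size.
 * Returns NULL when no context was given at all, an ERR_PTR on failure, and
 * rejects non-zero bytes past @max_size via bpf_check_uarg_tail_zero().
 */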
static void *bpf_ctx_init(const union bpf_attr *kattr, u32 max_size)
{
	void __user *data_in = u64_to_user_ptr(kattr->test.ctx_in);
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	u32 size = kattr->test.ctx_size_in;
	void *data;
	int err;

	if (!data_in && !data_out)
		return NULL;

	data = kzalloc(max_size, GFP_USER);
	if (!data)
		return ERR_PTR(-ENOMEM);

	if (data_in) {
		err = bpf_check_uarg_tail_zero(USER_BPFPTR(data_in), max_size, size);
		if (err) {
			kfree(data);
			return ERR_PTR(err);
		}

		size = min_t(u32, max_size, size);
		if (copy_from_user(data, data_in, size)) {
			kfree(data);
			return ERR_PTR(-EFAULT);
		}
	}
	return data;
}

static int bpf_ctx_finish(const union bpf_attr *kattr,
			  union bpf_attr __user *uattr, const void *data,
			  u32 size)
{
	void __user *data_out = u64_to_user_ptr(kattr->test.ctx_out);
	int err = -EFAULT;
	u32 copy_size = size;

	if (!data || !data_out)
		return 0;

	if (copy_size > kattr->test.ctx_size_out) {
		copy_size = kattr->test.ctx_size_out;
		err = -ENOSPC;
	}

	if (copy_to_user(data_out, data, copy_size))
		goto out;
	if (copy_to_user(&uattr->test.ctx_size_out, &size, sizeof(size)))
		goto out;
	if (err != -ENOSPC)
		err = 0;
out:
	return err;
}

/**
 * range_is_zero - test whether buffer is initialized
 * @buf: buffer to check
 * @from: check from this position
 * @to: check up until (excluding) this position
 *
 * This function returns true if there is no non-zero byte
 * in buf in the range [from, to).
 */
static inline bool range_is_zero(void *buf, size_t from, size_t to)
{
	return !memchr_inv((u8 *)buf + from, 0, to - from);
}

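/* convert___skb_to_skb() below supports only a subset of __sk_buff fields.
 * Each range_is_zero() check rejects the context unless every unsupported
 * gap between two supported fields is all-zero, so user input can never be
 * silently ignored.
 */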
static int convert___skb_to_skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return 0;

	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(__skb, 0, offsetof(struct __sk_buff, mark)))
		return -EINVAL;

	/* mark is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, mark),
			   offsetof(struct __sk_buff, priority)))
		return -EINVAL;

	/* priority is allowed */
	/* ingress_ifindex is allowed */
	/* ifindex is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, ifindex),
			   offsetof(struct __sk_buff, cb)))
		return -EINVAL;

	/* cb is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, cb),
			   offsetof(struct __sk_buff, tstamp)))
		return -EINVAL;

	/* tstamp is allowed */
	/* wire_len is allowed */
	/* gso_segs is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_segs),
			   offsetof(struct __sk_buff, gso_size)))
		return -EINVAL;

	/* gso_size is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, gso_size),
			   offsetof(struct __sk_buff, hwtstamp)))
		return -EINVAL;

	/* hwtstamp is allowed */

	if (!range_is_zero(__skb, offsetofend(struct __sk_buff, hwtstamp),
			   sizeof(struct __sk_buff)))
		return -EINVAL;

	skb->mark = __skb->mark;
	skb->priority = __skb->priority;
	skb->skb_iif = __skb->ingress_ifindex;
	skb->tstamp = __skb->tstamp;
	memcpy(&cb->data, __skb->cb, QDISC_CB_PRIV_LEN);

	if (__skb->wire_len == 0) {
		cb->pkt_len = skb->len;
	} else {
		if (__skb->wire_len < skb->len ||
		    __skb->wire_len > GSO_MAX_SIZE)
			return -EINVAL;
		cb->pkt_len = __skb->wire_len;
	}

	if (__skb->gso_segs > GSO_MAX_SEGS)
		return -EINVAL;
	skb_shinfo(skb)->gso_segs = __skb->gso_segs;
	skb_shinfo(skb)->gso_size = __skb->gso_size;
	skb_shinfo(skb)->hwtstamps.hwtstamp = __skb->hwtstamp;

	return 0;
}

static void convert_skb_to___skb(struct sk_buff *skb, struct __sk_buff *__skb)
{
	struct qdisc_skb_cb *cb = (struct qdisc_skb_cb *)skb->cb;

	if (!__skb)
		return;

	__skb->mark = skb->mark;
	__skb->priority = skb->priority;
	__skb->ingress_ifindex = skb->skb_iif;
	__skb->ifindex = skb->dev->ifindex;
	__skb->tstamp = skb->tstamp;
	memcpy(__skb->cb, &cb->data, QDISC_CB_PRIV_LEN);
	__skb->wire_len = cb->pkt_len;
	__skb->gso_segs = skb_shinfo(skb)->gso_segs;
	__skb->hwtstamp = skb_shinfo(skb)->hwtstamps.hwtstamp;
}

static struct proto bpf_dummy_proto = {
	.name		= "bpf_dummy",
	.owner		= THIS_MODULE,
	.obj_size	= sizeof(struct sock),
};

int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	bool is_l2 = false, is_direct_pkt_access = false;
	struct net *net = current->nsproxy->net_ns;
	struct net_device *dev = net->loopback_dev;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct __sk_buff *ctx = NULL;
	u32 retval, duration;
	int hh_len = ETH_HLEN;
	struct sk_buff *skb;
	struct sock *sk;
	void *data;
	int ret;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	data = bpf_test_init(kattr, size, NET_SKB_PAD + NET_IP_ALIGN,
			     SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
	if (IS_ERR(data))
		return PTR_ERR(data);

	ctx = bpf_ctx_init(kattr, sizeof(struct __sk_buff));
	if (IS_ERR(ctx)) {
		kfree(data);
		return PTR_ERR(ctx);
	}

	switch (prog->type) {
	case BPF_PROG_TYPE_SCHED_CLS:
	case BPF_PROG_TYPE_SCHED_ACT:
		is_l2 = true;
		fallthrough;
	case BPF_PROG_TYPE_LWT_IN:
	case BPF_PROG_TYPE_LWT_OUT:
	case BPF_PROG_TYPE_LWT_XMIT:
		is_direct_pkt_access = true;
		break;
	default:
		break;
	}

	sk = sk_alloc(net, AF_UNSPEC, GFP_USER, &bpf_dummy_proto, 1);
	if (!sk) {
		kfree(data);
		kfree(ctx);
		return -ENOMEM;
	}
	sock_init_data(NULL, sk);

	skb = build_skb(data, 0);
	if (!skb) {
		kfree(data);
		kfree(ctx);
		sk_free(sk);
		return -ENOMEM;
	}
	skb->sk = sk;

	skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN);
	__skb_put(skb, size);
	if (ctx && ctx->ifindex > 1) {
		dev = dev_get_by_index(net, ctx->ifindex);
		if (!dev) {
			ret = -ENODEV;
			goto out;
		}
	}
	skb->protocol = eth_type_trans(skb, dev);
	skb_reset_network_header(skb);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		sk->sk_family = AF_INET;
		if (sizeof(struct iphdr) <= skb_headlen(skb)) {
			sk->sk_rcv_saddr = ip_hdr(skb)->saddr;
			sk->sk_daddr = ip_hdr(skb)->daddr;
		}
		break;
#if IS_ENABLED(CONFIG_IPV6)
	case htons(ETH_P_IPV6):
		sk->sk_family = AF_INET6;
		if (sizeof(struct ipv6hdr) <= skb_headlen(skb)) {
			sk->sk_v6_rcv_saddr = ipv6_hdr(skb)->saddr;
			sk->sk_v6_daddr = ipv6_hdr(skb)->daddr;
		}
		break;
#endif
	default:
		break;
	}

	if (is_l2)
		__skb_push(skb, hh_len);
	if (is_direct_pkt_access)
		bpf_compute_data_pointers(skb);
	ret = convert___skb_to_skb(skb, ctx);
	if (ret)
		goto out;
	ret = bpf_test_run(prog, skb, repeat, &retval, &duration, false);
	if (ret)
		goto out;
	if (!is_l2) {
		if (skb_headroom(skb) < hh_len) {
			int nhead = HH_DATA_ALIGN(hh_len - skb_headroom(skb));

			if (pskb_expand_head(skb, nhead, 0, GFP_USER)) {
				ret = -ENOMEM;
				goto out;
			}
		}
		memset(__skb_push(skb, hh_len), 0, hh_len);
	}
	convert_skb_to___skb(skb, ctx);

	size = skb->len;
	/* bpf program can never convert linear skb to non-linear */
	if (WARN_ON_ONCE(skb_is_nonlinear(skb)))
		size = skb_headlen(skb);
	ret = bpf_test_finish(kattr, uattr, skb->data, size, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct __sk_buff));
out:
	if (dev && dev != net->loopback_dev)
		dev_put(dev);
	kfree_skb(skb);
	sk_free(sk);
	kfree(ctx);
	return ret;
}

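/* Apply a user-supplied xdp_md to the kernel xdp_buff: resolve the requested
 * ingress device and RX queue (taking a device reference that
 * xdp_convert_buff_to_md() drops again) and set up the metadata pointer.
 */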
static int xdp_convert_md_to_buff(struct xdp_md *xdp_md, struct xdp_buff *xdp)
{
	unsigned int ingress_ifindex, rx_queue_index;
	struct netdev_rx_queue *rxqueue;
	struct net_device *device;

	if (!xdp_md)
		return 0;

	if (xdp_md->egress_ifindex != 0)
		return -EINVAL;

	ingress_ifindex = xdp_md->ingress_ifindex;
	rx_queue_index = xdp_md->rx_queue_index;

	if (!ingress_ifindex && rx_queue_index)
		return -EINVAL;

	if (ingress_ifindex) {
		device = dev_get_by_index(current->nsproxy->net_ns,
					  ingress_ifindex);
		if (!device)
			return -ENODEV;

		if (rx_queue_index >= device->real_num_rx_queues)
			goto free_dev;

		rxqueue = __netif_get_rx_queue(device, rx_queue_index);

		if (!xdp_rxq_info_is_reg(&rxqueue->xdp_rxq))
			goto free_dev;

		xdp->rxq = &rxqueue->xdp_rxq;
		/* The device is now tracked in the xdp->rxq for later
		 * dev_put()
		 */
	}

	xdp->data = xdp->data_meta + xdp_md->data;
	return 0;

free_dev:
	dev_put(device);
	return -EINVAL;
}

static void xdp_convert_buff_to_md(struct xdp_buff *xdp, struct xdp_md *xdp_md)
{
	if (!xdp_md)
		return;

	xdp_md->data = xdp->data - xdp->data_meta;
	xdp_md->data_end = xdp->data_end - xdp->data_meta;

	if (xdp_md->ingress_ifindex)
		dev_put(xdp->rxq->dev);
}

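/* The XDP test buffer is laid out like a real driver page:
 *
 *	headroom (XDP_PACKET_HEADROOM, minus any metadata) | data | tailroom
 *
 * so programs can grow metadata and headroom with bpf_xdp_adjust_meta() and
 * bpf_xdp_adjust_head() as they would on a live device.
 */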
int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
			  union bpf_attr __user *uattr)
{
	u32 tailroom = SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	u32 headroom = XDP_PACKET_HEADROOM;
	u32 size = kattr->test.data_size_in;
	u32 repeat = kattr->test.repeat;
	struct netdev_rx_queue *rxqueue;
	struct xdp_buff xdp = {};
	u32 retval, duration;
	struct xdp_md *ctx;
	u32 max_data_sz;
	void *data;
	int ret = -EINVAL;

	if (prog->expected_attach_type == BPF_XDP_DEVMAP ||
	    prog->expected_attach_type == BPF_XDP_CPUMAP)
		return -EINVAL;

	ctx = bpf_ctx_init(kattr, sizeof(struct xdp_md));
	if (IS_ERR(ctx))
		return PTR_ERR(ctx);

	if (ctx) {
		/* There can't be user provided data before the meta data */
		if (ctx->data_meta || ctx->data_end != size ||
		    ctx->data > ctx->data_end ||
		    unlikely(xdp_metalen_invalid(ctx->data)))
			goto free_ctx;
		/* Meta data is allocated from the headroom */
		headroom -= ctx->data;
	}

	/* XDP has extra tailroom as (most) drivers use full page */
	max_data_sz = 4096 - headroom - tailroom;

	data = bpf_test_init(kattr, max_data_sz, headroom, tailroom);
	if (IS_ERR(data)) {
		ret = PTR_ERR(data);
		goto free_ctx;
	}

	rxqueue = __netif_get_rx_queue(current->nsproxy->net_ns->loopback_dev, 0);
	xdp_init_buff(&xdp, headroom + max_data_sz + tailroom,
		      &rxqueue->xdp_rxq);
	xdp_prepare_buff(&xdp, data, headroom, size, true);

	ret = xdp_convert_md_to_buff(ctx, &xdp);
	if (ret)
		goto free_data;

	bpf_prog_change_xdp(NULL, prog);
	ret = bpf_test_run(prog, &xdp, repeat, &retval, &duration, true);
	/* We convert the xdp_buff back to an xdp_md before checking the return
	 * code so the reference count of any held netdevice will be decremented
	 * even if the test run failed.
	 */
	xdp_convert_buff_to_md(&xdp, ctx);
	if (ret)
		goto out;

	if (xdp.data_meta != data + headroom ||
	    xdp.data_end != xdp.data_meta + size)
		size = xdp.data_end - xdp.data_meta;

	ret = bpf_test_finish(kattr, uattr, xdp.data_meta, size, retval,
			      duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, ctx,
				     sizeof(struct xdp_md));

out:
	bpf_prog_change_xdp(prog, NULL);
free_data:
	kfree(data);
free_ctx:
	kfree(ctx);
	return ret;
}

static int verify_user_bpf_flow_keys(struct bpf_flow_keys *ctx)
{
	/* make sure the fields we don't use are zeroed */
	if (!range_is_zero(ctx, 0, offsetof(struct bpf_flow_keys, flags)))
		return -EINVAL;

	/* flags is allowed */

	if (!range_is_zero(ctx, offsetofend(struct bpf_flow_keys, flags),
			   sizeof(struct bpf_flow_keys)))
		return -EINVAL;

	return 0;
}

int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
				     const union bpf_attr *kattr,
				     union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	u32 size = kattr->test.data_size_in;
	struct bpf_flow_dissector ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_flow_keys *user_ctx;
	struct bpf_flow_keys flow_keys;
	const struct ethhdr *eth;
	unsigned int flags = 0;
	u32 retval, duration;
	void *data;
	int ret;

	if (prog->type != BPF_PROG_TYPE_FLOW_DISSECTOR)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (size < ETH_HLEN)
		return -EINVAL;

	data = bpf_test_init(kattr, size, 0, 0);
	if (IS_ERR(data))
		return PTR_ERR(data);

	eth = (struct ethhdr *)data;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(struct bpf_flow_keys));
	if (IS_ERR(user_ctx)) {
		kfree(data);
		return PTR_ERR(user_ctx);
	}

	if (user_ctx) {
		ret = verify_user_bpf_flow_keys(user_ctx);
		if (ret)
			goto out;
		flags = user_ctx->flags;
	}

	ctx.flow_keys = &flow_keys;
	ctx.data = data;
	ctx.data_end = (__u8 *)data + size;

	bpf_test_timer_enter(&t);
	do {
		retval = bpf_flow_dissect(prog, &ctx, eth->h_proto, ETH_HLEN,
					  size, flags);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	ret = bpf_test_finish(kattr, uattr, &flow_keys, sizeof(flow_keys),
			      retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx,
				     sizeof(struct bpf_flow_keys));

out:
	kfree(user_ctx);
	kfree(data);
	return ret;
}

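/* Test runner for SK_LOOKUP programs: the user-supplied bpf_sk_lookup context
 * describes a connection tuple, the program runs through the same
 * BPF_PROG_SK_LOOKUP_RUN_ARRAY dispatch as a real lookup, and the cookie of
 * any socket it selects is reported back through the context.
 */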
int bpf_prog_test_run_sk_lookup(struct bpf_prog *prog, const union bpf_attr *kattr,
				union bpf_attr __user *uattr)
{
	struct bpf_test_timer t = { NO_PREEMPT };
	struct bpf_prog_array *progs = NULL;
	struct bpf_sk_lookup_kern ctx = {};
	u32 repeat = kattr->test.repeat;
	struct bpf_sk_lookup *user_ctx;
	u32 retval, duration;
	int ret = -EINVAL;

	if (prog->type != BPF_PROG_TYPE_SK_LOOKUP)
		return -EINVAL;

	if (kattr->test.flags || kattr->test.cpu)
		return -EINVAL;

	if (kattr->test.data_in || kattr->test.data_size_in || kattr->test.data_out ||
	    kattr->test.data_size_out)
		return -EINVAL;

	if (!repeat)
		repeat = 1;

	user_ctx = bpf_ctx_init(kattr, sizeof(*user_ctx));
	if (IS_ERR(user_ctx))
		return PTR_ERR(user_ctx);

	if (!user_ctx)
		return -EINVAL;

	if (user_ctx->sk)
		goto out;

	if (!range_is_zero(user_ctx, offsetofend(typeof(*user_ctx), local_port), sizeof(*user_ctx)))
		goto out;

	if (user_ctx->local_port > U16_MAX || user_ctx->remote_port > U16_MAX) {
		ret = -ERANGE;
		goto out;
	}

	ctx.family = (u16)user_ctx->family;
	ctx.protocol = (u16)user_ctx->protocol;
	ctx.dport = (u16)user_ctx->local_port;
	ctx.sport = (__force __be16)user_ctx->remote_port;

	switch (ctx.family) {
	case AF_INET:
		ctx.v4.daddr = (__force __be32)user_ctx->local_ip4;
		ctx.v4.saddr = (__force __be32)user_ctx->remote_ip4;
		break;

#if IS_ENABLED(CONFIG_IPV6)
	case AF_INET6:
		ctx.v6.daddr = (struct in6_addr *)user_ctx->local_ip6;
		ctx.v6.saddr = (struct in6_addr *)user_ctx->remote_ip6;
		break;
#endif

	default:
		ret = -EAFNOSUPPORT;
		goto out;
	}

	progs = bpf_prog_array_alloc(1, GFP_KERNEL);
	if (!progs) {
		ret = -ENOMEM;
		goto out;
	}

	progs->items[0].prog = prog;

	bpf_test_timer_enter(&t);
	do {
		ctx.selected_sk = NULL;
		retval = BPF_PROG_SK_LOOKUP_RUN_ARRAY(progs, ctx, bpf_prog_run);
	} while (bpf_test_timer_continue(&t, repeat, &ret, &duration));
	bpf_test_timer_leave(&t);

	if (ret < 0)
		goto out;

	user_ctx->cookie = 0;
	if (ctx.selected_sk) {
		if (ctx.selected_sk->sk_reuseport && !ctx.no_reuseport) {
			ret = -EOPNOTSUPP;
			goto out;
		}

		user_ctx->cookie = sock_gen_cookie(ctx.selected_sk);
	}

	ret = bpf_test_finish(kattr, uattr, NULL, 0, retval, duration);
	if (!ret)
		ret = bpf_ctx_finish(kattr, uattr, user_ctx, sizeof(*user_ctx));

out:
	bpf_prog_array_free(progs);
	kfree(user_ctx);
	return ret;
}

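/* Test runner for BPF_PROG_TYPE_SYSCALL programs: the context is an opaque
 * user buffer, the program runs exactly once under rcu_read_lock_trace(),
 * and the (possibly modified) context is copied back in place.
 */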
int bpf_prog_test_run_syscall(struct bpf_prog *prog,
			      const union bpf_attr *kattr,
			      union bpf_attr __user *uattr)
{
	void __user *ctx_in = u64_to_user_ptr(kattr->test.ctx_in);
	__u32 ctx_size_in = kattr->test.ctx_size_in;
	void *ctx = NULL;
	u32 retval;
	int err = 0;

	/* doesn't support data_in/out, ctx_out, duration, repeat or flags */
	if (kattr->test.data_in || kattr->test.data_out ||
	    kattr->test.ctx_out || kattr->test.duration ||
	    kattr->test.repeat || kattr->test.flags)
		return -EINVAL;

	if (ctx_size_in < prog->aux->max_ctx_offset ||
	    ctx_size_in > U16_MAX)
		return -EINVAL;

	if (ctx_size_in) {
		ctx = memdup_user(ctx_in, ctx_size_in);
		if (IS_ERR(ctx))
			return PTR_ERR(ctx);
	}

	rcu_read_lock_trace();
	retval = bpf_prog_run_pin_on_cpu(prog, ctx);
	rcu_read_unlock_trace();

	if (copy_to_user(&uattr->test.retval, &retval, sizeof(u32))) {
		err = -EFAULT;
		goto out;
	}
	if (ctx_size_in)
		if (copy_to_user(ctx_in, ctx, ctx_size_in))
			err = -EFAULT;
out:
	kfree(ctx);
	return err;
}