// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Cloudflare
#include <netinet/tcp.h>
#include <sys/epoll.h>

#include "test_progs.h"
#include "test_skmsg_load_helpers.skel.h"
#include "test_sockmap_update.skel.h"
#include "test_sockmap_invalid_update.skel.h"
#include "test_sockmap_skb_verdict_attach.skel.h"
#include "test_sockmap_progs_query.skel.h"
#include "test_sockmap_pass_prog.skel.h"
#include "test_sockmap_drop_prog.skel.h"
#include "bpf_iter_sockmap.skel.h"

#include "sockmap_helpers.h"

#define TCP_REPAIR		19	/* TCP sock is under repair right now */

#define TCP_REPAIR_ON		1
#define TCP_REPAIR_OFF_NO_WP	-1	/* Turn off without window probes */

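/* Return a TCP socket "connected" to 127.0.0.1:80. TCP_REPAIR mode lets
 * connect() succeed without a listener or a handshake.
 */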
static int connected_socket_v4(void)
{
	struct sockaddr_in addr = {
		.sin_family = AF_INET,
		.sin_port = htons(80),
		.sin_addr = { inet_addr("127.0.0.1") },
	};
	socklen_t len = sizeof(addr);
	int s, repair, err;

	s = socket(AF_INET, SOCK_STREAM, 0);
	if (!ASSERT_GE(s, 0, "socket"))
		goto error;

	repair = TCP_REPAIR_ON;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	err = connect(s, (struct sockaddr *)&addr, len);
	if (!ASSERT_OK(err, "connect"))
		goto error;

	repair = TCP_REPAIR_OFF_NO_WP;
	err = setsockopt(s, SOL_TCP, TCP_REPAIR, &repair, sizeof(repair));
	if (!ASSERT_OK(err, "setsockopt(TCP_REPAIR)"))
		goto error;

	return s;
error:
	close(s);
	return -1;
}

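/* Verify that src and dst hold the same socket cookies: each index is
 * either missing from both maps or present in both with equal values.
 */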
static void compare_cookies(struct bpf_map *src, struct bpf_map *dst)
{
	__u32 i, max_entries = bpf_map__max_entries(src);
	int err, src_fd, dst_fd;

	src_fd = bpf_map__fd(src);
	dst_fd = bpf_map__fd(dst);

	for (i = 0; i < max_entries; i++) {
		__u64 src_cookie, dst_cookie;

		err = bpf_map_lookup_elem(src_fd, &i, &src_cookie);
		if (err && errno == ENOENT) {
			err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
			ASSERT_ERR(err, "map_lookup_elem(dst)");
			ASSERT_EQ(errno, ENOENT, "map_lookup_elem(dst)");
			continue;
		}
		if (!ASSERT_OK(err, "lookup_elem(src)"))
			continue;

		err = bpf_map_lookup_elem(dst_fd, &i, &dst_cookie);
		if (!ASSERT_OK(err, "lookup_elem(dst)"))
			continue;

		ASSERT_EQ(dst_cookie, src_cookie, "cookie mismatch");
	}
}

/* Create a map, populate it with one socket, and free the map. */
static void test_sockmap_create_update_free(enum bpf_map_type map_type)
{
	const int zero = 0;
	int s, map, err;

	s = connected_socket_v4();
	if (!ASSERT_GE(s, 0, "connected_socket_v4"))
		return;

	map = bpf_map_create(map_type, NULL, sizeof(int), sizeof(int), 1, NULL);
	if (!ASSERT_GE(map, 0, "bpf_map_create"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &s, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update"))
		goto out;
out:
	close(map);
	close(s);
}

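/* Attach and detach a sk_msg verdict program using the legacy
 * bpf_prog_attach()/bpf_prog_detach2() API.
 */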
static void test_skmsg_helpers(enum bpf_map_type map_type)
{
	struct test_skmsg_load_helpers *skel;
	int err, map, verdict;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_msg_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_detach2(verdict, map, BPF_SK_MSG_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_skmsg_load_helpers__destroy(skel);
}

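/* Attach a sk_msg verdict program via bpf_link: duplicate attachments
 * must fail, and bpf_link_update() must only accept a matching old prog.
 */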
static void test_skmsg_helpers_with_link(enum bpf_map_type map_type)
{
	struct bpf_program *prog, *prog_clone, *prog_clone2;
	DECLARE_LIBBPF_OPTS(bpf_link_update_opts, opts);
	struct test_skmsg_load_helpers *skel;
	struct bpf_link *link, *link2;
	int err, map;

	skel = test_skmsg_load_helpers__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_skmsg_load_helpers__open_and_load"))
		return;

	prog = skel->progs.prog_msg_verdict;
	prog_clone = skel->progs.prog_msg_verdict_clone;
	prog_clone2 = skel->progs.prog_msg_verdict_clone2;
	map = bpf_map__fd(skel->maps.sock_map);

	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	/* Fail since bpf_link for the same prog has been created. */
	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_MSG_VERDICT, 0);
	if (!ASSERT_ERR(err, "bpf_prog_attach"))
		goto out;

	/* Fail since bpf_link for the same prog type has been created. */
	link2 = bpf_program__attach_sockmap(prog_clone, map);
	if (!ASSERT_ERR_PTR(link2, "bpf_program__attach_sockmap")) {
		bpf_link__detach(link2);
		goto out;
	}

	err = bpf_link__update_program(link, prog_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog with different type attempts to do update. */
	err = bpf_link__update_program(link, skel->progs.prog_skb_verdict);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	/* Fail since the old prog does not match the one in the kernel. */
	opts.old_prog_fd = bpf_program__fd(prog_clone2);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_ERR(err, "bpf_link_update"))
		goto out;

	opts.old_prog_fd = bpf_program__fd(prog_clone);
	opts.flags = BPF_F_REPLACE;
	err = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), &opts);
	if (!ASSERT_OK(err, "bpf_link_update"))
		goto out;
out:
	bpf_link__detach(link);
	test_skmsg_load_helpers__destroy(skel);
}

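/* Run copy_sock_map via BPF_PROG_TEST_RUN to copy the socket from the
 * src map into a sockmap or sockhash, then compare the cookies.
 */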
static void test_sockmap_update(enum bpf_map_type map_type)
{
	int err, prog, src;
	struct test_sockmap_update *skel;
	struct bpf_map *dst_map;
	const __u32 zero = 0;
	char dummy[14] = {0};
	LIBBPF_OPTS(bpf_test_run_opts, topts,
		.data_in = dummy,
		.data_size_in = sizeof(dummy),
		.repeat = 1,
	);
	__s64 sk;

	sk = connected_socket_v4();
	if (!ASSERT_NEQ(sk, -1, "connected_socket_v4"))
		return;

	skel = test_sockmap_update__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		goto close_sk;

	prog = bpf_program__fd(skel->progs.copy_sock_map);
	src = bpf_map__fd(skel->maps.src);
	if (map_type == BPF_MAP_TYPE_SOCKMAP)
		dst_map = skel->maps.dst_sock_map;
	else
		dst_map = skel->maps.dst_sock_hash;

	err = bpf_map_update_elem(src, &zero, &sk, BPF_NOEXIST);
	if (!ASSERT_OK(err, "update_elem(src)"))
		goto out;

	err = bpf_prog_test_run_opts(prog, &topts);
	if (!ASSERT_OK(err, "test_run"))
		goto out;
	if (!ASSERT_NEQ(topts.retval, 0, "test_run retval"))
		goto out;

	compare_cookies(skel->maps.src, dst_map);

out:
	test_sockmap_update__destroy(skel);
close_sk:
	close(sk);
}

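/* The skeleton updates a sockmap from an unsafe context, so loading is
 * expected to fail.
 */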
static void test_sockmap_invalid_update(void)
{
	struct test_sockmap_invalid_update *skel;

	skel = test_sockmap_invalid_update__open_and_load();
	if (!ASSERT_NULL(skel, "open_and_load"))
		test_sockmap_invalid_update__destroy(skel);
}

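/* Copy a sockmap/sockhash through a bpf_iter program and verify the
 * element count, socket count and cookies of the destination map.
 */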
static void test_sockmap_copy(enum bpf_map_type map_type)
{
	DECLARE_LIBBPF_OPTS(bpf_iter_attach_opts, opts);
	int err, len, src_fd, iter_fd;
	union bpf_iter_link_info linfo = {};
	__u32 i, num_sockets, num_elems;
	struct bpf_iter_sockmap *skel;
	__s64 *sock_fd = NULL;
	struct bpf_link *link;
	struct bpf_map *src;
	char buf[64];

	skel = bpf_iter_sockmap__open_and_load();
	if (!ASSERT_OK_PTR(skel, "bpf_iter_sockmap__open_and_load"))
		return;

	if (map_type == BPF_MAP_TYPE_SOCKMAP) {
		src = skel->maps.sockmap;
		num_elems = bpf_map__max_entries(src);
		num_sockets = num_elems - 1;
	} else {
		src = skel->maps.sockhash;
		num_elems = bpf_map__max_entries(src) - 1;
		num_sockets = num_elems;
	}

	sock_fd = calloc(num_sockets, sizeof(*sock_fd));
	if (!ASSERT_OK_PTR(sock_fd, "calloc(sock_fd)"))
		goto out;

	for (i = 0; i < num_sockets; i++)
		sock_fd[i] = -1;

	src_fd = bpf_map__fd(src);

	for (i = 0; i < num_sockets; i++) {
		sock_fd[i] = connected_socket_v4();
		if (!ASSERT_NEQ(sock_fd[i], -1, "connected_socket_v4"))
			goto out;

		err = bpf_map_update_elem(src_fd, &i, &sock_fd[i], BPF_NOEXIST);
		if (!ASSERT_OK(err, "map_update"))
			goto out;
	}

	linfo.map.map_fd = src_fd;
	opts.link_info = &linfo;
	opts.link_info_len = sizeof(linfo);
	link = bpf_program__attach_iter(skel->progs.copy, &opts);
	if (!ASSERT_OK_PTR(link, "attach_iter"))
		goto out;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (!ASSERT_GE(iter_fd, 0, "create_iter"))
		goto free_link;

	/* do some tests */
	while ((len = read(iter_fd, buf, sizeof(buf))) > 0)
		;
	if (!ASSERT_GE(len, 0, "read"))
		goto close_iter;

	/* test results */
	if (!ASSERT_EQ(skel->bss->elems, num_elems, "elems"))
		goto close_iter;

	if (!ASSERT_EQ(skel->bss->socks, num_sockets, "socks"))
		goto close_iter;

	compare_cookies(src, skel->maps.dst);

close_iter:
	close(iter_fd);
free_link:
	bpf_link__destroy(link);
out:
	for (i = 0; sock_fd && i < num_sockets; i++)
		if (sock_fd[i] >= 0)
			close(sock_fd[i]);
	free(sock_fd);
	bpf_iter_sockmap__destroy(skel);
}

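/* Attaching a second skb verdict program of a different type to the
 * same map must fail with EBUSY.
 */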
static void test_sockmap_skb_verdict_attach(enum bpf_attach_type first,
					    enum bpf_attach_type second)
{
	struct test_sockmap_skb_verdict_attach *skel;
	int err, map, verdict;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map);

	err = bpf_prog_attach(verdict, map, first, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	err = bpf_prog_attach(verdict, map, second, 0);
	ASSERT_EQ(err, -EBUSY, "prog_attach_fail");

	err = bpf_prog_detach2(verdict, map, first);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

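/* Mix bpf_link and bpf_prog_attach() for an skb verdict program; once
 * one attachment exists, creating a link for the same map must fail.
 */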
static void test_sockmap_skb_verdict_attach_with_link(void)
{
	struct test_sockmap_skb_verdict_attach *skel;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	skel = test_sockmap_skb_verdict_attach__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;
	prog = skel->progs.prog_skb_verdict;
	map = bpf_map__fd(skel->maps.sock_map);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	bpf_link__detach(link);

	err = bpf_prog_attach(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	/* Fail since attaching with the same prog/map has been done. */
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_ERR_PTR(link, "bpf_program__attach_sockmap"))
		bpf_link__detach(link);

	err = bpf_prog_detach2(bpf_program__fd(prog), map, BPF_SK_SKB_STREAM_VERDICT);
	if (!ASSERT_OK(err, "bpf_prog_detach2"))
		goto out;
out:
	test_sockmap_skb_verdict_attach__destroy(skel);
}

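/* Return the program ID for the given program fd, or 0 on error. */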
static __u32 query_prog_id(int prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	int err;

	err = bpf_prog_get_info_by_fd(prog_fd, &info, &info_len);
	if (!ASSERT_OK(err, "bpf_prog_get_info_by_fd") ||
	    !ASSERT_EQ(info_len, sizeof(info), "bpf_prog_get_info_by_fd"))
		return 0;

	return info.id;
}

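/* Query attached programs before and after attaching a verdict program
 * and check the reported flags, count and program ID.
 */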
static void test_sockmap_progs_query(enum bpf_attach_type attach_type)
{
	struct test_sockmap_progs_query *skel;
	int err, map_fd, verdict_fd;
	__u32 attach_flags = 0;
	__u32 prog_ids[3] = {};
	__u32 prog_cnt = 3;

	skel = test_sockmap_progs_query__open_and_load();
	if (!ASSERT_OK_PTR(skel, "test_sockmap_progs_query__open_and_load"))
		return;

	map_fd = bpf_map__fd(skel->maps.sock_map);

	if (attach_type == BPF_SK_MSG_VERDICT)
		verdict_fd = bpf_program__fd(skel->progs.prog_skmsg_verdict);
	else
		verdict_fd = bpf_program__fd(skel->progs.prog_skb_verdict);

	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 0, "wrong program count on query");

	err = bpf_prog_attach(verdict_fd, map_fd, attach_type, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach failed"))
		goto out;

	prog_cnt = 1;
	err = bpf_prog_query(map_fd, attach_type, 0 /* query flags */,
			     &attach_flags, prog_ids, &prog_cnt);
	ASSERT_OK(err, "bpf_prog_query failed");
	ASSERT_EQ(attach_flags, 0, "wrong attach_flags on query");
	ASSERT_EQ(prog_cnt, 1, "wrong program count on query");
	ASSERT_EQ(prog_ids[0], query_prog_id(verdict_fd),
		  "wrong prog_ids on query");

	bpf_prog_detach2(verdict_fd, map_fd, attach_type);
out:
	test_sockmap_progs_query__destroy(skel);
}

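/* A receiver held in a sockmap must wake up from epoll_wait() and see
 * recv() return 0 once the peer shuts down its write side.
 */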
#define MAX_EVENTS 10
static void test_sockmap_skb_verdict_shutdown(void)
{
	struct epoll_event ev, events[MAX_EVENTS];
	int n, err, map, verdict, s, c1 = -1, p1 = -1;
	struct test_sockmap_pass_prog *skel;
	int epollfd;
	int zero = 0;
	char b;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	verdict = bpf_program__fd(skel->progs.prog_skb_verdict);
	map = bpf_map__fd(skel->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	s = socket_loopback(AF_INET, SOCK_STREAM);
	if (s < 0)
		goto out;
	err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
	if (err < 0)
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (err < 0)
		goto out_close;

	shutdown(p1, SHUT_WR);

	ev.events = EPOLLIN;
	ev.data.fd = c1;

	epollfd = epoll_create1(0);
	if (!ASSERT_GT(epollfd, -1, "epoll_create(0)"))
		goto out_close;
	err = epoll_ctl(epollfd, EPOLL_CTL_ADD, c1, &ev);
	if (!ASSERT_OK(err, "epoll_ctl(EPOLL_CTL_ADD)"))
		goto out_close;
	err = epoll_wait(epollfd, events, MAX_EVENTS, -1);
	if (!ASSERT_EQ(err, 1, "epoll_wait(fd)"))
		goto out_close;

	n = recv(c1, &b, 1, SOCK_NONBLOCK);
	ASSERT_EQ(n, 0, "recv_timeout(fin)");
out_close:
	close(c1);
	close(p1);
out:
	test_sockmap_pass_prog__destroy(skel);
}

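/* FIONREAD on the sockmap receiver must report the full payload with a
 * PASS verdict program and zero with a DROP program.
 */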
static void test_sockmap_skb_verdict_fionread(bool pass_prog)
{
	int expected, zero = 0, sent, recvd, avail;
	int err, map, verdict, s, c0 = -1, c1 = -1, p0 = -1, p1 = -1;
	struct test_sockmap_pass_prog *pass = NULL;
	struct test_sockmap_drop_prog *drop = NULL;
	char buf[256] = "0123456789";

	if (pass_prog) {
		pass = test_sockmap_pass_prog__open_and_load();
		if (!ASSERT_OK_PTR(pass, "open_and_load"))
			return;
		verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
		map = bpf_map__fd(pass->maps.sock_map_rx);
		expected = sizeof(buf);
	} else {
		drop = test_sockmap_drop_prog__open_and_load();
		if (!ASSERT_OK_PTR(drop, "open_and_load"))
			return;
		verdict = bpf_program__fd(drop->progs.prog_skb_verdict);
		map = bpf_map__fd(drop->maps.sock_map_rx);
		/* On drop data is consumed immediately and copied_seq inc'd */
		expected = 0;
	}

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	s = socket_loopback(AF_INET, SOCK_STREAM);
	if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
		goto out;
	err = create_socket_pairs(s, AF_INET, SOCK_STREAM, &c0, &c1, &p0, &p1);
	if (!ASSERT_OK(err, "create_socket_pairs(s)"))
		goto out;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, &buf, sizeof(buf), 0);
	ASSERT_EQ(sent, sizeof(buf), "xsend(p0)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, expected, "ioctl(FIONREAD)");
	/* On DROP test there will be no data to read */
	if (pass_prog) {
		recvd = recv_timeout(c1, &buf, sizeof(buf), SOCK_NONBLOCK, IO_TIMEOUT_SEC);
		ASSERT_EQ(recvd, sizeof(buf), "recv_timeout(c0)");
	}

out_close:
	close(c0);
	close(c1);
	close(p0);
	close(p1);
out:
	if (pass_prog)
		test_sockmap_pass_prog__destroy(pass);
	else
		test_sockmap_drop_prog__destroy(drop);
}

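/* recv(MSG_PEEK) must leave the queued data (and FIONREAD) untouched,
 * while a normal recv() drains it to zero.
 */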
static void test_sockmap_skb_verdict_peek_helper(int map)
{
	int err, s, c1, p1, zero = 0, sent, recvd, avail;
	char snd[256] = "0123456789";
	char rcv[256] = "0";

	s = socket_loopback(AF_INET, SOCK_STREAM);
	if (!ASSERT_GT(s, -1, "socket_loopback(s)"))
		return;

	err = create_pair(s, AF_INET, SOCK_STREAM, &c1, &p1);
	if (!ASSERT_OK(err, "create_pairs(s)"))
		return;

	err = bpf_map_update_elem(map, &zero, &c1, BPF_NOEXIST);
	if (!ASSERT_OK(err, "bpf_map_update_elem(c1)"))
		goto out_close;

	sent = xsend(p1, snd, sizeof(snd), 0);
	ASSERT_EQ(sent, sizeof(snd), "xsend(p1)");
	recvd = recv(c1, rcv, sizeof(rcv), MSG_PEEK);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(c1)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, sizeof(snd), "after peek ioctl(FIONREAD)");
	recvd = recv(c1, rcv, sizeof(rcv), 0);
	ASSERT_EQ(recvd, sizeof(rcv), "recv(p0)");
	err = ioctl(c1, FIONREAD, &avail);
	ASSERT_OK(err, "ioctl(FIONREAD) error");
	ASSERT_EQ(avail, 0, "after read ioctl(FIONREAD)");

out_close:
	close(c1);
	close(p1);
}

static void test_sockmap_skb_verdict_peek(void)
{
	struct test_sockmap_pass_prog *pass;
	int err, map, verdict;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	verdict = bpf_program__fd(pass->progs.prog_skb_verdict);
	map = bpf_map__fd(pass->maps.sock_map_rx);

	err = bpf_prog_attach(verdict, map, BPF_SK_SKB_STREAM_VERDICT, 0);
	if (!ASSERT_OK(err, "bpf_prog_attach"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);

out:
	test_sockmap_pass_prog__destroy(pass);
}

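/* Same MSG_PEEK checks, but with the verdict program attached via a
 * bpf_link and replaced by its clone through bpf_link__update_program().
 */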
static void test_sockmap_skb_verdict_peek_with_link(void)
{
	struct test_sockmap_pass_prog *pass;
	struct bpf_program *prog;
	struct bpf_link *link;
	int err, map;

	pass = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(pass, "open_and_load"))
		return;
	prog = pass->progs.prog_skb_verdict;
	map = bpf_map__fd(pass->maps.sock_map_rx);
	link = bpf_program__attach_sockmap(prog, map);
	if (!ASSERT_OK_PTR(link, "bpf_program__attach_sockmap"))
		goto out;

	err = bpf_link__update_program(link, pass->progs.prog_skb_verdict_clone);
	if (!ASSERT_OK(err, "bpf_link__update_program"))
		goto out;

	/* Fail since a prog with different attach type attempts to do update. */
	err = bpf_link__update_program(link, pass->progs.prog_skb_parser);
	if (!ASSERT_ERR(err, "bpf_link__update_program"))
		goto out;

	test_sockmap_skb_verdict_peek_helper(map);
	ASSERT_EQ(pass->bss->clone_called, 1, "clone_called");
out:
	bpf_link__detach(link);
	test_sockmap_pass_prog__destroy(pass);
}

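/* Unconnected AF_UNIX sockets: SOCK_STREAM must be rejected by the map
 * update, SOCK_DGRAM must be accepted.
 */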
static void test_sockmap_unconnected_unix(void)
{
	int err, map, stream = 0, dgram = 0, zero = 0;
	struct test_sockmap_pass_prog *skel;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	stream = xsocket(AF_UNIX, SOCK_STREAM, 0);
	if (stream < 0)
		return;

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		close(stream);
		return;
	}

	err = bpf_map_update_elem(map, &zero, &stream, BPF_ANY);
	ASSERT_ERR(err, "bpf_map_update_elem(stream)");

	err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
	ASSERT_OK(err, "bpf_map_update_elem(dgram)");

	close(stream);
	close(dgram);
}

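/* Insert the same sockets into several entries of one map, then delete
 * all entries again.
 */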
static void test_sockmap_many_socket(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map, &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map, &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

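/* Insert the same sockets into two different maps, then delete the
 * entries from both.
 */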
static void test_sockmap_many_maps(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map[2], entry = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map[0] = bpf_map__fd(skel->maps.sock_map_rx);
	map[1] = bpf_map__fd(skel->maps.sock_map_tx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++, entry++) {
		err = bpf_map_update_elem(map[i], &entry, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}
	for (entry--; entry >= 0; entry--) {
		err = bpf_map_delete_elem(map[1], &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
		err = bpf_map_delete_elem(map[0], &entry);
		ASSERT_OK(err, "bpf_map_delete_elem(entry)");
	}

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

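/* Repeatedly replace the same map slot with sockets of different types,
 * then delete it once.
 */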
static void test_sockmap_same_sock(void)
{
	struct test_sockmap_pass_prog *skel;
	int stream[2], dgram, udp, tcp;
	int i, err, map, zero = 0;

	skel = test_sockmap_pass_prog__open_and_load();
	if (!ASSERT_OK_PTR(skel, "open_and_load"))
		return;

	map = bpf_map__fd(skel->maps.sock_map_rx);

	dgram = xsocket(AF_UNIX, SOCK_DGRAM, 0);
	if (dgram < 0) {
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	tcp = connected_socket_v4();
	if (!ASSERT_GE(tcp, 0, "connected_socket_v4")) {
		close(dgram);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	udp = xsocket(AF_INET, SOCK_DGRAM | SOCK_NONBLOCK, 0);
	if (udp < 0) {
		close(dgram);
		close(tcp);
		test_sockmap_pass_prog__destroy(skel);
		return;
	}

	err = socketpair(AF_UNIX, SOCK_STREAM, 0, stream);
	ASSERT_OK(err, "socketpair(af_unix, sock_stream)");
	if (err)
		goto out;

	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &stream[0], BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(stream)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &dgram, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(dgram)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &udp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(udp)");
	}
	for (i = 0; i < 2; i++) {
		err = bpf_map_update_elem(map, &zero, &tcp, BPF_ANY);
		ASSERT_OK(err, "bpf_map_update_elem(tcp)");
	}

	err = bpf_map_delete_elem(map, &zero);
	ASSERT_OK(err, "bpf_map_delete_elem(entry)");

	close(stream[0]);
	close(stream[1]);
out:
	close(dgram);
	close(tcp);
	close(udp);
	test_sockmap_pass_prog__destroy(skel);
}

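/* prog_tests entry point: run each subtest above when selected. */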
void test_sockmap_basic(void)
{
	if (test__start_subtest("sockmap create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash create_update_free"))
		test_sockmap_create_update_free(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg load helpers"))
		test_skmsg_helpers(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash update"))
		test_sockmap_update(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap update in unsafe context"))
		test_sockmap_invalid_update();
	if (test__start_subtest("sockmap copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash copy"))
		test_sockmap_copy(BPF_MAP_TYPE_SOCKHASH);
	if (test__start_subtest("sockmap skb_verdict attach")) {
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_VERDICT,
						BPF_SK_SKB_STREAM_VERDICT);
		test_sockmap_skb_verdict_attach(BPF_SK_SKB_STREAM_VERDICT,
						BPF_SK_SKB_VERDICT);
	}
	if (test__start_subtest("sockmap skb_verdict attach_with_link"))
		test_sockmap_skb_verdict_attach_with_link();
	if (test__start_subtest("sockmap msg_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_MSG_VERDICT);
	if (test__start_subtest("sockmap stream_parser progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_PARSER);
	if (test__start_subtest("sockmap stream_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_STREAM_VERDICT);
	if (test__start_subtest("sockmap skb_verdict progs query"))
		test_sockmap_progs_query(BPF_SK_SKB_VERDICT);
	if (test__start_subtest("sockmap skb_verdict shutdown"))
		test_sockmap_skb_verdict_shutdown();
	if (test__start_subtest("sockmap skb_verdict fionread"))
		test_sockmap_skb_verdict_fionread(true);
	if (test__start_subtest("sockmap skb_verdict fionread on drop"))
		test_sockmap_skb_verdict_fionread(false);
	if (test__start_subtest("sockmap skb_verdict msg_f_peek"))
		test_sockmap_skb_verdict_peek();
	if (test__start_subtest("sockmap skb_verdict msg_f_peek with link"))
		test_sockmap_skb_verdict_peek_with_link();
	if (test__start_subtest("sockmap unconnected af_unix"))
		test_sockmap_unconnected_unix();
	if (test__start_subtest("sockmap one socket to many map entries"))
		test_sockmap_many_socket();
	if (test__start_subtest("sockmap one socket to many maps"))
		test_sockmap_many_maps();
	if (test__start_subtest("sockmap same socket replace"))
		test_sockmap_same_sock();
	if (test__start_subtest("sockmap sk_msg attach sockmap helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKMAP);
	if (test__start_subtest("sockhash sk_msg attach sockhash helpers with link"))
		test_skmsg_helpers_with_link(BPF_MAP_TYPE_SOCKHASH);
}