1 // SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>
#include <unistd.h>
#include <sys/syscall.h>
#include "test_ringbuf_multi.skel.h"
/* Referenced implicitly by the CHECK() macro from test_progs.h to
 * accumulate per-check elapsed time; must exist in every test file
 * that uses CHECK().
 */
static int duration = 0;
16 static int process_sample(void *ctx, void *data, size_t len)
18 int ring = (unsigned long)ctx;
19 struct sample *s = data;
23 CHECK(ring != 1, "sample1_ring", "exp %d, got %d\n", 1, ring);
24 CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
28 CHECK(ring != 2, "sample2_ring", "exp %d, got %d\n", 2, ring);
29 CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
33 CHECK(true, "extra_sample", "unexpected sample seq %d, val %ld\n",
41 void test_ringbuf_multi(void)
43 struct test_ringbuf_multi *skel;
44 struct ring_buffer *ringbuf = NULL;
45 struct ring *ring_old;
48 int page_size = getpagesize();
51 skel = test_ringbuf_multi__open();
52 if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
55 /* validate ringbuf size adjustment logic */
56 ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_before");
57 ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size + 1), "rb1_resize");
58 ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), 2 * page_size, "rb1_size_after");
59 ASSERT_OK(bpf_map__set_max_entries(skel->maps.ringbuf1, page_size), "rb1_reset");
60 ASSERT_EQ(bpf_map__max_entries(skel->maps.ringbuf1), page_size, "rb1_size_final");
62 proto_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, NULL, 0, 0, page_size, NULL);
63 if (CHECK(proto_fd < 0, "bpf_map_create", "bpf_map_create failed\n"))
66 err = bpf_map__set_inner_map_fd(skel->maps.ringbuf_hash, proto_fd);
67 if (CHECK(err != 0, "bpf_map__set_inner_map_fd", "bpf_map__set_inner_map_fd failed\n"))
70 err = test_ringbuf_multi__load(skel);
71 if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
77 /* make sure we can't resize ringbuf after object load */
78 if (!ASSERT_ERR(bpf_map__set_max_entries(skel->maps.ringbuf1, 3 * page_size), "rb1_resize_after_load"))
81 /* only trigger BPF program for current process */
82 skel->bss->pid = getpid();
84 ringbuf = ring_buffer__new(bpf_map__fd(skel->maps.ringbuf1),
85 process_sample, (void *)(long)1, NULL);
86 if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
89 /* verify ring_buffer__ring returns expected results */
90 ring = ring_buffer__ring(ringbuf, 0);
91 if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
94 ring = ring_buffer__ring(ringbuf, 1);
95 ASSERT_ERR_PTR(ring, "ring_buffer__ring_idx_1");
97 err = ring_buffer__add(ringbuf, bpf_map__fd(skel->maps.ringbuf2),
98 process_sample, (void *)(long)2);
99 if (CHECK(err, "ringbuf_add", "failed to add another ring\n"))
102 /* verify adding a new ring didn't invalidate our older pointer */
103 ring = ring_buffer__ring(ringbuf, 0);
104 if (!ASSERT_EQ(ring, ring_old, "ring_buffer__ring_again"))
107 err = test_ringbuf_multi__attach(skel);
108 if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
111 /* trigger few samples, some will be skipped */
112 skel->bss->target_ring = 0;
113 skel->bss->value = 333;
114 syscall(__NR_getpgid);
116 /* skipped, no ringbuf in slot 1 */
117 skel->bss->target_ring = 1;
118 skel->bss->value = 555;
119 syscall(__NR_getpgid);
121 skel->bss->target_ring = 2;
122 skel->bss->value = 777;
123 syscall(__NR_getpgid);
125 /* poll for samples, should get 2 ringbufs back */
126 err = ring_buffer__poll(ringbuf, -1);
127 if (CHECK(err != 2, "poll_res", "expected 2 records, got %d\n", err))
130 /* expect extra polling to return nothing */
131 err = ring_buffer__poll(ringbuf, 0);
132 if (CHECK(err < 0, "extra_samples", "poll result: %d\n", err))
135 CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
136 0L, skel->bss->dropped);
137 CHECK(skel->bss->skipped != 1, "err_skipped", "exp %ld, got %ld\n",
138 1L, skel->bss->skipped);
139 CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
140 2L, skel->bss->total);
145 ring_buffer__free(ringbuf);
146 test_ringbuf_multi__destroy(skel);