// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <linux/compiler.h>
#include <asm/barrier.h>
#include <test_progs.h>
#include <unistd.h>
#include <sys/mman.h>
#include <pthread.h>
#include <sys/sysinfo.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>

#include "test_ringbuf.lskel.h"
#include "test_ringbuf_n.lskel.h"
#include "test_ringbuf_map_key.lskel.h"
#include "test_ringbuf_write.lskel.h"

/* -EDONE is returned from the sample callback to stop polling early; any
 * distinct value outside the normal errno range works here.
 */
#define EDONE 7777

static int duration = 0;

/* must match the layout of the sample emitted by the BPF side */
struct sample {
	int pid;
	int seq;
	long value;
	char comm[16];
};

static int sample_cnt;
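
/* sample_cnt is bumped from the ringbuf callback, which may run on the
 * background poll thread, and read/reset from the main thread, hence the
 * atomic helpers below.
 */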
static void atomic_inc(int *cnt)
{
	__atomic_add_fetch(cnt, 1, __ATOMIC_SEQ_CST);
}

static int atomic_xchg(int *cnt, int val)
{
	return __atomic_exchange_n(cnt, val, __ATOMIC_SEQ_CST);
}
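
/* Consumer callback for the main subtest: validate the two expected samples
 * and return -EDONE after the second one so that ring_buffer__poll() stops.
 */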
static int process_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	atomic_inc(&sample_cnt);

	switch (s->seq) {
	case 0:
		CHECK(s->value != 333, "sample1_value", "exp %ld, got %ld\n",
		      333L, s->value);
		return 0;
	case 1:
		CHECK(s->value != 777, "sample2_value", "exp %ld, got %ld\n",
		      777L, s->value);
		return -EDONE;
	default:
		/* we don't care about the rest */
		return 0;
	}
}
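
/* skel and ringbuf are file-scope so that poll_thread() and the sample
 * callbacks can reach them without a context argument.
 */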
static struct test_ringbuf_map_key_lskel *skel_map_key;
static struct test_ringbuf_lskel *skel;
static struct ring_buffer *ringbuf;

static void trigger_samples()
{
	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	/* trigger exactly two samples */
	skel->bss->value = 333;
	syscall(__NR_getpgid);
	skel->bss->value = 777;
	syscall(__NR_getpgid);
}

static void *poll_thread(void *input)
{
	long timeout = (long)input;

	return (void *)(long)ring_buffer__poll(ringbuf, timeout);
}
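
/* Write a bogus consumer position through the read/write consumer page and
 * verify it cannot be used to overrun the ring: both getpgid-driven
 * reservations are expected to be rejected (discarded == 2, passed == 0).
 */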
static void ringbuf_write_subtest(void)
{
	struct test_ringbuf_write_lskel *skel;
	int page_size = getpagesize();
	size_t *mmap_ptr;
	int err, rb_fd;

	skel = test_ringbuf_write_lskel__open();
	if (!ASSERT_OK_PTR(skel, "skel_open"))
		return;

	skel->maps.ringbuf.max_entries = 0x4000;

	err = test_ringbuf_write_lskel__load(skel);
	if (!ASSERT_OK(err, "skel_load"))
		goto cleanup;

	rb_fd = skel->maps.ringbuf.map_fd;

	/* poison the consumer position with a bogus value past the producer */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos"))
		goto cleanup;
	*mmap_ptr = 0x3000;
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(rb_fd, process_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ringbuf_new"))
		goto cleanup;

	err = test_ringbuf_write_lskel__attach(skel);
	if (!ASSERT_OK(err, "skel_attach"))
		goto cleanup_ringbuf;

	skel->bss->discarded = 0;
	skel->bss->passed = 0;

	/* trigger exactly two samples */
	syscall(__NR_getpgid);
	syscall(__NR_getpgid);

	ASSERT_EQ(skel->bss->discarded, 2, "discarded");
	ASSERT_EQ(skel->bss->passed, 0, "passed");

	test_ringbuf_write_lskel__detach(skel);
cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_write_lskel__destroy(skel);
}
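
/* Main subtest: check mmap()/mprotect()/mremap() permissions on the ringbuf
 * pages, then exercise polling, consuming and the no-wakeup/force-wakeup
 * notification modes.
 */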
static void ringbuf_subtest(void)
{
	const size_t rec_sz = BPF_RINGBUF_HDR_SZ + sizeof(struct sample);
	pthread_t thread;
	long bg_ret = -1;
	int err, cnt, rb_fd;
	int page_size = getpagesize();
	void *mmap_ptr, *tmp_ptr;
	struct ring *ring;
	int map_fd;
	unsigned long avail_data, ring_size, cons_pos, prod_pos;

	skel = test_ringbuf_lskel__open();
	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	skel->maps.ringbuf.max_entries = page_size;

	err = test_ringbuf_lskel__load(skel);
	if (CHECK(err != 0, "skel_load", "skeleton load failed\n"))
		goto cleanup;

	rb_fd = skel->maps.ringbuf.map_fd;
	/* good read/write cons_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, rb_fd, 0);
	ASSERT_OK_PTR(mmap_ptr, "rw_cons_pos");
	tmp_ptr = mremap(mmap_ptr, page_size, 2 * page_size, MREMAP_MAYMOVE);
	if (!ASSERT_ERR_PTR(tmp_ptr, "rw_extend"))
		goto cleanup;
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw");

	/* bad writeable prod_pos */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_prod_pos");
	ASSERT_EQ(err, -EPERM, "wr_prod_pos_err");

	/* bad writeable data pages */
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	err = -errno;
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_one");
	ASSERT_EQ(err, -EPERM, "wr_data_page_one_err");
	mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 3 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_two");
	mmap_ptr = mmap(NULL, 2 * page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
	ASSERT_ERR_PTR(mmap_ptr, "wr_data_page_all");

	/* good read-only pages */
	mmap_ptr = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, 4 * page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, 4 * page_size), "unmap_ro");

	/* good read-only pages with initial offset */
	mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, page_size);
	if (!ASSERT_OK_PTR(mmap_ptr, "ro_prod_pos"))
		goto cleanup;

	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_protect");
	ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_protect");
	ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 3 * page_size, MREMAP_MAYMOVE), "ro_remap");
	ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro");

	/* only trigger BPF program for current process */
	skel->bss->pid = getpid();

	ringbuf = ring_buffer__new(skel->maps.ringbuf.map_fd,
				   process_sample, NULL, NULL);
	if (CHECK(!ringbuf, "ringbuf_create", "failed to create ringbuf\n"))
		goto cleanup;

	err = test_ringbuf_lskel__attach(skel);
	if (CHECK(err, "skel_attach", "skeleton attachment failed: %d\n", err))
		goto cleanup;

	trigger_samples();

	ring = ring_buffer__ring(ringbuf, 0);
	if (!ASSERT_OK_PTR(ring, "ring_buffer__ring_idx_0"))
		goto cleanup;

	map_fd = ring__map_fd(ring);
	ASSERT_EQ(map_fd, skel->maps.ringbuf.map_fd, "ring_map_fd");

	/* 2 submitted + 1 discarded records */
	CHECK(skel->bss->avail_data != 3 * rec_sz,
	      "err_avail_size", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->avail_data);
	CHECK(skel->bss->ring_size != page_size,
	      "err_ring_size", "exp %ld, got %ld\n",
	      (long)page_size, skel->bss->ring_size);
	CHECK(skel->bss->cons_pos != 0,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      0L, skel->bss->cons_pos);
	CHECK(skel->bss->prod_pos != 3 * rec_sz,
	      "err_prod_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->prod_pos);

	/* verify getting this data directly via the ring object yields the same
	 * results
	 */
	avail_data = ring__avail_data_size(ring);
	ASSERT_EQ(avail_data, 3 * rec_sz, "ring_avail_size");
	ring_size = ring__size(ring);
	ASSERT_EQ(ring_size, page_size, "ring_ring_size");
	cons_pos = ring__consumer_pos(ring);
	ASSERT_EQ(cons_pos, 0, "ring_cons_pos");
	prod_pos = ring__producer_pos(ring);
	ASSERT_EQ(prod_pos, 3 * rec_sz, "ring_prod_pos");

	/* poll for samples */
	err = ring_buffer__poll(ringbuf, -1);

	/* -EDONE is used as an indicator that we are done */
	if (CHECK(err != -EDONE, "err_done", "done err: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* we expect extra polling to return nothing */
	err = ring_buffer__poll(ringbuf, 0);
	if (CHECK(err != 0, "extra_samples", "poll result: %d\n", err))
		goto cleanup;
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	/* now validate consumer position is updated and returned */
	trigger_samples();
	CHECK(skel->bss->cons_pos != 3 * rec_sz,
	      "err_cons_pos", "exp %ld, got %ld\n",
	      3L * rec_sz, skel->bss->cons_pos);
	err = ring_buffer__poll(ringbuf, -1);
	CHECK(err <= 0, "poll_err", "err %d\n", err);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 2, "cnt", "exp %d samples, got %d\n", 2, cnt);

	/* start poll in background w/ long timeout */
	err = pthread_create(&thread, NULL, poll_thread, (void *)(long)10000);
	if (CHECK(err, "bg_poll", "pthread_create failed: %d\n", err))
		goto cleanup;

	/* turn off notifications now */
	skel->bss->flags = BPF_RB_NO_WAKEUP;

	/* give background thread a bit of a time */
	usleep(50000);
	trigger_samples();
	/* sleeping arbitrarily is bad, but no better way to know that
	 * epoll_wait() **DID NOT** unblock in background thread
	 */
	usleep(50000);
	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	/* clear flags to return to "adaptive" notification mode */
	skel->bss->flags = 0;

	/* produce new samples, no notification should be triggered, because
	 * consumer is now behind
	 */
	trigger_samples();

	/* background poll should still be blocked */
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err != EBUSY, "try_join", "err %d\n", err))
		goto cleanup;

	/* still no samples, because consumer is behind */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 0, "cnt", "exp %d samples, got %d\n", 0, cnt);

	skel->bss->dropped = 0;
	skel->bss->total = 0;
	skel->bss->discarded = 0;

	skel->bss->value = 333;
	syscall(__NR_getpgid);
	/* now force notifications */
	skel->bss->flags = BPF_RB_FORCE_WAKEUP;
	skel->bss->value = 777;
	syscall(__NR_getpgid);

	/* now we should get a pending notification */
	usleep(50000);
	err = pthread_tryjoin_np(thread, (void **)&bg_ret);
	if (CHECK(err, "join_bg", "err %d\n", err))
		goto cleanup;

	if (CHECK(bg_ret <= 0, "bg_ret", "epoll_wait result: %ld", bg_ret))
		goto cleanup;

	/* due to timing variations, there could still be non-notified
	 * samples, so consume them here to collect all the samples
	 */
	err = ring_buffer__consume(ringbuf);
	CHECK(err < 0, "rb_consume", "failed: %d\n", err);

	/* also consume using ring__consume to make sure it works the same */
	err = ring__consume(ring);
	ASSERT_GE(err, 0, "ring_consume");

	/* 3 rounds, 2 samples each */
	cnt = atomic_xchg(&sample_cnt, 0);
	CHECK(cnt != 6, "cnt", "exp %d samples, got %d\n", 6, cnt);

	/* BPF side did everything right */
	CHECK(skel->bss->dropped != 0, "err_dropped", "exp %ld, got %ld\n",
	      0L, skel->bss->dropped);
	CHECK(skel->bss->total != 2, "err_total", "exp %ld, got %ld\n",
	      2L, skel->bss->total);
	CHECK(skel->bss->discarded != 1, "err_discarded", "exp %ld, got %ld\n",
	      1L, skel->bss->discarded);

	test_ringbuf_lskel__detach(skel);
cleanup:
	ring_buffer__free(ringbuf);
	test_ringbuf_lskel__destroy(skel);
}

/*
 * Test ring_buffer__consume_n() by producing N_TOT_SAMPLES samples in the ring
 * buffer, via getpgid(), and consuming them in chunks of N_SAMPLES.
 */
#define N_TOT_SAMPLES 32
/* batch size for ring_buffer__consume_n(); must divide N_TOT_SAMPLES evenly */
#define N_SAMPLES 4

/* Sample value to verify the callback validity */
#define SAMPLE_VALUE 42L
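
/* Callback for the ringbuf_n subtest: every sample must carry SAMPLE_VALUE */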
static int process_n_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;

	ASSERT_EQ(s->value, SAMPLE_VALUE, "sample_value");

	return 0;
}

static void ringbuf_n_subtest(void)
{
	struct test_ringbuf_n_lskel *skel_n;
	int err, i;

	skel_n = test_ringbuf_n_lskel__open();
	if (!ASSERT_OK_PTR(skel_n, "test_ringbuf_n_lskel__open"))
		return;

	skel_n->maps.ringbuf.max_entries = getpagesize();
	skel_n->bss->pid = getpid();

	err = test_ringbuf_n_lskel__load(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__load"))
		goto cleanup;

	ringbuf = ring_buffer__new(skel_n->maps.ringbuf.map_fd,
				   process_n_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_n_lskel__attach(skel_n);
	if (!ASSERT_OK(err, "test_ringbuf_n_lskel__attach"))
		goto cleanup_ringbuf;

	/* Produce N_TOT_SAMPLES samples in the ring buffer by calling getpgid() */
	skel_n->bss->value = SAMPLE_VALUE;
	for (i = 0; i < N_TOT_SAMPLES; i++)
		syscall(__NR_getpgid);

	/* Consume all samples from the ring buffer in batches of N_SAMPLES */
	for (i = 0; i < N_TOT_SAMPLES; i += err) {
		err = ring_buffer__consume_n(ringbuf, N_SAMPLES);
		if (!ASSERT_EQ(err, N_SAMPLES, "rb_consume"))
			goto cleanup_ringbuf;
	}

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_n_lskel__destroy(skel_n);
}
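
/* Callback for the ringbuf_map_key subtest: the BPF program uses the ringbuf
 * sample itself as a key into hash_map, so looking it up from here must
 * succeed and yield the value the BPF program stored.
 */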
static int process_map_key_sample(void *ctx, void *data, size_t len)
{
	struct sample *s = data;
	int err, val;

	ASSERT_EQ(s->value, 42, "sample_value");
	err = bpf_map_lookup_elem(skel_map_key->maps.hash_map.map_fd,
				  s, &val);
	ASSERT_OK(err, "hash_map bpf_map_lookup_elem");
	ASSERT_EQ(val, 1, "hash_map val");

	return -EDONE;
}
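
/* A single getpgid() makes the BPF program emit one sample and record it in
 * hash_map; ring_buffer__poll() then returns -EDONE from the callback above.
 */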
static void ringbuf_map_key_subtest(void)
{
	int err;

	skel_map_key = test_ringbuf_map_key_lskel__open();
	if (!ASSERT_OK_PTR(skel_map_key, "test_ringbuf_map_key_lskel__open"))
		return;

	skel_map_key->maps.ringbuf.max_entries = getpagesize();
	skel_map_key->bss->pid = getpid();

	err = test_ringbuf_map_key_lskel__load(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__load"))
		goto cleanup;

	ringbuf = ring_buffer__new(skel_map_key->maps.ringbuf.map_fd,
				   process_map_key_sample, NULL, NULL);
	if (!ASSERT_OK_PTR(ringbuf, "ring_buffer__new"))
		goto cleanup;

	err = test_ringbuf_map_key_lskel__attach(skel_map_key);
	if (!ASSERT_OK(err, "test_ringbuf_map_key_lskel__attach"))
		goto cleanup_ringbuf;

	syscall(__NR_getpgid);
	ASSERT_EQ(skel_map_key->bss->seq, 1, "skel_map_key->bss->seq");
	err = ring_buffer__poll(ringbuf, -1);
	ASSERT_EQ(err, -EDONE, "ring_buffer__poll");

cleanup_ringbuf:
	ring_buffer__free(ringbuf);
cleanup:
	test_ringbuf_map_key_lskel__destroy(skel_map_key);
}

void test_ringbuf(void)
{
	if (test__start_subtest("ringbuf"))
		ringbuf_subtest();
	if (test__start_subtest("ringbuf_n"))
		ringbuf_n_subtest();
	if (test__start_subtest("ringbuf_map_key"))
		ringbuf_map_key_subtest();
	if (test__start_subtest("ringbuf_write"))
		ringbuf_write_subtest();
}