// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2022 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <linux/compiler.h>
#include <linux/ring_buffer.h>
#include <linux/build_bug.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/sysinfo.h>
#include <test_progs.h>
#include <uapi/linux/bpf.h>
#include <unistd.h>

#include "user_ringbuf_fail.skel.h"
#include "user_ringbuf_success.skel.h"

#include "../progs/test_user_ringbuf.h"

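/* Each reserved sample is preceded by a BPF_RINGBUF_HDR_SZ record header,
 * so c_max_entries is the number of complete samples that fit in the
 * single-page buffer.
 */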
static const long c_sample_size = sizeof(struct sample) + BPF_RINGBUF_HDR_SZ;
static const long c_ringbuf_size = 1 << 12; /* 1 small page */
static const long c_max_entries = c_ringbuf_size / c_sample_size;

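/* The BPF side hooks this syscall (see progs/user_ringbuf_success.c), so
 * invoking it from user space triggers bpf_user_ringbuf_drain() on any
 * pending samples.
 */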
static void drain_current_samples(void)
{
        syscall(__NR_getpgid);
}

static int write_samples(struct user_ring_buffer *ringbuf, uint32_t num_samples)
{
        int i, err = 0;

        /* Write some number of samples to the ring buffer. */
        for (i = 0; i < num_samples; i++) {
                struct sample *entry;
                int read;

                entry = user_ring_buffer__reserve(ringbuf, sizeof(*entry));
                if (!entry) {
                        err = -errno;
                        goto done;
                }

                entry->pid = getpid();
                entry->seq = i;
                entry->value = i * i;

                read = snprintf(entry->comm, sizeof(entry->comm), "%u", i);
                if (read <= 0) {
                        /* Assert on the error path to avoid spamming logs with
                         * mostly success messages.
                         */
                        ASSERT_GT(read, 0, "snprintf_comm");
                        err = read;
                        user_ring_buffer__discard(ringbuf, entry);
                        goto done;
                }

                user_ring_buffer__submit(ringbuf, entry);
        }

done:
        drain_current_samples();

        return err;
}

static struct user_ringbuf_success *open_load_ringbuf_skel(void)
{
        struct user_ringbuf_success *skel;
        int err;

        skel = user_ringbuf_success__open();
        if (!ASSERT_OK_PTR(skel, "skel_open"))
                return NULL;

        err = bpf_map__set_max_entries(skel->maps.user_ringbuf, c_ringbuf_size);
        if (!ASSERT_OK(err, "set_max_entries"))
                goto cleanup;

        err = bpf_map__set_max_entries(skel->maps.kernel_ringbuf, c_ringbuf_size);
        if (!ASSERT_OK(err, "set_max_entries"))
                goto cleanup;

        err = user_ringbuf_success__load(skel);
        if (!ASSERT_OK(err, "skel_load"))
                goto cleanup;

        return skel;

cleanup:
        user_ringbuf_success__destroy(skel);
        return NULL;
}

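/* Verify the protection and remapping rules for each page of the user ring
 * buffer mapping: the consumer page, the producer page, and the data pages.
 */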
static void test_user_ringbuf_mappings(void)
{
        int err, rb_fd;
        int page_size = getpagesize();
        void *mmap_ptr;
        struct user_ringbuf_success *skel;

        skel = open_load_ringbuf_skel();
        if (!skel)
                return;

        rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
        /* cons_pos can be mapped R/O; it can't be made writable or executable
         * with mprotect(), nor grown with mremap().
         */
        mmap_ptr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, rb_fd, 0);
        ASSERT_OK_PTR(mmap_ptr, "ro_cons_pos");
        ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_WRITE), "write_cons_pos_protect");
        ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_cons_pos_protect");
        ASSERT_ERR_PTR(mremap(mmap_ptr, 0, 4 * page_size, MREMAP_MAYMOVE), "mremap_cons_pos");
        err = -errno;
        ASSERT_ERR(err, "mremap_cons_pos_err");
        ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_ro_cons");

        /* prod_pos can be mapped RW, can't add +X with mprotect. */
        mmap_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
                        rb_fd, page_size);
        ASSERT_OK_PTR(mmap_ptr, "rw_prod_pos");
        ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_prod_pos_protect");
        err = -errno;
        ASSERT_ERR(err, "exec_prod_pos_err");
        ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_prod");

        /* data pages can be mapped RW, can't add +X with mprotect. */
        mmap_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd,
                        2 * page_size);
        ASSERT_OK_PTR(mmap_ptr, "rw_data");
        ASSERT_ERR(mprotect(mmap_ptr, page_size, PROT_EXEC), "exec_data_protect");
        err = -errno;
        ASSERT_ERR(err, "exec_data_err");
        ASSERT_OK(munmap(mmap_ptr, page_size), "unmap_rw_data");

        user_ringbuf_success__destroy(skel);
}

static int load_skel_create_ringbufs(struct user_ringbuf_success **skel_out,
                                     struct ring_buffer **kern_ringbuf_out,
                                     ring_buffer_sample_fn callback,
                                     struct user_ring_buffer **user_ringbuf_out)
{
        struct user_ringbuf_success *skel;
        struct ring_buffer *kern_ringbuf = NULL;
        struct user_ring_buffer *user_ringbuf = NULL;
        int err = -ENOMEM, rb_fd;

        skel = open_load_ringbuf_skel();
        if (!skel)
                return err;

        /* only trigger BPF program for current process */
        skel->bss->pid = getpid();

        if (kern_ringbuf_out) {
                rb_fd = bpf_map__fd(skel->maps.kernel_ringbuf);
                kern_ringbuf = ring_buffer__new(rb_fd, callback, skel, NULL);
                if (!ASSERT_OK_PTR(kern_ringbuf, "kern_ringbuf_create"))
                        goto cleanup;

                *kern_ringbuf_out = kern_ringbuf;
        }

        if (user_ringbuf_out) {
                rb_fd = bpf_map__fd(skel->maps.user_ringbuf);
                user_ringbuf = user_ring_buffer__new(rb_fd, NULL);
                if (!ASSERT_OK_PTR(user_ringbuf, "user_ringbuf_create"))
                        goto cleanup;

                *user_ringbuf_out = user_ringbuf;
                ASSERT_EQ(skel->bss->read, 0, "no_reads_after_load");
        }

        err = user_ringbuf_success__attach(skel);
        if (!ASSERT_OK(err, "skel_attach"))
                goto cleanup;

        *skel_out = skel;
        return 0;

cleanup:
        if (kern_ringbuf_out)
                *kern_ringbuf_out = NULL;
        if (user_ringbuf_out)
                *user_ringbuf_out = NULL;
        ring_buffer__free(kern_ringbuf);
        user_ring_buffer__free(user_ringbuf);
        user_ringbuf_success__destroy(skel);
        return err;
}

static int load_skel_create_user_ringbuf(struct user_ringbuf_success **skel_out,
                                         struct user_ring_buffer **ringbuf_out)
{
        return load_skel_create_ringbufs(skel_out, NULL, NULL, ringbuf_out);
}

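/* Bypass libbpf and write a sample header directly into the mapped data
 * pages, so that invalid sizes and producer offsets can be handed to the
 * kernel. The BPF-side drain is expected to reject the sample with @err.
 */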
static void manually_write_test_invalid_sample(struct user_ringbuf_success *skel,
                                               __u32 size, __u64 producer_pos, int err)
{
        void *data_ptr;
        __u64 *producer_pos_ptr;
        int rb_fd, page_size = getpagesize();

        rb_fd = bpf_map__fd(skel->maps.user_ringbuf);

        ASSERT_EQ(skel->bss->read, 0, "num_samples_before_bad_sample");

        /* Map the producer_pos as RW. */
        producer_pos_ptr = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                                MAP_SHARED, rb_fd, page_size);
        ASSERT_OK_PTR(producer_pos_ptr, "producer_pos_ptr");

        /* Map the data pages as RW. */
        data_ptr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, rb_fd, 2 * page_size);
        ASSERT_OK_PTR(data_ptr, "rw_data");

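        /* Hand-craft a record header: zero it, then store the length word
         * that the kernel will validate when draining the sample.
         */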
        memset(data_ptr, 0, BPF_RINGBUF_HDR_SZ);
        *(__u32 *)data_ptr = size;

        /* Synchronizes with smp_load_acquire() in __bpf_user_ringbuf_peek() in the kernel. */
        smp_store_release(producer_pos_ptr, producer_pos + BPF_RINGBUF_HDR_SZ);

        drain_current_samples();
        ASSERT_EQ(skel->bss->read, 0, "num_samples_after_bad_sample");
        ASSERT_EQ(skel->bss->err, err, "err_after_bad_sample");

        ASSERT_OK(munmap(producer_pos_ptr, page_size), "unmap_producer_pos");
        ASSERT_OK(munmap(data_ptr, page_size), "unmap_data_ptr");
}

static void test_user_ringbuf_post_misaligned(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        int err;
        __u32 size = (1 << 5) + 7;

        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (!ASSERT_OK(err, "misaligned_skel"))
                return;

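        /* The sample size (39 bytes) isn't a multiple of 8, so the kernel
         * should reject it with -EINVAL.
         */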
        manually_write_test_invalid_sample(skel, size, size, -EINVAL);
        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_producer_wrong_offset(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        int err;
        __u32 size = (1 << 5);

        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (!ASSERT_OK(err, "wrong_offset_skel"))
                return;

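        /* Advance the producer position by less than the sample's size, so
         * the sample appears to extend past the producer position; the
         * kernel should reject it with -EINVAL.
         */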
        manually_write_test_invalid_sample(skel, size, size - 8, -EINVAL);
        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_larger_than_ringbuf_sz(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        int err;
        __u32 size = c_ringbuf_size;

        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (!ASSERT_OK(err, "huge_sample_skel"))
                return;

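        /* A sample the size of the whole ring buffer can't fit once its
         * record header is accounted for; the kernel should reject it with
         * -E2BIG.
         */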
        manually_write_test_invalid_sample(skel, size, size, -E2BIG);
        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_basic(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        int err;

        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (!ASSERT_OK(err, "ringbuf_basic_skel"))
                return;

        ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

        err = write_samples(ringbuf, 2);
        if (!ASSERT_OK(err, "write_samples"))
                goto cleanup;

        ASSERT_EQ(skel->bss->read, 2, "num_samples_read_after");

cleanup:
        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_sample_full_ring_buffer(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        int err;
        void *sample;

        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (!ASSERT_OK(err, "ringbuf_full_sample_skel"))
                return;

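        /* A single sample may occupy the entire data area, minus the space
         * consumed by its record header.
         */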
        sample = user_ring_buffer__reserve(ringbuf, c_ringbuf_size - BPF_RINGBUF_HDR_SZ);
        if (!ASSERT_OK_PTR(sample, "full_sample"))
                goto cleanup;

        user_ring_buffer__submit(ringbuf, sample);
        ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
        drain_current_samples();
        ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

cleanup:
        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_post_alignment_autoadjust(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        struct sample *sample;
        int err;

        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (!ASSERT_OK(err, "ringbuf_align_autoadjust_skel"))
                return;

        /* libbpf should automatically round any sample up to an 8-byte alignment. */
        sample = user_ring_buffer__reserve(ringbuf, sizeof(*sample) + 1);
        ASSERT_OK_PTR(sample, "reserve_autoaligned");
        user_ring_buffer__submit(ringbuf, sample);

        ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");
        drain_current_samples();
        ASSERT_EQ(skel->bss->read, 1, "num_samples_read_after");

        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_overfill(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        int err;

        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (err)
                return;

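        /* Asking for 5x more samples than fit in the buffer must fail once
         * the buffer is full; the kernel should have drained exactly
         * c_max_entries complete samples by then.
         */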
        err = write_samples(ringbuf, c_max_entries * 5);
        ASSERT_ERR(err, "write_samples");
        ASSERT_EQ(skel->bss->read, c_max_entries, "max_entries");

        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_discards_properly_ignored(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        int err, num_discarded = 0;
        __u64 *token;

        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (err)
                return;

        ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

        while (1) {
                /* Write samples until the buffer is full. */
                token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
                if (!token)
                        break;

                user_ring_buffer__discard(ringbuf, token);
                num_discarded++;
        }

        if (!ASSERT_GE(num_discarded, 0, "num_discarded"))
                goto cleanup;

        /* Should not read any samples, as they are all discarded. */
        ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");
        drain_current_samples();
        ASSERT_EQ(skel->bss->read, 0, "num_post_kick");

        /* Now that the ring buffer has been drained, we should be able to
         * reserve another token.
         */
        token = user_ring_buffer__reserve(ringbuf, sizeof(*token));

        if (!ASSERT_OK_PTR(token, "new_token"))
                goto cleanup;

        user_ring_buffer__discard(ringbuf, token);
cleanup:
        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

static void test_user_ringbuf_loop(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        uint32_t total_samples = 8192;
        uint32_t remaining_samples = total_samples;
        int err;

        BUILD_BUG_ON(total_samples <= c_max_entries);
        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (err)
                return;

        do {
                uint32_t curr_samples;

                curr_samples = remaining_samples > c_max_entries
                        ? c_max_entries : remaining_samples;
                err = write_samples(ringbuf, curr_samples);
                if (err != 0) {
                        /* Assert inside of if statement to avoid flooding logs
                         * on the success path.
                         */
                        ASSERT_OK(err, "write_samples");
                        goto cleanup;
                }

                remaining_samples -= curr_samples;
                ASSERT_EQ(skel->bss->read, total_samples - remaining_samples,
                          "current_batched_entries");
        } while (remaining_samples > 0);
        ASSERT_EQ(skel->bss->read, total_samples, "total_batched_entries");

cleanup:
        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

static int send_test_message(struct user_ring_buffer *ringbuf,
                             enum test_msg_op op, s64 operand_64,
                             s32 operand_32)
{
        struct test_msg *msg;

        msg = user_ring_buffer__reserve(ringbuf, sizeof(*msg));
        if (!msg) {
                /* Assert on the error path to avoid spamming logs with mostly
                 * success messages.
                 */
                ASSERT_OK_PTR(msg, "reserve_msg");
                return -ENOMEM;
        }

        msg->msg_op = op;

        switch (op) {
        case TEST_MSG_OP_INC64:
        case TEST_MSG_OP_MUL64:
                msg->operand_64 = operand_64;
                break;
        case TEST_MSG_OP_INC32:
        case TEST_MSG_OP_MUL32:
                msg->operand_32 = operand_32;
                break;
        default:
                PRINT_FAIL("Invalid op %d\n", op);
                user_ring_buffer__discard(ringbuf, msg);
                return -EINVAL;
        }

        user_ring_buffer__submit(ringbuf, msg);

        return 0;
}

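/* Kick the kernel into reading and handling any posted protocol messages.
 * The BPF side hooks this syscall (see progs/user_ringbuf_success.c); its
 * return value is irrelevant here.
 */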
static void kick_kernel_read_messages(void)
{
        syscall(__NR_prctl);
}

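/* ring_buffer__consume() callback: apply each message posted by the kernel
 * to the user_mutated counter in the skeleton's BSS.
 */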
static int handle_kernel_msg(void *ctx, void *data, size_t len)
{
        struct user_ringbuf_success *skel = ctx;
        struct test_msg *msg = data;

        switch (msg->msg_op) {
        case TEST_MSG_OP_INC64:
                skel->bss->user_mutated += msg->operand_64;
                return 0;
        case TEST_MSG_OP_INC32:
                skel->bss->user_mutated += msg->operand_32;
                return 0;
        case TEST_MSG_OP_MUL64:
                skel->bss->user_mutated *= msg->operand_64;
                return 0;
        case TEST_MSG_OP_MUL32:
                skel->bss->user_mutated *= msg->operand_32;
                return 0;
        default:
                fprintf(stderr, "Invalid op %d\n", msg->msg_op);
                return -EINVAL;
        }
}

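/* Consume the kernel ring buffer. Each kick is expected to result in a
 * batch of exactly 8 messages being posted by the BPF program.
 */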
static void drain_kernel_messages_buffer(struct ring_buffer *kern_ringbuf,
                                         struct user_ringbuf_success *skel)
{
        int cnt;

        cnt = ring_buffer__consume(kern_ringbuf);
        ASSERT_EQ(cnt, 8, "consume_kern_ringbuf");
        ASSERT_OK(skel->bss->err, "consume_kern_ringbuf_err");
}

static void test_user_ringbuf_msg_protocol(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *user_ringbuf;
        struct ring_buffer *kern_ringbuf;
        int err, i;
        __u64 expected_kern = 0;

        err = load_skel_create_ringbufs(&skel, &kern_ringbuf, handle_kernel_msg, &user_ringbuf);
        if (!ASSERT_OK(err, "create_ringbufs"))
                return;

        for (i = 0; i < 64; i++) {
                enum test_msg_op op = i % TEST_MSG_OP_NUM_OPS;
                __u64 operand_64 = TEST_OP_64;
                __u32 operand_32 = TEST_OP_32;

                err = send_test_message(user_ringbuf, op, operand_64, operand_32);
                if (err) {
                        /* Only assert on a failure to avoid spamming success logs. */
                        ASSERT_OK(err, "send_test_message");
                        goto cleanup;
                }

                switch (op) {
                case TEST_MSG_OP_INC64:
                        expected_kern += operand_64;
                        break;
                case TEST_MSG_OP_INC32:
                        expected_kern += operand_32;
                        break;
                case TEST_MSG_OP_MUL64:
                        expected_kern *= operand_64;
                        break;
                case TEST_MSG_OP_MUL32:
                        expected_kern *= operand_32;
                        break;
                default:
                        PRINT_FAIL("Unexpected op %d\n", op);
                        goto cleanup;
                }

                if (i % 8 == 0) {
                        kick_kernel_read_messages();
                        ASSERT_EQ(skel->bss->kern_mutated, expected_kern, "expected_kern");
                        ASSERT_EQ(skel->bss->err, 0, "bpf_prog_err");
                        drain_kernel_messages_buffer(kern_ringbuf, skel);
                }
        }

cleanup:
        ring_buffer__free(kern_ringbuf);
        user_ring_buffer__free(user_ringbuf);
        user_ringbuf_success__destroy(skel);
}

static void *kick_kernel_cb(void *arg)
{
        /* Kick the kernel, causing it to drain the ring buffer and then wake
         * up the test thread waiting on epoll.
         */
        syscall(__NR_prlimit64);

        return NULL;
}

static int spawn_kick_thread_for_poll(void)
{
        pthread_t thread;

        return pthread_create(&thread, NULL, kick_kernel_cb, NULL);
}

static void test_user_ringbuf_blocking_reserve(void)
{
        struct user_ringbuf_success *skel;
        struct user_ring_buffer *ringbuf;
        int err, num_written = 0;
        __u64 *token;

        err = load_skel_create_user_ringbuf(&skel, &ringbuf);
        if (err)
                return;

        ASSERT_EQ(skel->bss->read, 0, "num_samples_read_before");

        while (1) {
                /* Write samples until the buffer is full. */
                token = user_ring_buffer__reserve(ringbuf, sizeof(*token));
                if (!token)
                        break;

                *token = 0xdeadbeef;

                user_ring_buffer__submit(ringbuf, token);
                num_written++;
        }

        if (!ASSERT_GE(num_written, 0, "num_written"))
                goto cleanup;

        /* Should not have read any samples until the kernel is kicked. */
        ASSERT_EQ(skel->bss->read, 0, "num_pre_kick");

        /* We correctly time out after 1 second, without a sample. */
        token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 1000);
        if (!ASSERT_EQ(token, NULL, "pre_kick_timeout_token"))
                goto cleanup;

        err = spawn_kick_thread_for_poll();
        if (!ASSERT_EQ(err, 0, "deferred_kick_thread"))
                goto cleanup;

        /* After spawning another thread that asynchronously kicks the kernel to
         * drain the messages, we're able to block and successfully get a
         * sample once we receive an event notification.
         */
        token = user_ring_buffer__reserve_blocking(ringbuf, sizeof(*token), 10000);

        if (!ASSERT_OK_PTR(token, "block_token"))
                goto cleanup;

        ASSERT_GT(skel->bss->read, 0, "num_post_kick");
        ASSERT_LE(skel->bss->read, num_written, "num_post_kick_max");
        ASSERT_EQ(skel->bss->err, 0, "err_post_poll");
        user_ring_buffer__discard(ringbuf, token);

cleanup:
        user_ring_buffer__free(ringbuf);
        user_ringbuf_success__destroy(skel);
}

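/* Pair each subtest callback with its stringified name so it can be
 * registered via test__start_subtest().
 */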
#define SUCCESS_TEST(_func) { _func, #_func }

static struct {
        void (*test_callback)(void);
        const char *test_name;
} success_tests[] = {
        SUCCESS_TEST(test_user_ringbuf_mappings),
        SUCCESS_TEST(test_user_ringbuf_post_misaligned),
        SUCCESS_TEST(test_user_ringbuf_post_producer_wrong_offset),
        SUCCESS_TEST(test_user_ringbuf_post_larger_than_ringbuf_sz),
        SUCCESS_TEST(test_user_ringbuf_basic),
        SUCCESS_TEST(test_user_ringbuf_sample_full_ring_buffer),
        SUCCESS_TEST(test_user_ringbuf_post_alignment_autoadjust),
        SUCCESS_TEST(test_user_ringbuf_overfill),
        SUCCESS_TEST(test_user_ringbuf_discards_properly_ignored),
        SUCCESS_TEST(test_user_ringbuf_loop),
        SUCCESS_TEST(test_user_ringbuf_msg_protocol),
        SUCCESS_TEST(test_user_ringbuf_blocking_reserve),
};

void test_user_ringbuf(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(success_tests); i++) {
                if (!test__start_subtest(success_tests[i].test_name))
                        continue;

                success_tests[i].test_callback();
        }

        RUN_TESTS(user_ringbuf_fail);
}