// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <sys/socket.h>
#include <test_progs.h>
#include "test_perf_buffer.skel.h"
#include "bpf/libbpf_internal.h"

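/* referenced by the CHECK() macro from test_progs.h */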
static int duration;

/* AddressSanitizer sometimes crashes when dereferencing the sample data
 * below because it is mmap()'ed memory. Disable instrumentation with the
 * no_sanitize_address attribute.
 */
__attribute__((no_sanitize_address))
static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	int cpu_data = *(int *)data, duration = 0;
	cpu_set_t *cpu_seen = ctx;

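	/* report only mismatches; an unconditional CHECK() would log a
	 * PASS line for every sample
	 */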
	if (cpu_data != cpu)
		CHECK(cpu_data != cpu, "check_cpu_data",
		      "cpu_data %d != cpu %d\n", cpu_data, cpu);

	CPU_SET(cpu, cpu_seen);
}

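/* Pin the calling thread to @cpu and sleep briefly so that the probe
 * attached by the skeleton fires on that CPU.
 */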
int trigger_on_cpu(int cpu)
{
	cpu_set_t cpu_set;
	int err;

	CPU_ZERO(&cpu_set);
	CPU_SET(cpu, &cpu_set);

	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	if (err && CHECK(err, "set_affinity", "cpu #%d, err %d\n", cpu, err))
		return err;

	usleep(1);

	return 0;
}

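/* End-to-end test: set up a perf buffer over perf_buf_map, make the probe
 * fire once on every online CPU, and verify that each CPU's sample arrives
 * through its own per-CPU ring buffer.
 */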
void serial_test_perf_buffer(void)
{
	int err, on_len, nr_on_cpus = 0, nr_cpus, i, j;
	int zero = 0, my_pid = getpid();
	struct test_perf_buffer *skel;
	cpu_set_t cpu_seen;
	struct perf_buffer *pb;
	int last_fd = -1, fd;
	bool *online;

	nr_cpus = libbpf_num_possible_cpus();
	if (CHECK(nr_cpus < 0, "nr_cpus", "err %d\n", nr_cpus))
		return;

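	/* only online CPUs get a perf buffer, so count them up front */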
	err = parse_cpu_mask_file("/sys/devices/system/cpu/online",
				  &online, &on_len);
	if (CHECK(err, "nr_on_cpus", "err %d\n", err))
		return;

	for (i = 0; i < on_len; i++)
		if (online[i])
			nr_on_cpus++;

	/* load program */
	skel = test_perf_buffer__open_and_load();
	if (CHECK(!skel, "skel_load", "skeleton open/load failed\n"))
		goto out_close;

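	/* tell the BPF program which PID to report events for */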
	err = bpf_map_update_elem(bpf_map__fd(skel->maps.my_pid_map), &zero, &my_pid, 0);
	if (!ASSERT_OK(err, "my_pid_update"))
		goto out_close;

	/* attach probe */
	err = test_perf_buffer__attach(skel);
	if (CHECK(err, "attach_kprobe", "err %d\n", err))
		goto out_close;

	/* set up perf buffer */
	pb = perf_buffer__new(bpf_map__fd(skel->maps.perf_buf_map), 1,
			      on_sample, NULL, &cpu_seen, NULL);
	if (!ASSERT_OK_PTR(pb, "perf_buf__new"))
		goto out_close;

	CHECK(perf_buffer__epoll_fd(pb) < 0, "epoll_fd",
	      "bad fd: %d\n", perf_buffer__epoll_fd(pb));

	/* trigger kprobe on every CPU */
	CPU_ZERO(&cpu_seen);
	for (i = 0; i < nr_cpus; i++) {
		if (i >= on_len || !online[i]) {
			printf("skipping offline CPU #%d\n", i);
			continue;
		}

		if (trigger_on_cpu(i))
			goto out_free_pb;
	}

	/* read perf buffer */
	err = perf_buffer__poll(pb, 100);
	if (CHECK(err < 0, "perf_buffer__poll", "err %d\n", err))
		goto out_free_pb;

	if (CHECK(CPU_COUNT(&cpu_seen) != nr_on_cpus, "seen_cpu_cnt",
		  "expect %d, seen %d\n", nr_on_cpus, CPU_COUNT(&cpu_seen)))
		goto out_free_pb;

	if (CHECK(perf_buffer__buffer_cnt(pb) != nr_on_cpus, "buf_cnt",
		  "got %zu, expected %d\n", perf_buffer__buffer_cnt(pb), nr_on_cpus))
		goto out_free_pb;

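	/* Now exercise each per-CPU buffer in isolation: drain any pending
	 * data, re-trigger the probe on that CPU, consume just that buffer,
	 * and check that the sample came back. j indexes buffers, which
	 * exist only for online CPUs, while i walks all possible CPUs.
	 */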
	for (i = 0, j = 0; i < nr_cpus; i++) {
		if (i >= on_len || !online[i])
			continue;

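		/* each per-CPU buffer should be backed by its own fd */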
		fd = perf_buffer__buffer_fd(pb, j);
		CHECK(fd < 0 || last_fd == fd, "fd_check", "last fd %d == fd %d\n", last_fd, fd);
		last_fd = fd;

		err = perf_buffer__consume_buffer(pb, j);
		if (CHECK(err, "drain_buf", "cpu %d, err %d\n", i, err))
			goto out_free_pb;

		CPU_CLR(i, &cpu_seen);
		if (trigger_on_cpu(i))
			goto out_free_pb;

		err = perf_buffer__consume_buffer(pb, j);
		if (CHECK(err, "consume_buf", "cpu %d, err %d\n", i, err))
			goto out_free_pb;

		if (CHECK(!CPU_ISSET(i, &cpu_seen), "cpu_seen", "cpu %d not seen\n", i))
			goto out_free_pb;
		j++;
	}

out_free_pb:
	perf_buffer__free(pb);
out_close:
	test_perf_buffer__destroy(skel);
	free(online);
}