tools/testing/selftests/bpf/prog_tests/perf_event_stackmap.c
// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2020 Facebook
#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <test_progs.h>
#include "perf_event_stackmap.skel.h"

#ifndef noinline
#define noinline __attribute__((noinline))
#endif

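/* Chain of noinline functions: calling func_6() builds a user-space call
 * stack several frames deep, and the usleep() in func_1() gives the perf
 * event a chance to fire while that stack is live.
 */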
noinline int func_1(void)
{
	static int val = 1;

	val += 1;

	usleep(100);
	return val;
}

noinline int func_2(void)
{
	return func_1();
}

noinline int func_3(void)
{
	return func_2();
}

noinline int func_4(void)
{
	return func_3();
}

noinline int func_5(void)
{
	return func_4();
}

noinline int func_6(void)
{
	int i, val = 1;

	for (i = 0; i < 100; i++)
		val += func_5();

	return val;
}

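/* Open and load the skeleton, pin this thread to CPU 0, attach the
 * skeleton's oncpu program to a perf event on that CPU, then run func_6()
 * and check the result counters exported by the BPF program.
 */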
void test_perf_event_stackmap(void)
{
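	/* Sample hardware CPU cycles at the maximum allowed frequency,
	 * collecting both the call chain and a branch-sampled call stack
	 * (PERF_SAMPLE_BRANCH_CALL_STACK).
	 */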
	struct perf_event_attr attr = {
		/* .type = PERF_TYPE_SOFTWARE, */
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
		.precise_ip = 2,
		.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK |
			PERF_SAMPLE_CALLCHAIN,
		.branch_sample_type = PERF_SAMPLE_BRANCH_USER |
			PERF_SAMPLE_BRANCH_NO_FLAGS |
			PERF_SAMPLE_BRANCH_NO_CYCLES |
			PERF_SAMPLE_BRANCH_CALL_STACK,
		.freq = 1,
		.sample_freq = read_perf_max_sample_freq(),
		.size = sizeof(struct perf_event_attr),
	};
	struct perf_event_stackmap *skel;
	__u32 duration = 0;
	cpu_set_t cpu_set;
	int pmu_fd, err;

	skel = perf_event_stackmap__open();

	if (CHECK(!skel, "skel_open", "skeleton open failed\n"))
		return;

	err = perf_event_stackmap__load(skel);
	if (CHECK(err, "skel_load", "skeleton load failed: %d\n", err))
		goto cleanup;

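	/* Pin the test thread to CPU 0 so it runs on the same CPU the
	 * per-CPU perf event below is opened on.
	 */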
	CPU_ZERO(&cpu_set);
	CPU_SET(0, &cpu_set);
	err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set), &cpu_set);
	if (CHECK(err, "set_affinity", "err %d, errno %d\n", err, errno))
		goto cleanup;

	pmu_fd = syscall(__NR_perf_event_open, &attr, -1 /* pid */,
			 0 /* cpu 0 */, -1 /* group id */,
			 0 /* flags */);
	if (pmu_fd < 0) {
		printf("%s:SKIP:cpu doesn't support the event\n", __func__);
		test__skip();
		goto cleanup;
	}

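	/* Attach the oncpu BPF program to the perf event; the resulting link
	 * is stored in the skeleton and released by perf_event_stackmap__destroy().
	 */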
	skel->links.oncpu = bpf_program__attach_perf_event(skel->progs.oncpu,
							   pmu_fd);
	if (!ASSERT_OK_PTR(skel->links.oncpu, "attach_perf_event")) {
		close(pmu_fd);
		goto cleanup;
	}

	/* create kernel and user stack traces for testing */
	func_6();

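	/* Each counter below is a global in the BPF program's data section;
	 * the test expects it to have reached 2 by the time func_6() returns.
	 */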
	CHECK(skel->data->stackid_kernel != 2, "get_stackid_kernel", "failed\n");
	CHECK(skel->data->stackid_user != 2, "get_stackid_user", "failed\n");
	CHECK(skel->data->stack_kernel != 2, "get_stack_kernel", "failed\n");
	CHECK(skel->data->stack_user != 2, "get_stack_user", "failed\n");

cleanup:
	perf_event_stackmap__destroy(skel);
}