// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <fcntl.h>
#include <stdint.h>
#include <stdlib.h>

#include <linux/err.h>

#include "util/ftrace.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/bpf_counter.h"
#include "util/stat.h"

#include "util/bpf_skel/func_latency.skel.h"
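
/*
 * Generated skeleton for the func_latency BPF program; one file-scope
 * instance is enough since a single latency measurement runs at a time.
 */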
static struct func_latency_bpf *skel;
int perf_ftrace__latency_prepare_bpf(struct perf_ftrace *ftrace)
{
	int fd, err;
	int i, ncpus = 1, ntasks = 1;
	struct filter_entry *func;

	if (!list_is_singular(&ftrace->filters)) {
		pr_err("ERROR: %s target function(s).\n",
		       list_empty(&ftrace->filters) ? "No" : "Too many");
		return -1;
	}

	func = list_first_entry(&ftrace->filters, struct filter_entry, list);
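
	/* open the skeleton first so rodata and map sizes can be set before load */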
	skel = func_latency_bpf__open();
	if (!skel) {
		pr_err("Failed to open func latency skeleton\n");
		return -1;
	}

	skel->rodata->bucket_range = ftrace->bucket_range;
	skel->rodata->min_latency = ftrace->min_latency;
	/* don't need to set cpu filter for system-wide mode */
	if (ftrace->target.cpu_list) {
		ncpus = perf_cpu_map__nr(ftrace->evlist->core.user_requested_cpus);
		bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
		skel->rodata->has_cpu = 1;
	}
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		ntasks = perf_thread_map__nr(ftrace->evlist->core.threads);
		bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
		skel->rodata->has_task = 1;
	}

	skel->rodata->use_nsec = ftrace->use_nsec;
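
	/* locked BPF memory is charged against RLIMIT_MEMLOCK on older kernels */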
	set_max_rlimit();

	err = func_latency_bpf__load(skel);
	if (err) {
		pr_err("Failed to load func latency skeleton\n");
		goto out;
	}
	if (ftrace->target.cpu_list) {
		u32 cpu;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.cpu_filter);
		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(ftrace->evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}
	if (target__has_task(&ftrace->target) || target__none(&ftrace->target)) {
		u32 pid;
		u8 val = 1;

		fd = bpf_map__fd(skel->maps.task_filter);
		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(ftrace->evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}
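
	/* start min at the largest value so the first recorded latency replaces it */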
	skel->bss->min = INT64_MAX;
	skel->links.func_begin = bpf_program__attach_kprobe(skel->progs.func_begin,
							     false, func->name);
	if (IS_ERR(skel->links.func_begin)) {
		pr_err("Failed to attach fentry program\n");
		err = PTR_ERR(skel->links.func_begin);
		goto out;
	}

	skel->links.func_end = bpf_program__attach_kprobe(skel->progs.func_end,
							   true, func->name);
	if (IS_ERR(skel->links.func_end)) {
		pr_err("Failed to attach fexit program\n");
		err = PTR_ERR(skel->links.func_end);
		goto out;
	}

	/* XXX: we don't actually use this fd - just for poll() */
	return open("/dev/null", O_RDONLY);

out:
	return err;
}
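
/*
 * The attached BPF programs check the 'enabled' flag in BSS; flipping it
 * starts and stops latency accounting without detaching the probes.
 */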
int perf_ftrace__latency_start_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 1;
	return 0;
}

int perf_ftrace__latency_stop_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	skel->bss->enabled = 0;
	return 0;
}
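
/*
 * The latency map is a per-CPU array of NUM_BUCKET counters; each lookup
 * fills one slot per possible CPU, which are summed into buckets[].
 */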
int perf_ftrace__latency_read_bpf(struct perf_ftrace *ftrace __maybe_unused,
				  int buckets[], struct stats *stats)
{
	int i, fd, err;
	u32 idx;
	u64 *hist;
	int ncpus = cpu__max_cpu().cpu;

	fd = bpf_map__fd(skel->maps.latency);

	hist = calloc(ncpus, sizeof(*hist));
	if (hist == NULL)
		return -ENOMEM;
	for (idx = 0; idx < NUM_BUCKET; idx++) {
		err = bpf_map_lookup_elem(fd, &idx, hist);
		if (err) {
			buckets[idx] = 0;
			continue;
		}

		for (i = 0; i < ncpus; i++)
			buckets[idx] += hist[i];
	}
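
	/* total/count/max/min are maintained by the BPF side; derive the mean here */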
	if (skel->bss->count) {
		stats->mean = skel->bss->total / skel->bss->count;
		stats->n = skel->bss->count;
		stats->max = skel->bss->max;
		stats->min = skel->bss->min;
	}

	free(hist);
	return 0;
}
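
/* Detaches the probes and frees the maps and programs created by the skeleton. */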
int perf_ftrace__latency_cleanup_bpf(struct perf_ftrace *ftrace __maybe_unused)
{
	func_latency_bpf__destroy(skel);
	return 0;
}