// SPDX-License-Identifier: GPL-2.0

#include <time.h>
#include <string.h>

#include <linux/time64.h>

#include "util/debug.h"
#include "util/kwork.h"

#include <bpf/bpf.h>
#include <perf/cpumap.h>

#include "util/bpf_skel/kwork_trace.skel.h"

/*
 * This should be in sync with "util/kwork_trace.bpf.c"
 */
#define MAX_KWORKNAME 128
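
/*
 * Map key/value layouts, reconstructed here from how they are consumed
 * below; they must stay in sync with "util/kwork_trace.bpf.c".
 */
struct work_key {
	u32 type;
	u32 cpu;
	u64 id;
};

struct report_data {
	u64 nr;             /* number of work atoms */
	u64 total_time;     /* summed runtime or latency */
	u64 max_time;       /* longest single runtime or latency */
	u64 max_time_start; /* timestamp where max_time began */
	u64 max_time_end;   /* timestamp where max_time ended */
};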

struct kwork_class_bpf {
	struct kwork_class *class;

	void (*load_prepare)(struct perf_kwork *kwork);
	int  (*get_work_name)(struct work_key *key, char **ret_name);
};

static struct kwork_trace_bpf *skel;

static struct timespec ts_start;
static struct timespec ts_end;
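
/*
 * The BPF programs record samples only while skel->bss->enabled is set;
 * the wall-clock window is kept in ts_start/ts_end so the report can
 * compute the overall trace time span.
 */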
void perf_kwork__trace_start(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_start);
	skel->bss->enabled = 1;
}

void perf_kwork__trace_finish(void)
{
	clock_gettime(CLOCK_MONOTONIC, &ts_end);
	skel->bss->enabled = 0;
}
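
/*
 * Fetch the name the BPF side stored for this work item. A missing map
 * entry is not an error: *ret_name is simply left NULL, and only a
 * failed strdup() is reported as failure.
 */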
static int get_work_name_from_map(struct work_key *key, char **ret_name)
{
	char name[MAX_KWORKNAME] = { 0 };
	int fd = bpf_map__fd(skel->maps.perf_kwork_names);

	*ret_name = NULL;

	if (fd < 0) {
		pr_debug("Invalid names map fd\n");
		return 0;
	}

	if ((bpf_map_lookup_elem(fd, key, name) == 0) && (strlen(name) != 0)) {
		*ret_name = strdup(name);
		if (*ret_name == NULL) {
			pr_err("Failed to copy work name\n");
			return -1;
		}
	}

	return 0;
}
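
/*
 * Per-class load_prepare() callbacks enable autoload only for the
 * tracepoint programs the requested report type needs; every program
 * starts out disabled in perf_kwork__trace_prepare_bpf() below.
 */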
static void irq_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_irq_handler_entry, true);
		bpf_program__set_autoload(skel->progs.report_irq_handler_exit, true);
	}
}

static struct kwork_class_bpf kwork_irq_bpf = {
	.load_prepare  = irq_load_prepare,
	.get_work_name = get_work_name_from_map,
};

static void softirq_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_softirq_entry, true);
		bpf_program__set_autoload(skel->progs.report_softirq_exit, true);
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		bpf_program__set_autoload(skel->progs.latency_softirq_raise, true);
		bpf_program__set_autoload(skel->progs.latency_softirq_entry, true);
	}
}

static struct kwork_class_bpf kwork_softirq_bpf = {
	.load_prepare  = softirq_load_prepare,
	.get_work_name = get_work_name_from_map,
};

static void workqueue_load_prepare(struct perf_kwork *kwork)
{
	if (kwork->report == KWORK_REPORT_RUNTIME) {
		bpf_program__set_autoload(skel->progs.report_workqueue_execute_start, true);
		bpf_program__set_autoload(skel->progs.report_workqueue_execute_end, true);
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		bpf_program__set_autoload(skel->progs.latency_workqueue_activate_work, true);
		bpf_program__set_autoload(skel->progs.latency_workqueue_execute_start, true);
	}
}

static struct kwork_class_bpf kwork_workqueue_bpf = {
	.load_prepare  = workqueue_load_prepare,
	.get_work_name = get_work_name_from_map,
};
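
/*
 * Indexed by enum kwork_class_type; a NULL slot means the class has no
 * BPF tracing support.
 */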
static struct kwork_class_bpf *
kwork_class_bpf_supported_list[KWORK_CLASS_MAX] = {
	[KWORK_CLASS_IRQ]       = &kwork_irq_bpf,
	[KWORK_CLASS_SOFTIRQ]   = &kwork_softirq_bpf,
	[KWORK_CLASS_WORKQUEUE] = &kwork_workqueue_bpf,
};

static bool valid_kwork_class_type(enum kwork_class_type type)
{
	return type >= 0 && type < KWORK_CLASS_MAX;
}
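
/*
 * Push the optional CPU and name filters into their BPF maps. The BPF
 * programs consult a filter map only when the matching has_cpu_filter /
 * has_name_filter flag is set.
 */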
static int setup_filters(struct perf_kwork *kwork)
{
	u8 val = 1;
	int i, nr_cpus, key, fd;
	struct perf_cpu_map *map;

	if (kwork->cpu_list != NULL) {
		fd = bpf_map__fd(skel->maps.perf_kwork_cpu_filter);
		if (fd < 0) {
			pr_debug("Invalid cpu filter fd\n");
			return -1;
		}

		map = perf_cpu_map__new(kwork->cpu_list);
		if (map == NULL) {
			pr_debug("Invalid cpu_list\n");
			return -1;
		}

		nr_cpus = libbpf_num_possible_cpus();
		for (i = 0; i < perf_cpu_map__nr(map); i++) {
			struct perf_cpu cpu = perf_cpu_map__cpu(map, i);

			if (cpu.cpu >= nr_cpus) {
				perf_cpu_map__put(map);
				pr_err("Requested cpu %d too large\n", cpu.cpu);
				return -1;
			}
			bpf_map_update_elem(fd, &cpu.cpu, &val, BPF_ANY);
		}
		perf_cpu_map__put(map);

		skel->bss->has_cpu_filter = 1;
	}

	if (kwork->profile_name != NULL) {
		if (strlen(kwork->profile_name) >= MAX_KWORKNAME) {
			pr_err("Requested name filter %s too large, limit to %d\n",
			       kwork->profile_name, MAX_KWORKNAME - 1);
			return -1;
		}

		fd = bpf_map__fd(skel->maps.perf_kwork_name_filter);
		if (fd < 0) {
			pr_debug("Invalid name filter fd\n");
			return -1;
		}

		key = 0;
		bpf_map_update_elem(fd, &key, kwork->profile_name, BPF_ANY);

		skel->bss->has_name_filter = 1;
	}

	return 0;
}

int perf_kwork__trace_prepare_bpf(struct perf_kwork *kwork)
{
	struct bpf_program *prog;
	struct kwork_class *class;
	struct kwork_class_bpf *class_bpf;
	enum kwork_class_type type;

	skel = kwork_trace_bpf__open();
	if (!skel) {
		pr_debug("Failed to open kwork trace skeleton\n");
		return -1;
	}

	/*
	 * set all progs to non-autoload,
	 * then set corresponding progs according to config
	 */
	bpf_object__for_each_program(prog, skel->obj)
		bpf_program__set_autoload(prog, false);

	list_for_each_entry(class, &kwork->class_list, list) {
		type = class->type;
		if (!valid_kwork_class_type(type) ||
		    (kwork_class_bpf_supported_list[type] == NULL)) {
			pr_err("Unsupported bpf trace class %s\n", class->name);
			goto out;
		}

		class_bpf = kwork_class_bpf_supported_list[type];
		class_bpf->class = class;
		if (class_bpf->load_prepare != NULL)
			class_bpf->load_prepare(kwork);
	}

	if (kwork_trace_bpf__load(skel)) {
		pr_debug("Failed to load kwork trace skeleton\n");
		goto out;
	}

	if (setup_filters(kwork))
		goto out;

	if (kwork_trace_bpf__attach(skel)) {
		pr_debug("Failed to attach kwork trace skeleton\n");
		goto out;
	}

	return 0;

out:
	kwork_trace_bpf__destroy(skel);
	return -1;
}
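
/*
 * Convert one report-map entry into a struct kwork_work. The same
 * report_data layout carries either runtime or latency statistics;
 * kwork->report decides which fields it fills in.
 */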
static int add_work(struct perf_kwork *kwork,
		    struct work_key *key,
		    struct report_data *data)
{
	struct kwork_work *work;
	struct kwork_class_bpf *bpf_trace;
	struct kwork_work tmp = {
		.id = key->id,
		.name = NULL,
		.cpu = key->cpu,
	};
	enum kwork_class_type type = key->type;

	if (!valid_kwork_class_type(type)) {
		pr_debug("Invalid class type %d to add work\n", type);
		return -1;
	}

	bpf_trace = kwork_class_bpf_supported_list[type];
	tmp.class = bpf_trace->class;

	if ((bpf_trace->get_work_name != NULL) &&
	    (bpf_trace->get_work_name(key, &tmp.name)))
		return -1;

	work = perf_kwork_add_work(kwork, tmp.class, &tmp);
	if (work == NULL)
		return -1;

	if (kwork->report == KWORK_REPORT_RUNTIME) {
		work->nr_atoms = data->nr;
		work->total_runtime = data->total_time;
		work->max_runtime = data->max_time;
		work->max_runtime_start = data->max_time_start;
		work->max_runtime_end = data->max_time_end;
	} else if (kwork->report == KWORK_REPORT_LATENCY) {
		work->nr_atoms = data->nr;
		work->total_latency = data->total_time;
		work->max_latency = data->max_time;
		work->max_latency_start = data->max_time_start;
		work->max_latency_end = data->max_time_end;
	} else {
		pr_debug("Invalid bpf report type %d\n", kwork->report);
		return -1;
	}

	kwork->timestart = (u64)ts_start.tv_sec * NSEC_PER_SEC + ts_start.tv_nsec;
	kwork->timeend = (u64)ts_end.tv_sec * NSEC_PER_SEC + ts_end.tv_nsec;

	return 0;
}
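
/*
 * Walk the whole report map: asking bpf_map_get_next_key() for the
 * successor of a key that is absent returns the first entry, and
 * advancing prev to the last-seen key visits each entry exactly once.
 */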
int perf_kwork__report_read_bpf(struct perf_kwork *kwork)
{
	struct report_data data;
	struct work_key key = { 0 };
	struct work_key prev = { 0 };
	int fd = bpf_map__fd(skel->maps.perf_kwork_report);

	if (fd < 0) {
		pr_debug("Invalid report fd\n");
		return -1;
	}

	while (!bpf_map_get_next_key(fd, &prev, &key)) {
		if ((bpf_map_lookup_elem(fd, &key, &data)) != 0) {
			pr_debug("Failed to lookup report elem\n");
			return -1;
		}

		if ((data.nr != 0) && (add_work(kwork, &key, &data) != 0))
			return -1;

		prev = key;
	}

	return 0;
}

void perf_kwork__report_cleanup_bpf(void)
{
	kwork_trace_bpf__destroy(skel);
}