1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/list.h>
3 #include <linux/list_sort.h>
4 #include <linux/string.h>
5 #include <linux/zalloc.h>
6 #include <subcmd/pager.h>
18 #include "print-events.h"
 * core_pmus: A PMU belongs to core_pmus if its name is "cpu" or its sysfs
23 * directory contains "cpus" file. All PMUs belonging to core_pmus
24 * must have pmu->is_core=1. If there are more than one PMU in
25 * this list, perf interprets it as a heterogeneous platform.
 *             (FWIW, certain ARM platforms with heterogeneous cores use a
27 * homogeneous PMU, and thus they are treated as homogeneous
28 * platform by perf because core_pmus will have only one entry)
29 * other_pmus: All other PMUs which are not part of core_pmus list. It doesn't
30 * matter whether PMU is present per SMT-thread or outside of the
31 * core in the hw. For e.g., an instance of AMD ibs_fetch// and
32 * ibs_op// PMUs is present in each hw SMT thread, however they
33 * are captured under other_pmus. PMUs belonging to other_pmus
34 * must have pmu->is_core=0 but pmu->is_uncore could be 0 or 1.
/* PMUs that count per-core events (see comment above). */
static LIST_HEAD(core_pmus);
/* All non-core PMUs (uncore, software, per-package, ...). */
static LIST_HEAD(other_pmus);
/* Has sysfs already been scanned for core PMUs? */
static bool read_sysfs_core_pmus;
/* Has sysfs already been scanned for all PMUs? */
static bool read_sysfs_all_pmus;

/* Forward declaration: lazily populates the lists above from sysfs. */
static void pmu_read_sysfs(bool core_only);
/*
 * Length of a PMU name with any trailing "_<num>" instance suffix excluded,
 * so that e.g. "uncore_cha_2" and "uncore_cha_3" compare as the same PMU.
 */
size_t pmu_name_len_no_suffix(const char *str)
{
	int full_len, trimmed;
	bool saw_hex = false;

	full_len = trimmed = strlen(str);

	/* Walk back over trailing (possibly hexadecimal) digits. */
	while (trimmed > 0 && isxdigit(str[trimmed - 1])) {
		if (!isdigit(str[trimmed - 1]))
			saw_hex = true;
		trimmed--;
	}

	if (trimmed > 0 && trimmed != full_len && str[trimmed - 1] == '_') {
		/*
		 * There is a '_{num}' suffix. For decimal suffixes any length
		 * will do, for hexadecimal ensure more than 2 hex digits so
		 * that S390's cpum_cf PMU doesn't match.
		 */
		if (!saw_hex || (full_len - trimmed) > 2)
			return trimmed - 1;
	}
	/* No strippable suffix: use the full length. */
	return full_len;
}
/*
 * Compare two PMU names, ordering same-stem names by their numeric
 * "_<num>" suffix (parsed as hex) rather than lexically.
 */
int pmu_name_cmp(const char *lhs_pmu_name, const char *rhs_pmu_name)
{
	unsigned long lhs_num = 0, rhs_num = 0;
	size_t lhs_len = pmu_name_len_no_suffix(lhs_pmu_name);
	size_t rhs_len = pmu_name_len_no_suffix(rhs_pmu_name);
	int ret = strncmp(lhs_pmu_name, rhs_pmu_name,
			  lhs_len < rhs_len ? lhs_len : rhs_len);

	/* Different stem lengths, different stems or empty name: done. */
	if (lhs_len != rhs_len || ret != 0 || lhs_len == 0)
		return ret;

	/* Same stem: order by the numeric suffix, if any, parsed as hex. */
	if (lhs_len + 1 < strlen(lhs_pmu_name))
		lhs_num = strtoul(&lhs_pmu_name[lhs_len + 1], NULL, 16);
	if (rhs_len + 1 < strlen(rhs_pmu_name))
		rhs_num = strtoul(&rhs_pmu_name[rhs_len + 1], NULL, 16);

	return lhs_num < rhs_num ? -1 : (lhs_num > rhs_num ? 1 : 0);
}
89 void perf_pmus__destroy(void)
91 struct perf_pmu *pmu, *tmp;
93 list_for_each_entry_safe(pmu, tmp, &core_pmus, list) {
96 perf_pmu__delete(pmu);
98 list_for_each_entry_safe(pmu, tmp, &other_pmus, list) {
101 perf_pmu__delete(pmu);
103 read_sysfs_core_pmus = false;
104 read_sysfs_all_pmus = false;
107 static struct perf_pmu *pmu_find(const char *name)
109 struct perf_pmu *pmu;
111 list_for_each_entry(pmu, &core_pmus, list) {
112 if (!strcmp(pmu->name, name) ||
113 (pmu->alias_name && !strcmp(pmu->alias_name, name)))
116 list_for_each_entry(pmu, &other_pmus, list) {
117 if (!strcmp(pmu->name, name) ||
118 (pmu->alias_name && !strcmp(pmu->alias_name, name)))
125 struct perf_pmu *perf_pmus__find(const char *name)
127 struct perf_pmu *pmu;
132 * Once PMU is loaded it stays in the list,
133 * so we keep us from multiple reading/parsing
134 * the pmu format definitions.
136 pmu = pmu_find(name);
140 if (read_sysfs_all_pmus)
143 core_pmu = is_pmu_core(name);
144 if (core_pmu && read_sysfs_core_pmus)
147 dirfd = perf_pmu__event_source_devices_fd();
148 pmu = perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
149 /*eager_load=*/false);
154 * Looking up an inidividual PMU failed. This may mean name is
155 * an alias, so read the PMUs from sysfs and try to find again.
157 pmu_read_sysfs(core_pmu);
158 pmu = pmu_find(name);
163 static struct perf_pmu *perf_pmu__find2(int dirfd, const char *name)
165 struct perf_pmu *pmu;
169 * Once PMU is loaded it stays in the list,
170 * so we keep us from multiple reading/parsing
171 * the pmu format definitions.
173 pmu = pmu_find(name);
177 if (read_sysfs_all_pmus)
180 core_pmu = is_pmu_core(name);
181 if (core_pmu && read_sysfs_core_pmus)
184 return perf_pmu__lookup(core_pmu ? &core_pmus : &other_pmus, dirfd, name,
185 /*eager_load=*/false);
188 static int pmus_cmp(void *priv __maybe_unused,
189 const struct list_head *lhs, const struct list_head *rhs)
191 struct perf_pmu *lhs_pmu = container_of(lhs, struct perf_pmu, list);
192 struct perf_pmu *rhs_pmu = container_of(rhs, struct perf_pmu, list);
194 return pmu_name_cmp(lhs_pmu->name ?: "", rhs_pmu->name ?: "");
197 /* Add all pmus in sysfs to pmu list: */
198 static void pmu_read_sysfs(bool core_only)
204 if (read_sysfs_all_pmus || (core_only && read_sysfs_core_pmus))
207 fd = perf_pmu__event_source_devices_fd();
217 while ((dent = readdir(dir))) {
218 if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
220 if (core_only && !is_pmu_core(dent->d_name))
222 /* add to static LIST_HEAD(core_pmus) or LIST_HEAD(other_pmus): */
223 perf_pmu__find2(fd, dent->d_name);
227 if (list_empty(&core_pmus)) {
228 if (!perf_pmu__create_placeholder_core_pmu(&core_pmus))
229 pr_err("Failure to set up any core PMUs\n");
231 list_sort(NULL, &core_pmus, pmus_cmp);
232 list_sort(NULL, &other_pmus, pmus_cmp);
233 if (!list_empty(&core_pmus)) {
234 read_sysfs_core_pmus = true;
236 read_sysfs_all_pmus = true;
240 static struct perf_pmu *__perf_pmus__find_by_type(unsigned int type)
242 struct perf_pmu *pmu;
244 list_for_each_entry(pmu, &core_pmus, list) {
245 if (pmu->type == type)
249 list_for_each_entry(pmu, &other_pmus, list) {
250 if (pmu->type == type)
256 struct perf_pmu *perf_pmus__find_by_type(unsigned int type)
258 struct perf_pmu *pmu = __perf_pmus__find_by_type(type);
260 if (pmu || read_sysfs_all_pmus)
263 pmu_read_sysfs(/*core_only=*/false);
264 pmu = __perf_pmus__find_by_type(type);
269 * pmu iterator: If pmu is NULL, we start at the begin, otherwise return the
270 * next pmu. Returns NULL on end.
272 struct perf_pmu *perf_pmus__scan(struct perf_pmu *pmu)
274 bool use_core_pmus = !pmu || pmu->is_core;
277 pmu_read_sysfs(/*core_only=*/false);
278 pmu = list_prepare_entry(pmu, &core_pmus, list);
281 list_for_each_entry_continue(pmu, &core_pmus, list)
285 pmu = list_prepare_entry(pmu, &other_pmus, list);
287 list_for_each_entry_continue(pmu, &other_pmus, list)
292 struct perf_pmu *perf_pmus__scan_core(struct perf_pmu *pmu)
295 pmu_read_sysfs(/*core_only=*/true);
296 return list_first_entry_or_null(&core_pmus, typeof(*pmu), list);
298 list_for_each_entry_continue(pmu, &core_pmus, list)
304 static struct perf_pmu *perf_pmus__scan_skip_duplicates(struct perf_pmu *pmu)
306 bool use_core_pmus = !pmu || pmu->is_core;
307 int last_pmu_name_len = 0;
308 const char *last_pmu_name = (pmu && pmu->name) ? pmu->name : "";
311 pmu_read_sysfs(/*core_only=*/false);
312 pmu = list_prepare_entry(pmu, &core_pmus, list);
314 last_pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");
317 list_for_each_entry_continue(pmu, &core_pmus, list) {
318 int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");
320 if (last_pmu_name_len == pmu_name_len &&
321 !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
327 pmu = list_prepare_entry(pmu, &other_pmus, list);
329 list_for_each_entry_continue(pmu, &other_pmus, list) {
330 int pmu_name_len = pmu_name_len_no_suffix(pmu->name ?: "");
332 if (last_pmu_name_len == pmu_name_len &&
333 !strncmp(last_pmu_name, pmu->name ?: "", pmu_name_len))
341 const struct perf_pmu *perf_pmus__pmu_for_pmu_filter(const char *str)
343 struct perf_pmu *pmu = NULL;
345 while ((pmu = perf_pmus__scan(pmu)) != NULL) {
346 if (!strcmp(pmu->name, str))
348 /* Ignore "uncore_" prefix. */
349 if (!strncmp(pmu->name, "uncore_", 7)) {
350 if (!strcmp(pmu->name + 7, str))
353 /* Ignore "cpu_" prefix on Intel hybrid PMUs. */
354 if (!strncmp(pmu->name, "cpu_", 4)) {
355 if (!strcmp(pmu->name + 4, str))
/** Struct for ordering events as output in perf list. */
struct sevent {
	/** PMU for event. */
	const struct perf_pmu *pmu;
	/** Event name. */
	const char *name;
	/** Alias/secondary name for the event. */
	const char *alias;
	/** Scale and unit, e.g. "1e-3Joules". */
	const char *scale_unit;
	/** Short event description. */
	const char *desc;
	/** Long event description. */
	const char *long_desc;
	/** Description of the event encoding. */
	const char *encoding_desc;
	/** Topic grouping the event in output. */
	const char *topic;
	/** PMU name, used for sorting and duplicate removal. */
	const char *pmu_name;
	/** Is the event deprecated? */
	bool deprecated;
};
377 static int cmp_sevent(const void *a, const void *b)
379 const struct sevent *as = a;
380 const struct sevent *bs = b;
381 bool a_iscpu, b_iscpu;
384 /* Put extra events last. */
385 if (!!as->desc != !!bs->desc)
386 return !!as->desc - !!bs->desc;
388 /* Order by topics. */
389 ret = strcmp(as->topic ?: "", bs->topic ?: "");
393 /* Order CPU core events to be first */
394 a_iscpu = as->pmu ? as->pmu->is_core : true;
395 b_iscpu = bs->pmu ? bs->pmu->is_core : true;
396 if (a_iscpu != b_iscpu)
397 return a_iscpu ? -1 : 1;
399 /* Order by PMU name. */
400 if (as->pmu != bs->pmu) {
401 ret = strcmp(as->pmu_name ?: "", bs->pmu_name ?: "");
406 /* Order by event name. */
407 return strcmp(as->name, bs->name);
410 static bool pmu_alias_is_duplicate(struct sevent *a, struct sevent *b)
412 /* Different names -> never duplicates */
413 if (strcmp(a->name ?: "//", b->name ?: "//"))
416 /* Don't remove duplicates for different PMUs */
417 return strcmp(a->pmu_name, b->pmu_name) == 0;
/* Accumulator passed through perf_pmu__for_each_event() callbacks. */
struct events_callback_state {
	/* Array collecting one sevent per event. */
	struct sevent *aliases;
	/* Capacity of the aliases array. */
	size_t aliases_len;
	/* Next free slot in aliases. */
	size_t index;
};
426 static int perf_pmus__print_pmu_events__callback(void *vstate,
427 struct pmu_event_info *info)
429 struct events_callback_state *state = vstate;
432 if (state->index >= state->aliases_len) {
433 pr_err("Unexpected event %s/%s/\n", info->pmu->name, info->name);
436 s = &state->aliases[state->index];
438 #define COPY_STR(str) s->str = info->str ? strdup(info->str) : NULL
441 COPY_STR(scale_unit);
444 COPY_STR(encoding_desc);
448 s->deprecated = info->deprecated;
453 void perf_pmus__print_pmu_events(const struct print_callbacks *print_cb, void *print_state)
455 struct perf_pmu *pmu;
458 struct sevent *aliases;
459 struct events_callback_state state;
460 bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
461 struct perf_pmu *(*scan_fn)(struct perf_pmu *);
463 if (skip_duplicate_pmus)
464 scan_fn = perf_pmus__scan_skip_duplicates;
466 scan_fn = perf_pmus__scan;
470 while ((pmu = scan_fn(pmu)) != NULL)
471 len += perf_pmu__num_events(pmu);
473 aliases = zalloc(sizeof(struct sevent) * len);
475 pr_err("FATAL: not enough memory to print PMU events\n");
479 state = (struct events_callback_state) {
484 while ((pmu = scan_fn(pmu)) != NULL) {
485 perf_pmu__for_each_event(pmu, skip_duplicate_pmus, &state,
486 perf_pmus__print_pmu_events__callback);
488 qsort(aliases, len, sizeof(struct sevent), cmp_sevent);
489 for (int j = 0; j < len; j++) {
490 /* Skip duplicates */
491 if (j < len - 1 && pmu_alias_is_duplicate(&aliases[j], &aliases[j + 1]))
494 print_cb->print_event(print_state,
499 aliases[j].scale_unit,
500 aliases[j].deprecated,
503 aliases[j].long_desc,
504 aliases[j].encoding_desc);
506 zfree(&aliases[j].name);
507 zfree(&aliases[j].alias);
508 zfree(&aliases[j].scale_unit);
509 zfree(&aliases[j].desc);
510 zfree(&aliases[j].long_desc);
511 zfree(&aliases[j].encoding_desc);
512 zfree(&aliases[j].topic);
513 zfree(&aliases[j].pmu_name);
515 if (printed && pager_in_use())
521 struct build_format_string_args {
522 struct strbuf short_string;
523 struct strbuf long_string;
527 static int build_format_string(void *state, const char *name, int config,
528 const unsigned long *bits)
530 struct build_format_string_args *args = state;
531 unsigned int num_bits;
536 if (args->num_formats > 1) {
537 strbuf_addch(&args->long_string, ',');
538 if (args->num_formats < 4)
539 strbuf_addch(&args->short_string, ',');
541 num_bits = bits ? bitmap_weight(bits, PERF_PMU_FORMAT_BITS) : 0;
543 ret1 = strbuf_addf(&args->long_string, "%s", name);
544 if (args->num_formats < 4)
545 ret2 = strbuf_addf(&args->short_string, "%s", name);
546 } else if (num_bits > 8) {
547 ret1 = strbuf_addf(&args->long_string, "%s=0..0x%llx", name,
548 ULLONG_MAX >> (64 - num_bits));
549 if (args->num_formats < 4) {
550 ret2 = strbuf_addf(&args->short_string, "%s=0..0x%llx", name,
551 ULLONG_MAX >> (64 - num_bits));
554 ret1 = strbuf_addf(&args->long_string, "%s=0..%llu", name,
555 ULLONG_MAX >> (64 - num_bits));
556 if (args->num_formats < 4) {
557 ret2 = strbuf_addf(&args->short_string, "%s=0..%llu", name,
558 ULLONG_MAX >> (64 - num_bits));
561 return ret1 < 0 ? ret1 : (ret2 < 0 ? ret2 : 0);
564 void perf_pmus__print_raw_pmu_events(const struct print_callbacks *print_cb, void *print_state)
566 bool skip_duplicate_pmus = print_cb->skip_duplicate_pmus(print_state);
567 struct perf_pmu *(*scan_fn)(struct perf_pmu *);
568 struct perf_pmu *pmu = NULL;
570 if (skip_duplicate_pmus)
571 scan_fn = perf_pmus__scan_skip_duplicates;
573 scan_fn = perf_pmus__scan;
575 while ((pmu = scan_fn(pmu)) != NULL) {
576 struct build_format_string_args format_args = {
577 .short_string = STRBUF_INIT,
578 .long_string = STRBUF_INIT,
581 int len = pmu_name_len_no_suffix(pmu->name);
582 const char *desc = "(see 'man perf-list' or 'man perf-record' on how to encode it)";
587 strbuf_addf(&format_args.short_string, "%.*s/", len, pmu->name);
588 strbuf_addf(&format_args.long_string, "%.*s/", len, pmu->name);
589 perf_pmu__for_each_format(pmu, &format_args, build_format_string);
591 if (format_args.num_formats > 3)
592 strbuf_addf(&format_args.short_string, ",.../modifier");
594 strbuf_addf(&format_args.short_string, "/modifier");
596 strbuf_addf(&format_args.long_string, "/modifier");
597 print_cb->print_event(print_state,
600 format_args.short_string.buf,
601 /*event_alias=*/NULL,
603 /*deprecated=*/false,
604 "Raw event descriptor",
607 format_args.long_string.buf);
609 strbuf_release(&format_args.short_string);
610 strbuf_release(&format_args.long_string);
/* Does the PMU called pname exist and expose an event called name? */
bool perf_pmus__have_event(const char *pname, const char *name)
{
	struct perf_pmu *pmu = perf_pmus__find(pname);

	if (!pmu)
		return false;
	return perf_pmu__have_event(pmu, name);
}
/* Number of core PMUs; computed on first use and then cached. */
int perf_pmus__num_core_pmus(void)
{
	static int count;

	if (!count) {
		struct perf_pmu *pmu = NULL;

		while ((pmu = perf_pmus__scan_core(pmu)) != NULL)
			count++;
	}
	return count;
}
634 static bool __perf_pmus__supports_extended_type(void)
636 struct perf_pmu *pmu = NULL;
638 if (perf_pmus__num_core_pmus() <= 1)
641 while ((pmu = perf_pmus__scan_core(pmu)) != NULL) {
642 if (!is_event_supported(PERF_TYPE_HARDWARE, PERF_COUNT_HW_CPU_CYCLES | ((__u64)pmu->type << PERF_PMU_TYPE_SHIFT)))
/* Cached probe result, set once by perf_pmus__init_supports_extended_type(). */
static bool perf_pmus__do_support_extended_type;
651 static void perf_pmus__init_supports_extended_type(void)
653 perf_pmus__do_support_extended_type = __perf_pmus__supports_extended_type();
656 bool perf_pmus__supports_extended_type(void)
658 static pthread_once_t extended_type_once = PTHREAD_ONCE_INIT;
660 pthread_once(&extended_type_once, perf_pmus__init_supports_extended_type);
662 return perf_pmus__do_support_extended_type;
665 char *perf_pmus__default_pmu_name(void)
672 if (!list_empty(&core_pmus))
673 return strdup(list_first_entry(&core_pmus, struct perf_pmu, list)->name);
675 fd = perf_pmu__event_source_devices_fd();
677 return strdup("cpu");
682 return strdup("cpu");
685 while ((dent = readdir(dir))) {
686 if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, ".."))
688 if (is_pmu_core(dent->d_name)) {
689 result = strdup(dent->d_name);
695 return result ?: strdup("cpu");
698 struct perf_pmu *evsel__find_pmu(const struct evsel *evsel)
700 struct perf_pmu *pmu = evsel->pmu;
703 pmu = perf_pmus__find_by_type(evsel->core.attr.type);
704 ((struct evsel *)evsel)->pmu = pmu;
/* First core PMU (reading sysfs on first use). */
struct perf_pmu *perf_pmus__find_core_pmu(void)
{
	return perf_pmus__scan_core(/*pmu=*/NULL);
}
714 struct perf_pmu *perf_pmus__add_test_pmu(int test_sysfs_dirfd, const char *name)
717 * Some PMU functions read from the sysfs mount point, so care is
718 * needed, hence passing the eager_load flag to load things like the
721 return perf_pmu__lookup(&other_pmus, test_sysfs_dirfd, name, /*eager_load=*/true);