1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/mman.h>
7 #include <linux/time64.h>
12 #include "cacheline.h"
17 #include "map_symbol.h"
25 #include "mem-events.h"
28 #include "time-utils.h"
31 #include "trace-event.h"
32 #include <linux/kernel.h>
33 #include <linux/string.h>
35 #ifdef HAVE_LIBTRACEEVENT
36 #include <traceevent/event-parse.h>
40 const char default_parent_pattern[] = "^sys_|^do_page_fault";
41 const char *parent_pattern = default_parent_pattern;
42 const char default_sort_order[] = "comm,dso,symbol";
43 const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
44 const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
45 const char default_top_sort_order[] = "dso,symbol";
46 const char default_diff_sort_order[] = "dso,symbol";
47 const char default_tracepoint_sort_order[] = "trace";
48 const char *sort_order;
49 const char *field_order;
50 regex_t ignore_callees_regex;
51 int have_ignore_callees = 0;
52 enum sort_mode sort__mode = SORT_MODE__NORMAL;
53 static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
54 static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
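/*
 * The keys in arch_specific_sort_keys are only accepted when the running
 * architecture opts in via arch_support_sort_key(), and the dynamic_headers
 * entries can have their column header renamed through arch_perf_header_entry()
 * (powerpc does both for the pipeline-stage cycle keys).
 */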
57 * Some architectures have an Adjacent Cacheline Prefetch feature, which
58 * behaves like the cacheline size is doubled. Enable this flag to
59 * check things in double cacheline granularity.
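 *
 * For illustration (assuming 64-byte cachelines): address 0x12f0 falls into
 * cacheline 0x12c0 normally, but into 0x1280 when lines are treated as 128
 * bytes wide, so two physically adjacent lines share one histogram bucket.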
64 * Replaces all occurrences of the character given to the:
66 * -t, --field-separator
68 * option, which selects a special separator character and does not pad with
69 * spaces, replacing every occurrence of that separator in symbol names (and
70 * other output) with a '.' character, so that it remains the only valid separator.
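 *
 * For example (illustrative): with '-t :' a C++ symbol such as "std::sort" is
 * emitted as "std..sort", so a consumer splitting columns on ':' cannot be
 * confused by separators that happen to occur inside symbol names.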
72 static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
78 n = vsnprintf(bf, size, fmt, ap);
79 if (symbol_conf.field_sep && n > 0) {
83 sep = strchr(sep, *symbol_conf.field_sep);
96 static int64_t cmp_null(const void *l, const void *r)
109 sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
111 return right->thread->tid - left->thread->tid;
114 static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
115 size_t size, unsigned int width)
117 const char *comm = thread__comm_str(he->thread);
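/* the "%7d:" tid prefix below always occupies 8 columns */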
119 width = max(7U, width) - 8;
120 return repsep_snprintf(bf, size, "%7d:%-*.*s", he->thread->tid,
121 width, width, comm ?: "");
124 static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
126 const struct thread *th = arg;
128 if (type != HIST_FILTER__THREAD)
131 return th && he->thread != th;
134 struct sort_entry sort_thread = {
135 .se_header = " Pid:Command",
136 .se_cmp = sort__thread_cmp,
137 .se_snprintf = hist_entry__thread_snprintf,
138 .se_filter = hist_entry__thread_filter,
139 .se_width_idx = HISTC_THREAD,
145 sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
147 if (left->simd_flags.arch != right->simd_flags.arch)
148 return (int64_t) left->simd_flags.arch - right->simd_flags.arch;
150 return (int64_t) left->simd_flags.pred - right->simd_flags.pred;
153 static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
155 u64 arch = simd_flags->arch;
157 if (arch & SIMD_OP_FLAGS_ARCH_SVE)
163 static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
164 size_t size, unsigned int width __maybe_unused)
168 if (!he->simd_flags.arch)
169 return repsep_snprintf(bf, size, "");
171 name = hist_entry__get_simd_name(&he->simd_flags);
173 if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
174 return repsep_snprintf(bf, size, "[e] %s", name);
175 else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
176 return repsep_snprintf(bf, size, "[p] %s", name);
178 return repsep_snprintf(bf, size, "[.] %s", name);
181 struct sort_entry sort_simd = {
182 .se_header = "Simd ",
183 .se_cmp = sort__simd_cmp,
184 .se_snprintf = hist_entry__simd_snprintf,
185 .se_width_idx = HISTC_SIMD,
191 * We can't use pointer comparison in functions below,
192 * because it gives different results based on pointer
193 * values, which could break some sorting assumptions.
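 *
 * (e.g. the same two comms could compare as less-than in one run and
 * greater-than in the next, purely because of where the comm structs were
 * allocated, so the strings themselves are compared instead)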
196 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
198 return strcmp(comm__str(right->comm), comm__str(left->comm));
202 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
204 return strcmp(comm__str(right->comm), comm__str(left->comm));
208 sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
210 return strcmp(comm__str(right->comm), comm__str(left->comm));
213 static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
214 size_t size, unsigned int width)
216 return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));
219 struct sort_entry sort_comm = {
220 .se_header = "Command",
221 .se_cmp = sort__comm_cmp,
222 .se_collapse = sort__comm_collapse,
223 .se_sort = sort__comm_sort,
224 .se_snprintf = hist_entry__comm_snprintf,
225 .se_filter = hist_entry__thread_filter,
226 .se_width_idx = HISTC_COMM,
231 static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
233 struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
234 struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
235 const char *dso_name_l, *dso_name_r;
237 if (!dso_l || !dso_r)
238 return cmp_null(dso_r, dso_l);
241 dso_name_l = dso_l->long_name;
242 dso_name_r = dso_r->long_name;
244 dso_name_l = dso_l->short_name;
245 dso_name_r = dso_r->short_name;
248 return strcmp(dso_name_l, dso_name_r);
252 sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
254 return _sort__dso_cmp(right->ms.map, left->ms.map);
257 static int _hist_entry__dso_snprintf(struct map *map, char *bf,
258 size_t size, unsigned int width)
260 const struct dso *dso = map ? map__dso(map) : NULL;
261 const char *dso_name = "[unknown]";
264 dso_name = verbose > 0 ? dso->long_name : dso->short_name;
266 return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);
269 static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
270 size_t size, unsigned int width)
272 return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
275 static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
277 const struct dso *dso = arg;
279 if (type != HIST_FILTER__DSO)
282 return dso && (!he->ms.map || map__dso(he->ms.map) != dso);
285 struct sort_entry sort_dso = {
286 .se_header = "Shared Object",
287 .se_cmp = sort__dso_cmp,
288 .se_snprintf = hist_entry__dso_snprintf,
289 .se_filter = hist_entry__dso_filter,
290 .se_width_idx = HISTC_DSO,
295 static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
297 return (int64_t)(right_ip - left_ip);
300 int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
302 if (!sym_l || !sym_r)
303 return cmp_null(sym_l, sym_r);
308 if (sym_l->inlined || sym_r->inlined) {
309 int ret = strcmp(sym_l->name, sym_r->name);
313 if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))
317 if (sym_l->start != sym_r->start)
318 return (int64_t)(sym_r->start - sym_l->start);
320 return (int64_t)(sym_r->end - sym_l->end);
324 sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
328 if (!left->ms.sym && !right->ms.sym)
329 return _sort__addr_cmp(left->ip, right->ip);
332 * comparing symbol address alone is not enough since it's a
333 * relative address within a dso.
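 *
 * Two different DSOs can both have a hot symbol at, say, offset 0x4010, so
 * the containing dso is compared first before falling back to _sort__sym_cmp().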
335 if (!hists__has(left->hists, dso) || hists__has(right->hists, dso)) {
336 ret = sort__dso_cmp(left, right);
341 return _sort__sym_cmp(left->ms.sym, right->ms.sym);
345 sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
347 if (!left->ms.sym || !right->ms.sym)
348 return cmp_null(left->ms.sym, right->ms.sym);
350 return strcmp(right->ms.sym->name, left->ms.sym->name);
353 static int _hist_entry__sym_snprintf(struct map_symbol *ms,
354 u64 ip, char level, char *bf, size_t size,
357 struct symbol *sym = ms->sym;
358 struct map *map = ms->map;
362 struct dso *dso = map ? map__dso(map) : NULL;
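/* dso__symtab_origin() yields a one-character tag describing where the
 * symbol table came from (kallsyms, debuginfo, build-id cache, ...);
 * '!' marks a missing dso */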
363 char o = dso ? dso__symtab_origin(dso) : '!';
366 if (dso && dso->kernel && dso->adjust_symbols)
367 rip = map__unmap_ip(map, ip);
369 ret += repsep_snprintf(bf, size, "%-#*llx %c ",
370 BITS_PER_LONG / 4 + 2, rip, o);
373 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
375 if (sym->type == STT_OBJECT) {
376 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
377 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
378 ip - map__unmap_ip(map, sym->start));
380 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
384 ret += repsep_snprintf(bf + ret, size - ret,
388 size_t len = BITS_PER_LONG / 4;
389 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
396 int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
398 return _hist_entry__sym_snprintf(&he->ms, he->ip,
399 he->level, bf, size, width);
402 static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
404 const char *sym = arg;
406 if (type != HIST_FILTER__SYMBOL)
409 return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));
412 struct sort_entry sort_sym = {
413 .se_header = "Symbol",
414 .se_cmp = sort__sym_cmp,
415 .se_sort = sort__sym_sort,
416 .se_snprintf = hist_entry__sym_snprintf,
417 .se_filter = hist_entry__sym_filter,
418 .se_width_idx = HISTC_SYMBOL,
423 char *hist_entry__srcline(struct hist_entry *he)
425 return map__srcline(he->ms.map, he->ip, he->ms.sym);
429 sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
433 ret = _sort__addr_cmp(left->ip, right->ip);
437 return sort__dso_cmp(left, right);
441 sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
444 left->srcline = hist_entry__srcline(left);
446 right->srcline = hist_entry__srcline(right);
448 return strcmp(right->srcline, left->srcline);
452 sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
454 return sort__srcline_collapse(left, right);
458 sort__srcline_init(struct hist_entry *he)
461 he->srcline = hist_entry__srcline(he);
464 static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
465 size_t size, unsigned int width)
467 return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);
470 struct sort_entry sort_srcline = {
471 .se_header = "Source:Line",
472 .se_cmp = sort__srcline_cmp,
473 .se_collapse = sort__srcline_collapse,
474 .se_sort = sort__srcline_sort,
475 .se_init = sort__srcline_init,
476 .se_snprintf = hist_entry__srcline_snprintf,
477 .se_width_idx = HISTC_SRCLINE,
480 /* --sort srcline_from */
482 static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
484 return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);
488 sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
490 return left->branch_info->from.addr - right->branch_info->from.addr;
494 sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
496 if (!left->branch_info->srcline_from)
497 left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);
499 if (!right->branch_info->srcline_from)
500 right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);
502 return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);
506 sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
508 return sort__srcline_from_collapse(left, right);
511 static void sort__srcline_from_init(struct hist_entry *he)
513 if (!he->branch_info->srcline_from)
514 he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);
517 static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
518 size_t size, unsigned int width)
520 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);
523 struct sort_entry sort_srcline_from = {
524 .se_header = "From Source:Line",
525 .se_cmp = sort__srcline_from_cmp,
526 .se_collapse = sort__srcline_from_collapse,
527 .se_sort = sort__srcline_from_sort,
528 .se_init = sort__srcline_from_init,
529 .se_snprintf = hist_entry__srcline_from_snprintf,
530 .se_width_idx = HISTC_SRCLINE_FROM,
533 /* --sort srcline_to */
536 sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
538 return left->branch_info->to.addr - right->branch_info->to.addr;
542 sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
544 if (!left->branch_info->srcline_to)
545 left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);
547 if (!right->branch_info->srcline_to)
548 right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);
550 return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);
554 sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
556 return sort__srcline_to_collapse(left, right);
559 static void sort__srcline_to_init(struct hist_entry *he)
561 if (!he->branch_info->srcline_to)
562 he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);
565 static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
566 size_t size, unsigned int width)
568 return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);
571 struct sort_entry sort_srcline_to = {
572 .se_header = "To Source:Line",
573 .se_cmp = sort__srcline_to_cmp,
574 .se_collapse = sort__srcline_to_collapse,
575 .se_sort = sort__srcline_to_sort,
576 .se_init = sort__srcline_to_init,
577 .se_snprintf = hist_entry__srcline_to_snprintf,
578 .se_width_idx = HISTC_SRCLINE_TO,
581 static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
582 size_t size, unsigned int width)
585 struct symbol *sym = he->ms.sym;
586 struct annotation *notes;
587 double ipc = 0.0, coverage = 0.0;
591 return repsep_snprintf(bf, size, "%-*s", width, "-");
593 notes = symbol__annotation(sym);
595 if (notes->hit_cycles)
596 ipc = notes->hit_insn / ((double)notes->hit_cycles);
598 if (notes->total_insn) {
599 coverage = notes->cover_insn * 100.0 /
600 ((double)notes->total_insn);
603 snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
604 return repsep_snprintf(bf, size, "%-*s", width, tmp);
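/*
 * Worked example (illustrative numbers): hit_insn = 200 with hit_cycles = 100
 * gives ipc = 2.00, and cover_insn = 45 of total_insn = 60 gives 75.0%
 * coverage, so the column reads "2.00  [ 75.0%]".
 */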
607 struct sort_entry sort_sym_ipc = {
608 .se_header = "IPC [IPC Coverage]",
609 .se_cmp = sort__sym_cmp,
610 .se_snprintf = hist_entry__sym_ipc_snprintf,
611 .se_width_idx = HISTC_SYMBOL_IPC,
614 static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
616 char *bf, size_t size,
621 snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
622 return repsep_snprintf(bf, size, "%-*s", width, tmp);
625 struct sort_entry sort_sym_ipc_null = {
626 .se_header = "IPC [IPC Coverage]",
627 .se_cmp = sort__sym_cmp,
628 .se_snprintf = hist_entry__sym_ipc_null_snprintf,
629 .se_width_idx = HISTC_SYMBOL_IPC,
634 static char no_srcfile[1];
636 static char *hist_entry__get_srcfile(struct hist_entry *e)
639 struct map *map = e->ms.map;
644 sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
645 e->ms.sym, false, true, true, e->ip);
646 if (!strcmp(sf, SRCLINE_UNKNOWN))
658 sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
660 return sort__srcline_cmp(left, right);
664 sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
667 left->srcfile = hist_entry__get_srcfile(left);
669 right->srcfile = hist_entry__get_srcfile(right);
671 return strcmp(right->srcfile, left->srcfile);
675 sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
677 return sort__srcfile_collapse(left, right);
680 static void sort__srcfile_init(struct hist_entry *he)
683 he->srcfile = hist_entry__get_srcfile(he);
686 static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
687 size_t size, unsigned int width)
689 return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);
692 struct sort_entry sort_srcfile = {
693 .se_header = "Source File",
694 .se_cmp = sort__srcfile_cmp,
695 .se_collapse = sort__srcfile_collapse,
696 .se_sort = sort__srcfile_sort,
697 .se_init = sort__srcfile_init,
698 .se_snprintf = hist_entry__srcfile_snprintf,
699 .se_width_idx = HISTC_SRCFILE,
705 sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
707 struct symbol *sym_l = left->parent;
708 struct symbol *sym_r = right->parent;
710 if (!sym_l || !sym_r)
711 return cmp_null(sym_l, sym_r);
713 return strcmp(sym_r->name, sym_l->name);
716 static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
717 size_t size, unsigned int width)
719 return repsep_snprintf(bf, size, "%-*.*s", width, width,
720 he->parent ? he->parent->name : "[other]");
723 struct sort_entry sort_parent = {
724 .se_header = "Parent symbol",
725 .se_cmp = sort__parent_cmp,
726 .se_snprintf = hist_entry__parent_snprintf,
727 .se_width_idx = HISTC_PARENT,
733 sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
735 return right->cpu - left->cpu;
738 static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
739 size_t size, unsigned int width)
741 return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);
744 struct sort_entry sort_cpu = {
746 .se_cmp = sort__cpu_cmp,
747 .se_snprintf = hist_entry__cpu_snprintf,
748 .se_width_idx = HISTC_CPU,
751 /* --sort cgroup_id */
753 static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
755 return (int64_t)(right_dev - left_dev);
758 static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
760 return (int64_t)(right_ino - left_ino);
764 sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
768 ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);
772 return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
773 left->cgroup_id.ino);
776 static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
777 char *bf, size_t size,
778 unsigned int width __maybe_unused)
780 return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,
784 struct sort_entry sort_cgroup_id = {
785 .se_header = "cgroup id (dev/inode)",
786 .se_cmp = sort__cgroup_id_cmp,
787 .se_snprintf = hist_entry__cgroup_id_snprintf,
788 .se_width_idx = HISTC_CGROUP_ID,
794 sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
796 return right->cgroup - left->cgroup;
799 static int hist_entry__cgroup_snprintf(struct hist_entry *he,
800 char *bf, size_t size,
801 unsigned int width __maybe_unused)
803 const char *cgrp_name = "N/A";
806 struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
809 cgrp_name = cgrp->name;
811 cgrp_name = "unknown";
814 return repsep_snprintf(bf, size, "%s", cgrp_name);
817 struct sort_entry sort_cgroup = {
818 .se_header = "Cgroup",
819 .se_cmp = sort__cgroup_cmp,
820 .se_snprintf = hist_entry__cgroup_snprintf,
821 .se_width_idx = HISTC_CGROUP,
827 sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
829 return right->socket - left->socket;
832 static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
833 size_t size, unsigned int width)
835 return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);
838 static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
840 int sk = *(const int *)arg;
842 if (type != HIST_FILTER__SOCKET)
845 return sk >= 0 && he->socket != sk;
848 struct sort_entry sort_socket = {
849 .se_header = "Socket",
850 .se_cmp = sort__socket_cmp,
851 .se_snprintf = hist_entry__socket_snprintf,
852 .se_filter = hist_entry__socket_filter,
853 .se_width_idx = HISTC_SOCKET,
859 sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
861 return right->time - left->time;
864 static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
865 size_t size, unsigned int width)
869 if (symbol_conf.nanosecs)
870 timestamp__scnprintf_nsec(he->time, he_time,
873 timestamp__scnprintf_usec(he->time, he_time,
876 return repsep_snprintf(bf, size, "%-.*s", width, he_time);
879 struct sort_entry sort_time = {
881 .se_cmp = sort__time_cmp,
882 .se_snprintf = hist_entry__time_snprintf,
883 .se_width_idx = HISTC_TIME,
888 #ifdef HAVE_LIBTRACEEVENT
889 static char *get_trace_output(struct hist_entry *he)
891 struct trace_seq seq;
893 struct tep_record rec = {
894 .data = he->raw_data,
895 .size = he->raw_size,
898 evsel = hists_to_evsel(he->hists);
900 trace_seq_init(&seq);
901 if (symbol_conf.raw_trace) {
902 tep_print_fields(&seq, he->raw_data, he->raw_size,
905 tep_print_event(evsel->tp_format->tep,
906 &seq, &rec, "%s", TEP_PRINT_INFO);
909 * Trim the buffer: it starts at 4KB and we're not going to
910 * add anything more to it.
912 return realloc(seq.buffer, seq.len + 1);
916 sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
920 evsel = hists_to_evsel(left->hists);
921 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
924 if (left->trace_output == NULL)
925 left->trace_output = get_trace_output(left);
926 if (right->trace_output == NULL)
927 right->trace_output = get_trace_output(right);
929 return strcmp(right->trace_output, left->trace_output);
932 static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
933 size_t size, unsigned int width)
937 evsel = hists_to_evsel(he->hists);
938 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
939 return scnprintf(bf, size, "%-.*s", width, "N/A");
941 if (he->trace_output == NULL)
942 he->trace_output = get_trace_output(he);
943 return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);
946 struct sort_entry sort_trace = {
947 .se_header = "Trace output",
948 .se_cmp = sort__trace_cmp,
949 .se_snprintf = hist_entry__trace_snprintf,
950 .se_width_idx = HISTC_TRACE,
952 #endif /* HAVE_LIBTRACEEVENT */
954 /* sort keys for branch stacks */
957 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
959 if (!left->branch_info || !right->branch_info)
960 return cmp_null(left->branch_info, right->branch_info);
962 return _sort__dso_cmp(left->branch_info->from.ms.map,
963 right->branch_info->from.ms.map);
966 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
967 size_t size, unsigned int width)
970 return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,
973 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
976 static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
979 const struct dso *dso = arg;
981 if (type != HIST_FILTER__DSO)
984 return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
985 map__dso(he->branch_info->from.ms.map) != dso);
989 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
991 if (!left->branch_info || !right->branch_info)
992 return cmp_null(left->branch_info, right->branch_info);
994 return _sort__dso_cmp(left->branch_info->to.ms.map,
995 right->branch_info->to.ms.map);
998 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
999 size_t size, unsigned int width)
1001 if (he->branch_info)
1002 return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,
1005 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1008 static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
1011 const struct dso *dso = arg;
1013 if (type != HIST_FILTER__DSO)
1016 return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
1017 map__dso(he->branch_info->to.ms.map) != dso);
1021 sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
1023 struct addr_map_symbol *from_l, *from_r;
1025 if (!left->branch_info || !right->branch_info)
1026 return cmp_null(left->branch_info, right->branch_info);
1028 from_l = &left->branch_info->from;
1029 from_r = &right->branch_info->from;
1031 if (!from_l->ms.sym && !from_r->ms.sym)
1032 return _sort__addr_cmp(from_l->addr, from_r->addr);
1034 return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);
1038 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
1040 struct addr_map_symbol *to_l, *to_r;
1042 if (!left->branch_info || !right->branch_info)
1043 return cmp_null(left->branch_info, right->branch_info);
1045 to_l = &left->branch_info->to;
1046 to_r = &right->branch_info->to;
1048 if (!to_l->ms.sym && !to_r->ms.sym)
1049 return _sort__addr_cmp(to_l->addr, to_r->addr);
1051 return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);
1054 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
1055 size_t size, unsigned int width)
1057 if (he->branch_info) {
1058 struct addr_map_symbol *from = &he->branch_info->from;
1060 return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
1061 from->al_level, bf, size, width);
1064 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1067 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
1068 size_t size, unsigned int width)
1070 if (he->branch_info) {
1071 struct addr_map_symbol *to = &he->branch_info->to;
1073 return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
1074 to->al_level, bf, size, width);
1077 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1080 static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
1083 const char *sym = arg;
1085 if (type != HIST_FILTER__SYMBOL)
1088 return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
1089 strstr(he->branch_info->from.ms.sym->name, sym));
1092 static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
1095 const char *sym = arg;
1097 if (type != HIST_FILTER__SYMBOL)
1100 return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
1101 strstr(he->branch_info->to.ms.sym->name, sym));
1104 struct sort_entry sort_dso_from = {
1105 .se_header = "Source Shared Object",
1106 .se_cmp = sort__dso_from_cmp,
1107 .se_snprintf = hist_entry__dso_from_snprintf,
1108 .se_filter = hist_entry__dso_from_filter,
1109 .se_width_idx = HISTC_DSO_FROM,
1112 struct sort_entry sort_dso_to = {
1113 .se_header = "Target Shared Object",
1114 .se_cmp = sort__dso_to_cmp,
1115 .se_snprintf = hist_entry__dso_to_snprintf,
1116 .se_filter = hist_entry__dso_to_filter,
1117 .se_width_idx = HISTC_DSO_TO,
1120 struct sort_entry sort_sym_from = {
1121 .se_header = "Source Symbol",
1122 .se_cmp = sort__sym_from_cmp,
1123 .se_snprintf = hist_entry__sym_from_snprintf,
1124 .se_filter = hist_entry__sym_from_filter,
1125 .se_width_idx = HISTC_SYMBOL_FROM,
1128 struct sort_entry sort_sym_to = {
1129 .se_header = "Target Symbol",
1130 .se_cmp = sort__sym_to_cmp,
1131 .se_snprintf = hist_entry__sym_to_snprintf,
1132 .se_filter = hist_entry__sym_to_filter,
1133 .se_width_idx = HISTC_SYMBOL_TO,
1136 static int _hist_entry__addr_snprintf(struct map_symbol *ms,
1137 u64 ip, char level, char *bf, size_t size,
1140 struct symbol *sym = ms->sym;
1141 struct map *map = ms->map;
1142 size_t ret = 0, offs;
1144 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
1146 if (sym->type == STT_OBJECT) {
1147 ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
1148 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
1149 ip - map__unmap_ip(map, sym->start));
1151 ret += repsep_snprintf(bf + ret, size - ret, "%.*s",
1154 offs = ip - sym->start;
1156 ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);
1159 size_t len = BITS_PER_LONG / 4;
1160 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
1167 static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
1168 size_t size, unsigned int width)
1170 if (he->branch_info) {
1171 struct addr_map_symbol *from = &he->branch_info->from;
1173 return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
1174 he->level, bf, size, width);
1177 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1180 static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
1181 size_t size, unsigned int width)
1183 if (he->branch_info) {
1184 struct addr_map_symbol *to = &he->branch_info->to;
1186 return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
1187 he->level, bf, size, width);
1190 return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
1194 sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
1196 struct addr_map_symbol *from_l;
1197 struct addr_map_symbol *from_r;
1200 if (!left->branch_info || !right->branch_info)
1201 return cmp_null(left->branch_info, right->branch_info);
1203 from_l = &left->branch_info->from;
1204 from_r = &right->branch_info->from;
1207 * comparing symbol address alone is not enough since it's a
1208 * relative address within a dso.
1210 ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);
1214 return _sort__addr_cmp(from_l->addr, from_r->addr);
1218 sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
1220 struct addr_map_symbol *to_l;
1221 struct addr_map_symbol *to_r;
1224 if (!left->branch_info || !right->branch_info)
1225 return cmp_null(left->branch_info, right->branch_info);
1227 to_l = &left->branch_info->to;
1228 to_r = &right->branch_info->to;
1231 * comparing symbol address alone is not enough since it's a
1232 * relative address within a dso.
1234 ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);
1238 return _sort__addr_cmp(to_l->addr, to_r->addr);
1241 struct sort_entry sort_addr_from = {
1242 .se_header = "Source Address",
1243 .se_cmp = sort__addr_from_cmp,
1244 .se_snprintf = hist_entry__addr_from_snprintf,
1245 .se_filter = hist_entry__sym_from_filter, /* shared with sym_from */
1246 .se_width_idx = HISTC_ADDR_FROM,
1249 struct sort_entry sort_addr_to = {
1250 .se_header = "Target Address",
1251 .se_cmp = sort__addr_to_cmp,
1252 .se_snprintf = hist_entry__addr_to_snprintf,
1253 .se_filter = hist_entry__sym_to_filter, /* shared with sym_to */
1254 .se_width_idx = HISTC_ADDR_TO,
1259 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
1261 unsigned char mp, p;
1263 if (!left->branch_info || !right->branch_info)
1264 return cmp_null(left->branch_info, right->branch_info);
1266 mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
1267 p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
1271 static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
1272 size_t size, unsigned int width)
{
1273 static const char *out = "N/A";
1275 if (he->branch_info) {
1276 if (he->branch_info->flags.predicted)
1278 else if (he->branch_info->flags.mispred)
1282 return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
1286 sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
1288 if (!left->branch_info || !right->branch_info)
1289 return cmp_null(left->branch_info, right->branch_info);
1291 return left->branch_info->flags.cycles -
1292 right->branch_info->flags.cycles;
1295 static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
1296 size_t size, unsigned int width)
1298 if (!he->branch_info)
1299 return scnprintf(bf, size, "%-.*s", width, "N/A");
1300 if (he->branch_info->flags.cycles == 0)
1301 return repsep_snprintf(bf, size, "%-*s", width, "-");
1302 return repsep_snprintf(bf, size, "%-*hd", width,
1303 he->branch_info->flags.cycles);
1306 struct sort_entry sort_cycles = {
1307 .se_header = "Basic Block Cycles",
1308 .se_cmp = sort__cycles_cmp,
1309 .se_snprintf = hist_entry__cycles_snprintf,
1310 .se_width_idx = HISTC_CYCLES,
1313 /* --sort daddr_sym */
1315 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1317 uint64_t l = 0, r = 0;
1320 l = left->mem_info->daddr.addr;
1321 if (right->mem_info)
1322 r = right->mem_info->daddr.addr;
1324 return (int64_t)(r - l);
1327 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1328 size_t size, unsigned int width)
1331 struct map_symbol *ms = NULL;
1334 addr = he->mem_info->daddr.addr;
1335 ms = &he->mem_info->daddr.ms;
1337 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1341 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1343 uint64_t l = 0, r = 0;
1346 l = left->mem_info->iaddr.addr;
1347 if (right->mem_info)
1348 r = right->mem_info->iaddr.addr;
1350 return (int64_t)(r - l);
1353 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1354 size_t size, unsigned int width)
1357 struct map_symbol *ms = NULL;
1360 addr = he->mem_info->iaddr.addr;
1361 ms = &he->mem_info->iaddr.ms;
1363 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
1367 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1369 struct map *map_l = NULL;
1370 struct map *map_r = NULL;
1373 map_l = left->mem_info->daddr.ms.map;
1374 if (right->mem_info)
1375 map_r = right->mem_info->daddr.ms.map;
1377 return _sort__dso_cmp(map_l, map_r);
1380 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1381 size_t size, unsigned int width)
1383 struct map *map = NULL;
1386 map = he->mem_info->daddr.ms.map;
1388 return _hist_entry__dso_snprintf(map, bf, size, width);
1392 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1394 union perf_mem_data_src data_src_l;
1395 union perf_mem_data_src data_src_r;
1398 data_src_l = left->mem_info->data_src;
1400 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1402 if (right->mem_info)
1403 data_src_r = right->mem_info->data_src;
1405 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1407 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
1410 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1411 size_t size, unsigned int width)
1415 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1416 return repsep_snprintf(bf, size, "%.*s", width, out);
1420 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1422 union perf_mem_data_src data_src_l;
1423 union perf_mem_data_src data_src_r;
1426 data_src_l = left->mem_info->data_src;
1428 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1430 if (right->mem_info)
1431 data_src_r = right->mem_info->data_src;
1433 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1435 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
1438 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1439 size_t size, unsigned int width)
1443 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1444 return repsep_snprintf(bf, size, "%-*s", width, out);
1448 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1450 union perf_mem_data_src data_src_l;
1451 union perf_mem_data_src data_src_r;
1454 data_src_l = left->mem_info->data_src;
1456 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1458 if (right->mem_info)
1459 data_src_r = right->mem_info->data_src;
1461 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1463 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
1466 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1467 size_t size, unsigned int width)
1471 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1472 return repsep_snprintf(bf, size, "%-*s", width, out);
1476 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1478 union perf_mem_data_src data_src_l;
1479 union perf_mem_data_src data_src_r;
1482 data_src_l = left->mem_info->data_src;
1484 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1486 if (right->mem_info)
1487 data_src_r = right->mem_info->data_src;
1489 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1491 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
1494 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1495 size_t size, unsigned int width)
1499 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1500 return repsep_snprintf(bf, size, "%-*s", width, out);
1504 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1507 struct map *l_map, *r_map;
1508 struct dso *l_dso, *r_dso;
1511 if (!left->mem_info) return -1;
1512 if (!right->mem_info) return 1;
1514 /* group event types together */
1515 if (left->cpumode > right->cpumode) return -1;
1516 if (left->cpumode < right->cpumode) return 1;
1518 l_map = left->mem_info->daddr.ms.map;
1519 r_map = right->mem_info->daddr.ms.map;
1521 /* if both are NULL, jump to sort on al_addr instead */
1522 if (!l_map && !r_map)
1525 if (!l_map) return -1;
1526 if (!r_map) return 1;
1528 l_dso = map__dso(l_map);
1529 r_dso = map__dso(r_map);
1530 rc = dso__cmp_id(l_dso, r_dso);
1534 * Addresses with no major/minor numbers are assumed to be
1535 * anonymous in userspace. Sort those on pid then address.
1537 * The kernel and non-zero major/minor mapped areas are
1538 * assumed to be unity mapped. Sort those on address.
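 *
 * For example, two processes that each map an anonymous buffer at the same
 * virtual address hold unrelated data, so they must not be folded into one
 * cacheline entry; comparing pid_ first keeps them apart. A file-backed or
 * kernel mapping names the same bytes in every process, so the address alone
 * identifies the cacheline.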
1541 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1542 (!(map__flags(l_map) & MAP_SHARED)) && !l_dso->id.maj && !l_dso->id.min &&
1543 !l_dso->id.ino && !l_dso->id.ino_generation) {
1544 /* userspace anonymous */
1546 if (left->thread->pid_ > right->thread->pid_) return -1;
1547 if (left->thread->pid_ < right->thread->pid_) return 1;
1551 /* al_addr does all the right addr - start + offset calculations */
1552 l = cl_address(left->mem_info->daddr.al_addr, chk_double_cl);
1553 r = cl_address(right->mem_info->daddr.al_addr, chk_double_cl);
1555 if (l > r) return -1;
1556 if (l < r) return 1;
1561 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1562 size_t size, unsigned int width)
1566 struct map_symbol *ms = NULL;
1567 char level = he->level;
1570 struct map *map = he->mem_info->daddr.ms.map;
1571 struct dso *dso = map ? map__dso(map) : NULL;
1573 addr = cl_address(he->mem_info->daddr.al_addr, chk_double_cl);
1574 ms = &he->mem_info->daddr.ms;
1576 /* print [s] for shared data mmaps */
1577 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1578 map && !(map__prot(map) & PROT_EXEC) &&
1579 (map__flags(map) & MAP_SHARED) &&
1580 (dso->id.maj || dso->id.min || dso->id.ino || dso->id.ino_generation))
1585 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
1588 struct sort_entry sort_mispredict = {
1589 .se_header = "Branch Mispredicted",
1590 .se_cmp = sort__mispredict_cmp,
1591 .se_snprintf = hist_entry__mispredict_snprintf,
1592 .se_width_idx = HISTC_MISPREDICT,
1596 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1598 return left->weight - right->weight;
1601 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1602 size_t size, unsigned int width)
1604 return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1607 struct sort_entry sort_local_weight = {
1608 .se_header = "Local Weight",
1609 .se_cmp = sort__weight_cmp,
1610 .se_snprintf = hist_entry__local_weight_snprintf,
1611 .se_width_idx = HISTC_LOCAL_WEIGHT,
1614 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1615 size_t size, unsigned int width)
1617 return repsep_snprintf(bf, size, "%-*llu", width,
1618 he->weight * he->stat.nr_events);
1621 struct sort_entry sort_global_weight = {
1622 .se_header = "Weight",
1623 .se_cmp = sort__weight_cmp,
1624 .se_snprintf = hist_entry__global_weight_snprintf,
1625 .se_width_idx = HISTC_GLOBAL_WEIGHT,
1629 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1631 return left->ins_lat - right->ins_lat;
1634 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1635 size_t size, unsigned int width)
1637 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat);
1640 struct sort_entry sort_local_ins_lat = {
1641 .se_header = "Local INSTR Latency",
1642 .se_cmp = sort__ins_lat_cmp,
1643 .se_snprintf = hist_entry__local_ins_lat_snprintf,
1644 .se_width_idx = HISTC_LOCAL_INS_LAT,
1647 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1648 size_t size, unsigned int width)
1650 return repsep_snprintf(bf, size, "%-*u", width,
1651 he->ins_lat * he->stat.nr_events);
1654 struct sort_entry sort_global_ins_lat = {
1655 .se_header = "INSTR Latency",
1656 .se_cmp = sort__ins_lat_cmp,
1657 .se_snprintf = hist_entry__global_ins_lat_snprintf,
1658 .se_width_idx = HISTC_GLOBAL_INS_LAT,
1662 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1664 return left->p_stage_cyc - right->p_stage_cyc;
1667 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1668 size_t size, unsigned int width)
1670 return repsep_snprintf(bf, size, "%-*u", width,
1671 he->p_stage_cyc * he->stat.nr_events);
1675 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1676 size_t size, unsigned int width)
1678 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1681 struct sort_entry sort_local_p_stage_cyc = {
1682 .se_header = "Local Pipeline Stage Cycle",
1683 .se_cmp = sort__p_stage_cyc_cmp,
1684 .se_snprintf = hist_entry__p_stage_cyc_snprintf,
1685 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
1688 struct sort_entry sort_global_p_stage_cyc = {
1689 .se_header = "Pipeline Stage Cycle",
1690 .se_cmp = sort__p_stage_cyc_cmp,
1691 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
1692 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
1695 struct sort_entry sort_mem_daddr_sym = {
1696 .se_header = "Data Symbol",
1697 .se_cmp = sort__daddr_cmp,
1698 .se_snprintf = hist_entry__daddr_snprintf,
1699 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1702 struct sort_entry sort_mem_iaddr_sym = {
1703 .se_header = "Code Symbol",
1704 .se_cmp = sort__iaddr_cmp,
1705 .se_snprintf = hist_entry__iaddr_snprintf,
1706 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1709 struct sort_entry sort_mem_daddr_dso = {
1710 .se_header = "Data Object",
1711 .se_cmp = sort__dso_daddr_cmp,
1712 .se_snprintf = hist_entry__dso_daddr_snprintf,
1713 .se_width_idx = HISTC_MEM_DADDR_DSO,
1716 struct sort_entry sort_mem_locked = {
1717 .se_header = "Locked",
1718 .se_cmp = sort__locked_cmp,
1719 .se_snprintf = hist_entry__locked_snprintf,
1720 .se_width_idx = HISTC_MEM_LOCKED,
1723 struct sort_entry sort_mem_tlb = {
1724 .se_header = "TLB access",
1725 .se_cmp = sort__tlb_cmp,
1726 .se_snprintf = hist_entry__tlb_snprintf,
1727 .se_width_idx = HISTC_MEM_TLB,
1730 struct sort_entry sort_mem_lvl = {
1731 .se_header = "Memory access",
1732 .se_cmp = sort__lvl_cmp,
1733 .se_snprintf = hist_entry__lvl_snprintf,
1734 .se_width_idx = HISTC_MEM_LVL,
1737 struct sort_entry sort_mem_snoop = {
1738 .se_header = "Snoop",
1739 .se_cmp = sort__snoop_cmp,
1740 .se_snprintf = hist_entry__snoop_snprintf,
1741 .se_width_idx = HISTC_MEM_SNOOP,
1744 struct sort_entry sort_mem_dcacheline = {
1745 .se_header = "Data Cacheline",
1746 .se_cmp = sort__dcacheline_cmp,
1747 .se_snprintf = hist_entry__dcacheline_snprintf,
1748 .se_width_idx = HISTC_MEM_DCACHELINE,
1752 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1754 union perf_mem_data_src data_src_l;
1755 union perf_mem_data_src data_src_r;
1758 data_src_l = left->mem_info->data_src;
1760 data_src_l.mem_blk = PERF_MEM_BLK_NA;
1762 if (right->mem_info)
1763 data_src_r = right->mem_info->data_src;
1765 data_src_r.mem_blk = PERF_MEM_BLK_NA;
1767 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
1770 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1771 size_t size, unsigned int width)
1775 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1776 return repsep_snprintf(bf, size, "%.*s", width, out);
1779 struct sort_entry sort_mem_blocked = {
1780 .se_header = "Blocked",
1781 .se_cmp = sort__blocked_cmp,
1782 .se_snprintf = hist_entry__blocked_snprintf,
1783 .se_width_idx = HISTC_MEM_BLOCKED,
1787 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1789 uint64_t l = 0, r = 0;
1792 l = left->mem_info->daddr.phys_addr;
1793 if (right->mem_info)
1794 r = right->mem_info->daddr.phys_addr;
1796 return (int64_t)(r - l);
1799 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1800 size_t size, unsigned int width)
1804 size_t len = BITS_PER_LONG / 4;
1806 addr = he->mem_info->daddr.phys_addr;
1808 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1810 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1812 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1820 struct sort_entry sort_mem_phys_daddr = {
1821 .se_header = "Data Physical Address",
1822 .se_cmp = sort__phys_daddr_cmp,
1823 .se_snprintf = hist_entry__phys_daddr_snprintf,
1824 .se_width_idx = HISTC_MEM_PHYS_DADDR,
1828 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1830 uint64_t l = 0, r = 0;
1833 l = left->mem_info->daddr.data_page_size;
1834 if (right->mem_info)
1835 r = right->mem_info->daddr.data_page_size;
1837 return (int64_t)(r - l);
1840 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1841 size_t size, unsigned int width)
1843 char str[PAGE_SIZE_NAME_LEN];
1845 return repsep_snprintf(bf, size, "%-*s", width,
1846 get_page_size_name(he->mem_info->daddr.data_page_size, str));
1849 struct sort_entry sort_mem_data_page_size = {
1850 .se_header = "Data Page Size",
1851 .se_cmp = sort__data_page_size_cmp,
1852 .se_snprintf = hist_entry__data_page_size_snprintf,
1853 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
1857 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1859 uint64_t l = left->code_page_size;
1860 uint64_t r = right->code_page_size;
1862 return (int64_t)(r - l);
1865 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1866 size_t size, unsigned int width)
1868 char str[PAGE_SIZE_NAME_LEN];
1870 return repsep_snprintf(bf, size, "%-*s", width,
1871 get_page_size_name(he->code_page_size, str));
1874 struct sort_entry sort_code_page_size = {
1875 .se_header = "Code Page Size",
1876 .se_cmp = sort__code_page_size_cmp,
1877 .se_snprintf = hist_entry__code_page_size_snprintf,
1878 .se_width_idx = HISTC_CODE_PAGE_SIZE,
1882 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1884 if (!left->branch_info || !right->branch_info)
1885 return cmp_null(left->branch_info, right->branch_info);
1887 return left->branch_info->flags.abort !=
1888 right->branch_info->flags.abort;
1891 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1892 size_t size, unsigned int width)
1894 static const char *out = "N/A";
1896 if (he->branch_info) {
1897 if (he->branch_info->flags.abort)
1903 return repsep_snprintf(bf, size, "%-*s", width, out);
1906 struct sort_entry sort_abort = {
1907 .se_header = "Transaction abort",
1908 .se_cmp = sort__abort_cmp,
1909 .se_snprintf = hist_entry__abort_snprintf,
1910 .se_width_idx = HISTC_ABORT,
1914 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1916 if (!left->branch_info || !right->branch_info)
1917 return cmp_null(left->branch_info, right->branch_info);
1919 return left->branch_info->flags.in_tx !=
1920 right->branch_info->flags.in_tx;
1923 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1924 size_t size, unsigned int width)
1926 static const char *out = "N/A";
1928 if (he->branch_info) {
1929 if (he->branch_info->flags.in_tx)
1935 return repsep_snprintf(bf, size, "%-*s", width, out);
1938 struct sort_entry sort_in_tx = {
1939 .se_header = "Branch in transaction",
1940 .se_cmp = sort__in_tx_cmp,
1941 .se_snprintf = hist_entry__in_tx_snprintf,
1942 .se_width_idx = HISTC_IN_TX,
1946 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1948 return left->transaction - right->transaction;
1951 static inline char *add_str(char *p, const char *str)
1954 return p + strlen(str);
1957 static struct txbit {
1962 { PERF_TXN_ELISION, "EL ", 0 },
1963 { PERF_TXN_TRANSACTION, "TX ", 1 },
1964 { PERF_TXN_SYNC, "SYNC ", 1 },
1965 { PERF_TXN_ASYNC, "ASYNC ", 0 },
1966 { PERF_TXN_RETRY, "RETRY ", 0 },
1967 { PERF_TXN_CONFLICT, "CON ", 0 },
1968 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
1969 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
1973 int hist_entry__transaction_len(void)
1978 for (i = 0; txbits[i].name; i++) {
1979 if (!txbits[i].skip_for_len)
1980 len += strlen(txbits[i].name);
1982 len += 4; /* :XX<space> */
1986 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
1987 size_t size, unsigned int width)
1989 u64 t = he->transaction;
1995 for (i = 0; txbits[i].name; i++)
1996 if (txbits[i].flag & t)
1997 p = add_str(p, txbits[i].name);
1998 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
1999 p = add_str(p, "NEITHER ");
2000 if (t & PERF_TXN_ABORT_MASK) {
2001 sprintf(p, ":%" PRIx64,
2002 (t & PERF_TXN_ABORT_MASK) >>
2003 PERF_TXN_ABORT_SHIFT);
2007 return repsep_snprintf(bf, size, "%-*s", width, buf);
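/*
 * For illustration: a transaction word with PERF_TXN_TRANSACTION,
 * PERF_TXN_SYNC and PERF_TXN_CAPACITY_WRITE set and an abort code of 5
 * renders as "TX SYNC CAP-WRITE :5".
 */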
2010 struct sort_entry sort_transaction = {
2011 .se_header = "Transaction ",
2012 .se_cmp = sort__transaction_cmp,
2013 .se_snprintf = hist_entry__transaction_snprintf,
2014 .se_width_idx = HISTC_TRANSACTION,
2017 /* --sort symbol_size */
2019 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2021 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2022 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2024 return size_l < size_r ? -1 :
2025 size_l == size_r ? 0 : 1;
2029 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2031 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
2034 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2035 size_t bf_size, unsigned int width)
2038 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2040 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2043 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2044 size_t size, unsigned int width)
2046 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2049 struct sort_entry sort_sym_size = {
2050 .se_header = "Symbol size",
2051 .se_cmp = sort__sym_size_cmp,
2052 .se_snprintf = hist_entry__sym_size_snprintf,
2053 .se_width_idx = HISTC_SYM_SIZE,
2056 /* --sort dso_size */
2058 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2060 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2061 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2063 return size_l < size_r ? -1 :
2064 size_l == size_r ? 0 : 1;
2068 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2070 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
2073 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2074 size_t bf_size, unsigned int width)
2076 if (map && map__dso(map))
2077 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2079 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2082 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2083 size_t size, unsigned int width)
2085 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2088 struct sort_entry sort_dso_size = {
2089 .se_header = "DSO size",
2090 .se_cmp = sort__dso_size_cmp,
2091 .se_snprintf = hist_entry__dso_size_snprintf,
2092 .se_width_idx = HISTC_DSO_SIZE,
2095 /* --sort addr */
2098 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2100 u64 left_ip = left->ip;
2101 u64 right_ip = right->ip;
2102 struct map *left_map = left->ms.map;
2103 struct map *right_map = right->ms.map;
2106 left_ip = map__unmap_ip(left_map, left_ip);
2108 right_ip = map__unmap_ip(right_map, right_ip);
2110 return _sort__addr_cmp(left_ip, right_ip);
2113 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2114 size_t size, unsigned int width)
2117 struct map *map = he->ms.map;
2120 ip = map__unmap_ip(map, ip);
2122 return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2125 struct sort_entry sort_addr = {
2126 .se_header = "Address",
2127 .se_cmp = sort__addr_cmp,
2128 .se_snprintf = hist_entry__addr_snprintf,
2129 .se_width_idx = HISTC_ADDR,
2133 struct sort_dimension {
2135 struct sort_entry *entry;
2139 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2144 const char * __weak arch_perf_header_entry(const char *se_header)
2149 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2151 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2154 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
2156 static struct sort_dimension common_sort_dimensions[] = {
2157 DIM(SORT_PID, "pid", sort_thread),
2158 DIM(SORT_COMM, "comm", sort_comm),
2159 DIM(SORT_DSO, "dso", sort_dso),
2160 DIM(SORT_SYM, "symbol", sort_sym),
2161 DIM(SORT_PARENT, "parent", sort_parent),
2162 DIM(SORT_CPU, "cpu", sort_cpu),
2163 DIM(SORT_SOCKET, "socket", sort_socket),
2164 DIM(SORT_SRCLINE, "srcline", sort_srcline),
2165 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2166 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2167 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2168 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2169 #ifdef HAVE_LIBTRACEEVENT
2170 DIM(SORT_TRACE, "trace", sort_trace),
2172 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2173 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2174 DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2175 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2176 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2177 DIM(SORT_TIME, "time", sort_time),
2178 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2179 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2180 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2181 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2182 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2183 DIM(SORT_ADDR, "addr", sort_addr),
2184 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2185 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2186 DIM(SORT_SIMD, "simd", sort_simd)
2191 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2193 static struct sort_dimension bstack_sort_dimensions[] = {
2194 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2195 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2196 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2197 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2198 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2199 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2200 DIM(SORT_ABORT, "abort", sort_abort),
2201 DIM(SORT_CYCLES, "cycles", sort_cycles),
2202 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2203 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2204 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2205 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2206 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
2211 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2213 static struct sort_dimension memory_sort_dimensions[] = {
2214 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2215 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2216 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2217 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2218 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2219 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2220 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2221 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2222 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2223 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2224 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
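/*
 * Illustrative use of the tables above: a sort spec such as
 * "perf report -s comm,dso,symbol_daddr" resolves "comm" and "dso" from
 * common_sort_dimensions and "symbol_daddr" from memory_sort_dimensions
 * (memory keys are only meaningful when memory events were sampled);
 * an unknown token makes setup_sorting() fail with an error.
 */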
2229 struct hpp_dimension {
2231 struct perf_hpp_fmt *fmt;
2235 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2237 static struct hpp_dimension hpp_sort_dimensions[] = {
2238 DIM(PERF_HPP__OVERHEAD, "overhead"),
2239 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2240 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2241 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2242 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2243 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2244 DIM(PERF_HPP__SAMPLES, "sample"),
2245 DIM(PERF_HPP__PERIOD, "period"),
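/*
 * Each sort_entry is exposed to the output code as a perf_hpp_fmt by embedding
 * the fmt in the wrapper below; the __sort__hpp_* callbacks recover the owning
 * entry with container_of() and forward to the se_* methods.
 */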
2250 struct hpp_sort_entry {
2251 struct perf_hpp_fmt hpp;
2252 struct sort_entry *se;
2255 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2257 struct hpp_sort_entry *hse;
2259 if (!perf_hpp__is_sort_entry(fmt))
2262 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2263 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists, int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
}

static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(hists, hse->se->se_width_idx);

	return len;
}

static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_sort_entry *hse;
	size_t len = fmt->user_len;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (!len)
		len = hists__col_len(he->hists, hse->se->se_width_idx);

	return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
}

static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	return hse->se->se_cmp(a, b);
}

static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
				    struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
	return collapse_fn(a, b);
}

static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
				struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_sort_entry *hse;
	int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
	return sort_fn(a, b);
}

bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
{
	return format->header == __sort__hpp_header;
}

#define MK_SORT_ENTRY_CHK(key)						\
bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt)		\
{									\
	struct hpp_sort_entry *hse;					\
									\
	if (!perf_hpp__is_sort_entry(fmt))				\
		return false;						\
									\
	hse = container_of(fmt, struct hpp_sort_entry, hpp);		\
	return hse->se == &sort_ ## key ;				\
}

#ifdef HAVE_LIBTRACEEVENT
MK_SORT_ENTRY_CHK(trace)
#else
bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
	return false;
}
#endif
MK_SORT_ENTRY_CHK(srcline)
MK_SORT_ENTRY_CHK(srcfile)
MK_SORT_ENTRY_CHK(thread)
MK_SORT_ENTRY_CHK(comm)
MK_SORT_ENTRY_CHK(dso)
MK_SORT_ENTRY_CHK(sym)

static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_sort_entry *hse_a;
	struct hpp_sort_entry *hse_b;

	if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
		return false;

	hse_a = container_of(a, struct hpp_sort_entry, hpp);
	hse_b = container_of(b, struct hpp_sort_entry, hpp);

	return hse_a->se == hse_b->se;
}

static void hse_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_sort_entry *hse;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);
	free(hse);
}

static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
{
	struct hpp_sort_entry *hse;

	if (!perf_hpp__is_sort_entry(fmt))
		return;

	hse = container_of(fmt, struct hpp_sort_entry, hpp);

	if (hse->se->se_init)
		hse->se->se_init(he);
}

static struct hpp_sort_entry *
__sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
{
	struct hpp_sort_entry *hse;

	hse = malloc(sizeof(*hse));
	if (hse == NULL) {
		pr_err("Memory allocation failed\n");
		return NULL;
	}

	hse->se = sd->entry;
	hse->hpp.name = sd->entry->se_header;
	hse->hpp.header = __sort__hpp_header;
	hse->hpp.width = __sort__hpp_width;
	hse->hpp.entry = __sort__hpp_entry;
	hse->hpp.color = NULL;

	hse->hpp.cmp = __sort__hpp_cmp;
	hse->hpp.collapse = __sort__hpp_collapse;
	hse->hpp.sort = __sort__hpp_sort;
	hse->hpp.equal = __sort__hpp_equal;
	hse->hpp.free = hse_free;
	hse->hpp.init = hse_init;

	INIT_LIST_HEAD(&hse->hpp.list);
	INIT_LIST_HEAD(&hse->hpp.sort_list);
	hse->hpp.elide = false;
	hse->hpp.len = 0;
	hse->hpp.user_len = 0;
	hse->hpp.level = level;

	return hse;
}

static void hpp_free(struct perf_hpp_fmt *fmt)
{
	free(fmt);
}

static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
						       int level)
{
	struct perf_hpp_fmt *fmt;

	fmt = memdup(hd->fmt, sizeof(*fmt));
	if (fmt) {
		INIT_LIST_HEAD(&fmt->list);
		INIT_LIST_HEAD(&fmt->sort_list);
		fmt->free = hpp_free;
		fmt->level = level;
	}

	return fmt;
}

int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;
	int ret = -1;
	int r;

	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_filter == NULL)
			continue;

		/*
		 * A hist entry is filtered if any of the sort keys in the
		 * hpp list is applied.  But it should skip non-matched
		 * filter types.
		 */
		r = hse->se->se_filter(he, type, arg);
		if (r >= 0) {
			if (ret < 0)
				ret = 0;
			ret |= r;
		}
	}

	return ret;
}

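/*
 * The result of hist_entry__filter() is therefore -1 when no sort key in
 * the hpp list handles the given filter type, and otherwise the OR of the
 * per-key results, so a single matching sort key is enough to filter the
 * entry out.
 */
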
static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
					  struct perf_hpp_list *list,
					  int level)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);

	if (hse == NULL)
		return -1;

	perf_hpp_list__register_sort_field(list, &hse->hpp);
	return 0;
}

static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
					    struct perf_hpp_list *list)
{
	struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);

	if (hse == NULL)
		return -1;

	perf_hpp_list__column_register(list, &hse->hpp);
	return 0;
}

#ifndef HAVE_LIBTRACEEVENT
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
{
	return false;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
				     struct hists *hists __maybe_unused)
{
	return false;
}
#else
struct hpp_dynamic_entry {
	struct perf_hpp_fmt hpp;
	struct evsel *evsel;
	struct tep_format_field *field;
	unsigned dynamic_len;
	bool raw_trace;
};

static int hde_width(struct hpp_dynamic_entry *hde)
{
	if (!hde->hpp.len) {
		int len = hde->dynamic_len;
		int namelen = strlen(hde->field->name);
		int fieldlen = hde->field->size;

		if (namelen > len)
			len = namelen;

		if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
			/* length for printing hex numbers */
			fieldlen = hde->field->size * 2 + 2;
		}
		if (fieldlen > len)
			len = fieldlen;

		hde->hpp.len = len;
	}
	return hde->hpp.len;
}

static void update_dynamic_len(struct hpp_dynamic_entry *hde,
			       struct hist_entry *he)
{
	char *str, *pos;
	struct tep_format_field *field = hde->field;
	size_t namelen;
	bool last = false;

	if (hde->raw_trace)
		return;

	/* parse pretty print result and update max length */
	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			size_t len;

			str += namelen + 1;
			len = pos - str;

			if (len > hde->dynamic_len)
				hde->dynamic_len = len;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}
}

static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			      struct hists *hists __maybe_unused,
			      int line __maybe_unused,
			      int *span __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
}

static int __sort__hde_width(struct perf_hpp_fmt *fmt,
			     struct perf_hpp *hpp __maybe_unused,
			     struct hists *hists __maybe_unused)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	return len;
}

bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	return hists_to_evsel(hists) == hde->evsel;
}

static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			     struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;
	size_t len = fmt->user_len;
	char *str, *pos;
	struct tep_format_field *field;
	size_t namelen;
	bool last = false;
	int ret;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	if (!len)
		len = hde_width(hde);

	if (hde->raw_trace)
		goto raw_field;

	if (!he->trace_output)
		he->trace_output = get_trace_output(he);

	field = hde->field;
	namelen = strlen(field->name);
	str = he->trace_output;

	while (str) {
		pos = strchr(str, ' ');
		if (pos == NULL) {
			last = true;
			pos = str + strlen(str);
		}

		if (!strncmp(str, field->name, namelen)) {
			str += namelen + 1;
			str = strndup(str, pos - str);

			if (str == NULL)
				return scnprintf(hpp->buf, hpp->size,
						 "%*.*s", len, len, "ERROR");
			break;
		}

		if (last)
			str = NULL;
		else
			str = pos + 1;
	}

	if (str == NULL) {
		struct trace_seq seq;
raw_field:
		trace_seq_init(&seq);
		tep_print_field(&seq, he->raw_data, hde->field);
		str = seq.buffer;
	}

	ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
	free(str);
	return ret;
}

static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
			       struct hist_entry *a, struct hist_entry *b)
{
	struct hpp_dynamic_entry *hde;
	struct tep_format_field *field;
	unsigned offset, size;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);

	field = hde->field;
	if (field->flags & TEP_FIELD_IS_DYNAMIC) {
		unsigned long long dyn;

		tep_read_number_field(field, a->raw_data, &dyn);
		offset = dyn & 0xffff;
		size = (dyn >> 16) & 0xffff;
		if (tep_field_is_relative(field->flags))
			offset += field->offset + field->size;

		/* record max width for output */
		if (size > hde->dynamic_len)
			hde->dynamic_len = size;
	} else {
		offset = field->offset;
		size = field->size;
	}

	return memcmp(a->raw_data + offset, b->raw_data + offset, size);
}

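/*
 * For a TEP_FIELD_IS_DYNAMIC field the value embedded in the sample is a
 * descriptor rather than the data itself: the low 16 bits give the offset
 * of the payload within the raw data and the next 16 bits give its size
 * (for __rel_loc fields the offset is relative to the end of the
 * descriptor field, hence the tep_field_is_relative() adjustment above).
 */
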
bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
{
	return fmt->cmp == __sort__hde_cmp;
}

static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	struct hpp_dynamic_entry *hde_a;
	struct hpp_dynamic_entry *hde_b;

	if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
		return false;

	hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
	hde_b = container_of(b, struct hpp_dynamic_entry, hpp);

	return hde_a->field == hde_b->field;
}

static void hde_free(struct perf_hpp_fmt *fmt)
{
	struct hpp_dynamic_entry *hde;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	free(hde);
}

static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
{
	struct hpp_dynamic_entry *hde;

	if (!perf_hpp__is_dynamic_entry(fmt))
		return;

	hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
	update_dynamic_len(hde, he);
}

static struct hpp_dynamic_entry *
__alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
		      int level)
{
	struct hpp_dynamic_entry *hde;

	hde = malloc(sizeof(*hde));
	if (hde == NULL) {
		pr_debug("Memory allocation failed\n");
		return NULL;
	}

	hde->evsel = evsel;
	hde->field = field;
	hde->dynamic_len = 0;

	hde->hpp.name = field->name;
	hde->hpp.header = __sort__hde_header;
	hde->hpp.width = __sort__hde_width;
	hde->hpp.entry = __sort__hde_entry;
	hde->hpp.color = NULL;

	hde->hpp.init = __sort__hde_init;
	hde->hpp.cmp = __sort__hde_cmp;
	hde->hpp.collapse = __sort__hde_cmp;
	hde->hpp.sort = __sort__hde_cmp;
	hde->hpp.equal = __sort__hde_equal;
	hde->hpp.free = hde_free;

	INIT_LIST_HEAD(&hde->hpp.list);
	INIT_LIST_HEAD(&hde->hpp.sort_list);
	hde->hpp.elide = false;
	hde->hpp.len = 0;
	hde->hpp.user_len = 0;
	hde->hpp.level = level;

	return hde;
}
#endif /* HAVE_LIBTRACEEVENT */

struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_fmt *new_fmt = NULL;

	if (perf_hpp__is_sort_entry(fmt)) {
		struct hpp_sort_entry *hse, *new_hse;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		new_hse = memdup(hse, sizeof(*hse));
		if (new_hse)
			new_fmt = &new_hse->hpp;
#ifdef HAVE_LIBTRACEEVENT
	} else if (perf_hpp__is_dynamic_entry(fmt)) {
		struct hpp_dynamic_entry *hde, *new_hde;

		hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
		new_hde = memdup(hde, sizeof(*hde));
		if (new_hde)
			new_fmt = &new_hde->hpp;
#endif
	} else {
		new_fmt = memdup(fmt, sizeof(*fmt));
	}

	if (new_fmt) {
		INIT_LIST_HEAD(&new_fmt->list);
		INIT_LIST_HEAD(&new_fmt->sort_list);
	}

	return new_fmt;
}

static int parse_field_name(char *str, char **event, char **field, char **opt)
{
	char *event_name, *field_name, *opt_name;

	event_name = str;
	field_name = strchr(str, '.');

	if (field_name) {
		*field_name++ = '\0';
	} else {
		event_name = NULL;
		field_name = str;
	}

	opt_name = strchr(field_name, '/');
	if (opt_name)
		*opt_name++ = '\0';

	*event = event_name;
	*field = field_name;
	*opt   = opt_name;

	return 0;
}

/* Find the matching evsel using a given event name.  The event name can be:
 *   1. '%' + event index (e.g. '%1' for the first event)
 *   2. full event name (e.g. sched:sched_switch)
 *   3. partial event name (should not contain ':')
 */
static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
{
	struct evsel *evsel = NULL;
	struct evsel *pos;
	bool full_name;

	/* case 1 */
	if (event_name[0] == '%') {
		int nr = strtol(event_name + 1, NULL, 0);

		if (nr > evlist->core.nr_entries)
			return NULL;

		evsel = evlist__first(evlist);
		while (--nr > 0)
			evsel = evsel__next(evsel);

		return evsel;
	}

	full_name = !!strchr(event_name, ':');
	evlist__for_each_entry(evlist, pos) {
		/* case 2 */
		if (full_name && evsel__name_is(pos, event_name))
			return pos;
		/* case 3 */
		if (!full_name && strstr(pos->name, event_name)) {
			if (evsel) {
				pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
					 event_name, evsel->name, pos->name);
				return NULL;
			}
			evsel = pos;
		}
	}

	return evsel;
}

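/*
 * Illustration (hypothetical events, not taken from this file): with an
 * evlist recorded as "-e sched:sched_switch -e sched:sched_wakeup", the
 * dynamic key "%2.pid" picks the second event by index,
 * "sched:sched_switch.prev_state" matches by full name, and
 * "switch.prev_state" matches as a substring; an ambiguous substring such
 * as "sched" makes find_evsel() report the candidates and return NULL.
 */
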
#ifdef HAVE_LIBTRACEEVENT
static int __dynamic_dimension__add(struct evsel *evsel,
				    struct tep_format_field *field,
				    bool raw_trace, int level)
{
	struct hpp_dynamic_entry *hde;

	hde = __alloc_dynamic_entry(evsel, field, level);
	if (hde == NULL)
		return -ENOMEM;

	hde->raw_trace = raw_trace;

	perf_hpp__register_sort_field(&hde->hpp);
	return 0;
}

static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
{
	int ret;
	struct tep_format_field *field;

	field = evsel->tp_format->format.fields;
	while (field) {
		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			return ret;

		field = field->next;
	}
	return 0;
}

static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
				  int level)
{
	int ret;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		ret = add_evsel_fields(evsel, raw_trace, level);
		if (ret < 0)
			return ret;
	}
	return 0;
}

static int add_all_matching_fields(struct evlist *evlist,
				   char *field_name, bool raw_trace, int level)
{
	int ret = -ESRCH;
	struct evsel *evsel;
	struct tep_format_field *field;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		field = tep_find_any_field(evsel->tp_format, field_name);
		if (field == NULL)
			continue;

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
		if (ret < 0)
			break;
	}
	return ret;
}
#endif /* HAVE_LIBTRACEEVENT */

static int add_dynamic_entry(struct evlist *evlist, const char *tok,
			     int level)
{
	char *str, *event_name, *field_name, *opt_name;
	struct evsel *evsel;
	bool raw_trace = symbol_conf.raw_trace;
	int ret = 0;

	if (evlist == NULL)
		return -ENOENT;

	str = strdup(tok);
	if (str == NULL)
		return -ENOMEM;

	if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
		ret = -EINVAL;
		goto out;
	}

	if (opt_name) {
		if (strcmp(opt_name, "raw")) {
			pr_debug("unsupported field option %s\n", opt_name);
			ret = -EINVAL;
			goto out;
		}
		raw_trace = true;
	}

#ifdef HAVE_LIBTRACEEVENT
	if (!strcmp(field_name, "trace_fields")) {
		ret = add_all_dynamic_fields(evlist, raw_trace, level);
		goto out;
	}

	if (event_name == NULL) {
		ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
		goto out;
	}
#else
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
			pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
			ret = -ENOTSUP;
		}
	}

	if (ret) {
		pr_err("\n");
		goto out;
	}
#endif

	evsel = find_evsel(evlist, event_name);
	if (evsel == NULL) {
		pr_debug("Cannot find event: %s\n", event_name);
		ret = -ENOENT;
		goto out;
	}

	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
		pr_debug("%s is not a tracepoint event\n", event_name);
		ret = -EINVAL;
		goto out;
	}

#ifdef HAVE_LIBTRACEEVENT
	if (!strcmp(field_name, "*")) {
		ret = add_evsel_fields(evsel, raw_trace, level);
	} else {
		struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);

		if (field == NULL) {
			pr_debug("Cannot find event field for %s.%s\n",
				 event_name, field_name);
			ret = -ENOENT;
			goto out;
		}

		ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
	}
#endif /* HAVE_LIBTRACEEVENT */

out:
	free(str);
	return ret;
}

static int __sort_dimension__add(struct sort_dimension *sd,
				 struct perf_hpp_list *list,
				 int level)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
		return -1;

	if (sd->entry->se_collapse)
		list->need_collapse = 1;

	sd->taken = 1;

	return 0;
}

static int __hpp_dimension__add(struct hpp_dimension *hd,
				struct perf_hpp_list *list,
				int level)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, level);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__register_sort_field(list, fmt);
	return 0;
}

static int __sort_dimension__add_output(struct perf_hpp_list *list,
					struct sort_dimension *sd)
{
	if (sd->taken)
		return 0;

	if (__sort_dimension__add_hpp_output(sd, list) < 0)
		return -1;

	sd->taken = 1;
	return 0;
}

static int __hpp_dimension__add_output(struct perf_hpp_list *list,
				       struct hpp_dimension *hd)
{
	struct perf_hpp_fmt *fmt;

	if (hd->taken)
		return 0;

	fmt = __hpp_dimension__alloc_hpp(hd, 0);
	if (!fmt)
		return -1;

	hd->taken = 1;
	perf_hpp_list__column_register(list, fmt);
	return 0;
}

int hpp_dimension__add_output(unsigned col)
{
	BUG_ON(col >= PERF_HPP__MAX_INDEX);
	return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
}

int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
			struct evlist *evlist,
			int level)
{
	unsigned int i, j;

	/*
	 * Check to see if there are any arch specific
	 * sort dimensions not applicable for the current
	 * architecture. If so, skip that sort key since
	 * we don't want to display it in the output fields.
	 */
	for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
		if (!strcmp(arch_specific_sort_keys[j], tok) &&
		    !arch_support_sort_key(tok)) {
			return 0;
		}
	}

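	/*
	 * Note that an unsupported arch specific key (e.g. "p_stage_cyc" on
	 * an architecture whose arch_support_sort_key() rejects it) is
	 * silently dropped above rather than reported as an invalid key.
	 */
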
	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
			if (sd->name && !strcmp(dynamic_headers[j], sd->name))
				sort_dimension_add_dynamic_header(sd);
		}

		if (sd->entry == &sort_parent) {
			int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
			if (ret) {
				char err[BUFSIZ];

				regerror(ret, &parent_regex, err, sizeof(err));
				pr_err("Invalid regex: %s\n%s", parent_pattern, err);
				return -EINVAL;
			}
			list->parent = 1;
		} else if (sd->entry == &sort_sym) {
			list->sym = 1;
			/*
			 * perf diff displays the performance difference
			 * amongst two or more perf.data files.  Those files
			 * could come from different binaries, so we should
			 * not compare their ips, but the names of the
			 * symbols.
			 */
			if (sort__mode == SORT_MODE__DIFF)
				sd->entry->se_collapse = sort__sym_sort;

		} else if (sd->entry == &sort_dso) {
			list->dso = 1;
		} else if (sd->entry == &sort_socket) {
			list->socket = 1;
		} else if (sd->entry == &sort_thread) {
			list->thread = 1;
		} else if (sd->entry == &sort_comm) {
			list->comm = 1;
		}

		return __sort_dimension__add(sd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add(hd, list, level);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
			return -EINVAL;

		if (sd->entry == &sort_mem_daddr_sym)
			list->sym = 1;

		__sort_dimension__add(sd, list, level);
		return 0;
	}

	if (!add_dynamic_entry(evlist, tok, level))
		return 0;

	return -ESRCH;
}

static int setup_sort_list(struct perf_hpp_list *list, char *str,
			   struct evlist *evlist)
{
	char *tmp, *tok;
	int ret = 0;
	int level = 0;
	int next_level = 1;
	bool in_group = false;

	do {
		tok = str;
		tmp = strpbrk(str, "{}, ");
		if (tmp) {
			if (in_group)
				next_level = level;
			else
				next_level = level + 1;

			if (*tmp == '{')
				in_group = true;
			else if (*tmp == '}')
				in_group = false;

			*tmp = '\0';
			str = tmp + 1;
		}

		if (*tok) {
			ret = sort_dimension__add(list, tok, evlist, level);
			if (ret == -EINVAL) {
				if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
					ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
				else
					ui__error("Invalid --sort key: `%s'", tok);
				break;
			} else if (ret == -ESRCH) {
				ui__error("Unknown --sort key: `%s'", tok);
				break;
			}
		}

		level = next_level;
	} while (tmp);

	return ret;
}

static const char *get_default_sort_order(struct evlist *evlist)
{
	const char *default_sort_orders[] = {
		default_sort_order,
		default_branch_sort_order,
		default_mem_sort_order,
		default_top_sort_order,
		default_diff_sort_order,
		default_tracepoint_sort_order,
	};
	bool use_trace = true;
	struct evsel *evsel;

	BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));

	if (evlist == NULL || evlist__empty(evlist))
		goto out_no_evlist;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
			use_trace = false;
			break;
		}
	}

	if (use_trace) {
		sort__mode = SORT_MODE__TRACEPOINT;
		if (symbol_conf.raw_trace)
			return "trace_fields";
	}
out_no_evlist:
	return default_sort_orders[sort__mode];
}

static int setup_sort_order(struct evlist *evlist)
{
	char *new_sort_order;

	/*
	 * Append a '+'-prefixed sort order to the default sort order string.
	 */
	if (!sort_order || is_strict_order(sort_order))
		return 0;

	if (sort_order[1] == '\0') {
		ui__error("Invalid --sort key: `+'");
		return -EINVAL;
	}

	/*
	 * We allocate a new sort_order string, but we never free it,
	 * because it's checked over the rest of the code.
	 */
	if (asprintf(&new_sort_order, "%s,%s",
		     get_default_sort_order(evlist), sort_order + 1) < 0) {
		pr_err("Not enough memory to set up --sort");
		return -ENOMEM;
	}

	sort_order = new_sort_order;
	return 0;
}

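/*
 * For example, "--sort +period" (a non-strict order) expands here into the
 * default order with "period" appended, e.g. "comm,dso,symbol,period" in
 * the normal report mode, whereas a plain "--sort period" replaces the
 * default order entirely.
 */
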
/*
 * Adds a 'pre,' prefix into 'str' if 'pre' is
 * not already part of 'str'.
 */
static char *prefix_if_not_in(const char *pre, char *str)
{
	char *n;

	if (!str || strstr(str, pre))
		return str;

	if (asprintf(&n, "%s,%s", pre, str) < 0)
		n = NULL;

	free(str);
	return n;
}

static char *setup_overhead(char *keys)
{
	if (sort__mode == SORT_MODE__DIFF)
		return keys;

	keys = prefix_if_not_in("overhead", keys);

	if (symbol_conf.cumulate_callchain)
		keys = prefix_if_not_in("overhead_children", keys);

	return keys;
}

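/*
 * setup_overhead() keeps old command lines working: a key list such as
 * "dso,symbol" becomes "overhead,dso,symbol" here (and is additionally
 * prefixed with "overhead_children" when cumulative callchains are
 * enabled), unless the list already contains an overhead key or perf diff
 * is running.
 */
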
static int __setup_sorting(struct evlist *evlist)
{
	char *str;
	const char *sort_keys;
	int ret = 0;

	ret = setup_sort_order(evlist);
	if (ret)
		return ret;

	sort_keys = sort_order;
	if (sort_keys == NULL) {
		if (is_strict_order(field_order)) {
			/*
			 * If the user specified a field order but no sort
			 * order, we'll honor it and not add default sort
			 * orders.
			 */
			return 0;
		}

		sort_keys = get_default_sort_order(evlist);
	}

	str = strdup(sort_keys);
	if (str == NULL) {
		pr_err("Not enough memory to setup sort keys");
		return -ENOMEM;
	}

	/*
	 * Prepend overhead fields for backward compatibility.
	 */
	if (!is_strict_order(field_order)) {
		str = setup_overhead(str);
		if (str == NULL) {
			pr_err("Not enough memory to setup overhead keys");
			return -ENOMEM;
		}
	}

	ret = setup_sort_list(&perf_hpp_list, str, evlist);

	free(str);
	return ret;
}

void perf_hpp__set_elide(int idx, bool elide)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		if (hse->se->se_width_idx == idx) {
			fmt->elide = elide;
			break;
		}
	}
}

static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
{
	if (list && strlist__nr_entries(list) == 1) {
		if (fp != NULL)
			fprintf(fp, "# %s: %s\n", list_name,
				strlist__entry(list, 0)->s);
		return true;
	}
	return false;
}

static bool get_elide(int idx, FILE *output)
{
	switch (idx) {
	case HISTC_SYMBOL:
		return __get_elide(symbol_conf.sym_list, "symbol", output);
	case HISTC_DSO:
		return __get_elide(symbol_conf.dso_list, "dso", output);
	case HISTC_COMM:
		return __get_elide(symbol_conf.comm_list, "comm", output);
	default:
		break;
	}

	if (sort__mode != SORT_MODE__BRANCH)
		return false;

	switch (idx) {
	case HISTC_SYMBOL_FROM:
		return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
	case HISTC_SYMBOL_TO:
		return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
	case HISTC_DSO_FROM:
		return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
	case HISTC_DSO_TO:
		return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
	case HISTC_ADDR_FROM:
		return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
	case HISTC_ADDR_TO:
		return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
	default:
		break;
	}

	return false;
}

void sort__setup_elide(FILE *output)
{
	struct perf_hpp_fmt *fmt;
	struct hpp_sort_entry *hse;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		hse = container_of(fmt, struct hpp_sort_entry, hpp);
		fmt->elide = get_elide(hse->se->se_width_idx, output);
	}

	/*
	 * It makes no sense to elide all of the sort entries.
	 * Just revert them to show up again.
	 */
	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		if (!fmt->elide)
			return;
	}

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		if (!perf_hpp__is_sort_entry(fmt))
			continue;

		fmt->elide = false;
	}
}

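/*
 * The second and third loops above implement the "don't elide everything"
 * rule: columns stay elided only while at least one sort column remains
 * visible, so a report filtered down to a single dso/comm/symbol still
 * shows something meaningful.
 */
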
int output_field_add(struct perf_hpp_list *list, char *tok)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
		struct sort_dimension *sd = &common_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
		struct hpp_dimension *hd = &hpp_sort_dimensions[i];

		if (strncasecmp(tok, hd->name, strlen(tok)))
			continue;

		return __hpp_dimension__add_output(list, hd);
	}

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
		struct sort_dimension *sd = &bstack_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__BRANCH)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
		struct sort_dimension *sd = &memory_sort_dimensions[i];

		if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
			continue;

		if (sort__mode != SORT_MODE__MEMORY)
			return -EINVAL;

		return __sort_dimension__add_output(list, sd);
	}

	return -ESRCH;
}

static int setup_output_list(struct perf_hpp_list *list, char *str)
{
	char *tmp, *tok;
	int ret = 0;

	for (tok = strtok_r(str, ", ", &tmp);
			tok; tok = strtok_r(NULL, ", ", &tmp)) {
		ret = output_field_add(list, tok);
		if (ret == -EINVAL) {
			ui__error("Invalid --fields key: `%s'", tok);
			break;
		} else if (ret == -ESRCH) {
			ui__error("Unknown --fields key: `%s'", tok);
			break;
		}
	}

	return ret;
}

void reset_dimensions(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
		common_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
		hpp_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
		bstack_sort_dimensions[i].taken = 0;

	for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
		memory_sort_dimensions[i].taken = 0;
}

bool is_strict_order(const char *order)
{
	return order && (*order != '+');
}

static int __setup_output_field(void)
{
	char *str, *strp;
	int ret = -EINVAL;

	if (field_order == NULL)
		return 0;

	strp = str = strdup(field_order);
	if (str == NULL) {
		pr_err("Not enough memory to setup output fields");
		return -ENOMEM;
	}

	if (!is_strict_order(field_order))
		strp++;

	if (!strlen(strp)) {
		ui__error("Invalid --fields key: `+'");
		goto out;
	}

	ret = setup_output_list(&perf_hpp_list, strp);

out:
	free(str);
	return ret;
}

int setup_sorting(struct evlist *evlist)
{
	int err;

	err = __setup_sorting(evlist);
	if (err < 0)
		return err;

	if (parent_pattern != default_parent_pattern) {
		err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
		if (err < 0)
			return err;
	}

	reset_dimensions();

	/*
	 * perf diff doesn't use default hpp output fields.
	 */
	if (sort__mode != SORT_MODE__DIFF)
		perf_hpp__init();

	err = __setup_output_field();
	if (err < 0)
		return err;

	/* copy sort keys to output fields */
	perf_hpp__setup_output_field(&perf_hpp_list);
	/* and then copy output fields to sort keys */
	perf_hpp__append_sort_keys(&perf_hpp_list);

	/* setup hists-specific output fields */
	if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
		return -1;

	return 0;
}

void reset_output_field(void)
{
	perf_hpp_list.need_collapse = 0;
	perf_hpp_list.parent = 0;
	perf_hpp_list.sym = 0;
	perf_hpp_list.dso = 0;

	field_order = NULL;
	sort_order = NULL;

	reset_dimensions();
	perf_hpp__reset_output_field(&perf_hpp_list);
}

#define INDENT (3*8 + 1)

static void add_key(struct strbuf *sb, const char *str, int *llen)
{
	if (!str)
		return;

	if (*llen >= 75) {
		strbuf_addstr(sb, "\n\t\t\t ");
		*llen = INDENT;
	}
	strbuf_addf(sb, " %s", str);
	*llen += strlen(str) + 1;
}

static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
			    int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
				int *llen)
{
	int i;

	for (i = 0; i < n; i++)
		add_key(sb, s[i].name, llen);
}

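/*
 * add_key() wraps the generated key list at roughly 75 columns, continuing
 * on a new line indented by INDENT (three tabs plus a space) so that the
 * --sort/--fields help text lines up under the option description.
 */
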
char *sort_help(const char *prefix)
{
	struct strbuf sb;
	char *s;
	int len = strlen(prefix) + INDENT;

	strbuf_init(&sb, 300);
	strbuf_addstr(&sb, prefix);
	add_hpp_sort_string(&sb, hpp_sort_dimensions,
			    ARRAY_SIZE(hpp_sort_dimensions), &len);
	add_sort_string(&sb, common_sort_dimensions,
			ARRAY_SIZE(common_sort_dimensions), &len);
	add_sort_string(&sb, bstack_sort_dimensions,
			ARRAY_SIZE(bstack_sort_dimensions), &len);
	add_sort_string(&sb, memory_sort_dimensions,
			ARRAY_SIZE(memory_sort_dimensions), &len);
	s = strbuf_detach(&sb, NULL);
	strbuf_release(&sb);
	return s;
}