// SPDX-License-Identifier: GPL-2.0
#include "map_symbol.h"
#include "mem-events.h"
#include "namespaces.h"
#include "block-info.h"
#include "ui/progress.h"
#include <sys/param.h>
#include <linux/rbtree.h>
#include <linux/string.h>
#include <linux/time64.h>
#include <linux/zalloc.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
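
/*
 * unresolved_col_width (BITS_PER_LONG / 4) is the number of hex digits
 * in a raw pointer on this arch, e.g. 16 on 64-bit, i.e. just enough
 * room to print a bare address for entries we could not resolve.
 */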
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->block_info)
		return;
	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(map__dso(h->ms.map));
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					   strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					   strlen(h->branch_info->srcline_to));
	}
	if (h->mem_info) {
		if (h->mem_info->daddr.ms.sym) {
			symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (h->mem_info->iaddr.ms.sym) {
			symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (h->mem_info->daddr.ms.map) {
			symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}
	hists__new_col_len(hists, HISTC_CGROUP, 6);
	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);

	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);
	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

	if (h->cgroup) {
		const char *cgrp_name = "unknown";
		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
						   h->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;

		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
	}
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static long hist_time(unsigned long htime)
{
	unsigned long time_quantum = symbol_conf.time_quantum;

	if (time_quantum)
		return (htime / time_quantum) * time_quantum;
	return htime;
}
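
/*
 * Example: with a time quantum of 100ms (the 'perf report'
 * --time-quantum default), every sample timestamp is rounded down to
 * the start of its 100ms bucket, so all samples in one bucket share a
 * 'time' key and collapse into the same entry.
 */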
static void he_stat__add_period(struct he_stat *he_stat, u64 period)
{
	he_stat->period += period;
	he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period += src->period;
	dest->period_sys += src->period_sys;
	dest->period_us += src->period_us;
	dest->period_guest_sys += src->period_guest_sys;
	dest->period_guest_us += src->period_guest_us;
	dest->nr_events += src->nr_events;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
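
/*
 * A worked example of the 7/8 decay: a period of 1024 drops to 896
 * after one pass and 784 after two, falling below half of its original
 * weight after six passes ((7/8)^6 ~= 0.45), so stale entries fade out
 * geometrically between refreshes.
 */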
static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);

	diff = prev_period - he->stat.period;

	if (!he->depth) {
		hists->stats.total_period -= diff;
		if (!he->filtered)
			hists->stats.total_non_filtered_period -= diff;
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);

		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0;
}
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n))) {
			hists__delete_entry(hists, n);
		}
	}
}
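
/*
 * 'zap_user'/'zap_kernel' drop entries wholesale by privilege level
 * (n->level is '.' for user-space symbols); everything else merely
 * decays and is only deleted once its period reaches zero.
 */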
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		hists__delete_entry(hists, n);
	}
}
struct hist_entry *hists__get_entry(struct hists *hists, int idx)
{
	struct rb_node *next = rb_first_cached(&hists->entries);
	struct hist_entry *n;
	int i = 0;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (i == idx)
			return n;

		next = rb_next(&n->rb_node);
		i++;
	}

	return NULL;
}

/*
 * histogram, sorted on item, collects periods
 */
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	he->ms.maps = maps__get(he->ms.maps);
	he->ms.map = map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (part of) an allocation made by
		 * sample__resolve_bstack() and will be freed after the
		 * new entries are added.  So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
	}

	if (he->mem_info) {
		he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
		he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(sizeof(struct res_sample),
					 symbol_conf.res_sample);
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	he->thread = thread__get(he->thread);
	he->hroot_in = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map__put(he->branch_info->from.ms.map);
		map__put(he->branch_info->to.ms.map);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map__put(he->mem_info->iaddr.ms.map);
		map__put(he->mem_info->daddr.ms.map);
	}
err:
	maps__zput(he->ms.maps);
	map__zput(he->ms.map);
	zfree(&he->stat_acc);
	return -ENOMEM;
}
static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};
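
/*
 * hist_entry_ops lets callers control (de)allocation so a hist_entry
 * can be embedded in a larger private struct; 'perf c2c', for instance,
 * pairs hists__add_entry_ops() with ops that reserve room for its own
 * per-entry data alongside the hist_entry proper.
 */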
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}
	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	if (!he->filtered)
		he->hists->callchain_non_filtered_period += period;
}
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       const struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);
		if (!cmp) {
			if (sample_self) {
				he_stat__add_period(&he->stat, period);
				hist_entry__add_callchain_period(he, period);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			mem_info__zput(entry->mem_info);

			block_info__zput(entry->block_info);

			kvm_info__zput(entry->kvm_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
static unsigned random_max(unsigned high)
{
	unsigned thresh = -high % high;

	for (;;) {
		unsigned r = random();

		if (r >= thresh)
			return r % high;
	}
}
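
/*
 * Rejection sampling to avoid modulo bias: a plain random() % high
 * over-represents small values whenever the generator's range is not a
 * multiple of 'high'.  'thresh' (-high % high in unsigned arithmetic)
 * is the size of that biased remainder; redrawing while r < thresh
 * makes the final r % high uniform, assuming random() itself is
 * uniform over the unsigned range.
 */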
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample)
		j = he->num_res++;
	else
		j = random_max(symbol_conf.res_sample);

	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}
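
/*
 * This keeps a bounded pool of representative samples per entry: the
 * first res_sample samples fill the array, after which each new sample
 * overwrites a uniformly chosen slot.  The pool backs the report
 * browser actions that jump to a concrete captured sample, e.g. by
 * launching 'perf script' at the recorded timestamp.
 */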
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct kvm_info *ki,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.cgroup = sample->cgroup,
		.ms = {
			.maps	= al->maps,
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.code_page_size = sample->code_page_size,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.kvm_info = ki,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
		.weight = sample->weight,
		.ins_lat = sample->ins_lat,
		.p_stage_cyc = sample->p_stage_cyc,
		.simd_flags = sample->simd_flags,
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}
struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct kvm_info *ki,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, NULL);
}

struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct kvm_info *ki,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, ops);
}
struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods.  We want sorting to be done on
	 * nr_events * weight and this is indirectly achieved by
	 * passing period=weight here and by the
	 * he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in hists__findnew_entry() or passed to a
	 * new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	maps__put(al->maps);
	al->maps = maps__get(bi[i].to.ms.maps);
	map__put(al->map);
	al->map = map__get(bi[i].to.ms.map);
	al->sym = bi[i].to.ms.sym;
	al->addr = bi[i].to.addr;

	iter->curr++;
	return 1;
}
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled.  Thus we use a pseudo period of 1.
	 */
	sample->period = 1;
	sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

	he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
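
/*
 * Note the bookkeeping above: each branch in the sample's branch stack
 * becomes its own hist entry with a pseudo period of 1, so finishing
 * reports an error unless all iter->total branches were consumed.
 */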
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;
	struct callchain_cursor *cursor = get_tls_callchain_cursor();

	if (cursor == NULL)
		return -ENOMEM;

	callchain_cursor_commit(cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, preventing entries from exceeding 100%.
	 */
	he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
			      sample, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(get_tls_callchain_cursor());

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(get_tls_callchain_cursor());
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
static bool
hist_entry__fast__sym_diff(struct hist_entry *left,
			   struct hist_entry *right)
{
	struct symbol *sym_l = left->ms.sym;
	struct symbol *sym_r = right->ms.sym;

	if (!sym_l && !sym_r)
		return left->ip != right->ip;

	return !!_sort__sym_cmp(sym_l, sym_r);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.hists = evsel__hists(evsel),
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
		.srcline = (char *) al->srcline,
		.parent = iter->parent,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
	};
	int i;
	struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
	bool fast = hists__has(he_tmp.hists, sym);

	if (tls_cursor == NULL)
		return -ENOMEM;

	callchain_cursor_snapshot(&cursor, tls_cursor);

	callchain_cursor_advance(tls_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		/*
		 * For most cases, there are no duplicate entries in the
		 * callchain.  The symbols are usually different.  Do a quick
		 * check for difference.
		 */
		if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
			continue;

		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
			      NULL, sample, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
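
/*
 * All four ops tables drive the same state machine in
 * hist_entry_iter__add() below: prepare_entry, add_single_entry for the
 * sample itself, then add_next_entry for every additional entry produced
 * while next_entry keeps returning non-zero (one per branch, one per
 * cumulative callchain level), and finally finish_entry for cleanup.
 */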
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 int max_stack_depth, void *arg)
{
	int err, err2;
	struct map *alm = NULL;

	if (al)
		alm = map__get(al->map);

	err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
					iter->evsel, al, max_stack_depth);
	if (err) {
		map__put(alm);
		return err;
	}

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	map__put(alm);

	return err;
}
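
/*
 * For reference, the three comparison stages used below: fmt->cmp()
 * keys the per-evsel input tree in hists__findnew_entry(),
 * fmt->collapse() merges entries during hists__collapse_resort(), and
 * fmt->sort() orders the final output tree.
 */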
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->cmp(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct hists *hists = left->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    !perf_hpp__defined_dynamic_entry(fmt, hists))
			continue;

		cmp = fmt->collapse(fmt, left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__delete(struct hist_entry *he)
{
	struct hist_entry_ops *ops = he->ops;

	thread__zput(he->thread);
	maps__zput(he->ms.maps);
	map__zput(he->ms.map);

	if (he->branch_info) {
		map__zput(he->branch_info->from.ms.map);
		map__zput(he->branch_info->to.ms.map);
		zfree_srcline(&he->branch_info->srcline_from);
		zfree_srcline(&he->branch_info->srcline_to);
		zfree(&he->branch_info);
	}

	if (he->mem_info) {
		map__zput(he->mem_info->iaddr.ms.map);
		map__zput(he->mem_info->daddr.ms.map);
		mem_info__zput(he->mem_info);
	}

	if (he->block_info)
		block_info__zput(he->block_info);

	if (he->kvm_info)
		kvm_info__zput(he->kvm_info);

	zfree(&he->res_samples);
	zfree(&he->stat_acc);
	zfree_srcline(&he->srcline);
	if (he->srcfile && he->srcfile[0])
		zfree(&he->srcfile);
	free_callchain(he->callchain);
	zfree(&he->trace_output);
	zfree(&he->raw_data);
	ops->free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * that would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
				   struct perf_hpp_fmt *fmt, int printed)
{
	if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
		const int width = fmt->width(fmt, hpp, he->hists);
		if (printed < width) {
			advance_hpp(hpp, printed);
			printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
		}
	}

	return printed;
}
/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
				       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}
static void hist_entry__check_and_remove_filter(struct hist_entry *he,
						enum hist_filter type,
						fmt_chk_fn check)
{
	struct perf_hpp_fmt *fmt;
	bool type_match = false;
	struct hist_entry *parent = he->parent_he;

	switch (type) {
	case HIST_FILTER__THREAD:
		if (symbol_conf.comm_list == NULL &&
		    symbol_conf.pid_list == NULL &&
		    symbol_conf.tid_list == NULL)
			return;
		break;
	case HIST_FILTER__DSO:
		if (symbol_conf.dso_list == NULL)
			return;
		break;
	case HIST_FILTER__SYMBOL:
		if (symbol_conf.sym_list == NULL)
			return;
		break;
	case HIST_FILTER__PARENT:
	case HIST_FILTER__GUEST:
	case HIST_FILTER__HOST:
	case HIST_FILTER__SOCKET:
	case HIST_FILTER__C2C:
	default:
		return;
	}

	/* if it's filtered by own fmt, it has to have filter bits */
	perf_hpp_list__for_each_format(he->hpp_list, fmt) {
		if (check(fmt)) {
			type_match = true;
			break;
		}
	}

	if (type_match) {
		/*
		 * If the filter is for the current level entry, propagate
		 * the filter marker to parents.  The marker bit was
		 * already set by default so it only needs to clear
		 * non-filtered entries.
		 */
		if (!(he->filtered & (1 << type))) {
			while (parent) {
				parent->filtered &= ~(1 << type);
				parent = parent->parent_he;
			}
		}
	} else {
		/*
		 * If the current entry doesn't have matching formats, set
		 * the filter marker for upper level entries.  It will be
		 * cleared if its lower level entries are not filtered.
		 *
		 * For lower-level entries, it inherits the parent's
		 * filter bit so that lower-level entries of a
		 * non-filtered entry won't set the filter marker.
		 */
		if (parent == NULL)
			he->filtered |= (1 << type);
		else
			he->filtered |= (parent->filtered & (1 << type));
	}
}
static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
	hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
					    check_thread_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
					    perf_hpp__is_dso_entry);

	hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
					    perf_hpp__is_sym_entry);

	hists__apply_filters(he->hists, he);
}
static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
						 struct rb_root_cached *root,
						 struct hist_entry *he,
						 struct hist_entry *parent_he,
						 struct perf_hpp_list *hpp_list)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter, *new;
	struct perf_hpp_fmt *fmt;
	int64_t cmp;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = 0;
		perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			return iter;
		}

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	new = hist_entry__new(he, true);
	if (new == NULL)
		return NULL;

	hists->nr_entries++;

	/* save related format list for output */
	new->hpp_list = hpp_list;
	new->parent_he = parent_he;

	hist_entry__apply_hierarchy_filters(new);

	/* some fields are now passed to 'new' */
	perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
		if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			he->trace_output = NULL;
		else
			new->trace_output = NULL;

		if (perf_hpp__is_srcline_entry(fmt))
			he->srcline = NULL;
		else
			new->srcline = NULL;

		if (perf_hpp__is_srcfile_entry(fmt))
			he->srcfile = NULL;
		else
			new->srcfile = NULL;
	}

	rb_link_node(&new->rb_node_in, parent, p);
	rb_insert_color_cached(&new->rb_node_in, root, leftmost);
	return new;
}
static int hists__hierarchy_insert_entry(struct hists *hists,
					 struct rb_root_cached *root,
					 struct hist_entry *he)
{
	struct perf_hpp_list_node *node;
	struct hist_entry *new_he = NULL;
	struct hist_entry *parent = NULL;
	int depth = 0;
	int ret = 0;

	list_for_each_entry(node, &hists->hpp_formats, list) {
		/* skip period (overhead) and elided columns */
		if (node->level == 0 || node->skip)
			continue;

		/* insert copy of 'he' for each fmt into the hierarchy */
		new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
		if (new_he == NULL) {
			ret = -1;
			break;
		}

		root = &new_he->hroot_in;
		new_he->depth = depth++;
		parent = new_he;
	}

	if (new_he) {
		new_he->leaf = true;

		if (hist_entry__has_callchains(new_he) &&
		    symbol_conf.use_callchain) {
			struct callchain_cursor *cursor = get_tls_callchain_cursor();

			if (cursor == NULL)
				return -1;

			callchain_cursor_reset(cursor);
			if (callchain_merge(cursor,
					    new_he->callchain,
					    he->callchain) < 0)
				ret = -1;
		}
	}

	/* 'he' is no longer used */
	hist_entry__delete(he);

	/* return 0 (or -1) since it already applied filters */
	return ret;
}
static int hists__collapse_insert_entry(struct hists *hists,
					struct rb_root_cached *root,
					struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;
	bool leftmost = true;

	if (symbol_conf.report_hierarchy)
		return hists__hierarchy_insert_entry(hists, root, he);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			int ret = 0;

			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
				struct callchain_cursor *cursor = get_tls_callchain_cursor();

				if (cursor != NULL) {
					callchain_cursor_reset(cursor);
					if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
						ret = -1;
				}
			}
			hist_entry__delete(he);
			return ret;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, root, leftmost);
	return 1;
}
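
/*
 * Return convention for the collapse insertion above: 1 when 'he' was
 * newly linked into the collapsed tree, 0 when it was merged into an
 * existing entry (and freed), negative when merging callchains failed.
 */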
struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root_cached *root;

	mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	mutex_unlock(&hists->lock);

	return root;
}
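
/*
 * hists->entries_in_array[] is a two-slot double buffer: the returned
 * root is the tree filled so far, while new samples immediately start
 * landing in the other slot, so one generation can be collapsed while
 * collection continues (as 'perf top' does).
 */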
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
}
int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	int ret;

	if (!hists__has(hists, need_collapse))
		return 0;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first_cached(root);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase_cached(&n->rb_node_in, root);
		ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
		if (ret < 0)
			return -1;
		if (ret) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
	return 0;
}
static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct hists *hists = a->hists;
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	hists__for_each_sort_list(hists, fmt) {
		if (perf_hpp__should_skip(fmt, a->hists))
			continue;

		cmp = fmt->sort(fmt, a, b);
		if (cmp)
			break;
	}

	return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void hierarchy_recalc_total_periods(struct hists *hists)
{
	struct rb_node *node;
	struct hist_entry *he;

	node = rb_first_cached(&hists->entries);

	hists->stats.total_period = 0;
	hists->stats.total_non_filtered_period = 0;

	/*
	 * recalculate total period using top-level entries only
	 * since lower-level entries only see non-filtered entries
	 * but upper-level entries have the sum of both.
	 */
	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node);
		node = rb_next(node);

		hists->stats.total_period += he->stat.period;
		if (!he->filtered)
			hists->stats.total_non_filtered_period += he->stat.period;
	}
}
static void hierarchy_insert_output_entry(struct rb_root_cached *root,
					  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	/* update column width of dynamic entry */
	perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt))
			fmt->sort(fmt, he, NULL);  /* update column width */
	}
}
static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	/* update column width of dynamic entries */
	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (perf_hpp__is_dynamic_entry(fmt) &&
		    perf_hpp__defined_dynamic_entry(fmt, he->hists))
			fmt->sort(fmt, he, NULL);  /* update column width */
	}
}
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
			     hists__resort_cb_t cb, void *cb_arg)
{
	bool use_callchain;

	if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
		use_callchain = evsel__has_callchain(evsel);
	else
		use_callchain = symbol_conf.use_callchain;

	use_callchain |= symbol_conf.show_branchflag_count;

	output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
}

void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
{
	return evsel__output_resort_cb(evsel, prog, NULL, NULL);
}

void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
}

void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
			     hists__resort_cb_t cb)
{
	output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
}
static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
{
	if (he->leaf || hmd == HMD_FORCE_SIBLING)
		return false;

	if (he->unfolded || hmd == HMD_FORCE_CHILD)
		return true;

	return false;
}

struct rb_node *rb_hierarchy_last(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	while (can_goto_child(he, HMD_NORMAL)) {
		node = rb_last(&he->hroot_out.rb_root);
		he = rb_entry(node, struct hist_entry, rb_node);
	}
	return node;
}

struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}

struct rb_node *rb_hierarchy_prev(struct rb_node *node)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	node = rb_prev(node);
	if (node)
		return rb_hierarchy_last(node);

	he = he->parent_he;
	if (he == NULL)
		return NULL;

	return &he->rb_node;
}
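
/*
 * The helpers above walk the hierarchy as a depth-first traversal over
 * the per-level hroot_out trees: descend into an unfolded entry's
 * children, otherwise step to the next sibling, climbing back to the
 * parent's sibling once a subtree is exhausted.
 */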
bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(hists->thread_filter)) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

static bool hists__filter_entry_by_socket(struct hists *hists,
					  struct hist_entry *he)
{
	if ((hists->socket_filter > -1) &&
	    (he->socket != hists->socket_filter)) {
		he->filtered |= (1 << HIST_FILTER__SOCKET);
		return true;
	}

	return false;
}
typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);

static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (filter(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, type);
	}
}
static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since a filter in a lower
	 * hierarchy can change periods in an upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}
void hists__filter_by_thread(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
					hists->thread_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__THREAD,
				      hists__filter_entry_by_thread);
}

void hists__filter_by_dso(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__DSO,
					hists->dso_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__DSO,
				      hists__filter_entry_by_dso);
}

void hists__filter_by_symbol(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
					hists->symbol_filter_str);
	else
		hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
				      hists__filter_entry_by_symbol);
}

void hists__filter_by_socket(struct hists *hists)
{
	if (symbol_conf.report_hierarchy)
		hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
					&hists->socket_filter);
	else
		hists__filter_by_type(hists, HIST_FILTER__SOCKET,
				      hists__filter_entry_by_socket);
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
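
/*
 * nr_events[0] doubles as the grand total: PERF_RECORD_* types start
 * at 1, so slot 0 counts every event while nr_events[type] counts each
 * specific kind.
 */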
static void hists_stats__inc(struct hists_stats *stats)
{
	++stats->nr_samples;
}

void hists__inc_nr_events(struct hists *hists)
{
	hists_stats__inc(&hists->stats);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	hists_stats__inc(&hists->stats);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
{
	hists->stats.nr_lost_samples += lost;
}
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp = 0;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, he, pair);
			if (cmp)
				break;
		}
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (hists__has(hists, need_collapse))
		n = hists->entries_collapsed.rb_root.rb_node;
	else
		n = hists->entries_in->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
						      struct hist_entry *he)
{
	struct rb_node *n = root->rb_root.rb_node;

	while (n) {
		struct hist_entry *iter;
		struct perf_hpp_fmt *fmt;
		int64_t cmp = 0;

		iter = rb_entry(n, struct hist_entry, rb_node_in);
		perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
			cmp = fmt->collapse(fmt, iter, he);
			if (cmp)
				break;
		}

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
static void hists__match_hierarchy(struct rb_root_cached *leader_root,
				   struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_hierarchy_entry(other_root, pos);

		if (pair) {
			hist_entry__add_pair(pair, pos);
			hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
		}
	}
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__match_hierarchy(&leader->entries_collapsed,
					      &other->entries_collapsed);
	}

	if (hists__has(leader, need_collapse))
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
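
/*
 * hists__match() above and hists__link() below are the pairing
 * machinery used when comparing hists, e.g. by 'perf diff' and event
 * group reports: matching entries are chained through their 'pairs'
 * list so they can be rendered side by side.
 */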
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not point parent in the pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}
/*
 * Look for entries in the other hists that are not present in the leader, if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (symbol_conf.report_hierarchy) {
		/* hierarchy report always collapses entries */
		return hists__link_hierarchy(leader, NULL,
					     &leader->entries_collapsed,
					     &other->entries_collapsed);
	}

	if (hists__has(other, need_collapse))
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
int hists__unlink(struct hists *hists)
{
	struct rb_root_cached *root;
	struct rb_node *nd;
	struct hist_entry *pos;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);
		list_del_init(&pos->pairs.node);
	}

	return 0;
}
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles)
{
	struct branch_info *bi;
	struct branch_entry *entries = perf_sample__branch_entries(sample);

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && entries[0].flags.cycles) {
		int i;

		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles);
				prev = &bi[i].to;

				if (total_cycles)
					*total_cycles += bi[i].flags.cycles;
			}
			free(bi);
		}
	}
}
size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
				 bool skip_empty)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);

		if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
			continue;

		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
		if (hists->stats.nr_samples)
			ret += fprintf(fp, "%16s events: %10d\n",
				       "SAMPLE", hists->stats.nr_samples);
		if (hists->stats.nr_lost_samples)
			ret += fprintf(fp, "%16s events: %10d\n",
				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
	}

	return ret;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
{
	char unit;
	int printed;
	const struct dso *dso = hists->dso_filter;
	struct thread *thread = hists->thread_filter;
	int socket_id = hists->socket_filter;
	unsigned long nr_samples = hists->stats.nr_samples;
	u64 nr_events = hists->stats.total_period;
	struct evsel *evsel = hists_to_evsel(hists);
	const char *ev_name = evsel__name(evsel);
	char buf[512], sample_freq_str[64] = "";
	size_t buflen = sizeof(buf);
	char ref[30] = " show reference callgraph, ";
	bool enable_ref = false;

	if (symbol_conf.filter_relative) {
		nr_samples = hists->stats.nr_non_filtered_samples;
		nr_events = hists->stats.total_non_filtered_period;
	}

	if (evsel__is_group_event(evsel)) {
		struct evsel *pos;

		evsel__group_desc(evsel, buf, buflen);
		ev_name = buf;

		for_each_group_member(pos, evsel) {
			struct hists *pos_hists = evsel__hists(pos);

			if (symbol_conf.filter_relative) {
				nr_samples += pos_hists->stats.nr_non_filtered_samples;
				nr_events += pos_hists->stats.total_non_filtered_period;
			} else {
				nr_samples += pos_hists->stats.nr_samples;
				nr_events += pos_hists->stats.total_period;
			}
		}
	}

	if (symbol_conf.show_ref_callgraph &&
	    strstr(ev_name, "call-graph=no"))
		enable_ref = true;

	if (show_freq)
		scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);

	nr_samples = convert_unit(nr_samples, &unit);
	printed = scnprintf(bf, size,
			    "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
			    nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
			    ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);

	if (hists->uid_filter_str)
		printed += snprintf(bf + printed, size - printed,
				    ", UID: %s", hists->uid_filter_str);

	if (thread) {
		if (hists__has(hists, thread)) {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s(%d)",
					     (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
					     thread__tid(thread));
		} else {
			printed += scnprintf(bf + printed, size - printed,
					     ", Thread: %s",
					     (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
		}
	}
	if (dso)
		printed += scnprintf(bf + printed, size - printed,
				     ", DSO: %s", dso->short_name);
	if (socket_id > -1)
		printed += scnprintf(bf + printed, size - printed,
				     ", Processor Socket: %d", socket_id);

	return printed;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else {
		pr_debug("Invalid percentage: %s\n", arg);
		return -1;
	}

	return 0;
}

int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
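
/*
 * These implement the --percentage {relative|absolute} option of the
 * report tools and the matching perfconfig key, e.g.:
 *
 *	[hist]
 *		percentage = relative
 */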
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	mutex_init(&hists->lock);
	hists->socket_filter = -1;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}
static void hists__delete_remaining_entries(struct rb_root_cached *root)
{
	struct rb_node *node;
	struct hist_entry *he;

	while (!RB_EMPTY_ROOT(&root->rb_root)) {
		node = rb_first_cached(root);
		rb_erase_cached(node, root);

		he = rb_entry(node, struct hist_entry, rb_node_in);
		hist_entry__delete(he);
	}
}

static void hists__delete_all_entries(struct hists *hists)
{
	hists__delete_entries(hists);
	hists__delete_remaining_entries(&hists->entries_in_array[0]);
	hists__delete_remaining_entries(&hists->entries_in_array[1]);
	hists__delete_remaining_entries(&hists->entries_collapsed);
}
static void hists_evsel__exit(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);
	struct perf_hpp_fmt *fmt, *pos;
	struct perf_hpp_list_node *node, *tmp;

	hists__delete_all_entries(hists);

	list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
			list_del_init(&fmt->list);
			free(fmt);
		}
		list_del_init(&node->list);
		free(node);
	}
}

static int hists_evsel__init(struct evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	__hists__init(hists, &perf_hpp_list);
	return 0;
}
/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = evsel__object_config(sizeof(struct hists_evsel),
				       hists_evsel__init, hists_evsel__exit);

	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}
void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}