static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);

struct callchain_param callchain_param = {
        .mode        = CHAIN_GRAPH_REL,
        .min_percent = 0.5,
        .order       = ORDER_CALLEE,
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

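/*
 * Widen the column-length bookkeeping so that the symbol, comm, dso,
 * branch and memory columns can hold the widest value seen so far for
 * this hist_entry.
 */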
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                }

                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void hist_entry__add_cpumode_period(struct hist_entry *he,
                                           unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he->stat.period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he->stat.period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he->stat.period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he->stat.period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period    += period;
        he_stat->weight    += weight;
        he_stat->nr_events += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period           += src->period;
        dest->period_sys       += src->period_sys;
        dest->period_us        += src->period_us;
        dest->period_guest_sys += src->period_guest_sys;
        dest->period_guest_us  += src->period_guest_us;
        dest->nr_events        += src->nr_events;
        dest->weight           += src->weight;
}

static void hist_entry__decay(struct hist_entry *he)
{
        he->stat.period = (he->stat.period * 7) / 8;
        he->stat.nr_events = (he->stat.nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}

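/*
 * Decay one entry and keep hists->stats.total_period in sync with the
 * amount the entry lost.  Returns true once the entry has decayed all the
 * way down to a zero period and can be removed from the tree.
 */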
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;

        if (prev_period == 0)
                return true;

        hist_entry__decay(he);

        if (!he->filtered)
                hists->stats.total_period -= prev_period - he->stat.period;

        return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                /*
                 * We may be annotating this, for instance, so keep it
                 * here in case it gets new samples; we'll eventually
                 * free it when the user stops browsing and it gets
                 * fully decayed again.
                 */
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n)) &&
                    !n->used) {
                        rb_erase(&n->rb_node, &hists->entries);

                        if (sort__need_collapse)
                                rb_erase(&n->rb_node_in, &hists->entries_collapsed);

                        hist_entry__free(n);
                        --hists->nr_entries;
                }
        }
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template)
{
        size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0;
        struct hist_entry *he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (he->ms.map)
                        he->ms.map->referenced = true;

                if (he->branch_info) {
                        /*
                         * This branch info is (a part of) allocated from
                         * machine__resolve_bstack() and will be freed after
                         * adding new entries.  So we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        if (he->branch_info->from.map)
                                he->branch_info->from.map->referenced = true;
                        if (he->branch_info->to.map)
                                he->branch_info->to.map->referenced = true;
                }

                if (he->mem_info) {
                        if (he->mem_info->iaddr.map)
                                he->mem_info->iaddr.map->referenced = true;
                        if (he->mem_info->daddr.map)
                                he->mem_info->daddr.map->referenced = true;
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                INIT_LIST_HEAD(&he->pairs.node);
        }

        return he;
}

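/*
 * Account a newly added, unfiltered entry in the hists totals and in the
 * column-width bookkeeping.
 */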
void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h)
{
        if (!h->filtered) {
                hists__calc_col_len(hists, h);
                ++hists->nr_entries;
                hists->stats.total_period += h->stat.period;
        }
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

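/*
 * Insert @entry into hists->entries_in.  When an existing entry compares
 * equal under the sort keys, fold the new period and weight into it;
 * otherwise allocate a fresh hist_entry and link it into the tree.
 */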
static struct hist_entry *add_hist_entry(struct hists *hists,
                                         struct hist_entry *entry,
                                         struct addr_location *al,
                                         u64 period, u64 weight)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        he_stat__add_period(&he->stat, period, weight);

                        /*
                         * This mem info was allocated from machine__resolve_mem
                         * and will not be used anymore.
                         */
                        free(entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                he->ms.map = entry->ms.map;
                                if (he->ms.map)
                                        he->ms.map->referenced = true;
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry);
        if (!he)
                return NULL;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        hist_entry__add_cpumode_period(he, al->cpumode, period);
        return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      u64 period, u64 weight, u64 transaction)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = { .map = al->map, .sym = al->sym, },
                .cpu = al->cpu,
                .ip = al->addr,
                .level = al->level,
                .stat = { .nr_events = 1, .period = period, .weight = weight, },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent),
                .hists = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = transaction,
        };

        return add_hist_entry(hists, &entry, al, period, weight);
}

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                cmp = se->se_cmp(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct sort_entry *se;
        int64_t cmp = 0;

        list_for_each_entry(se, &hist_entry__sort_list, list) {
                int64_t (*f)(struct hist_entry *, struct hist_entry *);

                f = se->se_collapse ?: se->se_cmp;

                cmp = f(left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
        free(he->branch_info);
        free(he->mem_info);
        free_srcline(he->srcline);
        free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                callchain_merge(&callchain_cursor,
                                                iter->callchain,
                                                he->callchain);
                        }
                        hist_entry__free(he);
                        return false;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return true;
}

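/*
 * Return the tree that new entries are currently being inserted into and,
 * under hists->lock, rotate entries_in to the other slot of
 * entries_in_array so that collapsing can consume one tree while new
 * samples keep landing in the other.
 */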
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
        hists__filter_entry_by_dso(hists, he);
        hists__filter_entry_by_thread(hists, he);
        hists__filter_entry_by_symbol(hists, he);
}

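/*
 * Merge every entry that compares equal under the collapse sort keys into
 * hists->entries_collapsed; entries that were not folded into an existing
 * one get the currently active filters applied.
 */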
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;

        if (!sort__need_collapse)
                return;

        root = hists__get_rotate_entries_in(hists);
        next = rb_first(root);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                rb_erase(&n->rb_node_in, root);
                if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
                        /*
                         * If it wasn't combined with one of the entries
                         * already collapsed, we need to apply the filters
                         * that may have been set by, say, the hist_browser.
                         */
                        hists__apply_filters(hists, n);
                }
                if (prog)
                        ui_progress__update(prog, 1);
        }
}

/*
 * reverse the map, sort on period.
 */

static int period_cmp(u64 period_a, u64 period_b)
{
        if (period_a > period_b)
                return 1;
        if (period_a < period_b)
                return -1;
        return 0;
}

static int hist_entry__sort_on_period(struct hist_entry *a,
                                      struct hist_entry *b)
{
        int ret;
        int i, nr_members;
        struct perf_evsel *evsel;
        struct hist_entry *pair;
        u64 *periods_a, *periods_b;

        ret = period_cmp(a->stat.period, b->stat.period);
        if (ret || !symbol_conf.event_group)
                return ret;

        evsel = hists_to_evsel(a->hists);
        nr_members = evsel->nr_members;
        if (nr_members <= 1)
                return ret;

        periods_a = zalloc(sizeof(*periods_a) * nr_members);
        periods_b = zalloc(sizeof(*periods_b) * nr_members);

        if (!periods_a || !periods_b)
                goto out;

        list_for_each_entry(pair, &a->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        list_for_each_entry(pair, &b->pairs.head, pairs.node) {
                evsel = hists_to_evsel(pair->hists);
                periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period;
        }

        for (i = 1; i < nr_members; i++) {
                ret = period_cmp(periods_a[i], periods_b[i]);
                if (ret)
                        break;
        }

out:
        free(periods_a);
        free(periods_b);

        return ret;
}

static void __hists__insert_output_entry(struct rb_root *entries,
                                         struct hist_entry *he,
                                         u64 min_callchain_hits)
{
        struct rb_node **p = &entries->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;

        if (symbol_conf.use_callchain)
                callchain_param.sort(&he->sorted_chain, he->callchain,
                                     min_callchain_hits, &callchain_param);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node);

                if (hist_entry__sort_on_period(he, iter) > 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        rb_link_node(&he->rb_node, parent, p);
        rb_insert_color(&he->rb_node, entries);
}

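/*
 * Rebuild hists->entries for output: take entries from the collapsed tree
 * (or from entries_in when no collapsing was needed), re-insert them in
 * period order and recompute nr_entries, total_period and column widths.
 */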
void hists__output_resort(struct hists *hists)
{
        struct rb_root *root;
        struct rb_node *next;
        struct hist_entry *n;
        u64 min_callchain_hits;

        min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        next = rb_first(root);
        hists->entries = RB_ROOT;

        hists->nr_entries = 0;
        hists->stats.total_period = 0;
        hists__reset_col_len(hists);

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node_in);
                next = rb_next(&n->rb_node_in);

                __hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
                hists__inc_nr_entries(hists, n);
        }
}

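/*
 * Clear @filter from the entry; if no other filter bit remains set, add
 * the entry back into nr_entries, the period and sample totals and the
 * column-width bookkeeping.
 */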
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
                                       enum hist_filter filter)
{
        h->filtered &= ~(1 << filter);
        if (h->filtered)
                return;

        ++hists->nr_entries;
        if (h->ms.unfolded)
                hists->nr_entries += h->nr_rows;
        h->row_offset = 0;
        hists->stats.total_period += h->stat.period;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events;

        hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he)
{
        if (hists->dso_filter != NULL &&
            (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
                he->filtered |= (1 << HIST_FILTER__DSO);
                return true;
        }

        return false;
}

void hists__filter_by_dso(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (symbol_conf.exclude_other && !h->parent)
                        continue;

                if (hists__filter_entry_by_dso(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
        }
}

static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->thread_filter != NULL &&
            he->thread != hists->thread_filter) {
                he->filtered |= (1 << HIST_FILTER__THREAD);
                return true;
        }

        return false;
}

void hists__filter_by_thread(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_thread(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
        }
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he)
{
        if (hists->symbol_filter_str != NULL &&
            (!he->ms.sym || strstr(he->ms.sym->name,
                                   hists->symbol_filter_str) == NULL)) {
                he->filtered |= (1 << HIST_FILTER__SYMBOL);
                return true;
        }

        return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
        struct rb_node *nd;

        hists->nr_entries = hists->stats.total_period = 0;
        hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0;
        hists__reset_col_len(hists);

        for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
                struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

                if (hists__filter_entry_by_symbol(hists, h))
                        continue;

                hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
        }
}

int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip)
{
        return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip);
}

int hist_entry__annotate(struct hist_entry *he, size_t privsize)
{
        return symbol__annotate(he->ms.sym, he->ms.map, privsize);
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
        ++stats->nr_events[0];
        ++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
        events_stats__inc(&hists->stats, type);
}

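/*
 * Find an entry matching @pair under the collapse sort keys or, failing
 * that, add one with zeroed stats so hists__link() has something in the
 * leader to pair against.
 */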
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
                                                 struct hist_entry *pair)
{
        struct rb_root *root;
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;

        if (sort__need_collapse)
                root = &hists->entries_collapsed;
        else
                root = hists->entries_in;

        p = &root->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(he, pair);

                if (!cmp)
                        goto out;

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(pair);
        if (he) {
                memset(&he->stat, 0, sizeof(he->stat));
                he->hists = hists;
                rb_link_node(&he->rb_node_in, parent, p);
                rb_insert_color(&he->rb_node_in, root);
                hists__inc_nr_entries(hists, he);
        }
out:
        return he;
}

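/*
 * Look up an entry that matches @he under the collapse sort keys.
 */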
static struct hist_entry *hists__find_entry(struct hists *hists,
                                            struct hist_entry *he)
{
        struct rb_node *n;

        if (sort__need_collapse)
                n = hists->entries_collapsed.rb_node;
        else
                n = hists->entries_in->rb_node;

        while (n) {
                struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
                int64_t cmp = hist_entry__collapse(iter, he);

                if (cmp < 0)
                        n = n->rb_left;
                else if (cmp > 0)
                        n = n->rb_right;
                else
                        return iter;
        }

        return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &leader->entries_collapsed;
        else
                root = leader->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos  = rb_entry(nd, struct hist_entry, rb_node_in);
                pair = hists__find_entry(other, pos);

                if (pair)
                        hist_entry__add_pair(pair, pos);
        }
}

/*
 * Look for entries in the other hists that are not present in the leader;
 * if we find any, add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
        struct rb_root *root;
        struct rb_node *nd;
        struct hist_entry *pos, *pair;

        if (sort__need_collapse)
                root = &other->entries_collapsed;
        else
                root = other->entries_in;

        for (nd = rb_first(root); nd; nd = rb_next(nd)) {
                pos = rb_entry(nd, struct hist_entry, rb_node_in);

                if (!hist_entry__has_pairs(pos)) {
                        pair = hists__add_dummy_entry(leader, pos);
                        if (pair == NULL)
                                return -1;

                        hist_entry__add_pair(pos, pair);
                }
        }

        return 0;
}