1 // SPDX-License-Identifier: GPL-2.0
2 #include "callchain.h"
3 #include "debug.h"
4 #include "dso.h"
5 #include "build-id.h"
6 #include "hist.h"
7 #include "kvm-stat.h"
8 #include "map.h"
9 #include "map_symbol.h"
10 #include "branch.h"
11 #include "mem-events.h"
12 #include "session.h"
13 #include "namespaces.h"
14 #include "cgroup.h"
15 #include "sort.h"
16 #include "units.h"
17 #include "evlist.h"
18 #include "evsel.h"
19 #include "annotate.h"
20 #include "srcline.h"
21 #include "symbol.h"
22 #include "thread.h"
23 #include "block-info.h"
24 #include "ui/progress.h"
25 #include <errno.h>
26 #include <math.h>
27 #include <inttypes.h>
28 #include <sys/param.h>
29 #include <linux/rbtree.h>
30 #include <linux/string.h>
31 #include <linux/time64.h>
32 #include <linux/zalloc.h>
33
34 static bool hists__filter_entry_by_dso(struct hists *hists,
35                                        struct hist_entry *he);
36 static bool hists__filter_entry_by_thread(struct hists *hists,
37                                           struct hist_entry *he);
38 static bool hists__filter_entry_by_symbol(struct hists *hists,
39                                           struct hist_entry *he);
40 static bool hists__filter_entry_by_socket(struct hists *hists,
41                                           struct hist_entry *he);
42
43 u16 hists__col_len(struct hists *hists, enum hist_column col)
44 {
45         return hists->col_len[col];
46 }
47
48 void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
49 {
50         hists->col_len[col] = len;
51 }
52
53 bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
54 {
55         if (len > hists__col_len(hists, col)) {
56                 hists__set_col_len(hists, col, len);
57                 return true;
58         }
59         return false;
60 }
61
62 void hists__reset_col_len(struct hists *hists)
63 {
64         enum hist_column col;
65
66         for (col = 0; col < HISTC_NR_COLS; ++col)
67                 hists__set_col_len(hists, col, 0);
68 }
69
70 static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
71 {
72         const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
73
74         if (hists__col_len(hists, dso) < unresolved_col_width &&
75             !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
76             !symbol_conf.dso_list)
77                 hists__set_col_len(hists, dso, unresolved_col_width);
78 }
79
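/*
 * hists__calc_col_len() below widens the per-column width bookkeeping based
 * on a single hist_entry's fields.  unresolved_col_width is BITS_PER_LONG / 4,
 * i.e. the number of hex digits needed to print a pointer-sized raw address
 * (16 on a 64-bit build).
 */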
80 void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
81 {
82         const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
83         int symlen;
84         u16 len;
85
86         if (h->block_info)
87                 return;
88         /*
89          * +4 accounts for '[x] ' priv level info
90          * +2 accounts for 0x prefix on raw addresses
91          * +3 accounts for ' y ' symtab origin info
92          */
93         if (h->ms.sym) {
94                 symlen = h->ms.sym->namelen + 4;
95                 if (verbose > 0)
96                         symlen += BITS_PER_LONG / 4 + 2 + 3;
97                 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
98         } else {
99                 symlen = unresolved_col_width + 4 + 2;
100                 hists__new_col_len(hists, HISTC_SYMBOL, symlen);
101                 hists__set_unres_dso_col_len(hists, HISTC_DSO);
102         }
103
104         len = thread__comm_len(h->thread);
105         if (hists__new_col_len(hists, HISTC_COMM, len))
106                 hists__set_col_len(hists, HISTC_THREAD, len + 8);
107
108         if (h->ms.map) {
109                 len = dso__name_len(map__dso(h->ms.map));
110                 hists__new_col_len(hists, HISTC_DSO, len);
111         }
112
113         if (h->parent)
114                 hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);
115
116         if (h->branch_info) {
117                 if (h->branch_info->from.ms.sym) {
118                         symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
119                         if (verbose > 0)
120                                 symlen += BITS_PER_LONG / 4 + 2 + 3;
121                         hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
122
123                         symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
124                         hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
125                 } else {
126                         symlen = unresolved_col_width + 4 + 2;
127                         hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
128                         hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
129                         hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
130                 }
131
132                 if (h->branch_info->to.ms.sym) {
133                         symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
134                         if (verbose > 0)
135                                 symlen += BITS_PER_LONG / 4 + 2 + 3;
136                         hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
137
138                         symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
139                         hists__new_col_len(hists, HISTC_DSO_TO, symlen);
140                 } else {
141                         symlen = unresolved_col_width + 4 + 2;
142                         hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
143                         hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
144                         hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
145                 }
146
147                 if (h->branch_info->srcline_from)
148                         hists__new_col_len(hists, HISTC_SRCLINE_FROM,
149                                         strlen(h->branch_info->srcline_from));
150                 if (h->branch_info->srcline_to)
151                         hists__new_col_len(hists, HISTC_SRCLINE_TO,
152                                         strlen(h->branch_info->srcline_to));
153         }
154
155         if (h->mem_info) {
156                 if (h->mem_info->daddr.ms.sym) {
157                         symlen = (int)h->mem_info->daddr.ms.sym->namelen + 4
158                                + unresolved_col_width + 2;
159                         hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
160                                            symlen);
161                         hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
162                                            symlen + 1);
163                 } else {
164                         symlen = unresolved_col_width + 4 + 2;
165                         hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
166                                            symlen);
167                         hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
168                                            symlen);
169                 }
170
171                 if (h->mem_info->iaddr.ms.sym) {
172                         symlen = (int)h->mem_info->iaddr.ms.sym->namelen + 4
173                                + unresolved_col_width + 2;
174                         hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
175                                            symlen);
176                 } else {
177                         symlen = unresolved_col_width + 4 + 2;
178                         hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
179                                            symlen);
180                 }
181
182                 if (h->mem_info->daddr.ms.map) {
183                         symlen = dso__name_len(map__dso(h->mem_info->daddr.ms.map));
184                         hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
185                                            symlen);
186                 } else {
187                         symlen = unresolved_col_width + 4 + 2;
188                         hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
189                 }
190
191                 hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
192                                    unresolved_col_width + 4 + 2);
193
194                 hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
195                                    unresolved_col_width + 4 + 2);
196
197         } else {
198                 symlen = unresolved_col_width + 4 + 2;
199                 hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
200                 hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
201                 hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
202         }
203
204         hists__new_col_len(hists, HISTC_CGROUP, 6);
205         hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
206         hists__new_col_len(hists, HISTC_CPU, 3);
207         hists__new_col_len(hists, HISTC_SOCKET, 6);
208         hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
209         hists__new_col_len(hists, HISTC_MEM_TLB, 22);
210         hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
211         hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
212         hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
213         hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
214         hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
215         hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
216         hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
217         hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
218         hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
219         hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);
220
221         if (symbol_conf.nanosecs)
222                 hists__new_col_len(hists, HISTC_TIME, 16);
223         else
224                 hists__new_col_len(hists, HISTC_TIME, 12);
225         hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);
226
227         if (h->srcline) {
228                 len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
229                 hists__new_col_len(hists, HISTC_SRCLINE, len);
230         }
231
232         if (h->srcfile)
233                 hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));
234
235         if (h->transaction)
236                 hists__new_col_len(hists, HISTC_TRANSACTION,
237                                    hist_entry__transaction_len());
238
239         if (h->trace_output)
240                 hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
241
242         if (h->cgroup) {
243                 const char *cgrp_name = "unknown";
244                 struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
245                                                    h->cgroup);
246                 if (cgrp != NULL)
247                         cgrp_name = cgrp->name;
248
249                 hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
250         }
251 }
252
253 void hists__output_recalc_col_len(struct hists *hists, int max_rows)
254 {
255         struct rb_node *next = rb_first_cached(&hists->entries);
256         struct hist_entry *n;
257         int row = 0;
258
259         hists__reset_col_len(hists);
260
261         while (next && row++ < max_rows) {
262                 n = rb_entry(next, struct hist_entry, rb_node);
263                 if (!n->filtered)
264                         hists__calc_col_len(hists, n);
265                 next = rb_next(&n->rb_node);
266         }
267 }
268
269 static void he_stat__add_cpumode_period(struct he_stat *he_stat,
270                                         unsigned int cpumode, u64 period)
271 {
272         switch (cpumode) {
273         case PERF_RECORD_MISC_KERNEL:
274                 he_stat->period_sys += period;
275                 break;
276         case PERF_RECORD_MISC_USER:
277                 he_stat->period_us += period;
278                 break;
279         case PERF_RECORD_MISC_GUEST_KERNEL:
280                 he_stat->period_guest_sys += period;
281                 break;
282         case PERF_RECORD_MISC_GUEST_USER:
283                 he_stat->period_guest_us += period;
284                 break;
285         default:
286                 break;
287         }
288 }
289
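/*
 * hist_time() buckets a sample timestamp by rounding it down to a multiple of
 * symbol_conf.time_quantum, so that time-sorted entries are grouped into
 * fixed-size intervals.  For example, with a quantum of 100 (in the same time
 * units), a timestamp of 1234 maps to 1200.
 */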
290 static long hist_time(unsigned long htime)
291 {
292         unsigned long time_quantum = symbol_conf.time_quantum;
293         if (time_quantum)
294                 return (htime / time_quantum) * time_quantum;
295         return htime;
296 }
297
298 static void he_stat__add_period(struct he_stat *he_stat, u64 period)
299 {
300         he_stat->period         += period;
301         he_stat->nr_events      += 1;
302 }
303
304 static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
305 {
306         dest->period            += src->period;
307         dest->period_sys        += src->period_sys;
308         dest->period_us         += src->period_us;
309         dest->period_guest_sys  += src->period_guest_sys;
310         dest->period_guest_us   += src->period_guest_us;
311         dest->nr_events         += src->nr_events;
312 }
313
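/*
 * he_stat__decay() applies an exponential decay of 7/8 to the period and
 * event counts.  hists__decay_entries() below calls it repeatedly (e.g. from
 * live views such as 'perf top') and removes entries whose period has
 * decayed to zero.
 */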
314 static void he_stat__decay(struct he_stat *he_stat)
315 {
316         he_stat->period = (he_stat->period * 7) / 8;
317         he_stat->nr_events = (he_stat->nr_events * 7) / 8;
318         /* XXX need decay for weight too? */
319 }
320
321 static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
322
323 static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
324 {
325         u64 prev_period = he->stat.period;
326         u64 diff;
327
328         if (prev_period == 0)
329                 return true;
330
331         he_stat__decay(&he->stat);
332         if (symbol_conf.cumulate_callchain)
333                 he_stat__decay(he->stat_acc);
334         decay_callchain(he->callchain);
335
336         diff = prev_period - he->stat.period;
337
338         if (!he->depth) {
339                 hists->stats.total_period -= diff;
340                 if (!he->filtered)
341                         hists->stats.total_non_filtered_period -= diff;
342         }
343
344         if (!he->leaf) {
345                 struct hist_entry *child;
346                 struct rb_node *node = rb_first_cached(&he->hroot_out);
347                 while (node) {
348                         child = rb_entry(node, struct hist_entry, rb_node);
349                         node = rb_next(node);
350
351                         if (hists__decay_entry(hists, child))
352                                 hists__delete_entry(hists, child);
353                 }
354         }
355
356         return he->stat.period == 0;
357 }
358
359 static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
360 {
361         struct rb_root_cached *root_in;
362         struct rb_root_cached *root_out;
363
364         if (he->parent_he) {
365                 root_in  = &he->parent_he->hroot_in;
366                 root_out = &he->parent_he->hroot_out;
367         } else {
368                 if (hists__has(hists, need_collapse))
369                         root_in = &hists->entries_collapsed;
370                 else
371                         root_in = hists->entries_in;
372                 root_out = &hists->entries;
373         }
374
375         rb_erase_cached(&he->rb_node_in, root_in);
376         rb_erase_cached(&he->rb_node, root_out);
377
378         --hists->nr_entries;
379         if (!he->filtered)
380                 --hists->nr_non_filtered_entries;
381
382         hist_entry__delete(he);
383 }
384
385 void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
386 {
387         struct rb_node *next = rb_first_cached(&hists->entries);
388         struct hist_entry *n;
389
390         while (next) {
391                 n = rb_entry(next, struct hist_entry, rb_node);
392                 next = rb_next(&n->rb_node);
393                 if (((zap_user && n->level == '.') ||
394                      (zap_kernel && n->level != '.') ||
395                      hists__decay_entry(hists, n))) {
396                         hists__delete_entry(hists, n);
397                 }
398         }
399 }
400
401 void hists__delete_entries(struct hists *hists)
402 {
403         struct rb_node *next = rb_first_cached(&hists->entries);
404         struct hist_entry *n;
405
406         while (next) {
407                 n = rb_entry(next, struct hist_entry, rb_node);
408                 next = rb_next(&n->rb_node);
409
410                 hists__delete_entry(hists, n);
411         }
412 }
413
414 struct hist_entry *hists__get_entry(struct hists *hists, int idx)
415 {
416         struct rb_node *next = rb_first_cached(&hists->entries);
417         struct hist_entry *n;
418         int i = 0;
419
420         while (next) {
421                 n = rb_entry(next, struct hist_entry, rb_node);
422                 if (i == idx)
423                         return n;
424
425                 next = rb_next(&n->rb_node);
426                 i++;
427         }
428
429         return NULL;
430 }
431
432 /*
433  * histogram, sorted on item, collects periods
434  */
435
436 static int hist_entry__init(struct hist_entry *he,
437                             struct hist_entry *template,
438                             bool sample_self,
439                             size_t callchain_size)
440 {
441         *he = *template;
442         he->callchain_size = callchain_size;
443
444         if (symbol_conf.cumulate_callchain) {
445                 he->stat_acc = malloc(sizeof(he->stat));
446                 if (he->stat_acc == NULL)
447                         return -ENOMEM;
448                 memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
449                 if (!sample_self)
450                         memset(&he->stat, 0, sizeof(he->stat));
451         }
452
453         he->ms.maps = maps__get(he->ms.maps);
454         he->ms.map = map__get(he->ms.map);
455
456         if (he->branch_info) {
457                 /*
458                  * This branch info is (partially) allocated from
459                  * sample__resolve_bstack() and will be freed after the
460                  * new entries are added, so we need to save a copy.
461                  */
462                 he->branch_info = malloc(sizeof(*he->branch_info));
463                 if (he->branch_info == NULL)
464                         goto err;
465
466                 memcpy(he->branch_info, template->branch_info,
467                        sizeof(*he->branch_info));
468
469                 he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
470                 he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
471         }
472
473         if (he->mem_info) {
474                 he->mem_info->iaddr.ms.map = map__get(he->mem_info->iaddr.ms.map);
475                 he->mem_info->daddr.ms.map = map__get(he->mem_info->daddr.ms.map);
476         }
477
478         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
479                 callchain_init(he->callchain);
480
481         if (he->raw_data) {
482                 he->raw_data = memdup(he->raw_data, he->raw_size);
483                 if (he->raw_data == NULL)
484                         goto err_infos;
485         }
486
487         if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
488                 he->srcline = strdup(he->srcline);
489                 if (he->srcline == NULL)
490                         goto err_rawdata;
491         }
492
493         if (symbol_conf.res_sample) {
494                 he->res_samples = calloc(sizeof(struct res_sample),
495                                         symbol_conf.res_sample);
496                 if (!he->res_samples)
497                         goto err_srcline;
498         }
499
500         INIT_LIST_HEAD(&he->pairs.node);
501         he->thread = thread__get(he->thread);
502         he->hroot_in  = RB_ROOT_CACHED;
503         he->hroot_out = RB_ROOT_CACHED;
504
505         if (!symbol_conf.report_hierarchy)
506                 he->leaf = true;
507
508         return 0;
509
510 err_srcline:
511         zfree(&he->srcline);
512
513 err_rawdata:
514         zfree(&he->raw_data);
515
516 err_infos:
517         if (he->branch_info) {
518                 map__put(he->branch_info->from.ms.map);
519                 map__put(he->branch_info->to.ms.map);
520                 zfree(&he->branch_info);
521         }
522         if (he->mem_info) {
523                 map__put(he->mem_info->iaddr.ms.map);
524                 map__put(he->mem_info->daddr.ms.map);
525         }
526 err:
527         maps__zput(he->ms.maps);
528         map__zput(he->ms.map);
529         zfree(&he->stat_acc);
530         return -ENOMEM;
531 }
532
533 static void *hist_entry__zalloc(size_t size)
534 {
535         return zalloc(size + sizeof(struct hist_entry));
536 }
537
538 static void hist_entry__free(void *ptr)
539 {
540         free(ptr);
541 }
542
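/*
 * The default ops allocate the hist_entry and its optional callchain storage
 * in a single zeroed block: hist_entry__new() passes the callchain size and
 * hist_entry__zalloc() reserves that many extra bytes right behind the entry.
 */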
543 static struct hist_entry_ops default_ops = {
544         .new    = hist_entry__zalloc,
545         .free   = hist_entry__free,
546 };
547
548 static struct hist_entry *hist_entry__new(struct hist_entry *template,
549                                           bool sample_self)
550 {
551         struct hist_entry_ops *ops = template->ops;
552         size_t callchain_size = 0;
553         struct hist_entry *he;
554         int err = 0;
555
556         if (!ops)
557                 ops = template->ops = &default_ops;
558
559         if (symbol_conf.use_callchain)
560                 callchain_size = sizeof(struct callchain_root);
561
562         he = ops->new(callchain_size);
563         if (he) {
564                 err = hist_entry__init(he, template, sample_self, callchain_size);
565                 if (err) {
566                         ops->free(he);
567                         he = NULL;
568                 }
569         }
570
571         return he;
572 }
573
574 static u8 symbol__parent_filter(const struct symbol *parent)
575 {
576         if (symbol_conf.exclude_other && parent == NULL)
577                 return 1 << HIST_FILTER__PARENT;
578         return 0;
579 }
580
581 static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
582 {
583         if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
584                 return;
585
586         he->hists->callchain_period += period;
587         if (!he->filtered)
588                 he->hists->callchain_non_filtered_period += period;
589 }
590
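/*
 * hists__findnew_entry() looks 'entry' up in the hists->entries_in rb-tree
 * using hist_entry__cmp().  On a match the period is aggregated into the
 * existing node and the temporary mem/block/kvm info is dropped; otherwise a
 * new hist_entry is allocated with hist_entry__new() and inserted.
 */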
591 static struct hist_entry *hists__findnew_entry(struct hists *hists,
592                                                struct hist_entry *entry,
593                                                const struct addr_location *al,
594                                                bool sample_self)
595 {
596         struct rb_node **p;
597         struct rb_node *parent = NULL;
598         struct hist_entry *he;
599         int64_t cmp;
600         u64 period = entry->stat.period;
601         bool leftmost = true;
602
603         p = &hists->entries_in->rb_root.rb_node;
604
605         while (*p != NULL) {
606                 parent = *p;
607                 he = rb_entry(parent, struct hist_entry, rb_node_in);
608
609                 /*
610                  * Make sure that it receives arguments in the same order as
611                  * hist_entry__collapse() so that we can use an appropriate
612                  * function when searching for an entry regardless of which
613                  * sort keys were used.
614                  */
615                 cmp = hist_entry__cmp(he, entry);
616                 if (!cmp) {
617                         if (sample_self) {
618                                 he_stat__add_period(&he->stat, period);
619                                 hist_entry__add_callchain_period(he, period);
620                         }
621                         if (symbol_conf.cumulate_callchain)
622                                 he_stat__add_period(he->stat_acc, period);
623
624                         /*
625                          * This mem info was allocated from sample__resolve_mem
626                          * and will not be used anymore.
627                          */
628                         mem_info__zput(entry->mem_info);
629
630                         block_info__zput(entry->block_info);
631
632                         kvm_info__zput(entry->kvm_info);
633
634                         /* If the map of an existing hist_entry has
635                          * become out-of-date due to an exec() or
636                          * similar, update it.  Otherwise we will
637                          * mis-adjust symbol addresses when computing
638                          * the history counter to increment.
639                          */
640                         if (he->ms.map != entry->ms.map) {
641                                 map__put(he->ms.map);
642                                 he->ms.map = map__get(entry->ms.map);
643                         }
644                         goto out;
645                 }
646
647                 if (cmp < 0)
648                         p = &(*p)->rb_left;
649                 else {
650                         p = &(*p)->rb_right;
651                         leftmost = false;
652                 }
653         }
654
655         he = hist_entry__new(entry, sample_self);
656         if (!he)
657                 return NULL;
658
659         if (sample_self)
660                 hist_entry__add_callchain_period(he, period);
661         hists->nr_entries++;
662
663         rb_link_node(&he->rb_node_in, parent, p);
664         rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
665 out:
666         if (sample_self)
667                 he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
668         if (symbol_conf.cumulate_callchain)
669                 he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
670         return he;
671 }
672
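/*
 * random_max() picks a value in [0, high), rejecting and retrying draws below
 * 'thresh' so that the final 'r % high' avoids the usual modulo bias of a
 * plain 'random() % high'.
 */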
673 static unsigned random_max(unsigned high)
674 {
675         unsigned thresh = -high % high;
676         for (;;) {
677                 unsigned r = random();
678                 if (r >= thresh)
679                         return r % high;
680         }
681 }
682
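/*
 * hists__res_sample() records up to symbol_conf.res_sample representative
 * samples (time, cpu, tid) per entry.  Once the array is full, a random
 * existing slot is overwritten so that later samples still have a chance of
 * being kept.
 */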
683 static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
684 {
685         struct res_sample *r;
686         int j;
687
688         if (he->num_res < symbol_conf.res_sample) {
689                 j = he->num_res++;
690         } else {
691                 j = random_max(symbol_conf.res_sample);
692         }
693         r = &he->res_samples[j];
694         r->time = sample->time;
695         r->cpu = sample->cpu;
696         r->tid = sample->tid;
697 }
698
699 static struct hist_entry*
700 __hists__add_entry(struct hists *hists,
701                    struct addr_location *al,
702                    struct symbol *sym_parent,
703                    struct branch_info *bi,
704                    struct mem_info *mi,
705                    struct kvm_info *ki,
706                    struct block_info *block_info,
707                    struct perf_sample *sample,
708                    bool sample_self,
709                    struct hist_entry_ops *ops)
710 {
711         struct namespaces *ns = thread__namespaces(al->thread);
712         struct hist_entry entry = {
713                 .thread = al->thread,
714                 .comm = thread__comm(al->thread),
715                 .cgroup_id = {
716                         .dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
717                         .ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
718                 },
719                 .cgroup = sample->cgroup,
720                 .ms = {
721                         .maps   = al->maps,
722                         .map    = al->map,
723                         .sym    = al->sym,
724                 },
725                 .srcline = (char *) al->srcline,
726                 .socket  = al->socket,
727                 .cpu     = al->cpu,
728                 .cpumode = al->cpumode,
729                 .ip      = al->addr,
730                 .level   = al->level,
731                 .code_page_size = sample->code_page_size,
732                 .stat = {
733                         .nr_events = 1,
734                         .period = sample->period,
735                 },
736                 .parent = sym_parent,
737                 .filtered = symbol__parent_filter(sym_parent) | al->filtered,
738                 .hists  = hists,
739                 .branch_info = bi,
740                 .mem_info = mi,
741                 .kvm_info = ki,
742                 .block_info = block_info,
743                 .transaction = sample->transaction,
744                 .raw_data = sample->raw_data,
745                 .raw_size = sample->raw_size,
746                 .ops = ops,
747                 .time = hist_time(sample->time),
748                 .weight = sample->weight,
749                 .ins_lat = sample->ins_lat,
750                 .p_stage_cyc = sample->p_stage_cyc,
751                 .simd_flags = sample->simd_flags,
752         }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
753
754         if (!hists->has_callchains && he && he->callchain_size != 0)
755                 hists->has_callchains = true;
756         if (he && symbol_conf.res_sample)
757                 hists__res_sample(he, sample);
758         return he;
759 }
760
761 struct hist_entry *hists__add_entry(struct hists *hists,
762                                     struct addr_location *al,
763                                     struct symbol *sym_parent,
764                                     struct branch_info *bi,
765                                     struct mem_info *mi,
766                                     struct kvm_info *ki,
767                                     struct perf_sample *sample,
768                                     bool sample_self)
769 {
770         return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
771                                   sample, sample_self, NULL);
772 }
773
774 struct hist_entry *hists__add_entry_ops(struct hists *hists,
775                                         struct hist_entry_ops *ops,
776                                         struct addr_location *al,
777                                         struct symbol *sym_parent,
778                                         struct branch_info *bi,
779                                         struct mem_info *mi,
780                                         struct kvm_info *ki,
781                                         struct perf_sample *sample,
782                                         bool sample_self)
783 {
784         return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
785                                   sample, sample_self, ops);
786 }
787
788 struct hist_entry *hists__add_entry_block(struct hists *hists,
789                                           struct addr_location *al,
790                                           struct block_info *block_info)
791 {
792         struct hist_entry entry = {
793                 .block_info = block_info,
794                 .hists = hists,
795                 .ms = {
796                         .maps = al->maps,
797                         .map = al->map,
798                         .sym = al->sym,
799                 },
800         }, *he = hists__findnew_entry(hists, &entry, al, false);
801
802         return he;
803 }
804
805 static int
806 iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
807                     struct addr_location *al __maybe_unused)
808 {
809         return 0;
810 }
811
812 static int
813 iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
814                         struct addr_location *al __maybe_unused)
815 {
816         return 0;
817 }
818
819 static int
820 iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
821 {
822         struct perf_sample *sample = iter->sample;
823         struct mem_info *mi;
824
825         mi = sample__resolve_mem(sample, al);
826         if (mi == NULL)
827                 return -ENOMEM;
828
829         iter->priv = mi;
830         return 0;
831 }
832
833 static int
834 iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
835 {
836         u64 cost;
837         struct mem_info *mi = iter->priv;
838         struct hists *hists = evsel__hists(iter->evsel);
839         struct perf_sample *sample = iter->sample;
840         struct hist_entry *he;
841
842         if (mi == NULL)
843                 return -EINVAL;
844
845         cost = sample->weight;
846         if (!cost)
847                 cost = 1;
848
849         /*
850          * We must pass period=weight in order to get the correct
851          * sorting from hists__collapse_resort(), which is based solely
852          * on periods. We want sorting to be done on nr_events * weight,
853          * and this is achieved indirectly by passing period=weight here
854          * and by the he_stat__add_period() function.
855          */
856         sample->period = cost;
857
858         he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
859                               sample, true);
860         if (!he)
861                 return -ENOMEM;
862
863         iter->he = he;
864         return 0;
865 }
866
867 static int
868 iter_finish_mem_entry(struct hist_entry_iter *iter,
869                       struct addr_location *al __maybe_unused)
870 {
871         struct evsel *evsel = iter->evsel;
872         struct hists *hists = evsel__hists(evsel);
873         struct hist_entry *he = iter->he;
874         int err = -EINVAL;
875
876         if (he == NULL)
877                 goto out;
878
879         hists__inc_nr_samples(hists, he->filtered);
880
881         err = hist_entry__append_callchain(he, iter->sample);
882
883 out:
884         /*
885          * We don't need to free iter->priv (mem_info) here since the mem info
886          * was either already freed in hists__findnew_entry() or passed to a
887          * new hist entry by hist_entry__new().
888          */
889         iter->priv = NULL;
890
891         iter->he = NULL;
892         return err;
893 }
894
895 static int
896 iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
897 {
898         struct branch_info *bi;
899         struct perf_sample *sample = iter->sample;
900
901         bi = sample__resolve_bstack(sample, al);
902         if (!bi)
903                 return -ENOMEM;
904
905         iter->curr = 0;
906         iter->total = sample->branch_stack->nr;
907
908         iter->priv = bi;
909         return 0;
910 }
911
912 static int
913 iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
914                              struct addr_location *al __maybe_unused)
915 {
916         return 0;
917 }
918
919 static int
920 iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
921 {
922         struct branch_info *bi = iter->priv;
923         int i = iter->curr;
924
925         if (bi == NULL)
926                 return 0;
927
928         if (iter->curr >= iter->total)
929                 return 0;
930
931         maps__put(al->maps);
932         al->maps = maps__get(bi[i].to.ms.maps);
933         map__put(al->map);
934         al->map = map__get(bi[i].to.ms.map);
935         al->sym = bi[i].to.ms.sym;
936         al->addr = bi[i].to.addr;
937         return 1;
938 }
939
940 static int
941 iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
942 {
943         struct branch_info *bi;
944         struct evsel *evsel = iter->evsel;
945         struct hists *hists = evsel__hists(evsel);
946         struct perf_sample *sample = iter->sample;
947         struct hist_entry *he = NULL;
948         int i = iter->curr;
949         int err = 0;
950
951         bi = iter->priv;
952
953         if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
954                 goto out;
955
956         /*
957          * The report shows the percentage of total branches captured
958          * and not events sampled. Thus we use a pseudo period of 1.
959          */
960         sample->period = 1;
961         sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
962
963         he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
964                               sample, true);
965         if (he == NULL)
966                 return -ENOMEM;
967
968         hists__inc_nr_samples(hists, he->filtered);
969
970 out:
971         iter->he = he;
972         iter->curr++;
973         return err;
974 }
975
976 static int
977 iter_finish_branch_entry(struct hist_entry_iter *iter,
978                          struct addr_location *al __maybe_unused)
979 {
980         zfree(&iter->priv);
981         iter->he = NULL;
982
983         return iter->curr >= iter->total ? 0 : -1;
984 }
985
986 static int
987 iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
988                           struct addr_location *al __maybe_unused)
989 {
990         return 0;
991 }
992
993 static int
994 iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
995 {
996         struct evsel *evsel = iter->evsel;
997         struct perf_sample *sample = iter->sample;
998         struct hist_entry *he;
999
1000         he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1001                               NULL, sample, true);
1002         if (he == NULL)
1003                 return -ENOMEM;
1004
1005         iter->he = he;
1006         return 0;
1007 }
1008
1009 static int
1010 iter_finish_normal_entry(struct hist_entry_iter *iter,
1011                          struct addr_location *al __maybe_unused)
1012 {
1013         struct hist_entry *he = iter->he;
1014         struct evsel *evsel = iter->evsel;
1015         struct perf_sample *sample = iter->sample;
1016
1017         if (he == NULL)
1018                 return 0;
1019
1020         iter->he = NULL;
1021
1022         hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
1023
1024         return hist_entry__append_callchain(he, sample);
1025 }
1026
1027 static int
1028 iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
1029                               struct addr_location *al __maybe_unused)
1030 {
1031         struct hist_entry **he_cache;
1032         struct callchain_cursor *cursor = get_tls_callchain_cursor();
1033
1034         if (cursor == NULL)
1035                 return -ENOMEM;
1036
1037         callchain_cursor_commit(cursor);
1038
1039         /*
1040          * This is for detecting cycles or recursion so that they're
1041          * accumulated only once, preventing entries from exceeding
1042          * 100% overhead.
1043          */
1044         he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
1045         if (he_cache == NULL)
1046                 return -ENOMEM;
1047
1048         iter->priv = he_cache;
1049         iter->curr = 0;
1050
1051         return 0;
1052 }
1053
1054 static int
1055 iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1056                                  struct addr_location *al)
1057 {
1058         struct evsel *evsel = iter->evsel;
1059         struct hists *hists = evsel__hists(evsel);
1060         struct perf_sample *sample = iter->sample;
1061         struct hist_entry **he_cache = iter->priv;
1062         struct hist_entry *he;
1063         int err = 0;
1064
1065         he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
1066                               sample, true);
1067         if (he == NULL)
1068                 return -ENOMEM;
1069
1070         iter->he = he;
1071         he_cache[iter->curr++] = he;
1072
1073         hist_entry__append_callchain(he, sample);
1074
1075         /*
1076          * We need to re-initialize the cursor since callchain_append()
1077          * advanced the cursor to the end.
1078          */
1079         callchain_cursor_commit(get_tls_callchain_cursor());
1080
1081         hists__inc_nr_samples(hists, he->filtered);
1082
1083         return err;
1084 }
1085
1086 static int
1087 iter_next_cumulative_entry(struct hist_entry_iter *iter,
1088                            struct addr_location *al)
1089 {
1090         struct callchain_cursor_node *node;
1091
1092         node = callchain_cursor_current(get_tls_callchain_cursor());
1093         if (node == NULL)
1094                 return 0;
1095
1096         return fill_callchain_info(al, node, iter->hide_unresolved);
1097 }
1098
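/*
 * Fast path for duplicate detection in iter_add_next_cumulative_entry():
 * when symbol sorting is in use, comparing the symbols (or the IPs when both
 * are unresolved) cheaply rules out a match before falling back to the full
 * hist_entry__cmp().
 */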
1099 static bool
1100 hist_entry__fast__sym_diff(struct hist_entry *left,
1101                            struct hist_entry *right)
1102 {
1103         struct symbol *sym_l = left->ms.sym;
1104         struct symbol *sym_r = right->ms.sym;
1105
1106         if (!sym_l && !sym_r)
1107                 return left->ip != right->ip;
1108
1109         return !!_sort__sym_cmp(sym_l, sym_r);
1110 }
1111
1112
1113 static int
1114 iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1115                                struct addr_location *al)
1116 {
1117         struct evsel *evsel = iter->evsel;
1118         struct perf_sample *sample = iter->sample;
1119         struct hist_entry **he_cache = iter->priv;
1120         struct hist_entry *he;
1121         struct hist_entry he_tmp = {
1122                 .hists = evsel__hists(evsel),
1123                 .cpu = al->cpu,
1124                 .thread = al->thread,
1125                 .comm = thread__comm(al->thread),
1126                 .ip = al->addr,
1127                 .ms = {
1128                         .maps = al->maps,
1129                         .map = al->map,
1130                         .sym = al->sym,
1131                 },
1132                 .srcline = (char *) al->srcline,
1133                 .parent = iter->parent,
1134                 .raw_data = sample->raw_data,
1135                 .raw_size = sample->raw_size,
1136         };
1137         int i;
1138         struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
1139         bool fast = hists__has(he_tmp.hists, sym);
1140
1141         if (tls_cursor == NULL)
1142                 return -ENOMEM;
1143
1144         callchain_cursor_snapshot(&cursor, tls_cursor);
1145
1146         callchain_cursor_advance(tls_cursor);
1147
1148         /*
1149          * Check if there are duplicate entries in the callchain.
1150          * It's possible that it has cycles or recursive calls.
1151          */
1152         for (i = 0; i < iter->curr; i++) {
1153                 /*
1154                  * In most cases, there are no duplicate entries in the callchain.
1155                  * The symbols are usually different. Do a quick check for
1156                  * symbols first.
1157                  */
1158                 if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
1159                         continue;
1160
1161                 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1162                         /* to avoid calling callback function */
1163                         iter->he = NULL;
1164                         return 0;
1165                 }
1166         }
1167
1168         he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
1169                               NULL, sample, false);
1170         if (he == NULL)
1171                 return -ENOMEM;
1172
1173         iter->he = he;
1174         he_cache[iter->curr++] = he;
1175
1176         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
1177                 callchain_append(he->callchain, &cursor, sample->period);
1178         return 0;
1179 }
1180
1181 static int
1182 iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1183                              struct addr_location *al __maybe_unused)
1184 {
1185         zfree(&iter->priv);
1186         iter->he = NULL;
1187
1188         return 0;
1189 }
1190
1191 const struct hist_iter_ops hist_iter_mem = {
1192         .prepare_entry          = iter_prepare_mem_entry,
1193         .add_single_entry       = iter_add_single_mem_entry,
1194         .next_entry             = iter_next_nop_entry,
1195         .add_next_entry         = iter_add_next_nop_entry,
1196         .finish_entry           = iter_finish_mem_entry,
1197 };
1198
1199 const struct hist_iter_ops hist_iter_branch = {
1200         .prepare_entry          = iter_prepare_branch_entry,
1201         .add_single_entry       = iter_add_single_branch_entry,
1202         .next_entry             = iter_next_branch_entry,
1203         .add_next_entry         = iter_add_next_branch_entry,
1204         .finish_entry           = iter_finish_branch_entry,
1205 };
1206
1207 const struct hist_iter_ops hist_iter_normal = {
1208         .prepare_entry          = iter_prepare_normal_entry,
1209         .add_single_entry       = iter_add_single_normal_entry,
1210         .next_entry             = iter_next_nop_entry,
1211         .add_next_entry         = iter_add_next_nop_entry,
1212         .finish_entry           = iter_finish_normal_entry,
1213 };
1214
1215 const struct hist_iter_ops hist_iter_cumulative = {
1216         .prepare_entry          = iter_prepare_cumulative_entry,
1217         .add_single_entry       = iter_add_single_cumulative_entry,
1218         .next_entry             = iter_next_cumulative_entry,
1219         .add_next_entry         = iter_add_next_cumulative_entry,
1220         .finish_entry           = iter_finish_cumulative_entry,
1221 };
1222
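/*
 * hist_entry_iter__add() drives one sample through the ops table above: the
 * callchain is resolved first, then prepare_entry and add_single_entry run,
 * next_entry/add_next_entry loop until exhausted, and finish_entry cleans up.
 * The optional add_entry_cb is invoked after each entry that was added.
 */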
1223 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
1224                          int max_stack_depth, void *arg)
1225 {
1226         int err, err2;
1227         struct map *alm = NULL;
1228
1229         if (al)
1230                 alm = map__get(al->map);
1231
1232         err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
1233                                         iter->evsel, al, max_stack_depth);
1234         if (err) {
1235                 map__put(alm);
1236                 return err;
1237         }
1238
1239         err = iter->ops->prepare_entry(iter, al);
1240         if (err)
1241                 goto out;
1242
1243         err = iter->ops->add_single_entry(iter, al);
1244         if (err)
1245                 goto out;
1246
1247         if (iter->he && iter->add_entry_cb) {
1248                 err = iter->add_entry_cb(iter, al, true, arg);
1249                 if (err)
1250                         goto out;
1251         }
1252
1253         while (iter->ops->next_entry(iter, al)) {
1254                 err = iter->ops->add_next_entry(iter, al);
1255                 if (err)
1256                         break;
1257
1258                 if (iter->he && iter->add_entry_cb) {
1259                         err = iter->add_entry_cb(iter, al, false, arg);
1260                         if (err)
1261                                 goto out;
1262                 }
1263         }
1264
1265 out:
1266         err2 = iter->ops->finish_entry(iter, al);
1267         if (!err)
1268                 err = err2;
1269
1270         map__put(alm);
1271
1272         return err;
1273 }
1274
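/*
 * hist_entry__cmp() and hist_entry__collapse() walk the configured sort keys
 * in order and return the first non-zero comparison; the former uses each
 * format's ->cmp, the latter its ->collapse, which the collapse/merge phase
 * relies on.
 */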
1275 int64_t
1276 hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
1277 {
1278         struct hists *hists = left->hists;
1279         struct perf_hpp_fmt *fmt;
1280         int64_t cmp = 0;
1281
1282         hists__for_each_sort_list(hists, fmt) {
1283                 if (perf_hpp__is_dynamic_entry(fmt) &&
1284                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1285                         continue;
1286
1287                 cmp = fmt->cmp(fmt, left, right);
1288                 if (cmp)
1289                         break;
1290         }
1291
1292         return cmp;
1293 }
1294
1295 int64_t
1296 hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1297 {
1298         struct hists *hists = left->hists;
1299         struct perf_hpp_fmt *fmt;
1300         int64_t cmp = 0;
1301
1302         hists__for_each_sort_list(hists, fmt) {
1303                 if (perf_hpp__is_dynamic_entry(fmt) &&
1304                     !perf_hpp__defined_dynamic_entry(fmt, hists))
1305                         continue;
1306
1307                 cmp = fmt->collapse(fmt, left, right);
1308                 if (cmp)
1309                         break;
1310         }
1311
1312         return cmp;
1313 }
1314
1315 void hist_entry__delete(struct hist_entry *he)
1316 {
1317         struct hist_entry_ops *ops = he->ops;
1318
1319         thread__zput(he->thread);
1320         maps__zput(he->ms.maps);
1321         map__zput(he->ms.map);
1322
1323         if (he->branch_info) {
1324                 map__zput(he->branch_info->from.ms.map);
1325                 map__zput(he->branch_info->to.ms.map);
1326                 zfree_srcline(&he->branch_info->srcline_from);
1327                 zfree_srcline(&he->branch_info->srcline_to);
1328                 zfree(&he->branch_info);
1329         }
1330
1331         if (he->mem_info) {
1332                 map__zput(he->mem_info->iaddr.ms.map);
1333                 map__zput(he->mem_info->daddr.ms.map);
1334                 mem_info__zput(he->mem_info);
1335         }
1336
1337         if (he->block_info)
1338                 block_info__zput(he->block_info);
1339
1340         if (he->kvm_info)
1341                 kvm_info__zput(he->kvm_info);
1342
1343         zfree(&he->res_samples);
1344         zfree(&he->stat_acc);
1345         zfree_srcline(&he->srcline);
1346         if (he->srcfile && he->srcfile[0])
1347                 zfree(&he->srcfile);
1348         free_callchain(he->callchain);
1349         zfree(&he->trace_output);
1350         zfree(&he->raw_data);
1351         ops->free(he);
1352 }
1353
1354 /*
1355  * If this is not the last column, then we need to pad it according to the
1356  * pre-calculated max length for this column; otherwise don't bother adding
1357  * spaces, because that would break viewing the output with, for instance,
1358  * 'less', which would show tons of trailing spaces when a long C++ demangled
1359  * method name is sampled.
1360  */
1361 int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1362                                    struct perf_hpp_fmt *fmt, int printed)
1363 {
1364         if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
1365                 const int width = fmt->width(fmt, hpp, he->hists);
1366                 if (printed < width) {
1367                         advance_hpp(hpp, printed);
1368                         printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1369                 }
1370         }
1371
1372         return printed;
1373 }
1374
1375 /*
1376  * collapse the histogram
1377  */
1378
1379 static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
1380 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1381                                        enum hist_filter type);
1382
1383 typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1384
1385 static bool check_thread_entry(struct perf_hpp_fmt *fmt)
1386 {
1387         return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
1388 }
1389
1390 static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1391                                                 enum hist_filter type,
1392                                                 fmt_chk_fn check)
1393 {
1394         struct perf_hpp_fmt *fmt;
1395         bool type_match = false;
1396         struct hist_entry *parent = he->parent_he;
1397
1398         switch (type) {
1399         case HIST_FILTER__THREAD:
1400                 if (symbol_conf.comm_list == NULL &&
1401                     symbol_conf.pid_list == NULL &&
1402                     symbol_conf.tid_list == NULL)
1403                         return;
1404                 break;
1405         case HIST_FILTER__DSO:
1406                 if (symbol_conf.dso_list == NULL)
1407                         return;
1408                 break;
1409         case HIST_FILTER__SYMBOL:
1410                 if (symbol_conf.sym_list == NULL)
1411                         return;
1412                 break;
1413         case HIST_FILTER__PARENT:
1414         case HIST_FILTER__GUEST:
1415         case HIST_FILTER__HOST:
1416         case HIST_FILTER__SOCKET:
1417         case HIST_FILTER__C2C:
1418         default:
1419                 return;
1420         }
1421
1422         /* if it's filtered by its own fmt, it has to have filter bits */
1423         perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1424                 if (check(fmt)) {
1425                         type_match = true;
1426                         break;
1427                 }
1428         }
1429
1430         if (type_match) {
1431                 /*
1432                  * If the filter is for the current level's entry, propagate
1433                  * the filter marker to its parents.  The marker bit was
1434                  * already set by default, so it only needs to be cleared
1435                  * for non-filtered entries.
1436                  */
1437                 if (!(he->filtered & (1 << type))) {
1438                         while (parent) {
1439                                 parent->filtered &= ~(1 << type);
1440                                 parent = parent->parent_he;
1441                         }
1442                 }
1443         } else {
1444                 /*
1445                  * If the current entry doesn't have matching formats, set
1446                  * the filter marker for upper level entries.  It will be
1447                  * cleared if its lower level entries are not filtered.
1448                  *
1449                  * For lower-level entries, inherit the parent's filter
1450                  * bit so that lower level entries of a non-filtered
1451                  * entry won't set the filter marker.
1452                  */
1453                 if (parent == NULL)
1454                         he->filtered |= (1 << type);
1455                 else
1456                         he->filtered |= (parent->filtered & (1 << type));
1457         }
1458 }
1459
1460 static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1461 {
1462         hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1463                                             check_thread_entry);
1464
1465         hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1466                                             perf_hpp__is_dso_entry);
1467
1468         hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1469                                             perf_hpp__is_sym_entry);
1470
1471         hists__apply_filters(he->hists, he);
1472 }
1473
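/*
 * hierarchy_insert_entry() inserts a copy of 'he' into one level of the
 * hierarchy tree rooted at 'root', merging its stats into an existing sibling
 * when the sort keys of this level ('hpp_list') compare equal.
 */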
1474 static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
1475                                                  struct rb_root_cached *root,
1476                                                  struct hist_entry *he,
1477                                                  struct hist_entry *parent_he,
1478                                                  struct perf_hpp_list *hpp_list)
1479 {
1480         struct rb_node **p = &root->rb_root.rb_node;
1481         struct rb_node *parent = NULL;
1482         struct hist_entry *iter, *new;
1483         struct perf_hpp_fmt *fmt;
1484         int64_t cmp;
1485         bool leftmost = true;
1486
1487         while (*p != NULL) {
1488                 parent = *p;
1489                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1490
1491                 cmp = 0;
1492                 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1493                         cmp = fmt->collapse(fmt, iter, he);
1494                         if (cmp)
1495                                 break;
1496                 }
1497
1498                 if (!cmp) {
1499                         he_stat__add_stat(&iter->stat, &he->stat);
1500                         return iter;
1501                 }
1502
1503                 if (cmp < 0)
1504                         p = &parent->rb_left;
1505                 else {
1506                         p = &parent->rb_right;
1507                         leftmost = false;
1508                 }
1509         }
1510
1511         new = hist_entry__new(he, true);
1512         if (new == NULL)
1513                 return NULL;
1514
1515         hists->nr_entries++;
1516
1517         /* save related format list for output */
1518         new->hpp_list = hpp_list;
1519         new->parent_he = parent_he;
1520
1521         hist_entry__apply_hierarchy_filters(new);
1522
1523         /* some fields are now passed to 'new' */
1524         perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1525                 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1526                         he->trace_output = NULL;
1527                 else
1528                         new->trace_output = NULL;
1529
1530                 if (perf_hpp__is_srcline_entry(fmt))
1531                         he->srcline = NULL;
1532                 else
1533                         new->srcline = NULL;
1534
1535                 if (perf_hpp__is_srcfile_entry(fmt))
1536                         he->srcfile = NULL;
1537                 else
1538                         new->srcfile = NULL;
1539         }
1540
1541         rb_link_node(&new->rb_node_in, parent, p);
1542         rb_insert_color_cached(&new->rb_node_in, root, leftmost);
1543         return new;
1544 }
1545
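/*
 * Insert 'he' into the hierarchy output, creating one entry per sort
 * level in hists->hpp_formats.  The deepest level is marked as a leaf
 * and, when callchains are in use, gets the callchain merged into it;
 * the original 'he' is freed afterwards.
 */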
1546 static int hists__hierarchy_insert_entry(struct hists *hists,
1547                                          struct rb_root_cached *root,
1548                                          struct hist_entry *he)
1549 {
1550         struct perf_hpp_list_node *node;
1551         struct hist_entry *new_he = NULL;
1552         struct hist_entry *parent = NULL;
1553         int depth = 0;
1554         int ret = 0;
1555
1556         list_for_each_entry(node, &hists->hpp_formats, list) {
1557                 /* skip period (overhead) and elided columns */
1558                 if (node->level == 0 || node->skip)
1559                         continue;
1560
1561                 /* insert copy of 'he' for each fmt into the hierarchy */
1562                 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
1563                 if (new_he == NULL) {
1564                         ret = -1;
1565                         break;
1566                 }
1567
1568                 root = &new_he->hroot_in;
1569                 new_he->depth = depth++;
1570                 parent = new_he;
1571         }
1572
1573         if (new_he) {
1574                 new_he->leaf = true;
1575
1576                 if (hist_entry__has_callchains(new_he) &&
1577                     symbol_conf.use_callchain) {
1578                         struct callchain_cursor *cursor = get_tls_callchain_cursor();
1579
1580                         if (cursor == NULL)
1581                                 return -1;
1582
1583                         callchain_cursor_reset(cursor);
1584                         if (callchain_merge(cursor,
1585                                             new_he->callchain,
1586                                             he->callchain) < 0)
1587                                 ret = -1;
1588                 }
1589         }
1590
1591         /* 'he' is no longer used */
1592         hist_entry__delete(he);
1593
1594         /* return 0 (or -1) since filters were already applied */
1595         return ret;
1596 }
1597
1598 static int hists__collapse_insert_entry(struct hists *hists,
1599                                         struct rb_root_cached *root,
1600                                         struct hist_entry *he)
1601 {
1602         struct rb_node **p = &root->rb_root.rb_node;
1603         struct rb_node *parent = NULL;
1604         struct hist_entry *iter;
1605         int64_t cmp;
1606         bool leftmost = true;
1607
1608         if (symbol_conf.report_hierarchy)
1609                 return hists__hierarchy_insert_entry(hists, root, he);
1610
1611         while (*p != NULL) {
1612                 parent = *p;
1613                 iter = rb_entry(parent, struct hist_entry, rb_node_in);
1614
1615                 cmp = hist_entry__collapse(iter, he);
1616
1617                 if (!cmp) {
1618                         int ret = 0;
1619
1620                         he_stat__add_stat(&iter->stat, &he->stat);
1621                         if (symbol_conf.cumulate_callchain)
1622                                 he_stat__add_stat(iter->stat_acc, he->stat_acc);
1623
1624                         if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
1625                                 struct callchain_cursor *cursor = get_tls_callchain_cursor();
1626
1627                                 if (cursor != NULL) {
1628                                         callchain_cursor_reset(cursor);
1629                                         if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
1630                                                 ret = -1;
1631                                 } else {
1632                                         ret = 0;
1633                                 }
1634                         }
1635                         hist_entry__delete(he);
1636                         return ret;
1637                 }
1638
1639                 if (cmp < 0)
1640                         p = &(*p)->rb_left;
1641                 else {
1642                         p = &(*p)->rb_right;
1643                         leftmost = false;
1644                 }
1645         }
1646         hists->nr_entries++;
1647
1648         rb_link_node(&he->rb_node_in, parent, p);
1649         rb_insert_color_cached(&he->rb_node_in, root, leftmost);
1650         return 1;
1651 }
1652
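/*
 * Return the current input tree and rotate 'entries_in' to the other
 * element of the double-buffered array, so new entries go to a fresh
 * tree while the returned one is being collapsed.
 */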
1653 struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
1654 {
1655         struct rb_root_cached *root;
1656
1657         mutex_lock(&hists->lock);
1658
1659         root = hists->entries_in;
1660         if (++hists->entries_in > &hists->entries_in_array[1])
1661                 hists->entries_in = &hists->entries_in_array[0];
1662
1663         mutex_unlock(&hists->lock);
1664
1665         return root;
1666 }
1667
1668 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1669 {
1670         hists__filter_entry_by_dso(hists, he);
1671         hists__filter_entry_by_thread(hists, he);
1672         hists__filter_entry_by_symbol(hists, he);
1673         hists__filter_entry_by_socket(hists, he);
1674 }
1675
1676 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1677 {
1678         struct rb_root_cached *root;
1679         struct rb_node *next;
1680         struct hist_entry *n;
1681         int ret;
1682
1683         if (!hists__has(hists, need_collapse))
1684                 return 0;
1685
1686         hists->nr_entries = 0;
1687
1688         root = hists__get_rotate_entries_in(hists);
1689
1690         next = rb_first_cached(root);
1691
1692         while (next) {
1693                 if (session_done())
1694                         break;
1695                 n = rb_entry(next, struct hist_entry, rb_node_in);
1696                 next = rb_next(&n->rb_node_in);
1697
1698                 rb_erase_cached(&n->rb_node_in, root);
1699                 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1700                 if (ret < 0)
1701                         return -1;
1702
1703                 if (ret) {
1704                         /*
1705                          * If it wasn't combined with one of the entries already
1706                          * collapsed, we need to apply the filters that may have
1707                          * been set by, say, the hist_browser.
1708                          */
1709                         hists__apply_filters(hists, n);
1710                 }
1711                 if (prog)
1712                         ui_progress__update(prog, 1);
1713         }
1714         return 0;
1715 }
1716
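/*
 * Compare two entries for output ordering, walking the hists' sort list
 * and skipping formats that perf_hpp__should_skip() filters out.
 */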
1717 static int64_t hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1718 {
1719         struct hists *hists = a->hists;
1720         struct perf_hpp_fmt *fmt;
1721         int64_t cmp = 0;
1722
1723         hists__for_each_sort_list(hists, fmt) {
1724                 if (perf_hpp__should_skip(fmt, a->hists))
1725                         continue;
1726
1727                 cmp = fmt->sort(fmt, a, b);
1728                 if (cmp)
1729                         break;
1730         }
1731
1732         return cmp;
1733 }
1734
1735 static void hists__reset_filter_stats(struct hists *hists)
1736 {
1737         hists->nr_non_filtered_entries = 0;
1738         hists->stats.total_non_filtered_period = 0;
1739 }
1740
1741 void hists__reset_stats(struct hists *hists)
1742 {
1743         hists->nr_entries = 0;
1744         hists->stats.total_period = 0;
1745
1746         hists__reset_filter_stats(hists);
1747 }
1748
1749 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1750 {
1751         hists->nr_non_filtered_entries++;
1752         hists->stats.total_non_filtered_period += h->stat.period;
1753 }
1754
1755 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1756 {
1757         if (!h->filtered)
1758                 hists__inc_filter_stats(hists, h);
1759
1760         hists->nr_entries++;
1761         hists->stats.total_period += h->stat.period;
1762 }
1763
1764 static void hierarchy_recalc_total_periods(struct hists *hists)
1765 {
1766         struct rb_node *node;
1767         struct hist_entry *he;
1768
1769         node = rb_first_cached(&hists->entries);
1770
1771         hists->stats.total_period = 0;
1772         hists->stats.total_non_filtered_period = 0;
1773
1774         /*
1775          * recalculate total period using top-level entries only
1776          * since lower level entries only see non-filtered entries
1777          * but upper level entries have the sum of both.
1778          */
1779         while (node) {
1780                 he = rb_entry(node, struct hist_entry, rb_node);
1781                 node = rb_next(node);
1782
1783                 hists->stats.total_period += he->stat.period;
1784                 if (!he->filtered)
1785                         hists->stats.total_non_filtered_period += he->stat.period;
1786         }
1787 }
1788
1789 static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1790                                           struct hist_entry *he)
1791 {
1792         struct rb_node **p = &root->rb_root.rb_node;
1793         struct rb_node *parent = NULL;
1794         struct hist_entry *iter;
1795         struct perf_hpp_fmt *fmt;
1796         bool leftmost = true;
1797
1798         while (*p != NULL) {
1799                 parent = *p;
1800                 iter = rb_entry(parent, struct hist_entry, rb_node);
1801
1802                 if (hist_entry__sort(he, iter) > 0)
1803                         p = &parent->rb_left;
1804                 else {
1805                         p = &parent->rb_right;
1806                         leftmost = false;
1807                 }
1808         }
1809
1810         rb_link_node(&he->rb_node, parent, p);
1811         rb_insert_color_cached(&he->rb_node, root, leftmost);
1812
1813         /* update column width of dynamic entry */
1814         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1815                 if (fmt->init)
1816                         fmt->init(fmt, he);
1817         }
1818 }
1819
1820 static void hists__hierarchy_output_resort(struct hists *hists,
1821                                            struct ui_progress *prog,
1822                                            struct rb_root_cached *root_in,
1823                                            struct rb_root_cached *root_out,
1824                                            u64 min_callchain_hits,
1825                                            bool use_callchain)
1826 {
1827         struct rb_node *node;
1828         struct hist_entry *he;
1829
1830         *root_out = RB_ROOT_CACHED;
1831         node = rb_first_cached(root_in);
1832
1833         while (node) {
1834                 he = rb_entry(node, struct hist_entry, rb_node_in);
1835                 node = rb_next(node);
1836
1837                 hierarchy_insert_output_entry(root_out, he);
1838
1839                 if (prog)
1840                         ui_progress__update(prog, 1);
1841
1842                 hists->nr_entries++;
1843                 if (!he->filtered) {
1844                         hists->nr_non_filtered_entries++;
1845                         hists__calc_col_len(hists, he);
1846                 }
1847
1848                 if (!he->leaf) {
1849                         hists__hierarchy_output_resort(hists, prog,
1850                                                        &he->hroot_in,
1851                                                        &he->hroot_out,
1852                                                        min_callchain_hits,
1853                                                        use_callchain);
1854                         continue;
1855                 }
1856
1857                 if (!use_callchain)
1858                         continue;
1859
1860                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1861                         u64 total = he->stat.period;
1862
1863                         if (symbol_conf.cumulate_callchain)
1864                                 total = he->stat_acc->period;
1865
1866                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1867                 }
1868
1869                 callchain_param.sort(&he->sorted_chain, he->callchain,
1870                                      min_callchain_hits, &callchain_param);
1871         }
1872 }
1873
1874 static void __hists__insert_output_entry(struct rb_root_cached *entries,
1875                                          struct hist_entry *he,
1876                                          u64 min_callchain_hits,
1877                                          bool use_callchain)
1878 {
1879         struct rb_node **p = &entries->rb_root.rb_node;
1880         struct rb_node *parent = NULL;
1881         struct hist_entry *iter;
1882         struct perf_hpp_fmt *fmt;
1883         bool leftmost = true;
1884
1885         if (use_callchain) {
1886                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1887                         u64 total = he->stat.period;
1888
1889                         if (symbol_conf.cumulate_callchain)
1890                                 total = he->stat_acc->period;
1891
1892                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1893                 }
1894                 callchain_param.sort(&he->sorted_chain, he->callchain,
1895                                       min_callchain_hits, &callchain_param);
1896         }
1897
1898         while (*p != NULL) {
1899                 parent = *p;
1900                 iter = rb_entry(parent, struct hist_entry, rb_node);
1901
1902                 if (hist_entry__sort(he, iter) > 0)
1903                         p = &(*p)->rb_left;
1904                 else {
1905                         p = &(*p)->rb_right;
1906                         leftmost = false;
1907                 }
1908         }
1909
1910         rb_link_node(&he->rb_node, parent, p);
1911         rb_insert_color_cached(&he->rb_node, entries, leftmost);
1912
1913         /* update column width of dynamic entries */
1914         perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1915                 if (fmt->init)
1916                         fmt->init(fmt, he);
1917         }
1918 }
1919
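/*
 * Rebuild hists->entries (the output tree) from the collapsed or input
 * entries, recomputing stats and column widths.  An optional callback may
 * drop individual entries from the output; hierarchy mode takes a
 * separate path.
 */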
1920 static void output_resort(struct hists *hists, struct ui_progress *prog,
1921                           bool use_callchain, hists__resort_cb_t cb,
1922                           void *cb_arg)
1923 {
1924         struct rb_root_cached *root;
1925         struct rb_node *next;
1926         struct hist_entry *n;
1927         u64 callchain_total;
1928         u64 min_callchain_hits;
1929
1930         callchain_total = hists->callchain_period;
1931         if (symbol_conf.filter_relative)
1932                 callchain_total = hists->callchain_non_filtered_period;
1933
1934         min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1935
1936         hists__reset_stats(hists);
1937         hists__reset_col_len(hists);
1938
1939         if (symbol_conf.report_hierarchy) {
1940                 hists__hierarchy_output_resort(hists, prog,
1941                                                &hists->entries_collapsed,
1942                                                &hists->entries,
1943                                                min_callchain_hits,
1944                                                use_callchain);
1945                 hierarchy_recalc_total_periods(hists);
1946                 return;
1947         }
1948
1949         if (hists__has(hists, need_collapse))
1950                 root = &hists->entries_collapsed;
1951         else
1952                 root = hists->entries_in;
1953
1954         next = rb_first_cached(root);
1955         hists->entries = RB_ROOT_CACHED;
1956
1957         while (next) {
1958                 n = rb_entry(next, struct hist_entry, rb_node_in);
1959                 next = rb_next(&n->rb_node_in);
1960
1961                 if (cb && cb(n, cb_arg))
1962                         continue;
1963
1964                 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1965                 hists__inc_stats(hists, n);
1966
1967                 if (!n->filtered)
1968                         hists__calc_col_len(hists, n);
1969
1970                 if (prog)
1971                         ui_progress__update(prog, 1);
1972         }
1973 }
1974
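/*
 * Resort the output for an evsel's hists, deciding whether callchains are
 * used based on the evsel's attributes and the global symbol_conf settings.
 */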
1975 void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
1976                              hists__resort_cb_t cb, void *cb_arg)
1977 {
1978         bool use_callchain;
1979
1980         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1981                 use_callchain = evsel__has_callchain(evsel);
1982         else
1983                 use_callchain = symbol_conf.use_callchain;
1984
1985         use_callchain |= symbol_conf.show_branchflag_count;
1986
1987         output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
1988 }
1989
1990 void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
1991 {
1992         return evsel__output_resort_cb(evsel, prog, NULL, NULL);
1993 }
1994
1995 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1996 {
1997         output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
1998 }
1999
2000 void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
2001                              hists__resort_cb_t cb)
2002 {
2003         output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
2004 }
2005
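/*
 * Hierarchy walk helper: return true if the traversal may descend into
 * the children of 'he', depending on whether it is a leaf, is unfolded
 * and on the requested move direction.
 */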
2006 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
2007 {
2008         if (he->leaf || hmd == HMD_FORCE_SIBLING)
2009                 return false;
2010
2011         if (he->unfolded || hmd == HMD_FORCE_CHILD)
2012                 return true;
2013
2014         return false;
2015 }
2016
2017 struct rb_node *rb_hierarchy_last(struct rb_node *node)
2018 {
2019         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2020
2021         while (can_goto_child(he, HMD_NORMAL)) {
2022                 node = rb_last(&he->hroot_out.rb_root);
2023                 he = rb_entry(node, struct hist_entry, rb_node);
2024         }
2025         return node;
2026 }
2027
2028 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
2029 {
2030         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2031
2032         if (can_goto_child(he, hmd))
2033                 node = rb_first_cached(&he->hroot_out);
2034         else
2035                 node = rb_next(node);
2036
2037         while (node == NULL) {
2038                 he = he->parent_he;
2039                 if (he == NULL)
2040                         break;
2041
2042                 node = rb_next(&he->rb_node);
2043         }
2044         return node;
2045 }
2046
2047 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2048 {
2049         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2050
2051         node = rb_prev(node);
2052         if (node)
2053                 return rb_hierarchy_last(node);
2054
2055         he = he->parent_he;
2056         if (he == NULL)
2057                 return NULL;
2058
2059         return &he->rb_node;
2060 }
2061
2062 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
2063 {
2064         struct rb_node *node;
2065         struct hist_entry *child;
2066         float percent;
2067
2068         if (he->leaf)
2069                 return false;
2070
2071         node = rb_first_cached(&he->hroot_out);
2072         child = rb_entry(node, struct hist_entry, rb_node);
2073
2074         while (node && child->filtered) {
2075                 node = rb_next(node);
2076                 child = rb_entry(node, struct hist_entry, rb_node);
2077         }
2078
2079         if (node)
2080                 percent = hist_entry__get_percent_limit(child);
2081         else
2082                 percent = 0;
2083
2084         return node && percent >= limit;
2085 }
2086
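/*
 * Clear the given filter bit on 'h'.  In hierarchy mode the stats are
 * also added back to the parents and their filter bit cleared.  Once an
 * entry is completely unfiltered it is folded and accounted in the
 * non-filtered stats and column widths.
 */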
2087 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
2088                                        enum hist_filter filter)
2089 {
2090         h->filtered &= ~(1 << filter);
2091
2092         if (symbol_conf.report_hierarchy) {
2093                 struct hist_entry *parent = h->parent_he;
2094
2095                 while (parent) {
2096                         he_stat__add_stat(&parent->stat, &h->stat);
2097
2098                         parent->filtered &= ~(1 << filter);
2099
2100                         if (parent->filtered)
2101                                 goto next;
2102
2103                         /* force fold unfiltered entry for simplicity */
2104                         parent->unfolded = false;
2105                         parent->has_no_entry = false;
2106                         parent->row_offset = 0;
2107                         parent->nr_rows = 0;
2108 next:
2109                         parent = parent->parent_he;
2110                 }
2111         }
2112
2113         if (h->filtered)
2114                 return;
2115
2116         /* force fold unfiltered entry for simplicity */
2117         h->unfolded = false;
2118         h->has_no_entry = false;
2119         h->row_offset = 0;
2120         h->nr_rows = 0;
2121
2122         hists->stats.nr_non_filtered_samples += h->stat.nr_events;
2123
2124         hists__inc_filter_stats(hists, h);
2125         hists__calc_col_len(hists, h);
2126 }
2127
2128
2129 static bool hists__filter_entry_by_dso(struct hists *hists,
2130                                        struct hist_entry *he)
2131 {
2132         if (hists->dso_filter != NULL &&
2133             (he->ms.map == NULL || map__dso(he->ms.map) != hists->dso_filter)) {
2134                 he->filtered |= (1 << HIST_FILTER__DSO);
2135                 return true;
2136         }
2137
2138         return false;
2139 }
2140
2141 static bool hists__filter_entry_by_thread(struct hists *hists,
2142                                           struct hist_entry *he)
2143 {
2144         if (hists->thread_filter != NULL &&
2145             RC_CHK_ACCESS(he->thread) != RC_CHK_ACCESS(hists->thread_filter)) {
2146                 he->filtered |= (1 << HIST_FILTER__THREAD);
2147                 return true;
2148         }
2149
2150         return false;
2151 }
2152
2153 static bool hists__filter_entry_by_symbol(struct hists *hists,
2154                                           struct hist_entry *he)
2155 {
2156         if (hists->symbol_filter_str != NULL &&
2157             (!he->ms.sym || strstr(he->ms.sym->name,
2158                                    hists->symbol_filter_str) == NULL)) {
2159                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
2160                 return true;
2161         }
2162
2163         return false;
2164 }
2165
2166 static bool hists__filter_entry_by_socket(struct hists *hists,
2167                                           struct hist_entry *he)
2168 {
2169         if ((hists->socket_filter > -1) &&
2170             (he->socket != hists->socket_filter)) {
2171                 he->filtered |= (1 << HIST_FILTER__SOCKET);
2172                 return true;
2173         }
2174
2175         return false;
2176 }
2177
2178 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2179
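/*
 * Re-apply a single filter type over the flat output tree: entries for
 * which 'filter' reports a match stay filtered, all others get the bit
 * cleared and the non-filtered stats and column widths refreshed.
 */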
2180 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
2181 {
2182         struct rb_node *nd;
2183
2184         hists->stats.nr_non_filtered_samples = 0;
2185
2186         hists__reset_filter_stats(hists);
2187         hists__reset_col_len(hists);
2188
2189         for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
2190                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2191
2192                 if (filter(hists, h))
2193                         continue;
2194
2195                 hists__remove_entry_filter(hists, h, type);
2196         }
2197 }
2198
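/*
 * Re-insert 'he' into 'root' in output sort order and recursively rebuild
 * the output trees of its children, used after filtering changed the
 * periods in a hierarchy.
 */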
2199 static void resort_filtered_entry(struct rb_root_cached *root,
2200                                   struct hist_entry *he)
2201 {
2202         struct rb_node **p = &root->rb_root.rb_node;
2203         struct rb_node *parent = NULL;
2204         struct hist_entry *iter;
2205         struct rb_root_cached new_root = RB_ROOT_CACHED;
2206         struct rb_node *nd;
2207         bool leftmost = true;
2208
2209         while (*p != NULL) {
2210                 parent = *p;
2211                 iter = rb_entry(parent, struct hist_entry, rb_node);
2212
2213                 if (hist_entry__sort(he, iter) > 0)
2214                         p = &(*p)->rb_left;
2215                 else {
2216                         p = &(*p)->rb_right;
2217                         leftmost = false;
2218                 }
2219         }
2220
2221         rb_link_node(&he->rb_node, parent, p);
2222         rb_insert_color_cached(&he->rb_node, root, leftmost);
2223
2224         if (he->leaf || he->filtered)
2225                 return;
2226
2227         nd = rb_first_cached(&he->hroot_out);
2228         while (nd) {
2229                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2230
2231                 nd = rb_next(nd);
2232                 rb_erase_cached(&h->rb_node, &he->hroot_out);
2233
2234                 resort_filtered_entry(&new_root, h);
2235         }
2236
2237         he->hroot_out = new_root;
2238 }
2239
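/*
 * Apply a filter to a hierarchy report: walk the output tree marking or
 * clearing the filter bit per entry, then recalculate the total periods
 * and resort the output since periods may have changed across levels.
 */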
2240 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
2241 {
2242         struct rb_node *nd;
2243         struct rb_root_cached new_root = RB_ROOT_CACHED;
2244
2245         hists->stats.nr_non_filtered_samples = 0;
2246
2247         hists__reset_filter_stats(hists);
2248         hists__reset_col_len(hists);
2249
2250         nd = rb_first_cached(&hists->entries);
2251         while (nd) {
2252                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2253                 int ret;
2254
2255                 ret = hist_entry__filter(h, type, arg);
2256
2257                 /*
2258                  * case 1. non-matching type
2259                  * zero out the period, set filter marker and move to child
2260                  */
2261                 if (ret < 0) {
2262                         memset(&h->stat, 0, sizeof(h->stat));
2263                         h->filtered |= (1 << type);
2264
2265                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
2266                 }
2267                 /*
2268                  * case 2. matched type (filter out)
2269                  * set filter marker and move to next
2270                  */
2271                 else if (ret == 1) {
2272                         h->filtered |= (1 << type);
2273
2274                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2275                 }
2276                 /*
2277                  * case 3. ok (not filtered)
2278                  * add period to hists and parents, erase the filter marker
2279                  * and move to next sibling
2280                  */
2281                 else {
2282                         hists__remove_entry_filter(hists, h, type);
2283
2284                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
2285                 }
2286         }
2287
2288         hierarchy_recalc_total_periods(hists);
2289
2290         /*
2291          * resort output after applying a new filter since a filter in a lower
2292          * hierarchy can change periods in an upper hierarchy.
2293          */
2294         nd = rb_first_cached(&hists->entries);
2295         while (nd) {
2296                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2297
2298                 nd = rb_next(nd);
2299                 rb_erase_cached(&h->rb_node, &hists->entries);
2300
2301                 resort_filtered_entry(&new_root, h);
2302         }
2303
2304         hists->entries = new_root;
2305 }
2306
2307 void hists__filter_by_thread(struct hists *hists)
2308 {
2309         if (symbol_conf.report_hierarchy)
2310                 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2311                                         hists->thread_filter);
2312         else
2313                 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2314                                       hists__filter_entry_by_thread);
2315 }
2316
2317 void hists__filter_by_dso(struct hists *hists)
2318 {
2319         if (symbol_conf.report_hierarchy)
2320                 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2321                                         hists->dso_filter);
2322         else
2323                 hists__filter_by_type(hists, HIST_FILTER__DSO,
2324                                       hists__filter_entry_by_dso);
2325 }
2326
2327 void hists__filter_by_symbol(struct hists *hists)
2328 {
2329         if (symbol_conf.report_hierarchy)
2330                 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2331                                         hists->symbol_filter_str);
2332         else
2333                 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2334                                       hists__filter_entry_by_symbol);
2335 }
2336
2337 void hists__filter_by_socket(struct hists *hists)
2338 {
2339         if (symbol_conf.report_hierarchy)
2340                 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2341                                         &hists->socket_filter);
2342         else
2343                 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2344                                       hists__filter_entry_by_socket);
2345 }
2346
2347 void events_stats__inc(struct events_stats *stats, u32 type)
2348 {
2349         ++stats->nr_events[0];
2350         ++stats->nr_events[type];
2351 }
2352
2353 static void hists_stats__inc(struct hists_stats *stats)
2354 {
2355         ++stats->nr_samples;
2356 }
2357
2358 void hists__inc_nr_events(struct hists *hists)
2359 {
2360         hists_stats__inc(&hists->stats);
2361 }
2362
2363 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2364 {
2365         hists_stats__inc(&hists->stats);
2366         if (!filtered)
2367                 hists->stats.nr_non_filtered_samples++;
2368 }
2369
2370 void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
2371 {
2372         hists->stats.nr_lost_samples += lost;
2373 }
2374
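/*
 * Find or create a zero-stat dummy entry matching 'pair' in this hists,
 * so entries that only exist in another hists still have a counterpart
 * to pair with (see hists__link()).
 */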
2375 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2376                                                  struct hist_entry *pair)
2377 {
2378         struct rb_root_cached *root;
2379         struct rb_node **p;
2380         struct rb_node *parent = NULL;
2381         struct hist_entry *he;
2382         int64_t cmp;
2383         bool leftmost = true;
2384
2385         if (hists__has(hists, need_collapse))
2386                 root = &hists->entries_collapsed;
2387         else
2388                 root = hists->entries_in;
2389
2390         p = &root->rb_root.rb_node;
2391
2392         while (*p != NULL) {
2393                 parent = *p;
2394                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2395
2396                 cmp = hist_entry__collapse(he, pair);
2397
2398                 if (!cmp)
2399                         goto out;
2400
2401                 if (cmp < 0)
2402                         p = &(*p)->rb_left;
2403                 else {
2404                         p = &(*p)->rb_right;
2405                         leftmost = false;
2406                 }
2407         }
2408
2409         he = hist_entry__new(pair, true);
2410         if (he) {
2411                 memset(&he->stat, 0, sizeof(he->stat));
2412                 he->hists = hists;
2413                 if (symbol_conf.cumulate_callchain)
2414                         memset(he->stat_acc, 0, sizeof(he->stat));
2415                 rb_link_node(&he->rb_node_in, parent, p);
2416                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2417                 hists__inc_stats(hists, he);
2418                 he->dummy = true;
2419         }
2420 out:
2421         return he;
2422 }
2423
2424 static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
2425                                                     struct rb_root_cached *root,
2426                                                     struct hist_entry *pair)
2427 {
2428         struct rb_node **p;
2429         struct rb_node *parent = NULL;
2430         struct hist_entry *he;
2431         struct perf_hpp_fmt *fmt;
2432         bool leftmost = true;
2433
2434         p = &root->rb_root.rb_node;
2435         while (*p != NULL) {
2436                 int64_t cmp = 0;
2437
2438                 parent = *p;
2439                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2440
2441                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2442                         cmp = fmt->collapse(fmt, he, pair);
2443                         if (cmp)
2444                                 break;
2445                 }
2446                 if (!cmp)
2447                         goto out;
2448
2449                 if (cmp < 0)
2450                         p = &parent->rb_left;
2451                 else {
2452                         p = &parent->rb_right;
2453                         leftmost = false;
2454                 }
2455         }
2456
2457         he = hist_entry__new(pair, true);
2458         if (he) {
2459                 rb_link_node(&he->rb_node_in, parent, p);
2460                 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
2461
2462                 he->dummy = true;
2463                 he->hists = hists;
2464                 memset(&he->stat, 0, sizeof(he->stat));
2465                 hists__inc_stats(hists, he);
2466         }
2467 out:
2468         return he;
2469 }
2470
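/* Look up an entry matching 'he' in the (possibly collapsed) input tree. */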
2471 static struct hist_entry *hists__find_entry(struct hists *hists,
2472                                             struct hist_entry *he)
2473 {
2474         struct rb_node *n;
2475
2476         if (hists__has(hists, need_collapse))
2477                 n = hists->entries_collapsed.rb_root.rb_node;
2478         else
2479                 n = hists->entries_in->rb_root.rb_node;
2480
2481         while (n) {
2482                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2483                 int64_t cmp = hist_entry__collapse(iter, he);
2484
2485                 if (cmp < 0)
2486                         n = n->rb_left;
2487                 else if (cmp > 0)
2488                         n = n->rb_right;
2489                 else
2490                         return iter;
2491         }
2492
2493         return NULL;
2494 }
2495
2496 static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
2497                                                       struct hist_entry *he)
2498 {
2499         struct rb_node *n = root->rb_root.rb_node;
2500
2501         while (n) {
2502                 struct hist_entry *iter;
2503                 struct perf_hpp_fmt *fmt;
2504                 int64_t cmp = 0;
2505
2506                 iter = rb_entry(n, struct hist_entry, rb_node_in);
2507                 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
2508                         cmp = fmt->collapse(fmt, iter, he);
2509                         if (cmp)
2510                                 break;
2511                 }
2512
2513                 if (cmp < 0)
2514                         n = n->rb_left;
2515                 else if (cmp > 0)
2516                         n = n->rb_right;
2517                 else
2518                         return iter;
2519         }
2520
2521         return NULL;
2522 }
2523
2524 static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2525                                    struct rb_root_cached *other_root)
2526 {
2527         struct rb_node *nd;
2528         struct hist_entry *pos, *pair;
2529
2530         for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
2531                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2532                 pair = hists__find_hierarchy_entry(other_root, pos);
2533
2534                 if (pair) {
2535                         hist_entry__add_pair(pair, pos);
2536                         hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2537                 }
2538         }
2539 }
2540
2541 /*
2542  * Look for pairs to link to the leader buckets (hist_entries):
2543  */
2544 void hists__match(struct hists *leader, struct hists *other)
2545 {
2546         struct rb_root_cached *root;
2547         struct rb_node *nd;
2548         struct hist_entry *pos, *pair;
2549
2550         if (symbol_conf.report_hierarchy) {
2551                 /* hierarchy report always collapses entries */
2552                 return hists__match_hierarchy(&leader->entries_collapsed,
2553                                               &other->entries_collapsed);
2554         }
2555
2556         if (hists__has(leader, need_collapse))
2557                 root = &leader->entries_collapsed;
2558         else
2559                 root = leader->entries_in;
2560
2561         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2562                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2563                 pair = hists__find_entry(other, pos);
2564
2565                 if (pair)
2566                         hist_entry__add_pair(pair, pos);
2567         }
2568 }
2569
2570 static int hists__link_hierarchy(struct hists *leader_hists,
2571                                  struct hist_entry *parent,
2572                                  struct rb_root_cached *leader_root,
2573                                  struct rb_root_cached *other_root)
2574 {
2575         struct rb_node *nd;
2576         struct hist_entry *pos, *leader;
2577
2578         for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
2579                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2580
2581                 if (hist_entry__has_pairs(pos)) {
2582                         bool found = false;
2583
2584                         list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
2585                                 if (leader->hists == leader_hists) {
2586                                         found = true;
2587                                         break;
2588                                 }
2589                         }
2590                         if (!found)
2591                                 return -1;
2592                 } else {
2593                         leader = add_dummy_hierarchy_entry(leader_hists,
2594                                                            leader_root, pos);
2595                         if (leader == NULL)
2596                                 return -1;
2597
2598                         /* use the leader-side parent, not the parent from 'pos' */
2599                         leader->parent_he = parent;
2600
2601                         hist_entry__add_pair(pos, leader);
2602                 }
2603
2604                 if (!pos->leaf) {
2605                         if (hists__link_hierarchy(leader_hists, leader,
2606                                                   &leader->hroot_in,
2607                                                   &pos->hroot_in) < 0)
2608                                 return -1;
2609                 }
2610         }
2611         return 0;
2612 }
2613
2614 /*
2615  * Look for entries in the other hists that are not present in the leader.  If
2616  * we find them, just add a dummy entry on the leader hists with period=0 and
2617  * nr_events=0, to serve as the list header.
2618  */
2619 int hists__link(struct hists *leader, struct hists *other)
2620 {
2621         struct rb_root_cached *root;
2622         struct rb_node *nd;
2623         struct hist_entry *pos, *pair;
2624
2625         if (symbol_conf.report_hierarchy) {
2626                 /* hierarchy report always collapses entries */
2627                 return hists__link_hierarchy(leader, NULL,
2628                                              &leader->entries_collapsed,
2629                                              &other->entries_collapsed);
2630         }
2631
2632         if (hists__has(other, need_collapse))
2633                 root = &other->entries_collapsed;
2634         else
2635                 root = other->entries_in;
2636
2637         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2638                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2639
2640                 if (!hist_entry__has_pairs(pos)) {
2641                         pair = hists__add_dummy_entry(leader, pos);
2642                         if (pair == NULL)
2643                                 return -1;
2644                         hist_entry__add_pair(pos, pair);
2645                 }
2646         }
2647
2648         return 0;
2649 }
2650
2651 int hists__unlink(struct hists *hists)
2652 {
2653         struct rb_root_cached *root;
2654         struct rb_node *nd;
2655         struct hist_entry *pos;
2656
2657         if (hists__has(hists, need_collapse))
2658                 root = &hists->entries_collapsed;
2659         else
2660                 root = hists->entries_in;
2661
2662         for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2663                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2664                 list_del_init(&pos->pairs.node);
2665         }
2666
2667         return 0;
2668 }
2669
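/*
 * Account the per-branch cycle counts of a sampled branch stack to the
 * corresponding symbols, optionally accumulating the sum into
 * *total_cycles.
 */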
2670 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2671                           struct perf_sample *sample, bool nonany_branch_mode,
2672                           u64 *total_cycles)
2673 {
2674         struct branch_info *bi;
2675         struct branch_entry *entries = perf_sample__branch_entries(sample);
2676
2677         /* If we have branch cycles always annotate them. */
2678         if (bs && bs->nr && entries[0].flags.cycles) {
2679                 int i;
2680
2681                 bi = sample__resolve_bstack(sample, al);
2682                 if (bi) {
2683                         struct addr_map_symbol *prev = NULL;
2684
2685                         /*
2686                          * Ignore errors; we still want to process the
2687                          * other entries.
2688                          *
2689                          * For non-standard branch modes always
2690                          * force no IPC (prev == NULL).
2691                          *
2692                          * Note that perf stores branches reversed from
2693                          * program order!
2694                          */
2695                         for (i = bs->nr - 1; i >= 0; i--) {
2696                                 addr_map_symbol__account_cycles(&bi[i].from,
2697                                         nonany_branch_mode ? NULL : prev,
2698                                         bi[i].flags.cycles);
2699                                 prev = &bi[i].to;
2700
2701                                 if (total_cycles)
2702                                         *total_cycles += bi[i].flags.cycles;
2703                         }
2704                         free(bi);
2705                 }
2706         }
2707 }
2708
2709 size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp,
2710                                  bool skip_empty)
2711 {
2712         struct evsel *pos;
2713         size_t ret = 0;
2714
2715         evlist__for_each_entry(evlist, pos) {
2716                 struct hists *hists = evsel__hists(pos);
2717
2718                 if (skip_empty && !hists->stats.nr_samples && !hists->stats.nr_lost_samples)
2719                         continue;
2720
2721                 ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
2722                 if (hists->stats.nr_samples)
2723                         ret += fprintf(fp, "%16s events: %10d\n",
2724                                        "SAMPLE", hists->stats.nr_samples);
2725                 if (hists->stats.nr_lost_samples)
2726                         ret += fprintf(fp, "%16s events: %10d\n",
2727                                        "LOST_SAMPLES", hists->stats.nr_lost_samples);
2728         }
2729
2730         return ret;
2731 }
2732
2733
2734 u64 hists__total_period(struct hists *hists)
2735 {
2736         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2737                 hists->stats.total_period;
2738 }
2739
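/*
 * Format the hists title line (sample and event counts plus any active
 * thread, DSO, UID or socket filter) into 'bf' and return the number of
 * characters printed.
 */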
2740 int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2741 {
2742         char unit;
2743         int printed;
2744         const struct dso *dso = hists->dso_filter;
2745         struct thread *thread = hists->thread_filter;
2746         int socket_id = hists->socket_filter;
2747         unsigned long nr_samples = hists->stats.nr_samples;
2748         u64 nr_events = hists->stats.total_period;
2749         struct evsel *evsel = hists_to_evsel(hists);
2750         const char *ev_name = evsel__name(evsel);
2751         char buf[512], sample_freq_str[64] = "";
2752         size_t buflen = sizeof(buf);
2753         char ref[30] = " show reference callgraph, ";
2754         bool enable_ref = false;
2755
2756         if (symbol_conf.filter_relative) {
2757                 nr_samples = hists->stats.nr_non_filtered_samples;
2758                 nr_events = hists->stats.total_non_filtered_period;
2759         }
2760
2761         if (evsel__is_group_event(evsel)) {
2762                 struct evsel *pos;
2763
2764                 evsel__group_desc(evsel, buf, buflen);
2765                 ev_name = buf;
2766
2767                 for_each_group_member(pos, evsel) {
2768                         struct hists *pos_hists = evsel__hists(pos);
2769
2770                         if (symbol_conf.filter_relative) {
2771                                 nr_samples += pos_hists->stats.nr_non_filtered_samples;
2772                                 nr_events += pos_hists->stats.total_non_filtered_period;
2773                         } else {
2774                                 nr_samples += pos_hists->stats.nr_samples;
2775                                 nr_events += pos_hists->stats.total_period;
2776                         }
2777                 }
2778         }
2779
2780         if (symbol_conf.show_ref_callgraph &&
2781             strstr(ev_name, "call-graph=no"))
2782                 enable_ref = true;
2783
2784         if (show_freq)
2785                 scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
2786
2787         nr_samples = convert_unit(nr_samples, &unit);
2788         printed = scnprintf(bf, size,
2789                            "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
2790                            nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
2791                            ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2792
2793
2794         if (hists->uid_filter_str)
2795                 printed += snprintf(bf + printed, size - printed,
2796                                     ", UID: %s", hists->uid_filter_str);
2797         if (thread) {
2798                 if (hists__has(hists, thread)) {
2799                         printed += scnprintf(bf + printed, size - printed,
2800                                     ", Thread: %s(%d)",
2801                                     (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
2802                                         thread__tid(thread));
2803                 } else {
2804                         printed += scnprintf(bf + printed, size - printed,
2805                                     ", Thread: %s",
2806                                     (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
2807                 }
2808         }
2809         if (dso)
2810                 printed += scnprintf(bf + printed, size - printed,
2811                                     ", DSO: %s", dso->short_name);
2812         if (socket_id > -1)
2813                 printed += scnprintf(bf + printed, size - printed,
2814                                     ", Processor Socket: %d", socket_id);
2815
2816         return printed;
2817 }
2818
2819 int parse_filter_percentage(const struct option *opt __maybe_unused,
2820                             const char *arg, int unset __maybe_unused)
2821 {
2822         if (!strcmp(arg, "relative"))
2823                 symbol_conf.filter_relative = true;
2824         else if (!strcmp(arg, "absolute"))
2825                 symbol_conf.filter_relative = false;
2826         else {
2827                 pr_debug("Invalid percentage: %s\n", arg);
2828                 return -1;
2829         }
2830
2831         return 0;
2832 }
2833
2834 int perf_hist_config(const char *var, const char *value)
2835 {
2836         if (!strcmp(var, "hist.percentage"))
2837                 return parse_filter_percentage(NULL, value, 0);
2838
2839         return 0;
2840 }
2841
2842 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2843 {
2844         memset(hists, 0, sizeof(*hists));
2845         hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
2846         hists->entries_in = &hists->entries_in_array[0];
2847         hists->entries_collapsed = RB_ROOT_CACHED;
2848         hists->entries = RB_ROOT_CACHED;
2849         mutex_init(&hists->lock);
2850         hists->socket_filter = -1;
2851         hists->hpp_list = hpp_list;
2852         INIT_LIST_HEAD(&hists->hpp_formats);
2853         return 0;
2854 }
2855
2856 static void hists__delete_remaining_entries(struct rb_root_cached *root)
2857 {
2858         struct rb_node *node;
2859         struct hist_entry *he;
2860
2861         while (!RB_EMPTY_ROOT(&root->rb_root)) {
2862                 node = rb_first_cached(root);
2863                 rb_erase_cached(node, root);
2864
2865                 he = rb_entry(node, struct hist_entry, rb_node_in);
2866                 hist_entry__delete(he);
2867         }
2868 }
2869
2870 static void hists__delete_all_entries(struct hists *hists)
2871 {
2872         hists__delete_entries(hists);
2873         hists__delete_remaining_entries(&hists->entries_in_array[0]);
2874         hists__delete_remaining_entries(&hists->entries_in_array[1]);
2875         hists__delete_remaining_entries(&hists->entries_collapsed);
2876 }
2877
2878 static void hists_evsel__exit(struct evsel *evsel)
2879 {
2880         struct hists *hists = evsel__hists(evsel);
2881         struct perf_hpp_fmt *fmt, *pos;
2882         struct perf_hpp_list_node *node, *tmp;
2883
2884         hists__delete_all_entries(hists);
2885
2886         list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2887                 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2888                         list_del_init(&fmt->list);
2889                         free(fmt);
2890                 }
2891                 list_del_init(&node->list);
2892                 free(node);
2893         }
2894 }
2895
2896 static int hists_evsel__init(struct evsel *evsel)
2897 {
2898         struct hists *hists = evsel__hists(evsel);
2899
2900         __hists__init(hists, &perf_hpp_list);
2901         return 0;
2902 }
2903
2904 /*
2905  * XXX We probably need a hists_evsel__exit() to free the hist_entries
2906  * stored in the rbtree...
2907  */
2908
2909 int hists__init(void)
2910 {
2911         int err = evsel__object_config(sizeof(struct hists_evsel),
2912                                        hists_evsel__init, hists_evsel__exit);
2913         if (err)
2914                 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2915
2916         return err;
2917 }
2918
2919 void perf_hpp_list__init(struct perf_hpp_list *list)
2920 {
2921         INIT_LIST_HEAD(&list->fields);
2922         INIT_LIST_HEAD(&list->sorts);
2923 }