#include "parse-events.h"
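
/*
 * This test builds a fake machine with known threads, maps and symbols,
 * feeds synthetic samples into two evsels, and then checks that
 * hists__match() and hists__link() pair up and link the resulting
 * hist entries as expected.
 */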
} fake_mmap_info[] = {
	{ 100, 0x40000, "perf" },
	{ 100, 0x50000, "libc" },
	{ 100, 0xf0000, "[kernel]" },
	{ 200, 0x40000, "perf" },
	{ 200, 0x50000, "libc" },
	{ 200, 0xf0000, "[kernel]" },
	{ 300, 0x40000, "bash" },
	{ 300, 0x50000, "libc" },
	{ 300, 0xf0000, "[kernel]" },
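
/* Each fake symbol is { start, length, name }, relative to its DSO's map. */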
static struct fake_sym perf_syms[] = {
	{ 800, 100, "run_command" },
	{ 900, 100, "cmd_record" },

static struct fake_sym bash_syms[] = {
	{ 800, 100, "xmalloc" },
	{ 900, 100, "xfree" },

static struct fake_sym libc_syms[] = {
	{ 700, 100, "malloc" },
	{ 900, 100, "realloc" },

static struct fake_sym kernel_syms[] = {
	{ 700, 100, "schedule" },
	{ 800, 100, "page_fault" },
	{ 900, 100, "sys_perf_event_open" },

	struct fake_sym *syms;

	{ "perf", perf_syms, ARRAY_SIZE(perf_syms) },
	{ "bash", bash_syms, ARRAY_SIZE(bash_syms) },
	{ "libc", libc_syms, ARRAY_SIZE(libc_syms) },
	{ "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) },
static struct machine *setup_fake_machine(struct machines *machines)
	struct machine *machine = machines__find(machines, HOST_KERNEL_ID);

	if (machine == NULL) {
		pr_debug("Not enough memory for machine setup\n");

	for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
		struct thread *thread;
		thread = machine__findnew_thread(machine, fake_threads[i].pid);
		thread__set_comm(thread, fake_threads[i].comm);

	for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
		union perf_event fake_mmap_event = {
				.header = { .misc = PERF_RECORD_MISC_USER, },
				.pid = fake_mmap_info[i].pid,
				.start = fake_mmap_info[i].start,
		strcpy(fake_mmap_event.mmap.filename,
		       fake_mmap_info[i].filename);
		machine__process_mmap_event(machine, &fake_mmap_event);

	for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) {
		dso = __dsos__findnew(&machine->user_dsos,
				      fake_symbols[i].dso_name);
		/* emulate dso__load() */
		dso__set_loaded(dso, MAP__FUNCTION);

		for (k = 0; k < fake_symbols[i].nr_syms; k++) {
			struct fake_sym *fsym = &fake_symbols[i].syms[k];
			sym = symbol__new(fsym->start, fsym->length,
					  STB_GLOBAL, fsym->name);
			symbols__insert(&dso->symbols[MAP__FUNCTION], sym);

	pr_debug("Not enough memory for machine setup\n");
	machine__delete_threads(machine);
	machine__delete(machine);

	struct thread *thread;
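
/*
 * Samples shared by both evsels; the entries created from them are the
 * ones hists__match() is expected to pair.
 */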
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = 100, .ip = 0xf0000 + 700, },
	/* perf [perf] main() */
	{ .pid = 200, .ip = 0x40000 + 700, },
	/* perf [perf] cmd_record() */
	{ .pid = 200, .ip = 0x40000 + 900, },
	/* bash [bash] xmalloc() */
	{ .pid = 300, .ip = 0x40000 + 800, },
	/* bash [libc] malloc() */
	{ .pid = 300, .ip = 0x50000 + 700, },
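
/*
 * Per-evsel samples (five for each evsel).  One of the second evsel's
 * samples repeats a common one and will be collapsed into it.
 */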
static struct sample fake_samples[][5] = {
	/* perf [perf] run_command() */
	{ .pid = 100, .ip = 0x40000 + 800, },
	/* perf [libc] malloc() */
	{ .pid = 100, .ip = 0x50000 + 700, },
	/* perf [kernel] page_fault() */
	{ .pid = 100, .ip = 0xf0000 + 800, },
	/* perf [kernel] sys_perf_event_open() */
	{ .pid = 200, .ip = 0xf0000 + 900, },
	/* bash [libc] free() */
	{ .pid = 300, .ip = 0x50000 + 800, },
	/* perf [libc] free() */
	{ .pid = 200, .ip = 0x50000 + 800, },
	/* bash [libc] malloc() */
	{ .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
	/* bash [bash] xfree() */
	{ .pid = 300, .ip = 0x40000 + 900, },
	/* bash [libc] realloc() */
	{ .pid = 300, .ip = 0x50000 + 900, },
	/* bash [kernel] page_fault() */
	{ .pid = 300, .ip = 0xf0000 + 800, },
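
/*
 * Add both the common and the per-evsel fake samples to each evsel's
 * hists, remembering the resolved thread/map/symbol so the validation
 * helpers below can find them again.
 */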
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .cpu = 0, };

	/*
	 * each evsel will have 10 samples - 5 common and 5 distinct.
	 * However the second evsel also has a collapsed entry for
	 * "bash [libc] malloc" so total 9 entries will be in the tree.
	 */
	list_for_each_entry(evsel, &evlist->entries, node) {
		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			const union perf_event event = {
					.misc = PERF_RECORD_MISC_USER,
					.pid = fake_common_samples[k].pid,
					.ip = fake_common_samples[k].ip,
			if (perf_event__preprocess_sample(&event, machine, &al,
			he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			const union perf_event event = {
					.misc = PERF_RECORD_MISC_USER,
					.pid = fake_samples[i][k].pid,
					.ip = fake_samples[i][k].ip,
			if (perf_event__preprocess_sample(&event, machine, &al,
			he = __hists__add_entry(&evsel->hists, &al, NULL, 1, 1);
			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;

	pr_debug("Not enough memory for adding a hist entry\n");
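
/* Look up a recorded fake sample by its resolved thread, map and symbol. */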
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&

static int __validate_match(struct hists *hists)
	struct rb_root *root;
	struct rb_node *node;

	/* Only entries from fake_common_samples should have a pair. */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
		root = hists->entries_in;

	node = rb_first(root);
		struct hist_entry *he;
		he = rb_entry(node, struct hist_entry, rb_node_in);
		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				pr_debug("Can't find the matched entry\n");
		node = rb_next(node);

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
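
/* Both the leader and the other hists must pair exactly the common entries. */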
static int validate_match(struct hists *leader, struct hists *other)
	return __validate_match(leader) || __validate_match(other);
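
/*
 * Walk the hists and count all entries, the entries that got a pair and
 * the dummy entries added by hists__link().
 */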
static int __validate_link(struct hists *hists, int idx)
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from the other,
	 * and some entries will have no pair.  However every entry in the
	 * other hists should have a (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
		root = hists->entries_in;

	node = rb_first(root);
		struct hist_entry *he;
		he = rb_entry(node, struct hist_entry, rb_node_in);
		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
			pr_debug("An entry from the other hists should have a pair\n");
		node = rb_next(node);

	/* Note that we have an entry collapsed in the other (idx = 1) hists. */
	if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
		pr_debug("Invalid count of dummy entries: %zd of %zd\n",
			 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
	if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
		pr_debug("Invalid count of total leader entries: %zd of %zd\n",
			 count, count_pair + ARRAY_SIZE(fake_samples[0]));
	if (count != count_pair) {
		pr_debug("Invalid count of total other entries: %zd of %zd\n",
	if (count_dummy > 0) {
		pr_debug("Other hists should not have dummy entries: %zd\n",

static int validate_link(struct hists *leader, struct hists *other)
	return __validate_link(leader, 0) || __validate_link(other, 1);
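
/* Debug helper: dump each hist entry as comm, dso, symbol and period. */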
static void print_hists(struct hists *hists)
	struct rb_root *root;
	struct rb_node *node;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
		root = hists->entries_in;

	pr_info("----- %s --------\n", __func__);
	node = rb_first(root);
		struct hist_entry *he;
		he = rb_entry(node, struct hist_entry, rb_node_in);
		pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
			i, he->thread->comm, he->ms.map->dso->short_name,
			he->ms.sym->name, he->stat.period);
		node = rb_next(node);
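
/*
 * Set up two evsels (cpu-clock and task-clock), add the fake samples to
 * both, then verify that hists__match() pairs the common entries and
 * hists__link() links the rest.
 */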
int test__hists_link(void)
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	err = parse_events(evlist, "cpu-clock");
	err = parse_events(evlist, "task-clock");

	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting() < 0)

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);

	list_for_each_entry(evsel, &evlist->entries, node) {
		hists__collapse_resort(&evsel->hists);
		print_hists(&evsel->hists);

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	/* match common entries */
	hists__match(&first->hists, &evsel->hists);
	err = validate_match(&first->hists, &evsel->hists);

	/* link common and/or dummy entries */
	hists__link(&first->hists, &evsel->hists);
	err = validate_link(&first->hists, &evsel->hists);

	/* tear down everything */
	perf_evlist__delete(evlist);
	machines__exit(&machines);