#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <perf/cpumap.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <linux/perf_event.h>
#include <linux/zalloc.h>
#include "cpumap.h"
#include "dso.h"
#include "event.h"
#include "debug.h"
#include "hist.h"
#include "machine.h"
#include "sort.h"
#include "string2.h"
#include "strlist.h"
#include "thread.h"
#include "thread_map.h"
#include "time-utils.h"
#include <linux/ctype.h>
#include "map.h"
#include "util/namespaces.h"
#include "symbol.h"
#include "symbol/kallsyms.h"
#include "asm/bug.h"
#include "stat.h"
#include "session.h"
#include "bpf-event.h"
#include "print_binary.h"
#include "tool.h"
#include "../perf.h"

static const char *perf_event__names[] = {
	[0]					= "TOTAL",
	[PERF_RECORD_MMAP]			= "MMAP",
	[PERF_RECORD_MMAP2]			= "MMAP2",
	[PERF_RECORD_LOST]			= "LOST",
	[PERF_RECORD_COMM]			= "COMM",
	[PERF_RECORD_EXIT]			= "EXIT",
	[PERF_RECORD_THROTTLE]			= "THROTTLE",
	[PERF_RECORD_UNTHROTTLE]		= "UNTHROTTLE",
	[PERF_RECORD_FORK]			= "FORK",
	[PERF_RECORD_READ]			= "READ",
	[PERF_RECORD_SAMPLE]			= "SAMPLE",
	[PERF_RECORD_AUX]			= "AUX",
	[PERF_RECORD_ITRACE_START]		= "ITRACE_START",
	[PERF_RECORD_LOST_SAMPLES]		= "LOST_SAMPLES",
	[PERF_RECORD_SWITCH]			= "SWITCH",
	[PERF_RECORD_SWITCH_CPU_WIDE]		= "SWITCH_CPU_WIDE",
	[PERF_RECORD_NAMESPACES]		= "NAMESPACES",
	[PERF_RECORD_KSYMBOL]			= "KSYMBOL",
	[PERF_RECORD_BPF_EVENT]			= "BPF_EVENT",
	[PERF_RECORD_CGROUP]			= "CGROUP",
	[PERF_RECORD_TEXT_POKE]			= "TEXT_POKE",
	[PERF_RECORD_AUX_OUTPUT_HW_ID]		= "AUX_OUTPUT_HW_ID",
	[PERF_RECORD_HEADER_ATTR]		= "ATTR",
	[PERF_RECORD_HEADER_EVENT_TYPE]		= "EVENT_TYPE",
	[PERF_RECORD_HEADER_TRACING_DATA]	= "TRACING_DATA",
	[PERF_RECORD_HEADER_BUILD_ID]		= "BUILD_ID",
	[PERF_RECORD_FINISHED_ROUND]		= "FINISHED_ROUND",
	[PERF_RECORD_ID_INDEX]			= "ID_INDEX",
	[PERF_RECORD_AUXTRACE_INFO]		= "AUXTRACE_INFO",
	[PERF_RECORD_AUXTRACE]			= "AUXTRACE",
	[PERF_RECORD_AUXTRACE_ERROR]		= "AUXTRACE_ERROR",
	[PERF_RECORD_THREAD_MAP]		= "THREAD_MAP",
	[PERF_RECORD_CPU_MAP]			= "CPU_MAP",
	[PERF_RECORD_STAT_CONFIG]		= "STAT_CONFIG",
	[PERF_RECORD_STAT]			= "STAT",
	[PERF_RECORD_STAT_ROUND]		= "STAT_ROUND",
	[PERF_RECORD_EVENT_UPDATE]		= "EVENT_UPDATE",
	[PERF_RECORD_TIME_CONV]			= "TIME_CONV",
	[PERF_RECORD_HEADER_FEATURE]		= "FEATURE",
	[PERF_RECORD_COMPRESSED]		= "COMPRESSED",
	[PERF_RECORD_FINISHED_INIT]		= "FINISHED_INIT",
};

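/*
 * Translate a PERF_RECORD_* type id to its printable name; ids beyond the
 * table map to "INVALID" and holes in the table to "UNKNOWN".
 */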
const char *perf_event__name(unsigned int id)
{
	if (id >= ARRAY_SIZE(perf_event__names))
		return "INVALID";
	if (!perf_event__names[id])
		return "UNKNOWN";
	return perf_event__names[id];
}

struct process_symbol_args {
	const char *name;
	u64	   start;
};

static int find_symbol_cb(void *arg, const char *name, char type,
			  u64 start)
{
	struct process_symbol_args *args = arg;

	/*
	 * Must be a function or at least an alias, as on PARISC64, where "_text" is
	 * an 'A' alias at the same address as "_stext".
	 */
	if (!(kallsyms__is_function(type) ||
	      type == 'A') || strcmp(name, args->name))
		return 0;

	args->start = start;
	return 1;
}

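/*
 * Look up @symbol_name in @kallsyms_filename and, if found, store its start
 * address in *addr.  Returns 0 on success, -1 if the symbol is not present.
 */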
int kallsyms__get_function_start(const char *kallsyms_filename,
				 const char *symbol_name, u64 *addr)
{
	struct process_symbol_args args = { .name = symbol_name, };

	if (kallsyms__parse(kallsyms_filename, &args, find_symbol_cb) <= 0)
		return -1;

	*addr = args.start;
	return 0;
}

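/*
 * Unpack the tag/value pairs of a PERF_RECORD_STAT_CONFIG event into the
 * matching fields of @config; unknown tags are reported and skipped.
 */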
void perf_event__read_stat_config(struct perf_stat_config *config,
				  struct perf_record_stat_config *event)
{
	unsigned i;

	for (i = 0; i < event->nr; i++) {

		switch (event->data[i].tag) {
#define CASE(__term, __val)					\
		case PERF_STAT_CONFIG_TERM__##__term:		\
			config->__val = event->data[i].val;	\
			break;

		CASE(AGGR_MODE, aggr_mode)
		CASE(SCALE,     scale)
		CASE(INTERVAL,  interval)
#undef CASE
		default:
			pr_warning("unknown stat config term %" PRI_lu64 "\n",
				   event->data[i].tag);
		}
	}
}

size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp)
{
	const char *s;

	if (event->header.misc & PERF_RECORD_MISC_COMM_EXEC)
		s = " exec";
	else
		s = "";

	return fprintf(fp, "%s: %s:%d/%d\n", s, event->comm.comm, event->comm.pid, event->comm.tid);
}

size_t perf_event__fprintf_namespaces(union perf_event *event, FILE *fp)
{
	size_t ret = 0;
	struct perf_ns_link_info *ns_link_info;
	u32 nr_namespaces, idx;

	ns_link_info = event->namespaces.link_info;
	nr_namespaces = event->namespaces.nr_namespaces;

	ret += fprintf(fp, " %d/%d - nr_namespaces: %u\n\t\t[",
		       event->namespaces.pid,
		       event->namespaces.tid,
		       nr_namespaces);

	for (idx = 0; idx < nr_namespaces; idx++) {
		if (idx && (idx % 4 == 0))
			ret += fprintf(fp, "\n\t\t ");

		ret  += fprintf(fp, "%u/%s: %" PRIu64 "/%#" PRIx64 "%s", idx,
				perf_ns__name(idx), (u64)ns_link_info[idx].dev,
				(u64)ns_link_info[idx].ino,
				((idx + 1) != nr_namespaces) ? ", " : "]\n");
	}

	return ret;
}

size_t perf_event__fprintf_cgroup(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " cgroup: %" PRI_lu64 " %s\n",
		       event->cgroup.id, event->cgroup.path);
}

int perf_event__process_comm(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_comm_event(machine, event, sample);
}

int perf_event__process_namespaces(struct perf_tool *tool __maybe_unused,
				   union perf_event *event,
				   struct perf_sample *sample,
				   struct machine *machine)
{
	return machine__process_namespaces_event(machine, event, sample);
}

int perf_event__process_cgroup(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample,
			       struct machine *machine)
{
	return machine__process_cgroup_event(machine, event, sample);
}

int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_lost_event(machine, event, sample);
}

int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample __maybe_unused,
			    struct machine *machine)
{
	return machine__process_aux_event(machine, event);
}

int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine)
{
	return machine__process_itrace_start_event(machine, event);
}

int perf_event__process_aux_output_hw_id(struct perf_tool *tool __maybe_unused,
					 union perf_event *event,
					 struct perf_sample *sample __maybe_unused,
					 struct machine *machine)
{
	return machine__process_aux_output_hw_id_event(machine, event);
}

int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample,
				     struct machine *machine)
{
	return machine__process_lost_samples_event(machine, event, sample);
}

int perf_event__process_switch(struct perf_tool *tool __maybe_unused,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine)
{
	return machine__process_switch_event(machine, event);
}

int perf_event__process_ksymbol(struct perf_tool *tool __maybe_unused,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine)
{
	return machine__process_ksymbol(machine, event, sample);
}

int perf_event__process_bpf(struct perf_tool *tool __maybe_unused,
			    union perf_event *event,
			    struct perf_sample *sample,
			    struct machine *machine)
{
	return machine__process_bpf(machine, event, sample);
}

int perf_event__process_text_poke(struct perf_tool *tool __maybe_unused,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct machine *machine)
{
	return machine__process_text_poke(machine, event, sample);
}

size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64 "]: %c %s\n",
		       event->mmap.pid, event->mmap.tid, event->mmap.start,
		       event->mmap.len, event->mmap.pgoff,
		       (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
		       event->mmap.filename);
}

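/*
 * MMAP2 records carry either a build ID or a dev/inode pair, depending on
 * PERF_RECORD_MISC_MMAP_BUILD_ID, so print whichever identification the
 * record provides.
 */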
size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
{
	if (event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID) {
		char sbuild_id[SBUILD_ID_SIZE];
		struct build_id bid;

		build_id__init(&bid, event->mmap2.build_id,
			       event->mmap2.build_id_size);
		build_id__sprintf(&bid, sbuild_id);

		return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
				   " <%s>]: %c%c%c%c %s\n",
			       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
			       event->mmap2.len, event->mmap2.pgoff, sbuild_id,
			       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
			       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
			       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
			       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
			       event->mmap2.filename);
	} else {
		return fprintf(fp, " %d/%d: [%#" PRI_lx64 "(%#" PRI_lx64 ") @ %#" PRI_lx64
				   " %02x:%02x %"PRI_lu64" %"PRI_lu64"]: %c%c%c%c %s\n",
			       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
			       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
			       event->mmap2.min, event->mmap2.ino,
			       event->mmap2.ino_generation,
			       (event->mmap2.prot & PROT_READ) ? 'r' : '-',
			       (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
			       (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
			       (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
			       event->mmap2.filename);
	}
}

size_t perf_event__fprintf_thread_map(union perf_event *event, FILE *fp)
{
	struct perf_thread_map *threads = thread_map__new_event(&event->thread_map);
	size_t ret;

	ret = fprintf(fp, " nr: ");

	if (threads)
		ret += thread_map__fprintf(threads, fp);
	else
		ret += fprintf(fp, "failed to get threads from event\n");

	perf_thread_map__put(threads);
	return ret;
}

size_t perf_event__fprintf_cpu_map(union perf_event *event, FILE *fp)
{
	struct perf_cpu_map *cpus = cpu_map__new_data(&event->cpu_map.data);
	size_t ret;

	ret = fprintf(fp, ": ");

	if (cpus)
		ret += cpu_map__fprintf(cpus, fp);
	else
		ret += fprintf(fp, "failed to get cpumap from event\n");

	perf_cpu_map__put(cpus);
	return ret;
}

int perf_event__process_mmap(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap_event(machine, event, sample);
}

int perf_event__process_mmap2(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_mmap2_event(machine, event, sample);
}

size_t perf_event__fprintf_task(union perf_event *event, FILE *fp)
{
	return fprintf(fp, "(%d:%d):(%d:%d)\n",
		       event->fork.pid, event->fork.tid,
		       event->fork.ppid, event->fork.ptid);
}

int perf_event__process_fork(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_fork_event(machine, event, sample);
}

int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
			     union perf_event *event,
			     struct perf_sample *sample,
			     struct machine *machine)
{
	return machine__process_exit_event(machine, event, sample);
}

size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " offset: %#"PRI_lx64" size: %#"PRI_lx64" flags: %#"PRI_lx64" [%s%s%s]\n",
		       event->aux.aux_offset, event->aux.aux_size,
		       event->aux.flags,
		       event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
		       event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "",
		       event->aux.flags & PERF_AUX_FLAG_PARTIAL   ? "P" : "");
}

size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " pid: %u tid: %u\n",
		       event->itrace_start.pid, event->itrace_start.tid);
}

size_t perf_event__fprintf_aux_output_hw_id(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " hw_id: %#"PRI_lx64"\n",
		       event->aux_output_hw_id.hw_id);
}

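/*
 * Print the context-switch direction (IN, OUT, or OUT preempt); CPU-wide
 * switch records additionally carry the pid/tid of the next/previous task.
 */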
size_t perf_event__fprintf_switch(union perf_event *event, FILE *fp)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	const char *in_out = !out ? "IN         " :
		!(event->header.misc & PERF_RECORD_MISC_SWITCH_OUT_PREEMPT) ?
				    "OUT        " : "OUT preempt";

	if (event->header.type == PERF_RECORD_SWITCH)
		return fprintf(fp, " %s\n", in_out);

	return fprintf(fp, " %s  %s pid/tid: %5d/%-5d\n",
		       in_out, out ? "next" : "prev",
		       event->context_switch.next_prev_pid,
		       event->context_switch.next_prev_tid);
}

static size_t perf_event__fprintf_lost(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " lost %" PRI_lu64 "\n", event->lost.lost);
}

size_t perf_event__fprintf_ksymbol(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " addr %" PRI_lx64 " len %u type %u flags 0x%x name %s\n",
		       event->ksymbol.addr, event->ksymbol.len,
		       event->ksymbol.ksym_type,
		       event->ksymbol.flags, event->ksymbol.name);
}

size_t perf_event__fprintf_bpf(union perf_event *event, FILE *fp)
{
	return fprintf(fp, " type %u, flags %u, id %u\n",
		       event->bpf.type, event->bpf.flags, event->bpf.id);
}

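/*
 * binary__fprintf() callback used below to dump the old and new instruction
 * bytes of a text poke; *extra selects the "Old"/"New" line prefix.
 */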
static int text_poke_printer(enum binary_printer_ops op, unsigned int val,
			     void *extra, FILE *fp)
{
	bool old = *(bool *)extra;

	switch ((int)op) {
	case BINARY_PRINT_LINE_BEGIN:
		return fprintf(fp, "            %s bytes:", old ? "Old" : "New");
	case BINARY_PRINT_NUM_DATA:
		return fprintf(fp, " %02x", val);
	case BINARY_PRINT_LINE_END:
		return fprintf(fp, "\n");
	default:
		return 0;
	}
}

size_t perf_event__fprintf_text_poke(union perf_event *event, struct machine *machine, FILE *fp)
{
	struct perf_record_text_poke_event *tp = &event->text_poke;
	size_t ret;
	bool old;

	ret = fprintf(fp, " %" PRI_lx64 " ", tp->addr);
	if (machine) {
		struct addr_location al;

		al.map = maps__find(machine__kernel_maps(machine), tp->addr);
		if (al.map && map__load(al.map) >= 0) {
			al.addr = al.map->map_ip(al.map, tp->addr);
			al.sym = map__find_symbol(al.map, al.addr);
			if (al.sym)
				ret += symbol__fprintf_symname_offs(al.sym, &al, fp);
		}
	}
	ret += fprintf(fp, " old len %u new len %u\n", tp->old_len, tp->new_len);
	old = true;
	ret += binary__fprintf(tp->bytes, tp->old_len, 16, text_poke_printer,
			       &old, fp);
	old = false;
	ret += binary__fprintf(tp->bytes + tp->old_len, tp->new_len, 16,
			       text_poke_printer, &old, fp);
	return ret;
}

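/*
 * Top-level dumper: print "PERF_RECORD_<name>" for the event type, then
 * dispatch to the per-type fprintf helper for the record body, if any.
 */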
size_t perf_event__fprintf(union perf_event *event, struct machine *machine, FILE *fp)
{
	size_t ret = fprintf(fp, "PERF_RECORD_%s",
			     perf_event__name(event->header.type));

	switch (event->header.type) {
	case PERF_RECORD_COMM:
		ret += perf_event__fprintf_comm(event, fp);
		break;
	case PERF_RECORD_FORK:
	case PERF_RECORD_EXIT:
		ret += perf_event__fprintf_task(event, fp);
		break;
	case PERF_RECORD_MMAP:
		ret += perf_event__fprintf_mmap(event, fp);
		break;
	case PERF_RECORD_NAMESPACES:
		ret += perf_event__fprintf_namespaces(event, fp);
		break;
	case PERF_RECORD_CGROUP:
		ret += perf_event__fprintf_cgroup(event, fp);
		break;
	case PERF_RECORD_MMAP2:
		ret += perf_event__fprintf_mmap2(event, fp);
		break;
	case PERF_RECORD_AUX:
		ret += perf_event__fprintf_aux(event, fp);
		break;
	case PERF_RECORD_ITRACE_START:
		ret += perf_event__fprintf_itrace_start(event, fp);
		break;
	case PERF_RECORD_SWITCH:
	case PERF_RECORD_SWITCH_CPU_WIDE:
		ret += perf_event__fprintf_switch(event, fp);
		break;
	case PERF_RECORD_LOST:
		ret += perf_event__fprintf_lost(event, fp);
		break;
	case PERF_RECORD_KSYMBOL:
		ret += perf_event__fprintf_ksymbol(event, fp);
		break;
	case PERF_RECORD_BPF_EVENT:
		ret += perf_event__fprintf_bpf(event, fp);
		break;
	case PERF_RECORD_TEXT_POKE:
		ret += perf_event__fprintf_text_poke(event, machine, fp);
		break;
	case PERF_RECORD_AUX_OUTPUT_HW_ID:
		ret += perf_event__fprintf_aux_output_hw_id(event, fp);
		break;
	default:
		ret += fprintf(fp, "\n");
	}

	return ret;
}

int perf_event__process(struct perf_tool *tool __maybe_unused,
			union perf_event *event,
			struct perf_sample *sample,
			struct machine *machine)
{
	return machine__process_event(machine, event, sample);
}

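/*
 * Resolve @addr within @thread according to @cpumode: pick the kernel, user,
 * guest or hypervisor maps, mark filtered samples, and on success return the
 * map with al->addr rewritten to the map-relative address.
 */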
struct map *thread__find_map(struct thread *thread, u8 cpumode, u64 addr,
			     struct addr_location *al)
{
	struct maps *maps = thread->maps;
	struct machine *machine = maps->machine;
	bool load_map = false;

	al->maps = maps;
	al->thread = thread;
	al->addr = addr;
	al->cpumode = cpumode;
	al->filtered = 0;

	if (machine == NULL) {
		al->map = NULL;
		return NULL;
	}

	if (cpumode == PERF_RECORD_MISC_KERNEL && perf_host) {
		al->level = 'k';
		al->maps = maps = machine__kernel_maps(machine);
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_USER && perf_host) {
		al->level = '.';
	} else if (cpumode == PERF_RECORD_MISC_GUEST_KERNEL && perf_guest) {
		al->level = 'g';
		al->maps = maps = machine__kernel_maps(machine);
		load_map = true;
	} else if (cpumode == PERF_RECORD_MISC_GUEST_USER && perf_guest) {
		al->level = 'u';
	} else {
		al->level = 'H';
		al->map = NULL;

		if ((cpumode == PERF_RECORD_MISC_GUEST_USER ||
			cpumode == PERF_RECORD_MISC_GUEST_KERNEL) &&
			!perf_guest)
			al->filtered |= (1 << HIST_FILTER__GUEST);
		if ((cpumode == PERF_RECORD_MISC_USER ||
			cpumode == PERF_RECORD_MISC_KERNEL) &&
			!perf_host)
			al->filtered |= (1 << HIST_FILTER__HOST);

		return NULL;
	}

	al->map = maps__find(maps, al->addr);
	if (al->map != NULL) {
		/*
		 * Kernel maps might be changed when loading symbols so loading
		 * must be done prior to using kernel maps.
		 */
		if (load_map)
			map__load(al->map);
		al->addr = al->map->map_ip(al->map, al->addr);
	}

	return al->map;
}

/*
 * For branch stacks or branch samples, the sample cpumode might not be correct
 * because it applies only to the sample 'ip' and not necessarily to 'addr' or
 * branch stack addresses. If possible, use a fallback to deal with those cases.
 */
struct map *thread__find_map_fb(struct thread *thread, u8 cpumode, u64 addr,
				struct addr_location *al)
{
	struct map *map = thread__find_map(thread, cpumode, addr, al);
	struct machine *machine = thread->maps->machine;
	u8 addr_cpumode = machine__addr_cpumode(machine, cpumode, addr);

	if (map || addr_cpumode == cpumode)
		return map;

	return thread__find_map(thread, addr_cpumode, addr, al);
}

struct symbol *thread__find_symbol(struct thread *thread, u8 cpumode,
				   u64 addr, struct addr_location *al)
{
	al->sym = NULL;
	if (thread__find_map(thread, cpumode, addr, al))
		al->sym = map__find_symbol(al->map, al->addr);
	return al->sym;
}

struct symbol *thread__find_symbol_fb(struct thread *thread, u8 cpumode,
				      u64 addr, struct addr_location *al)
{
	al->sym = NULL;
	if (thread__find_map_fb(thread, cpumode, addr, al))
		al->sym = map__find_symbol(al->map, al->addr);
	return al->sym;
}

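/*
 * Return true if @addr falls within [entry, entry + addr_range) for any
 * entry in @addr_list; used when matching symbol_conf.addr_list entries in
 * machine__resolve() below.
 */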
static bool check_address_range(struct intlist *addr_list, int addr_range,
				unsigned long addr)
{
	struct int_node *pos;

	intlist__for_each_entry(pos, addr_list) {
		if (addr >= pos->i && addr < pos->i + addr_range)
			return true;
	}

	return false;
}

/*
 * Callers need to drop the reference to al->thread, obtained in
 * machine__findnew_thread()
 */
int machine__resolve(struct machine *machine, struct addr_location *al,
		     struct perf_sample *sample)
{
	struct thread *thread;

	if (symbol_conf.guest_code && !machine__is_host(machine))
		thread = machine__findnew_guest_code(machine, sample->pid);
	else
		thread = machine__findnew_thread(machine, sample->pid, sample->tid);
	if (thread == NULL)
		return -1;

	dump_printf(" ... thread: %s:%d\n", thread__comm_str(thread), thread->tid);
	thread__find_map(thread, sample->cpumode, sample->ip, al);
	dump_printf(" ...... dso: %s\n",
		    al->map ? al->map->dso->long_name :
			al->level == 'H' ? "[hypervisor]" : "<not found>");

	if (thread__is_filtered(thread))
		al->filtered |= (1 << HIST_FILTER__THREAD);

	al->sym = NULL;
	al->cpu = sample->cpu;
	al->socket = -1;
	al->srcline = NULL;

	if (al->cpu >= 0) {
		struct perf_env *env = machine->env;

		if (env && env->cpu)
			al->socket = env->cpu[al->cpu].socket_id;
	}

	if (al->map) {
		struct dso *dso = al->map->dso;

		if (symbol_conf.dso_list &&
		    (!dso || !(strlist__has_entry(symbol_conf.dso_list,
						  dso->short_name) ||
			       (dso->short_name != dso->long_name &&
				strlist__has_entry(symbol_conf.dso_list,
						   dso->long_name))))) {
			al->filtered |= (1 << HIST_FILTER__DSO);
		}

		al->sym = map__find_symbol(al->map, al->addr);
	} else if (symbol_conf.dso_list) {
		al->filtered |= (1 << HIST_FILTER__DSO);
	}

	if (symbol_conf.sym_list) {
		int ret = 0;
		char al_addr_str[32];
		size_t sz = sizeof(al_addr_str);

		if (al->sym) {
			ret = strlist__has_entry(symbol_conf.sym_list,
						al->sym->name);
		}
		if (!ret && al->sym) {
			snprintf(al_addr_str, sz, "0x%"PRIx64,
				al->map->unmap_ip(al->map, al->sym->start));
			ret = strlist__has_entry(symbol_conf.sym_list,
						al_addr_str);
		}
		if (!ret && symbol_conf.addr_list && al->map) {
			unsigned long addr = al->map->unmap_ip(al->map, al->addr);

			ret = intlist__has_entry(symbol_conf.addr_list, addr);
			if (!ret && symbol_conf.addr_range) {
				ret = check_address_range(symbol_conf.addr_list,
							  symbol_conf.addr_range,
							  addr);
			}
		}

		if (!ret)
			al->filtered |= (1 << HIST_FILTER__SYMBOL);
	}

	return 0;
}

/*
 * The preprocess_sample method will return with reference counts held on the
 * entries in the addr_location it fills in; when callers are done using it
 * (and perhaps after taking their own references if they need to keep a
 * pointer to one of those entries), it must be paired with
 * addr_location__put(), so that the refcounts can be decremented.
 */
void addr_location__put(struct addr_location *al)
{
	thread__zput(al->thread);
}

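/*
 * Heuristic: a hardware branch-instructions event with a sample period of 1
 * is treated as a BTS (Branch Trace Store) event.
 */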
bool is_bts_event(struct perf_event_attr *attr)
{
	return attr->type == PERF_TYPE_HARDWARE &&
	       (attr->config & PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	       attr->sample_period == 1;
}

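/*
 * For these event types the sample 'addr' field refers to an address that can
 * sensibly be resolved to a map/symbol: page-fault software events and BTS
 * branch records.
 */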
bool sample_addr_correlates_sym(struct perf_event_attr *attr)
{
	if (attr->type == PERF_TYPE_SOFTWARE &&
	    (attr->config == PERF_COUNT_SW_PAGE_FAULTS ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MIN ||
	     attr->config == PERF_COUNT_SW_PAGE_FAULTS_MAJ))
		return true;

	if (is_bts_event(attr))
		return true;

	return false;
}

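/*
 * Like machine__resolve() but for the sample 'addr' field: find the map and
 * symbol for sample->addr, using the cpumode fallback for branch records.
 */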
void thread__resolve(struct thread *thread, struct addr_location *al,
		     struct perf_sample *sample)
{
	thread__find_map_fb(thread, sample->cpumode, sample->addr, al);

	al->cpu = sample->cpu;
	al->sym = NULL;

	if (al->map)
		al->sym = map__find_symbol(al->map, al->addr);
}