[linux.git] tools/perf/util/session.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <signal.h>
#include <inttypes.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include <api/fs/fs.h>

#include <byteswap.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <perf/cpumap.h>

#include "map_symbol.h"
#include "branch.h"
#include "debug.h"
#include "env.h"
#include "evlist.h"
#include "evsel.h"
#include "memswap.h"
#include "map.h"
#include "symbol.h"
#include "session.h"
#include "tool.h"
#include "perf_regs.h"
#include "asm/bug.h"
#include "auxtrace.h"
#include "thread.h"
#include "thread-stack.h"
#include "sample-raw.h"
#include "stat.h"
#include "tsc.h"
#include "ui/progress.h"
#include "util.h"
#include "arch/common.h"
#include "units.h"
#include <internal/lib.h>

#ifdef HAVE_ZSTD_SUPPORT
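/*
 * Decompress a PERF_RECORD_COMPRESSED payload into an anonymous mmap'd
 * buffer and chain it on the session's decomp list. Any partial record
 * left at the tail of the previous buffer (decomp_last_rem) is copied to
 * the front of the new buffer first, so no record ever straddles two
 * decompression buffers.
 */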
static int perf_session__process_compressed_event(struct perf_session *session,
                                                  union perf_event *event, u64 file_offset,
                                                  const char *file_path)
{
        void *src;
        size_t decomp_size, src_size;
        u64 decomp_last_rem = 0;
        size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
        struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;

        if (decomp_last) {
                decomp_last_rem = decomp_last->size - decomp_last->head;
                decomp_len += decomp_last_rem;
        }

        mmap_len = sizeof(struct decomp) + decomp_len;
        decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
                      MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
        if (decomp == MAP_FAILED) {
                pr_err("Couldn't allocate memory for decompression\n");
                return -1;
        }

        decomp->file_pos = file_offset;
        decomp->file_path = file_path;
        decomp->mmap_len = mmap_len;
        decomp->head = 0;

        if (decomp_last_rem) {
                memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
                decomp->size = decomp_last_rem;
        }

        src = (void *)event + sizeof(struct perf_record_compressed);
        src_size = event->pack.header.size - sizeof(struct perf_record_compressed);

        decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
                                &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
        if (!decomp_size) {
                munmap(decomp, mmap_len);
                pr_err("Couldn't decompress data\n");
                return -1;
        }

        decomp->size += decomp_size;

        if (session->active_decomp->decomp == NULL)
                session->active_decomp->decomp = decomp;
        else
                session->active_decomp->decomp_last->next = decomp;

        session->active_decomp->decomp_last = decomp;
        pr_debug("decomp (B): %zu to %zu\n", src_size, decomp_size);

        return 0;
}
#else /* !HAVE_ZSTD_SUPPORT */
#define perf_session__process_compressed_event perf_session__process_compressed_event_stub
#endif

static int perf_session__deliver_event(struct perf_session *session,
                                       union perf_event *event,
                                       struct perf_tool *tool,
                                       u64 file_offset,
                                       const char *file_path);

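/*
 * Read the perf.data header and sanity-check the evlist: every evsel
 * must agree on sample_type, sample_id_all and read_format. Pipe data
 * carries its attrs later as PERF_RECORD_HEADER_ATTR, and HEADER_STAT
 * files have nothing to validate, so both skip these checks.
 */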
static int perf_session__open(struct perf_session *session, int repipe_fd)
{
        struct perf_data *data = session->data;

        if (perf_session__read_header(session, repipe_fd) < 0) {
                pr_err("incompatible file format (rerun with -v to learn more)\n");
                return -1;
        }

        if (perf_data__is_pipe(data))
                return 0;

        if (perf_header__has_feat(&session->header, HEADER_STAT))
                return 0;

        if (!evlist__valid_sample_type(session->evlist)) {
                pr_err("non matching sample_type\n");
                return -1;
        }

        if (!evlist__valid_sample_id_all(session->evlist)) {
                pr_err("non matching sample_id_all\n");
                return -1;
        }

        if (!evlist__valid_read_format(session->evlist)) {
                pr_err("non matching read_format\n");
                return -1;
        }

        return 0;
}

void perf_session__set_id_hdr_size(struct perf_session *session)
{
        u16 id_hdr_size = evlist__id_hdr_size(session->evlist);

        machines__set_id_hdr_size(&session->machines, id_hdr_size);
}

int perf_session__create_kernel_maps(struct perf_session *session)
{
        int ret = machine__create_kernel_maps(&session->machines.host);

        if (ret >= 0)
                ret = machines__create_guest_kernel_maps(&session->machines);
        return ret;
}

static void perf_session__destroy_kernel_maps(struct perf_session *session)
{
        machines__destroy_kernel_maps(&session->machines);
}

static bool perf_session__has_comm_exec(struct perf_session *session)
{
        struct evsel *evsel;

        evlist__for_each_entry(session->evlist, evsel) {
                if (evsel->core.attr.comm_exec)
                        return true;
        }

        return false;
}

static void perf_session__set_comm_exec(struct perf_session *session)
{
        bool comm_exec = perf_session__has_comm_exec(session);

        machines__set_comm_exec(&session->machines, comm_exec);
}

static int ordered_events__deliver_event(struct ordered_events *oe,
                                         struct ordered_event *event)
{
        struct perf_session *session = container_of(oe, struct perf_session,
                                                    ordered_events);

        return perf_session__deliver_event(session, event->event,
                                           session->tool, event->file_offset,
                                           event->file_path);
}

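/*
 * Note: returns ERR_PTR(-errno) on failure, never NULL, so callers must
 * check the result with IS_ERR().
 */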
struct perf_session *__perf_session__new(struct perf_data *data,
                                         bool repipe, int repipe_fd,
                                         struct perf_tool *tool)
{
        int ret = -ENOMEM;
        struct perf_session *session = zalloc(sizeof(*session));

        if (!session)
                goto out;

        session->repipe = repipe;
        session->tool   = tool;
        session->decomp_data.zstd_decomp = &session->zstd_data;
        session->active_decomp = &session->decomp_data;
        INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events,
                             ordered_events__deliver_event, NULL);

        perf_env__init(&session->header.env);
        if (data) {
                ret = perf_data__open(data);
                if (ret < 0)
                        goto out_delete;

                session->data = data;

                if (perf_data__is_read(data)) {
                        ret = perf_session__open(session, repipe_fd);
                        if (ret < 0)
                                goto out_delete;

                        /*
                         * set session attributes that are present in perf.data
                         * but not in pipe-mode.
                         */
                        if (!data->is_pipe) {
                                perf_session__set_id_hdr_size(session);
                                perf_session__set_comm_exec(session);
                        }

                        evlist__init_trace_event_sample_raw(session->evlist);

                        /* Open the directory data. */
                        if (data->is_dir) {
                                ret = perf_data__open_dir(data);
                                if (ret)
                                        goto out_delete;
                        }

                        if (!symbol_conf.kallsyms_name &&
                            !symbol_conf.vmlinux_name)
                                symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
                }
        } else {
                session->machines.host.env = &perf_env;
        }

        session->machines.host.single_address_space =
                perf_env__single_address_space(session->machines.host.env);

        if (!data || perf_data__is_write(data)) {
                /*
                 * In O_RDONLY mode this will be performed when reading the
                 * kernel MMAP event, in perf_event__process_mmap().
                 */
                if (perf_session__create_kernel_maps(session) < 0)
                        pr_warning("Cannot read kernel map\n");
        }

        /*
         * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
         * processed, so evlist__sample_id_all is not meaningful here.
         */
        if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
            tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
                dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
                tool->ordered_events = false;
        }

        return session;

 out_delete:
        perf_session__delete(session);
 out:
        return ERR_PTR(ret);
}

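/* Walk the decomp chain and munmap() each buffer, header included. */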
static void perf_decomp__release_events(struct decomp *next)
{
        struct decomp *decomp;
        size_t mmap_len;

        do {
                decomp = next;
                if (decomp == NULL)
                        break;
                next = decomp->next;
                mmap_len = decomp->mmap_len;
                munmap(decomp, mmap_len);
        } while (1);
}

void perf_session__delete(struct perf_session *session)
{
        if (session == NULL)
                return;
        auxtrace__free(session);
        auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_decomp__release_events(session->decomp_data.decomp);
        perf_env__exit(&session->header.env);
        machines__exit(&session->machines);
        if (session->data) {
                if (perf_data__is_read(session->data))
                        evlist__delete(session->evlist);
                perf_data__close(session->data);
        }
#ifdef HAVE_LIBTRACEEVENT
        trace_event__cleanup(&session->tevent);
#endif
        free(session);
}

static int process_event_synth_tracing_data_stub(struct perf_session *session
                                                 __maybe_unused,
                                                 union perf_event *event
                                                 __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
                                         union perf_event *event __maybe_unused,
                                         struct evlist **pevlist
                                         __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
                                                 union perf_event *event __maybe_unused,
                                                 struct evlist **pevlist
                                                 __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_event_update(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
                                     union perf_event *event __maybe_unused,
                                     struct perf_sample *sample __maybe_unused,
                                     struct evsel *evsel __maybe_unused,
                                     struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_stub(struct perf_tool *tool __maybe_unused,
                              union perf_event *event __maybe_unused,
                              struct perf_sample *sample __maybe_unused,
                              struct machine *machine __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

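/*
 * Consume and discard n bytes from fd. Needed for pipe input, which
 * cannot lseek() past unsupported auxtrace data.
 */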
static int skipn(int fd, off_t n)
{
        char buf[4096];
        ssize_t ret;

        while (n > 0) {
                ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
                if (ret <= 0)
                        return ret;
                n -= ret;
        }

        return 0;
}

static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
                                       union perf_event *event)
{
        dump_printf(": unhandled!\n");
        if (perf_data__is_pipe(session->data))
                skipn(perf_data__fd(session->data), event->auxtrace.size);
        return event->auxtrace.size;
}

static int process_event_op2_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
                                  union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_thread_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
                               union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_cpu_map(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static
int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
                                   union perf_event *event __maybe_unused)
{
        if (dump_trace)
                perf_event__fprintf_stat_config(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
                             union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
                                   union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_stat_round(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
                                        union perf_event *event)
{
        if (dump_trace)
                perf_event__fprintf_time_conv(event, stdout);

        dump_printf(": unhandled!\n");
        return 0;
}

static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
                                                       union perf_event *event __maybe_unused,
                                                       u64 file_offset __maybe_unused,
                                                       const char *file_path __maybe_unused)
{
        dump_printf(": unhandled!\n");
        return 0;
}

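/*
 * Replace every callback the tool left NULL with a default or stub
 * handler, so event dispatch never has to check for NULL pointers.
 */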
void perf_tool__fill_defaults(struct perf_tool *tool)
{
        if (tool->sample == NULL)
                tool->sample = process_event_sample_stub;
        if (tool->mmap == NULL)
                tool->mmap = process_event_stub;
        if (tool->mmap2 == NULL)
                tool->mmap2 = process_event_stub;
        if (tool->comm == NULL)
                tool->comm = process_event_stub;
        if (tool->namespaces == NULL)
                tool->namespaces = process_event_stub;
        if (tool->cgroup == NULL)
                tool->cgroup = process_event_stub;
        if (tool->fork == NULL)
                tool->fork = process_event_stub;
        if (tool->exit == NULL)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
        if (tool->lost_samples == NULL)
                tool->lost_samples = perf_event__process_lost_samples;
        if (tool->aux == NULL)
                tool->aux = perf_event__process_aux;
        if (tool->itrace_start == NULL)
                tool->itrace_start = perf_event__process_itrace_start;
        if (tool->context_switch == NULL)
                tool->context_switch = perf_event__process_switch;
        if (tool->ksymbol == NULL)
                tool->ksymbol = perf_event__process_ksymbol;
        if (tool->bpf == NULL)
                tool->bpf = perf_event__process_bpf;
        if (tool->text_poke == NULL)
                tool->text_poke = perf_event__process_text_poke;
        if (tool->aux_output_hw_id == NULL)
                tool->aux_output_hw_id = perf_event__process_aux_output_hw_id;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
                tool->throttle = process_event_stub;
        if (tool->unthrottle == NULL)
                tool->unthrottle = process_event_stub;
        if (tool->attr == NULL)
                tool->attr = process_event_synth_attr_stub;
        if (tool->event_update == NULL)
                tool->event_update = process_event_synth_event_update_stub;
        if (tool->tracing_data == NULL)
                tool->tracing_data = process_event_synth_tracing_data_stub;
        if (tool->build_id == NULL)
                tool->build_id = process_event_op2_stub;
        if (tool->finished_round == NULL) {
                if (tool->ordered_events)
                        tool->finished_round = perf_event__process_finished_round;
                else
                        tool->finished_round = process_finished_round_stub;
        }
        if (tool->id_index == NULL)
                tool->id_index = process_event_op2_stub;
        if (tool->auxtrace_info == NULL)
                tool->auxtrace_info = process_event_op2_stub;
        if (tool->auxtrace == NULL)
                tool->auxtrace = process_event_auxtrace_stub;
        if (tool->auxtrace_error == NULL)
                tool->auxtrace_error = process_event_op2_stub;
        if (tool->thread_map == NULL)
                tool->thread_map = process_event_thread_map_stub;
        if (tool->cpu_map == NULL)
                tool->cpu_map = process_event_cpu_map_stub;
        if (tool->stat_config == NULL)
                tool->stat_config = process_event_stat_config_stub;
        if (tool->stat == NULL)
                tool->stat = process_stat_stub;
        if (tool->stat_round == NULL)
                tool->stat_round = process_stat_round_stub;
        if (tool->time_conv == NULL)
                tool->time_conv = process_event_time_conv_stub;
        if (tool->feature == NULL)
                tool->feature = process_event_op2_stub;
        if (tool->compressed == NULL)
                tool->compressed = perf_session__process_compressed_event;
        if (tool->finished_init == NULL)
                tool->finished_init = process_event_op2_stub;
}

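/*
 * The trailing sample_id_all block consists solely of u64 values, so it
 * can be byte-swapped in one pass from 'data' to the end of the event.
 */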
static void swap_sample_id_all(union perf_event *event, void *data)
{
        void *end = (void *) event + event->header.size;
        int size = end - data;

        BUG_ON(size % sizeof(u64));
        mem_bswap_64(data, size);
}

static void perf_event__all64_swap(union perf_event *event,
                                   bool sample_id_all __maybe_unused)
{
        struct perf_event_header *hdr = &event->header;
        mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
}

static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
{
        event->comm.pid = bswap_32(event->comm.pid);
        event->comm.tid = bswap_32(event->comm.tid);

        if (sample_id_all) {
                void *data = &event->comm.comm;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap_swap(union perf_event *event,
                                  bool sample_id_all)
{
        event->mmap.pid   = bswap_32(event->mmap.pid);
        event->mmap.tid   = bswap_32(event->mmap.tid);
        event->mmap.start = bswap_64(event->mmap.start);
        event->mmap.len   = bswap_64(event->mmap.len);
        event->mmap.pgoff = bswap_64(event->mmap.pgoff);

        if (sample_id_all) {
                void *data = &event->mmap.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__mmap2_swap(union perf_event *event,
                                   bool sample_id_all)
{
        event->mmap2.pid   = bswap_32(event->mmap2.pid);
        event->mmap2.tid   = bswap_32(event->mmap2.tid);
        event->mmap2.start = bswap_64(event->mmap2.start);
        event->mmap2.len   = bswap_64(event->mmap2.len);
        event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);

        if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
                event->mmap2.maj   = bswap_32(event->mmap2.maj);
                event->mmap2.min   = bswap_32(event->mmap2.min);
                event->mmap2.ino   = bswap_64(event->mmap2.ino);
                event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
        }

        if (sample_id_all) {
                void *data = &event->mmap2.filename;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
{
        event->fork.pid  = bswap_32(event->fork.pid);
        event->fork.tid  = bswap_32(event->fork.tid);
        event->fork.ppid = bswap_32(event->fork.ppid);
        event->fork.ptid = bswap_32(event->fork.ptid);
        event->fork.time = bswap_64(event->fork.time);

        if (sample_id_all)
                swap_sample_id_all(event, &event->fork + 1);
}

static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
{
        event->read.pid          = bswap_32(event->read.pid);
        event->read.tid          = bswap_32(event->read.tid);
        event->read.value        = bswap_64(event->read.value);
        event->read.time_enabled = bswap_64(event->read.time_enabled);
        event->read.time_running = bswap_64(event->read.time_running);
        event->read.id           = bswap_64(event->read.id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
}

static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
{
        event->aux.aux_offset = bswap_64(event->aux.aux_offset);
        event->aux.aux_size   = bswap_64(event->aux.aux_size);
        event->aux.flags      = bswap_64(event->aux.flags);

        if (sample_id_all)
                swap_sample_id_all(event, &event->aux + 1);
}

static void perf_event__itrace_start_swap(union perf_event *event,
                                          bool sample_id_all)
{
        event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
        event->itrace_start.tid  = bswap_32(event->itrace_start.tid);

        if (sample_id_all)
                swap_sample_id_all(event, &event->itrace_start + 1);
}

static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
{
        if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
                event->context_switch.next_prev_pid =
                                bswap_32(event->context_switch.next_prev_pid);
                event->context_switch.next_prev_tid =
                                bswap_32(event->context_switch.next_prev_tid);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->context_switch + 1);
}

static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
{
        event->text_poke.addr    = bswap_64(event->text_poke.addr);
        event->text_poke.old_len = bswap_16(event->text_poke.old_len);
        event->text_poke.new_len = bswap_16(event->text_poke.new_len);

        if (sample_id_all) {
                size_t len = sizeof(event->text_poke.old_len) +
                             sizeof(event->text_poke.new_len) +
                             event->text_poke.old_len +
                             event->text_poke.new_len;
                void *data = &event->text_poke.old_len;

                data += PERF_ALIGN(len, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
{
        event->throttle.time      = bswap_64(event->throttle.time);
        event->throttle.id        = bswap_64(event->throttle.id);
        event->throttle.stream_id = bswap_64(event->throttle.stream_id);

        if (sample_id_all)
                swap_sample_id_all(event, &event->throttle + 1);
}

static void perf_event__namespaces_swap(union perf_event *event,
                                        bool sample_id_all)
{
        u64 i;

        event->namespaces.pid           = bswap_32(event->namespaces.pid);
        event->namespaces.tid           = bswap_32(event->namespaces.tid);
        event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);

        for (i = 0; i < event->namespaces.nr_namespaces; i++) {
                struct perf_ns_link_info *ns = &event->namespaces.link_info[i];

                ns->dev = bswap_64(ns->dev);
                ns->ino = bswap_64(ns->ino);
        }

        if (sample_id_all)
                swap_sample_id_all(event, &event->namespaces.link_info[i]);
}

static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
{
        event->cgroup.id = bswap_64(event->cgroup.id);

        if (sample_id_all) {
                void *data = &event->cgroup.path;

                data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
                swap_sample_id_all(event, data);
        }
}

static u8 revbyte(u8 b)
{
        int rev = (b >> 4) | ((b & 0xf) << 4);
        rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
        rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
        return (u8) rev;
}

/*
 * XXX this is a hack in an attempt to carry the flags bitfield
 * through the endian village. The ABI says:
 *
 * Bit-fields are allocated from right to left (least to most significant)
 * on little-endian implementations and from left to right (most to least
 * significant) on big-endian implementations.
 *
 * The above seems to be byte specific, so we need to reverse each
 * byte of the bitfield. 'Internet' also says this might be implementation
 * specific and we probably need a proper fix: carry the perf_event_attr
 * bitfield flags in a separate FEAT_ section of the data file. Though this
 * seems to work for now.
 */
static void swap_bitfield(u8 *p, unsigned len)
{
        unsigned i;

        for (i = 0; i < len; i++) {
                *p = revbyte(*p);
                p++;
        }
}

/* exported for swapping attributes in file header */
void perf_event__attr_swap(struct perf_event_attr *attr)
{
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);

#define bswap_safe(f, n)                                        \
        (attr->size > (offsetof(struct perf_event_attr, f) +    \
                       sizeof(attr->f) * (n)))
#define bswap_field(f, sz)                      \
do {                                            \
        if (bswap_safe(f, 0))                   \
                attr->f = bswap_##sz(attr->f);  \
} while (0)
#define bswap_field_16(f) bswap_field(f, 16)
#define bswap_field_32(f) bswap_field(f, 32)
#define bswap_field_64(f) bswap_field(f, 64)

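        /*
         * bswap_safe() only swaps fields that lie within the attr size
         * recorded in the file: an older perf may have written a shorter
         * perf_event_attr, and bytes beyond attr->size were never written.
         */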
        bswap_field_64(config);
        bswap_field_64(sample_period);
        bswap_field_64(sample_type);
        bswap_field_64(read_format);
        bswap_field_32(wakeup_events);
        bswap_field_32(bp_type);
        bswap_field_64(bp_addr);
        bswap_field_64(bp_len);
        bswap_field_64(branch_sample_type);
        bswap_field_64(sample_regs_user);
        bswap_field_32(sample_stack_user);
        bswap_field_32(aux_watermark);
        bswap_field_16(sample_max_stack);
        bswap_field_32(aux_sample_size);

        /*
         * After read_format come bitfields. Check read_format because
         * we cannot use offsetof on a bitfield.
         */
        if (bswap_safe(read_format, 1))
                swap_bitfield((u8 *) (&attr->read_format + 1),
                              sizeof(u64));
#undef bswap_field_64
#undef bswap_field_32
#undef bswap_field
#undef bswap_safe
}

static void perf_event__hdr_attr_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        size_t size;

        perf_event__attr_swap(&event->attr.attr);

        size = event->header.size;
        size -= (void *)&event->attr.id - (void *)event;
        mem_bswap_64(event->attr.id, size);
}

static void perf_event__event_update_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->event_update.type = bswap_64(event->event_update.type);
        event->event_update.id   = bswap_64(event->event_update.id);
}

static void perf_event__event_type_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->event_type.event_type.event_id =
                bswap_64(event->event_type.event_type.event_id);
}

static void perf_event__tracing_data_swap(union perf_event *event,
                                          bool sample_id_all __maybe_unused)
{
        event->tracing_data.size = bswap_32(event->tracing_data.size);
}

static void perf_event__auxtrace_info_swap(union perf_event *event,
                                           bool sample_id_all __maybe_unused)
{
        size_t size;

        event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);

        size = event->header.size;
        size -= (void *)&event->auxtrace_info.priv - (void *)event;
        mem_bswap_64(event->auxtrace_info.priv, size);
}

static void perf_event__auxtrace_swap(union perf_event *event,
                                      bool sample_id_all __maybe_unused)
{
        event->auxtrace.size      = bswap_64(event->auxtrace.size);
        event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
        event->auxtrace.reference = bswap_64(event->auxtrace.reference);
        event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
        event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
        event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
}

static void perf_event__auxtrace_error_swap(union perf_event *event,
                                            bool sample_id_all __maybe_unused)
{
        event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
        event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
        event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
        event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
        event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
        event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
        event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
        if (event->auxtrace_error.fmt)
                event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
        if (event->auxtrace_error.fmt >= 2) {
                event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
                event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
        }
}

static void perf_event__thread_map_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        unsigned i;

        event->thread_map.nr = bswap_64(event->thread_map.nr);

        for (i = 0; i < event->thread_map.nr; i++)
                event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
}

static void perf_event__cpu_map_swap(union perf_event *event,
                                     bool sample_id_all __maybe_unused)
{
        struct perf_record_cpu_map_data *data = &event->cpu_map.data;

        data->type = bswap_16(data->type);

        switch (data->type) {
        case PERF_CPU_MAP__CPUS:
                data->cpus_data.nr = bswap_16(data->cpus_data.nr);

                for (unsigned i = 0; i < data->cpus_data.nr; i++)
                        data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
                break;
        case PERF_CPU_MAP__MASK:
                data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);

                switch (data->mask32_data.long_size) {
                case 4:
                        data->mask32_data.nr = bswap_16(data->mask32_data.nr);
                        for (unsigned i = 0; i < data->mask32_data.nr; i++)
                                data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
                        break;
                case 8:
                        data->mask64_data.nr = bswap_16(data->mask64_data.nr);
                        for (unsigned i = 0; i < data->mask64_data.nr; i++)
                                data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
                        break;
                default:
                        pr_err("cpu_map swap: unsupported long size\n");
                }
                break;
        case PERF_CPU_MAP__RANGE_CPUS:
                data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu);
                data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu);
                break;
        default:
                break;
        }
}

static void perf_event__stat_config_swap(union perf_event *event,
                                         bool sample_id_all __maybe_unused)
{
        u64 size;

        size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
        size += 1; /* nr item itself */
        mem_bswap_64(&event->stat_config.nr, size);
}

static void perf_event__stat_swap(union perf_event *event,
                                  bool sample_id_all __maybe_unused)
{
        event->stat.id     = bswap_64(event->stat.id);
        event->stat.thread = bswap_32(event->stat.thread);
        event->stat.cpu    = bswap_32(event->stat.cpu);
        event->stat.val    = bswap_64(event->stat.val);
        event->stat.ena    = bswap_64(event->stat.ena);
        event->stat.run    = bswap_64(event->stat.run);
}

static void perf_event__stat_round_swap(union perf_event *event,
                                        bool sample_id_all __maybe_unused)
{
        event->stat_round.type = bswap_64(event->stat_round.type);
        event->stat_round.time = bswap_64(event->stat_round.time);
}

static void perf_event__time_conv_swap(union perf_event *event,
                                       bool sample_id_all __maybe_unused)
{
        event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
        event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
        event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);

        if (event_contains(event->time_conv, time_cycles)) {
                event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
                event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
        }
}

typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);

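/*
 * Byte-swap handlers indexed by PERF_RECORD_* type, used when the data
 * file was recorded on a host of the opposite endianness. A NULL entry
 * means there is no handler for that record type.
 */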
static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
        [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
        [PERF_RECORD_COMM]                = perf_event__comm_swap,
        [PERF_RECORD_FORK]                = perf_event__task_swap,
        [PERF_RECORD_EXIT]                = perf_event__task_swap,
        [PERF_RECORD_LOST]                = perf_event__all64_swap,
        [PERF_RECORD_READ]                = perf_event__read_swap,
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
        [PERF_RECORD_AUX]                 = perf_event__aux_swap,
        [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
        [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
        [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
        [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
        [PERF_RECORD_CGROUP]              = perf_event__cgroup_swap,
        [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
        [PERF_RECORD_AUX_OUTPUT_HW_ID]    = perf_event__all64_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
        [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
        [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
        [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
        [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
        [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
        [PERF_RECORD_STAT]                = perf_event__stat_swap,
        [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
        [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
        [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
};

/*
 * When perf record finishes a pass over every buffer, it records this pseudo
 * event.
 * We record the max timestamp t found in the pass n.
 * Assuming these timestamps are monotonic across cpus, we know that if
 * a buffer still has events with timestamps below t, they will all be
 * available and read in the pass n + 1.
 * Hence when we start to read the pass n + 2, we can safely flush all
 * events with timestamps below t.
 *
 *    ============ PASS n =================
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          1          |         2
 *          2          |         3
 *          -          |         4  <--- max recorded
 *
 *    ============ PASS n + 1 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          3          |         5
 *          4          |         6
 *          5          |         7 <---- max recorded
 *
 *      Flush all events below timestamp 4
 *
 *    ============ PASS n + 2 ==============
 *       CPU 0         |   CPU 1
 *                     |
 *    cnt1 timestamps  |   cnt2 timestamps
 *          6          |         8
 *          7          |         9
 *          -          |         10
 *
 *      Flush all events below timestamp 7
 *      etc...
 */
int perf_event__process_finished_round(struct perf_tool *tool __maybe_unused,
                                       union perf_event *event __maybe_unused,
                                       struct ordered_events *oe)
{
        if (dump_trace)
                fprintf(stdout, "\n");
        return ordered_events__flush(oe, OE_FLUSH__ROUND);
}

int perf_session__queue_event(struct perf_session *s, union perf_event *event,
                              u64 timestamp, u64 file_offset, const char *file_path)
{
        return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path);
}

static void callchain__lbr_callstack_printf(struct perf_sample *sample)
{
        struct ip_callchain *callchain = sample->callchain;
        struct branch_stack *lbr_stack = sample->branch_stack;
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        u64 kernel_callchain_nr = callchain->nr;
        unsigned int i;

        for (i = 0; i < kernel_callchain_nr; i++) {
                if (callchain->ips[i] == PERF_CONTEXT_USER)
                        break;
        }

        if ((i != kernel_callchain_nr) && lbr_stack->nr) {
                u64 total_nr;
                /*
                 * The LBR call stack only captures the user call chain;
                 * i is the number of kernel call-chain entries, and 1
                 * accounts for the PERF_CONTEXT_USER marker.
                 *
                 * The user call chain is stored in the LBR registers.
                 * LBRs are register pairs: the caller is stored in the
                 * "from" register, while the callee is stored in the
                 * "to" register.
                 * For example, for a call stack
                 * "A"->"B"->"C"->"D",
                 * the LBR registers will be recorded as
                 * "C"->"D", "B"->"C", "A"->"B".
                 * So only the first "to" register and all "from"
                 * registers are needed to reconstruct the whole stack.
                 */
                total_nr = i + 1 + lbr_stack->nr + 1;
                kernel_callchain_nr = i + 1;

                printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);

                for (i = 0; i < kernel_callchain_nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               i, callchain->ips[i]);

                printf("..... %2d: %016" PRIx64 "\n",
                       (int)(kernel_callchain_nr), entries[0].to);
                for (i = 0; i < lbr_stack->nr; i++)
                        printf("..... %2d: %016" PRIx64 "\n",
                               (int)(i + kernel_callchain_nr + 1), entries[i].from);
        }
}

static void callchain__printf(struct evsel *evsel,
                              struct perf_sample *sample)
{
        unsigned int i;
        struct ip_callchain *callchain = sample->callchain;

        if (evsel__has_branch_callstack(evsel))
                callchain__lbr_callstack_printf(sample);

        printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);

        for (i = 0; i < callchain->nr; i++)
                printf("..... %2d: %016" PRIx64 "\n",
                       i, callchain->ips[i]);
}

static void branch_stack__printf(struct perf_sample *sample, bool callstack)
{
        struct branch_entry *entries = perf_sample__branch_entries(sample);
        uint64_t i;

        if (!callstack) {
                printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr);
        } else {
                /*
                 * Add 1 to nr because expanding the branch stack produces
                 * nr + 1 call-stack records, e.g. for
                 *         B()->C()
                 *         A()->B()
                 * the final call stack is:
                 *         C()
                 *         B()
                 *         A()
                 */
                printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1);
        }

        for (i = 0; i < sample->branch_stack->nr; i++) {
                struct branch_entry *e = &entries[i];

                if (!callstack) {
                        printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s %s\n",
                                i, e->from, e->to,
                                (unsigned short)e->flags.cycles,
                                e->flags.mispred ? "M" : " ",
                                e->flags.predicted ? "P" : " ",
                                e->flags.abort ? "A" : " ",
                                e->flags.in_tx ? "T" : " ",
                                (unsigned)e->flags.reserved,
                                get_branch_type(e),
                                e->flags.spec ? branch_spec_desc(e->flags.spec) : "");
                } else {
                        if (i == 0) {
                                printf("..... %2"PRIu64": %016" PRIx64 "\n"
                                       "..... %2"PRIu64": %016" PRIx64 "\n",
                                                i, e->to, i+1, e->from);
                        } else {
                                printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from);
                        }
                }
        }
}

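/*
 * Registers are stored packed: the i-th set bit in 'mask' identifies the
 * register whose value sits at regs[i].
 */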
static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
{
        unsigned rid, i = 0;

        for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
                u64 val = regs[i++];

                printf(".... %-5s 0x%016" PRIx64 "\n",
                       perf_reg_name(rid, arch), val);
        }
}

static const char *regs_abi[] = {
        [PERF_SAMPLE_REGS_ABI_NONE] = "none",
        [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
        [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
};

static inline const char *regs_dump_abi(struct regs_dump *d)
{
        if (d->abi > PERF_SAMPLE_REGS_ABI_64)
                return "unknown";

        return regs_abi[d->abi];
}

static void regs__printf(const char *type, struct regs_dump *regs, const char *arch)
{
        u64 mask = regs->mask;

        printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
               type,
               mask,
               regs_dump_abi(regs));

        regs_dump__printf(mask, regs->regs, arch);
}

static void regs_user__printf(struct perf_sample *sample, const char *arch)
{
        struct regs_dump *user_regs = &sample->user_regs;

        if (user_regs->regs)
                regs__printf("user", user_regs, arch);
}

static void regs_intr__printf(struct perf_sample *sample, const char *arch)
{
        struct regs_dump *intr_regs = &sample->intr_regs;

        if (intr_regs->regs)
                regs__printf("intr", intr_regs, arch);
}

static void stack_user__printf(struct stack_dump *dump)
{
        printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
               dump->size, dump->offset);
}

static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
{
        u64 sample_type = __evlist__combined_sample_type(evlist);

        if (event->header.type != PERF_RECORD_SAMPLE &&
            !evlist__sample_id_all(evlist)) {
                fputs("-1 -1 ", stdout);
                return;
        }

        if (sample_type & PERF_SAMPLE_CPU)
                printf("%u ", sample->cpu);

        if (sample_type & PERF_SAMPLE_TIME)
                printf("%" PRIu64 " ", sample->time);
}

static void sample_read__printf(struct perf_sample *sample, u64 read_format)
{
        printf("... sample_read:\n");

        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                printf("...... time enabled %016" PRIx64 "\n",
                       sample->read.time_enabled);

        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                printf("...... time running %016" PRIx64 "\n",
                       sample->read.time_running);

        if (read_format & PERF_FORMAT_GROUP) {
                struct sample_read_value *value = sample->read.group.values;

                printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);

                sample_read_group__for_each(value, sample->read.group.nr, read_format) {
                        printf("..... id %016" PRIx64
                               ", value %016" PRIx64,
                               value->id, value->value);
                        if (read_format & PERF_FORMAT_LOST)
                                printf(", lost %" PRIu64, value->lost);
                        printf("\n");
                }
        } else {
                printf("..... id %016" PRIx64 ", value %016" PRIx64,
                        sample->read.one.id, sample->read.one.value);
                if (read_format & PERF_FORMAT_LOST)
                        printf(", lost %" PRIu64, sample->read.one.lost);
                printf("\n");
        }
}

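/* Print the raw event header; a no-op unless dump_trace (-D) is set. */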
1310 static void dump_event(struct evlist *evlist, union perf_event *event,
1311                        u64 file_offset, struct perf_sample *sample,
1312                        const char *file_path)
1313 {
1314         if (!dump_trace)
1315                 return;
1316
1317         printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n",
1318                file_offset, file_path, event->header.size, event->header.type);
1319
1320         trace_event(event);
1321         if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
1322                 evlist->trace_event_sample_raw(evlist, event, sample);
1323
1324         if (sample)
1325                 evlist__print_tstamp(evlist, event, sample);
1326
1327         printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1328                event->header.size, perf_event__name(event->header.type));
1329 }
1330
1331 char *get_page_size_name(u64 size, char *str)
1332 {
1333         if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
1334                 snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");
1335
1336         return str;
1337 }
1338
1339 static void dump_sample(struct evsel *evsel, union perf_event *event,
1340                         struct perf_sample *sample, const char *arch)
1341 {
1342         u64 sample_type;
1343         char str[PAGE_SIZE_NAME_LEN];
1344
1345         if (!dump_trace)
1346                 return;
1347
1348         printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1349                event->header.misc, sample->pid, sample->tid, sample->ip,
1350                sample->period, sample->addr);
1351
1352         sample_type = evsel->core.attr.sample_type;
1353
1354         if (evsel__has_callchain(evsel))
1355                 callchain__printf(evsel, sample);
1356
1357         if (evsel__has_br_stack(evsel))
1358                 branch_stack__printf(sample, evsel__has_branch_callstack(evsel));
1359
1360         if (sample_type & PERF_SAMPLE_REGS_USER)
1361                 regs_user__printf(sample, arch);
1362
1363         if (sample_type & PERF_SAMPLE_REGS_INTR)
1364                 regs_intr__printf(sample, arch);
1365
1366         if (sample_type & PERF_SAMPLE_STACK_USER)
1367                 stack_user__printf(&sample->user_stack);
1368
1369         if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
1370                 printf("... weight: %" PRIu64, sample->weight);
1371                 if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
1372                         printf(",0x%" PRIx16, sample->ins_lat);
1373                         printf(",0x%" PRIx16, sample->p_stage_cyc);
1374                 }
1375                 printf("\n");
1376         }
1377
1378         if (sample_type & PERF_SAMPLE_DATA_SRC)
1379                 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1380
1381         if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1382                 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1383
1384         if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
1385                 printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));
1386
1387         if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
1388                 printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));
1389
1390         if (sample_type & PERF_SAMPLE_TRANSACTION)
1391                 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1392
1393         if (sample_type & PERF_SAMPLE_READ)
1394                 sample_read__printf(sample, evsel->core.attr.read_format);
1395 }
1396
1397 static void dump_read(struct evsel *evsel, union perf_event *event)
1398 {
1399         struct perf_record_read *read_event = &event->read;
1400         u64 read_format;
1401
1402         if (!dump_trace)
1403                 return;
1404
1405         printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
1406                evsel__name(evsel), event->read.value);
1407
1408         if (!evsel)
1409                 return;
1410
1411         read_format = evsel->core.attr.read_format;
1412
1413         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1414                 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
1415
1416         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1417                 printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
1418
1419         if (read_format & PERF_FORMAT_ID)
1420                 printf("... id           : %" PRI_lu64 "\n", read_event->id);
1421
1422         if (read_format & PERF_FORMAT_LOST)
1423                 printf("... lost         : %" PRI_lu64 "\n", read_event->lost);
1424 }
1425
1426 static struct machine *machines__find_for_cpumode(struct machines *machines,
1427                                                union perf_event *event,
1428                                                struct perf_sample *sample)
1429 {
1430         if (perf_guest &&
1431             ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1432              (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1433                 u32 pid;
1434
1435                 if (sample->machine_pid)
1436                         pid = sample->machine_pid;
1437                 else if (event->header.type == PERF_RECORD_MMAP
1438                     || event->header.type == PERF_RECORD_MMAP2)
1439                         pid = event->mmap.pid;
1440                 else
1441                         pid = sample->pid;
1442
1443                 /*
1444                  * Guest code machine is created as needed and does not use
1445                  * DEFAULT_GUEST_KERNEL_ID.
1446                  */
1447                 if (symbol_conf.guest_code)
1448                         return machines__findnew(machines, pid);
1449
1450                 return machines__find_guest(machines, pid);
1451         }
1452
1453         return &machines->host;
1454 }
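
/*
 * For example (hypothetical values): a sample with
 * sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL and
 * sample->machine_pid == 1234 resolves to the guest machine for pid
 * 1234, while any host-mode sample falls straight through to
 * &machines->host.
 */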
1455
1456 static int deliver_sample_value(struct evlist *evlist,
1457                                 struct perf_tool *tool,
1458                                 union perf_event *event,
1459                                 struct perf_sample *sample,
1460                                 struct sample_read_value *v,
1461                                 struct machine *machine)
1462 {
1463         struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
1464         struct evsel *evsel;
1465
1466         if (sid) {
1467                 sample->id     = v->id;
1468                 sample->period = v->value - sid->period;
1469                 sid->period    = v->value;
1470         }
1471
1472         if (!sid || sid->evsel == NULL) {
1473                 ++evlist->stats.nr_unknown_id;
1474                 return 0;
1475         }
1476
1477         /*
1478          * There's no reason to deliver a sample with a zero
1479          * period, so bail out.
1480          */
1481         if (!sample->period)
1482                 return 0;
1483
1484         evsel = container_of(sid->evsel, struct evsel, core);
1485         return tool->sample(tool, event, sample, evsel, machine);
1486 }
1487
1488 static int deliver_sample_group(struct evlist *evlist,
1489                                 struct perf_tool *tool,
1490                                 union  perf_event *event,
1491                                 struct perf_sample *sample,
1492                                 struct machine *machine,
1493                                 u64 read_format)
1494 {
1495         int ret = -EINVAL;
1496         struct sample_read_value *v = sample->read.group.values;
1497
1498         sample_read_group__for_each(v, sample->read.group.nr, read_format) {
1499                 ret = deliver_sample_value(evlist, tool, event, sample, v,
1500                                            machine);
1501                 if (ret)
1502                         break;
1503         }
1504
1505         return ret;
1506 }
1507
1508 static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
1509                                   union  perf_event *event, struct perf_sample *sample,
1510                                   struct evsel *evsel, struct machine *machine)
1511 {
1512         /* We know evsel != NULL. */
1513         u64 sample_type = evsel->core.attr.sample_type;
1514         u64 read_format = evsel->core.attr.read_format;
1515
1516         /* Standard sample delivery. */
1517         if (!(sample_type & PERF_SAMPLE_READ))
1518                 return tool->sample(tool, event, sample, evsel, machine);
1519
1520         /* For PERF_SAMPLE_READ we have either single or group mode. */
1521         if (read_format & PERF_FORMAT_GROUP)
1522                 return deliver_sample_group(evlist, tool, event, sample,
1523                                             machine, read_format);
1524         else
1525                 return deliver_sample_value(evlist, tool, event, sample,
1526                                             &sample->read.one, machine);
1527 }
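
/*
 * Routing sketch (hypothetical attr): an event opened with
 *
 *	struct perf_event_attr attr = {
 *		.sample_type = PERF_SAMPLE_READ,
 *		.read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID,
 *	};
 *
 * is delivered via deliver_sample_group(), one synthesized period per
 * group member, whereas the same attr without PERF_FORMAT_GROUP goes
 * through deliver_sample_value() for sample->read.one only.
 */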
1528
1529 static int machines__deliver_event(struct machines *machines,
1530                                    struct evlist *evlist,
1531                                    union perf_event *event,
1532                                    struct perf_sample *sample,
1533                                    struct perf_tool *tool, u64 file_offset,
1534                                    const char *file_path)
1535 {
1536         struct evsel *evsel;
1537         struct machine *machine;
1538
1539         dump_event(evlist, event, file_offset, sample, file_path);
1540
1541         evsel = evlist__id2evsel(evlist, sample->id);
1542
1543         machine = machines__find_for_cpumode(machines, event, sample);
1544
1545         switch (event->header.type) {
1546         case PERF_RECORD_SAMPLE:
1547                 if (evsel == NULL) {
1548                         ++evlist->stats.nr_unknown_id;
1549                         return 0;
1550                 }
1551                 if (machine == NULL) {
1552                         ++evlist->stats.nr_unprocessable_samples;
1553                         dump_sample(evsel, event, sample, perf_env__arch(NULL));
1554                         return 0;
1555                 }
1556                 dump_sample(evsel, event, sample, perf_env__arch(machine->env));
1557                 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1558         case PERF_RECORD_MMAP:
1559                 return tool->mmap(tool, event, sample, machine);
1560         case PERF_RECORD_MMAP2:
1561                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1562                         ++evlist->stats.nr_proc_map_timeout;
1563                 return tool->mmap2(tool, event, sample, machine);
1564         case PERF_RECORD_COMM:
1565                 return tool->comm(tool, event, sample, machine);
1566         case PERF_RECORD_NAMESPACES:
1567                 return tool->namespaces(tool, event, sample, machine);
1568         case PERF_RECORD_CGROUP:
1569                 return tool->cgroup(tool, event, sample, machine);
1570         case PERF_RECORD_FORK:
1571                 return tool->fork(tool, event, sample, machine);
1572         case PERF_RECORD_EXIT:
1573                 return tool->exit(tool, event, sample, machine);
1574         case PERF_RECORD_LOST:
1575                 if (tool->lost == perf_event__process_lost)
1576                         evlist->stats.total_lost += event->lost.lost;
1577                 return tool->lost(tool, event, sample, machine);
1578         case PERF_RECORD_LOST_SAMPLES:
1579                 if (tool->lost_samples == perf_event__process_lost_samples &&
1580                     !(event->header.misc & PERF_RECORD_MISC_LOST_SAMPLES_BPF))
1581                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1582                 return tool->lost_samples(tool, event, sample, machine);
1583         case PERF_RECORD_READ:
1584                 dump_read(evsel, event);
1585                 return tool->read(tool, event, sample, evsel, machine);
1586         case PERF_RECORD_THROTTLE:
1587                 return tool->throttle(tool, event, sample, machine);
1588         case PERF_RECORD_UNTHROTTLE:
1589                 return tool->unthrottle(tool, event, sample, machine);
1590         case PERF_RECORD_AUX:
1591                 if (tool->aux == perf_event__process_aux) {
1592                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1593                                 evlist->stats.total_aux_lost += 1;
1594                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1595                                 evlist->stats.total_aux_partial += 1;
1596                         if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
1597                                 evlist->stats.total_aux_collision += 1;
1598                 }
1599                 return tool->aux(tool, event, sample, machine);
1600         case PERF_RECORD_ITRACE_START:
1601                 return tool->itrace_start(tool, event, sample, machine);
1602         case PERF_RECORD_SWITCH:
1603         case PERF_RECORD_SWITCH_CPU_WIDE:
1604                 return tool->context_switch(tool, event, sample, machine);
1605         case PERF_RECORD_KSYMBOL:
1606                 return tool->ksymbol(tool, event, sample, machine);
1607         case PERF_RECORD_BPF_EVENT:
1608                 return tool->bpf(tool, event, sample, machine);
1609         case PERF_RECORD_TEXT_POKE:
1610                 return tool->text_poke(tool, event, sample, machine);
1611         case PERF_RECORD_AUX_OUTPUT_HW_ID:
1612                 return tool->aux_output_hw_id(tool, event, sample, machine);
1613         default:
1614                 ++evlist->stats.nr_unknown_events;
1615                 return -1;
1616         }
1617 }
1618
1619 static int perf_session__deliver_event(struct perf_session *session,
1620                                        union perf_event *event,
1621                                        struct perf_tool *tool,
1622                                        u64 file_offset,
1623                                        const char *file_path)
1624 {
1625         struct perf_sample sample;
1626         int ret = evlist__parse_sample(session->evlist, event, &sample);
1627
1628         if (ret) {
1629                 pr_err("Can't parse sample, err = %d\n", ret);
1630                 return ret;
1631         }
1632
1633         ret = auxtrace__process_event(session, event, &sample, tool);
1634         if (ret < 0)
1635                 return ret;
1636         if (ret > 0)
1637                 return 0;
1638
1639         ret = machines__deliver_event(&session->machines, session->evlist,
1640                                       event, &sample, tool, file_offset, file_path);
1641
1642         if (dump_trace && sample.aux_sample.size)
1643                 auxtrace__dump_auxtrace_sample(session, &sample);
1644
1645         return ret;
1646 }
1647
1648 static s64 perf_session__process_user_event(struct perf_session *session,
1649                                             union perf_event *event,
1650                                             u64 file_offset,
1651                                             const char *file_path)
1652 {
1653         struct ordered_events *oe = &session->ordered_events;
1654         struct perf_tool *tool = session->tool;
1655         struct perf_sample sample = { .time = 0, };
1656         int fd = perf_data__fd(session->data);
1657         int err;
1658
1659         if (event->header.type != PERF_RECORD_COMPRESSED ||
1660             tool->compressed == perf_session__process_compressed_event_stub)
1661                 dump_event(session->evlist, event, file_offset, &sample, file_path);
1662
1663         /* These events are processed right away */
1664         switch (event->header.type) {
1665         case PERF_RECORD_HEADER_ATTR:
1666                 err = tool->attr(tool, event, &session->evlist);
1667                 if (err == 0) {
1668                         perf_session__set_id_hdr_size(session);
1669                         perf_session__set_comm_exec(session);
1670                 }
1671                 return err;
1672         case PERF_RECORD_EVENT_UPDATE:
1673                 return tool->event_update(tool, event, &session->evlist);
1674         case PERF_RECORD_HEADER_EVENT_TYPE:
1675                 /*
1676                  * Deprecated, but we need to handle it for the sake
1677                  * of old data files created in pipe mode.
1678                  */
1679                 return 0;
1680         case PERF_RECORD_HEADER_TRACING_DATA:
1681                 /*
1682                  * Set up for reading amidst the mmap, but only when we
1683                  * are in 'file' mode. The 'pipe' fd is already in the
1684                  * proper place.
1685                  */
1686                 if (!perf_data__is_pipe(session->data))
1687                         lseek(fd, file_offset, SEEK_SET);
1688                 return tool->tracing_data(session, event);
1689         case PERF_RECORD_HEADER_BUILD_ID:
1690                 return tool->build_id(session, event);
1691         case PERF_RECORD_FINISHED_ROUND:
1692                 return tool->finished_round(tool, event, oe);
1693         case PERF_RECORD_ID_INDEX:
1694                 return tool->id_index(session, event);
1695         case PERF_RECORD_AUXTRACE_INFO:
1696                 return tool->auxtrace_info(session, event);
1697         case PERF_RECORD_AUXTRACE:
1698                 /*
1699                  * Set up for reading amidst the mmap, but only when we
1700                  * are in 'file' mode. The 'pipe' fd is already in the
1701                  * proper place.
1702                  */
1703                 if (!perf_data__is_pipe(session->data))
1704                         lseek(fd, file_offset + event->header.size, SEEK_SET);
1705                 return tool->auxtrace(session, event);
1706         case PERF_RECORD_AUXTRACE_ERROR:
1707                 perf_session__auxtrace_error_inc(session, event);
1708                 return tool->auxtrace_error(session, event);
1709         case PERF_RECORD_THREAD_MAP:
1710                 return tool->thread_map(session, event);
1711         case PERF_RECORD_CPU_MAP:
1712                 return tool->cpu_map(session, event);
1713         case PERF_RECORD_STAT_CONFIG:
1714                 return tool->stat_config(session, event);
1715         case PERF_RECORD_STAT:
1716                 return tool->stat(session, event);
1717         case PERF_RECORD_STAT_ROUND:
1718                 return tool->stat_round(session, event);
1719         case PERF_RECORD_TIME_CONV:
1720                 session->time_conv = event->time_conv;
1721                 return tool->time_conv(session, event);
1722         case PERF_RECORD_HEADER_FEATURE:
1723                 return tool->feature(session, event);
1724         case PERF_RECORD_COMPRESSED:
1725                 err = tool->compressed(session, event, file_offset, file_path);
1726                 if (err)
1727                         dump_event(session->evlist, event, file_offset, &sample, file_path);
1728                 return err;
1729         case PERF_RECORD_FINISHED_INIT:
1730                 return tool->finished_init(session, event);
1731         default:
1732                 return -EINVAL;
1733         }
1734 }
1735
1736 int perf_session__deliver_synth_event(struct perf_session *session,
1737                                       union perf_event *event,
1738                                       struct perf_sample *sample)
1739 {
1740         struct evlist *evlist = session->evlist;
1741         struct perf_tool *tool = session->tool;
1742
1743         events_stats__inc(&evlist->stats, event->header.type);
1744
1745         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1746                 return perf_session__process_user_event(session, event, 0, NULL);
1747
1748         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
1749 }
1750
1751 static void event_swap(union perf_event *event, bool sample_id_all)
1752 {
1753         perf_event__swap_op swap;
1754
1755         swap = perf_event__swap_ops[event->header.type];
1756         if (swap)
1757                 swap(event, sample_id_all);
1758 }
1759
1760 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1761                              void *buf, size_t buf_sz,
1762                              union perf_event **event_ptr,
1763                              struct perf_sample *sample)
1764 {
1765         union perf_event *event;
1766         size_t hdr_sz, rest;
1767         int fd;
1768
1769         if (session->one_mmap && !session->header.needs_swap) {
1770                 event = file_offset - session->one_mmap_offset +
1771                         session->one_mmap_addr;
1772                 goto out_parse_sample;
1773         }
1774
1775         if (perf_data__is_pipe(session->data))
1776                 return -1;
1777
1778         fd = perf_data__fd(session->data);
1779         hdr_sz = sizeof(struct perf_event_header);
1780
1781         if (buf_sz < hdr_sz)
1782                 return -1;
1783
1784         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1785             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1786                 return -1;
1787
1788         event = (union perf_event *)buf;
1789
1790         if (session->header.needs_swap)
1791                 perf_event_header__bswap(&event->header);
1792
1793         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1794                 return -1;
1795
1796         buf += hdr_sz;
1797         rest = event->header.size - hdr_sz;
1798
1799         if (readn(fd, buf, rest) != (ssize_t)rest)
1800                 return -1;
1801
1802         if (session->header.needs_swap)
1803                 event_swap(event, evlist__sample_id_all(session->evlist));
1804
1805 out_parse_sample:
1806
1807         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1808             evlist__parse_sample(session->evlist, event, sample))
1809                 return -1;
1810
1811         *event_ptr = event;
1812
1813         return 0;
1814 }
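
/*
 * Usage sketch (the offset is assumed): peek at an event at a known
 * file offset without disturbing normal processing:
 *
 *	char buf[PERF_SAMPLE_MAX_SIZE];
 *	union perf_event *event;
 *
 *	if (perf_session__peek_event(session, file_offset, buf,
 *				     sizeof(buf), &event, NULL) == 0)
 *		pr_debug("peeked type %u\n", event->header.type);
 */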
1815
1816 int perf_session__peek_events(struct perf_session *session, u64 offset,
1817                               u64 size, peek_events_cb_t cb, void *data)
1818 {
1819         u64 max_offset = offset + size;
1820         char buf[PERF_SAMPLE_MAX_SIZE];
1821         union perf_event *event;
1822         int err;
1823
1824         do {
1825                 err = perf_session__peek_event(session, offset, buf,
1826                                                PERF_SAMPLE_MAX_SIZE, &event,
1827                                                NULL);
1828                 if (err)
1829                         return err;
1830
1831                 err = cb(session, event, offset, data);
1832                 if (err)
1833                         return err;
1834
1835                 offset += event->header.size;
1836                 if (event->header.type == PERF_RECORD_AUXTRACE)
1837                         offset += event->auxtrace.size;
1838
1839         } while (offset < max_offset);
1840
1841         return err;
1842 }
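
/*
 * Minimal callback sketch (hypothetical helper): count the events in
 * a file region; returning 0 keeps the iteration going:
 *
 *	static int count_event(struct perf_session *session __maybe_unused,
 *			       union perf_event *event __maybe_unused,
 *			       u64 offset __maybe_unused, void *data)
 *	{
 *		(*(u64 *)data)++;
 *		return 0;
 *	}
 *
 *	u64 nr = 0;
 *
 *	err = perf_session__peek_events(session, offset, size, count_event, &nr);
 */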
1843
1844 static s64 perf_session__process_event(struct perf_session *session,
1845                                        union perf_event *event, u64 file_offset,
1846                                        const char *file_path)
1847 {
1848         struct evlist *evlist = session->evlist;
1849         struct perf_tool *tool = session->tool;
1850         int ret;
1851
1852         if (session->header.needs_swap)
1853                 event_swap(event, evlist__sample_id_all(evlist));
1854
1855         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1856                 return -EINVAL;
1857
1858         events_stats__inc(&evlist->stats, event->header.type);
1859
1860         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1861                 return perf_session__process_user_event(session, event, file_offset, file_path);
1862
1863         if (tool->ordered_events) {
1864                 u64 timestamp = -1ULL;
1865
1866                 ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1867                 if (ret && ret != -1)
1868                         return ret;
1869
1870                 ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path);
1871                 if (ret != -ETIME)
1872                         return ret;
1873         }
1874
1875         return perf_session__deliver_event(session, event, tool, file_offset, file_path);
1876 }
1877
1878 void perf_event_header__bswap(struct perf_event_header *hdr)
1879 {
1880         hdr->type = bswap_32(hdr->type);
1881         hdr->misc = bswap_16(hdr->misc);
1882         hdr->size = bswap_16(hdr->size);
1883 }
1884
1885 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1886 {
1887         return machine__findnew_thread(&session->machines.host, -1, pid);
1888 }
1889
1890 int perf_session__register_idle_thread(struct perf_session *session)
1891 {
1892         struct thread *thread = machine__idle_thread(&session->machines.host);
1893
1894         /* machine__idle_thread() got the thread, so put it */
1895         thread__put(thread);
1896         return thread ? 0 : -1;
1897 }
1898
1899 static void
1900 perf_session__warn_order(const struct perf_session *session)
1901 {
1902         const struct ordered_events *oe = &session->ordered_events;
1903         struct evsel *evsel;
1904         bool should_warn = true;
1905
1906         evlist__for_each_entry(session->evlist, evsel) {
1907                 if (evsel->core.attr.write_backward)
1908                         should_warn = false;
1909         }
1910
1911         if (!should_warn)
1912                 return;
1913         if (oe->nr_unordered_events != 0)
1914                 ui__warning("%u out-of-order events recorded.\n", oe->nr_unordered_events);
1915 }
1916
1917 static void perf_session__warn_about_errors(const struct perf_session *session)
1918 {
1919         const struct events_stats *stats = &session->evlist->stats;
1920
1921         if (session->tool->lost == perf_event__process_lost &&
1922             stats->nr_events[PERF_RECORD_LOST] != 0) {
1923                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1924                             "Check IO/CPU overload!\n\n",
1925                             stats->nr_events[0],
1926                             stats->nr_events[PERF_RECORD_LOST]);
1927         }
1928
1929         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1930                 double drop_rate;
1931
1932                 drop_rate = (double)stats->total_lost_samples /
1933                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1934                 if (drop_rate > 0.05) {
1935                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1936                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1937                                     drop_rate * 100.0);
1938                 }
1939         }
1940
1941         if (session->tool->aux == perf_event__process_aux &&
1942             stats->total_aux_lost != 0) {
1943                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1944                             stats->total_aux_lost,
1945                             stats->nr_events[PERF_RECORD_AUX]);
1946         }
1947
1948         if (session->tool->aux == perf_event__process_aux &&
1949             stats->total_aux_partial != 0) {
1950                 bool vmm_exclusive = false;
1951
1952                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1953                                        &vmm_exclusive);
1954
1955                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1956                             "Are you running a KVM guest in the background?%s\n\n",
1957                             stats->total_aux_partial,
1958                             stats->nr_events[PERF_RECORD_AUX],
1959                             vmm_exclusive ?
1960                             "\nReloading kvm_intel module with vmm_exclusive=0\n"
1961                             "will reduce the gaps to only the guest's timeslices." :
1962                             "");
1963         }
1964
1965         if (session->tool->aux == perf_event__process_aux &&
1966             stats->total_aux_collision != 0) {
1967                 ui__warning("AUX data collision detected %" PRIu64 " times out of %u!\n\n",
1968                             stats->total_aux_collision,
1969                             stats->nr_events[PERF_RECORD_AUX]);
1970         }
1971
1972         if (stats->nr_unknown_events != 0) {
1973                 ui__warning("Found %u unknown events!\n\n"
1974                             "Is this an older tool processing a perf.data "
1975                             "file generated by a more recent tool?\n\n"
1976                             "If that is not the case, consider "
1977                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1978                             stats->nr_unknown_events);
1979         }
1980
1981         if (stats->nr_unknown_id != 0) {
1982                 ui__warning("%u samples with id not present in the header\n",
1983                             stats->nr_unknown_id);
1984         }
1985
1986         if (stats->nr_invalid_chains != 0) {
1987                 ui__warning("Found invalid callchains!\n\n"
1988                             "%u out of %u events were discarded for this reason.\n\n"
1989                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n"
1990                             stats->nr_invalid_chains,
1991                             stats->nr_events[PERF_RECORD_SAMPLE]);
1992         }
1993
1994         if (stats->nr_unprocessable_samples != 0) {
1995                 ui__warning("%u unprocessable samples recorded.\n"
1996                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1997                             stats->nr_unprocessable_samples);
1998         }
1999
2000         perf_session__warn_order(session);
2001
2002         events_stats__auxtrace_error_warn(stats);
2003
2004         if (stats->nr_proc_map_timeout != 0) {
2005                 ui__warning("%d map information files for pre-existing threads were\n"
2006                             "not processed; samples for their addresses will\n"
2007                             "not be resolved. You may find out which threads these\n"
2008                             "are by running with -v and redirecting the output\n"
2009                             "to a file.\n"
2010                             "Is the time limit to process the proc maps too short?\n"
2011                             "Increase it with --proc-map-timeout.\n",
2012                             stats->nr_proc_map_timeout);
2013         }
2014 }
2015
2016 static int perf_session__flush_thread_stack(struct thread *thread,
2017                                             void *p __maybe_unused)
2018 {
2019         return thread_stack__flush(thread);
2020 }
2021
2022 static int perf_session__flush_thread_stacks(struct perf_session *session)
2023 {
2024         return machines__for_each_thread(&session->machines,
2025                                          perf_session__flush_thread_stack,
2026                                          NULL);
2027 }
2028
2029 volatile sig_atomic_t session_done;
2030
2031 static int __perf_session__process_decomp_events(struct perf_session *session);
2032
2033 static int __perf_session__process_pipe_events(struct perf_session *session)
2034 {
2035         struct ordered_events *oe = &session->ordered_events;
2036         struct perf_tool *tool = session->tool;
2037         union perf_event *event;
2038         uint32_t size, cur_size = 0;
2039         void *buf = NULL;
2040         s64 skip = 0;
2041         u64 head;
2042         ssize_t err;
2043         void *p;
2044
2045         perf_tool__fill_defaults(tool);
2046
2047         head = 0;
2048         cur_size = sizeof(union perf_event);
2049
2050         buf = malloc(cur_size);
2051         if (!buf)
2052                 return -errno;
2053         ordered_events__set_copy_on_queue(oe, true);
2054 more:
2055         event = buf;
2056         err = perf_data__read(session->data, event,
2057                               sizeof(struct perf_event_header));
2058         if (err <= 0) {
2059                 if (err == 0)
2060                         goto done;
2061
2062                 pr_err("failed to read event header\n");
2063                 goto out_err;
2064         }
2065
2066         if (session->header.needs_swap)
2067                 perf_event_header__bswap(&event->header);
2068
2069         size = event->header.size;
2070         if (size < sizeof(struct perf_event_header)) {
2071                 pr_err("bad event header size\n");
2072                 goto out_err;
2073         }
2074
2075         if (size > cur_size) {
2076                 void *new = realloc(buf, size);
2077                 if (!new) {
2078                         pr_err("failed to allocate memory to read event\n");
2079                         goto out_err;
2080                 }
2081                 buf = new;
2082                 cur_size = size;
2083                 event = buf;
2084         }
2085         p = event;
2086         p += sizeof(struct perf_event_header);
2087
2088         if (size > sizeof(struct perf_event_header)) {
2089                 err = perf_data__read(session->data, p,
2090                                       size - sizeof(struct perf_event_header));
2091                 if (err <= 0) {
2092                         if (err == 0) {
2093                                 pr_err("unexpected end of event stream\n");
2094                                 goto done;
2095                         }
2096
2097                         pr_err("failed to read event data\n");
2098                         goto out_err;
2099                 }
2100         }
2101
2102         if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
2103                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2104                        head, event->header.size, event->header.type);
2105                 err = -EINVAL;
2106                 goto out_err;
2107         }
2108
2109         head += size;
2110
2111         if (skip > 0)
2112                 head += skip;
2113
2114         err = __perf_session__process_decomp_events(session);
2115         if (err)
2116                 goto out_err;
2117
2118         if (!session_done())
2119                 goto more;
2120 done:
2121         /* do the final flush for ordered samples */
2122         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2123         if (err)
2124                 goto out_err;
2125         err = auxtrace__flush_events(session, tool);
2126         if (err)
2127                 goto out_err;
2128         err = perf_session__flush_thread_stacks(session);
2129 out_err:
2130         free(buf);
2131         if (!tool->no_warn)
2132                 perf_session__warn_about_errors(session);
2133         ordered_events__free(&session->ordered_events);
2134         auxtrace__free_events(session);
2135         return err;
2136 }
2137
2138 static union perf_event *
2139 prefetch_event(char *buf, u64 head, size_t mmap_size,
2140                bool needs_swap, union perf_event *error)
2141 {
2142         union perf_event *event;
2143         u16 event_size;
2144
2145         /*
2146          * Ensure we have enough space remaining to read
2147          * the size of the event in the header.
2148          */
2149         if (head + sizeof(event->header) > mmap_size)
2150                 return NULL;
2151
2152         event = (union perf_event *)(buf + head);
2153         if (needs_swap)
2154                 perf_event_header__bswap(&event->header);
2155
2156         event_size = event->header.size;
2157         if (head + event_size <= mmap_size)
2158                 return event;
2159
2160         /* We're not fetching the event so swap back again */
2161         if (needs_swap)
2162                 perf_event_header__bswap(&event->header);
2163
2164         /* Check if the event fits into the next mmapped buf. */
2165         if (event_size <= mmap_size - head % page_size) {
2166                 /* Remap buf and fetch again. */
2167                 return NULL;
2168         }
2169
2170         /* Invalid input. Event size should never exceed mmap_size. */
2171         pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
2172                  " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
2173
2174         return error;
2175 }
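
/*
 * Worked example (numbers assumed): with the 8-byte perf_event_header,
 * head == mmap_size - 4 cannot even hold the header, so prefetch_event()
 * returns NULL and the caller remaps. If the header fits but
 * event_size == 64 while only 32 bytes remain in this map, NULL is
 * likewise returned as long as 64 bytes fit into a fresh map;
 * otherwise the event can never fit and the error cookie is returned.
 */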
2176
2177 static union perf_event *
2178 fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2179 {
2180         return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2181 }
2182
2183 static union perf_event *
2184 fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2185 {
2186         return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2187 }
2188
2189 static int __perf_session__process_decomp_events(struct perf_session *session)
2190 {
2191         s64 skip;
2192         u64 size;
2193         struct decomp *decomp = session->active_decomp->decomp_last;
2194
2195         if (!decomp)
2196                 return 0;
2197
2198         while (decomp->head < decomp->size && !session_done()) {
2199                 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2200                                                              session->header.needs_swap);
2201
2202                 if (!event)
2203                         break;
2204
2205                 size = event->header.size;
2206
2207                 if (size < sizeof(struct perf_event_header) ||
2208                     (skip = perf_session__process_event(session, event, decomp->file_pos,
2209                                                         decomp->file_path)) < 0) {
2210                         pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2211                                 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2212                         return -EINVAL;
2213                 }
2214
2215                 if (skip)
2216                         size += skip;
2217
2218                 decomp->head += size;
2219         }
2220
2221         return 0;
2222 }
2223
2224 /*
2225  * On 64bit we can mmap the data file in one go. No need for tiny mmap
2226  * slices. On 32bit we use 32MB.
2227  */
2228 #if BITS_PER_LONG == 64
2229 #define MMAP_SIZE ULLONG_MAX
2230 #define NUM_MMAPS 1
2231 #else
2232 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2233 #define NUM_MMAPS 128
2234 #endif
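
/*
 * Note that reader__mmap() below advances the slot with
 * "(rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1)", which relies on
 * NUM_MMAPS being a power of two (1 and 128 above).
 */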
2235
2236 struct reader;
2237
2238 typedef s64 (*reader_cb_t)(struct perf_session *session,
2239                            union perf_event *event,
2240                            u64 file_offset,
2241                            const char *file_path);
2242
2243 struct reader {
2244         int              fd;
2245         const char       *path;
2246         u64              data_size;
2247         u64              data_offset;
2248         reader_cb_t      process;
2249         bool             in_place_update;
2250         char             *mmaps[NUM_MMAPS];
2251         size_t           mmap_size;
2252         int              mmap_idx;
2253         char             *mmap_cur;
2254         u64              file_pos;
2255         u64              file_offset;
2256         u64              head;
2257         u64              size;
2258         bool             done;
2259         struct zstd_data   zstd_data;
2260         struct decomp_data decomp_data;
2261 };
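
/*
 * Lifecycle sketch (simplified; the real loop in
 * reader__process_events() also remaps on READER_NODATA and honours
 * session_done()):
 *
 *	struct reader rd = {
 *		.fd          = fd,
 *		.path        = path,
 *		.data_size   = data_size,
 *		.data_offset = data_offset,
 *		.process     = process_simple,
 *	};
 *
 *	if (!reader__init(&rd, NULL) && !reader__mmap(&rd, session))
 *		while (!reader__eof(&rd))
 *			reader__read_event(&rd, session, &prog);
 *	reader__release_decomp(&rd);
 */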
2262
2263 static int
2264 reader__init(struct reader *rd, bool *one_mmap)
2265 {
2266         u64 data_size = rd->data_size;
2267         char **mmaps = rd->mmaps;
2268
2269         rd->head = rd->data_offset;
2270         data_size += rd->data_offset;
2271
2272         rd->mmap_size = MMAP_SIZE;
2273         if (rd->mmap_size > data_size) {
2274                 rd->mmap_size = data_size;
2275                 if (one_mmap)
2276                         *one_mmap = true;
2277         }
2278
2279         memset(mmaps, 0, sizeof(rd->mmaps));
2280
2281         if (zstd_init(&rd->zstd_data, 0))
2282                 return -1;
2283         rd->decomp_data.zstd_decomp = &rd->zstd_data;
2284
2285         return 0;
2286 }
2287
2288 static void
2289 reader__release_decomp(struct reader *rd)
2290 {
2291         perf_decomp__release_events(rd->decomp_data.decomp);
2292         zstd_fini(&rd->zstd_data);
2293 }
2294
2295 static int
2296 reader__mmap(struct reader *rd, struct perf_session *session)
2297 {
2298         int mmap_prot, mmap_flags;
2299         char *buf, **mmaps = rd->mmaps;
2300         u64 page_offset;
2301
2302         mmap_prot  = PROT_READ;
2303         mmap_flags = MAP_SHARED;
2304
2305         if (rd->in_place_update) {
2306                 mmap_prot  |= PROT_WRITE;
2307         } else if (session->header.needs_swap) {
2308                 mmap_prot  |= PROT_WRITE;
2309                 mmap_flags = MAP_PRIVATE;
2310         }
2311
2312         if (mmaps[rd->mmap_idx]) {
2313                 munmap(mmaps[rd->mmap_idx], rd->mmap_size);
2314                 mmaps[rd->mmap_idx] = NULL;
2315         }
2316
2317         page_offset = page_size * (rd->head / page_size);
2318         rd->file_offset += page_offset;
2319         rd->head -= page_offset;
2320
2321         buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd,
2322                    rd->file_offset);
2323         if (buf == MAP_FAILED) {
2324                 pr_err("failed to mmap file\n");
2325                 return -errno;
2326         }
2327         mmaps[rd->mmap_idx] = rd->mmap_cur = buf;
2328         rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1);
2329         rd->file_pos = rd->file_offset + rd->head;
2330         if (session->one_mmap) {
2331                 session->one_mmap_addr = buf;
2332                 session->one_mmap_offset = rd->file_offset;
2333         }
2334
2335         return 0;
2336 }
2337
2338 enum {
2339         READER_OK,
2340         READER_NODATA,
2341 };
2342
2343 static int
2344 reader__read_event(struct reader *rd, struct perf_session *session,
2345                    struct ui_progress *prog)
2346 {
2347         u64 size;
2348         int err = READER_OK;
2349         union perf_event *event;
2350         s64 skip;
2351
2352         event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
2353                                    session->header.needs_swap);
2354         if (IS_ERR(event))
2355                 return PTR_ERR(event);
2356
2357         if (!event)
2358                 return READER_NODATA;
2359
2360         size = event->header.size;
2361
2362         skip = -EINVAL;
2363
2364         if (size < sizeof(struct perf_event_header) ||
2365             (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
2366                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2367                        rd->file_offset + rd->head, event->header.size,
2368                        event->header.type, strerror(-skip));
2369                 err = skip;
2370                 goto out;
2371         }
2372
2373         if (skip)
2374                 size += skip;
2375
2376         rd->size += size;
2377         rd->head += size;
2378         rd->file_pos += size;
2379
2380         err = __perf_session__process_decomp_events(session);
2381         if (err)
2382                 goto out;
2383
2384         ui_progress__update(prog, size);
2385
2386 out:
2387         return err;
2388 }
2389
2390 static inline bool
2391 reader__eof(struct reader *rd)
2392 {
2393         return (rd->file_pos >= rd->data_size + rd->data_offset);
2394 }
2395
2396 static int
2397 reader__process_events(struct reader *rd, struct perf_session *session,
2398                        struct ui_progress *prog)
2399 {
2400         int err;
2401
2402         err = reader__init(rd, &session->one_mmap);
2403         if (err)
2404                 goto out;
2405
2406         session->active_decomp = &rd->decomp_data;
2407
2408 remap:
2409         err = reader__mmap(rd, session);
2410         if (err)
2411                 goto out;
2412
2413 more:
2414         err = reader__read_event(rd, session, prog);
2415         if (err < 0)
2416                 goto out;
2417         else if (err == READER_NODATA)
2418                 goto remap;
2419
2420         if (session_done())
2421                 goto out;
2422
2423         if (!reader__eof(rd))
2424                 goto more;
2425
2426 out:
2427         session->active_decomp = &session->decomp_data;
2428         return err;
2429 }
2430
2431 static s64 process_simple(struct perf_session *session,
2432                           union perf_event *event,
2433                           u64 file_offset,
2434                           const char *file_path)
2435 {
2436         return perf_session__process_event(session, event, file_offset, file_path);
2437 }
2438
2439 static int __perf_session__process_events(struct perf_session *session)
2440 {
2441         struct reader rd = {
2442                 .fd             = perf_data__fd(session->data),
2443                 .path           = session->data->file.path,
2444                 .data_size      = session->header.data_size,
2445                 .data_offset    = session->header.data_offset,
2446                 .process        = process_simple,
2447                 .in_place_update = session->data->in_place_update,
2448         };
2449         struct ordered_events *oe = &session->ordered_events;
2450         struct perf_tool *tool = session->tool;
2451         struct ui_progress prog;
2452         int err;
2453
2454         perf_tool__fill_defaults(tool);
2455
2456         if (rd.data_size == 0)
2457                 return -1;
2458
2459         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2460
2461         err = reader__process_events(&rd, session, &prog);
2462         if (err)
2463                 goto out_err;
2464         /* do the final flush for ordered samples */
2465         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2466         if (err)
2467                 goto out_err;
2468         err = auxtrace__flush_events(session, tool);
2469         if (err)
2470                 goto out_err;
2471         err = perf_session__flush_thread_stacks(session);
2472 out_err:
2473         ui_progress__finish();
2474         if (!tool->no_warn)
2475                 perf_session__warn_about_errors(session);
2476         /*
2477          * We may be switching perf.data output, so make ordered_events
2478          * reusable.
2479          */
2480         ordered_events__reinit(&session->ordered_events);
2481         auxtrace__free_events(session);
2482         reader__release_decomp(&rd);
2483         session->one_mmap = false;
2484         return err;
2485 }
2486
2487 /*
2488  * Processing 2 MB of data from each reader in sequence,
2489  * because that's the way the ordered events sorting works
2490  * most efficiently.
2491  */
2492 #define READER_MAX_SIZE (2 * 1024 * 1024)
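
/*
 * E.g. (hypothetical layout): a directory with three non-empty per-cpu
 * files yields nr_readers == 4 below, rd[0] for the main file plus one
 * reader per data file, serviced round-robin in READER_MAX_SIZE batches.
 */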
2493
2494 /*
2495  * This function reads, merges and processes directory data.
2496  * It assumes version 1 of the directory data, where each
2497  * data file holds per-cpu data, already sorted by the kernel.
2498  */
2499 static int __perf_session__process_dir_events(struct perf_session *session)
2500 {
2501         struct perf_data *data = session->data;
2502         struct perf_tool *tool = session->tool;
2503         int i, ret, readers, nr_readers;
2504         struct ui_progress prog;
2505         u64 total_size = perf_data__size(session->data);
2506         struct reader *rd;
2507
2508         perf_tool__fill_defaults(tool);
2509
2510         ui_progress__init_size(&prog, total_size, "Sorting events...");
2511
2512         nr_readers = 1;
2513         for (i = 0; i < data->dir.nr; i++) {
2514                 if (data->dir.files[i].size)
2515                         nr_readers++;
2516         }
2517
2518         rd = zalloc(nr_readers * sizeof(struct reader));
2519         if (!rd)
2520                 return -ENOMEM;
2521
2522         rd[0] = (struct reader) {
2523                 .fd              = perf_data__fd(session->data),
2524                 .path            = session->data->file.path,
2525                 .data_size       = session->header.data_size,
2526                 .data_offset     = session->header.data_offset,
2527                 .process         = process_simple,
2528                 .in_place_update = session->data->in_place_update,
2529         };
2530         ret = reader__init(&rd[0], NULL);
2531         if (ret)
2532                 goto out_err;
2533         ret = reader__mmap(&rd[0], session);
2534         if (ret)
2535                 goto out_err;
2536         readers = 1;
2537
2538         for (i = 0; i < data->dir.nr; i++) {
2539                 if (!data->dir.files[i].size)
2540                         continue;
2541                 rd[readers] = (struct reader) {
2542                         .fd              = data->dir.files[i].fd,
2543                         .path            = data->dir.files[i].path,
2544                         .data_size       = data->dir.files[i].size,
2545                         .data_offset     = 0,
2546                         .process         = process_simple,
2547                         .in_place_update = session->data->in_place_update,
2548                 };
2549                 ret = reader__init(&rd[readers], NULL);
2550                 if (ret)
2551                         goto out_err;
2552                 ret = reader__mmap(&rd[readers], session);
2553                 if (ret)
2554                         goto out_err;
2555                 readers++;
2556         }
2557
2558         i = 0;
2559         while (readers) {
2560                 if (session_done())
2561                         break;
2562
2563                 if (rd[i].done) {
2564                         i = (i + 1) % nr_readers;
2565                         continue;
2566                 }
2567                 if (reader__eof(&rd[i])) {
2568                         rd[i].done = true;
2569                         readers--;
2570                         continue;
2571                 }
2572
2573                 session->active_decomp = &rd[i].decomp_data;
2574                 ret = reader__read_event(&rd[i], session, &prog);
2575                 if (ret < 0) {
2576                         goto out_err;
2577                 } else if (ret == READER_NODATA) {
2578                         ret = reader__mmap(&rd[i], session);
2579                         if (ret)
2580                                 goto out_err;
2581                 }
2582
2583                 if (rd[i].size >= READER_MAX_SIZE) {
2584                         rd[i].size = 0;
2585                         i = (i + 1) % nr_readers;
2586                 }
2587         }
2588
2589         ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
2590         if (ret)
2591                 goto out_err;
2592
2593         ret = perf_session__flush_thread_stacks(session);
2594 out_err:
2595         ui_progress__finish();
2596
2597         if (!tool->no_warn)
2598                 perf_session__warn_about_errors(session);
2599
2600         /*
2601          * We may be switching perf.data output, so make ordered_events
2602          * reusable.
2603          */
2604         ordered_events__reinit(&session->ordered_events);
2605
2606         session->one_mmap = false;
2607
2608         session->active_decomp = &session->decomp_data;
2609         for (i = 0; i < nr_readers; i++)
2610                 reader__release_decomp(&rd[i]);
2611         zfree(&rd);
2612
2613         return ret;
2614 }
2615
2616 int perf_session__process_events(struct perf_session *session)
2617 {
2618         if (perf_session__register_idle_thread(session) < 0)
2619                 return -ENOMEM;
2620
2621         if (perf_data__is_pipe(session->data))
2622                 return __perf_session__process_pipe_events(session);
2623
2624         if (perf_data__is_dir(session->data) && session->data->dir.nr)
2625                 return __perf_session__process_dir_events(session);
2626
2627         return __perf_session__process_events(session);
2628 }
2629
2630 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2631 {
2632         struct evsel *evsel;
2633
2634         evlist__for_each_entry(session->evlist, evsel) {
2635                 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2636                         return true;
2637         }
2638
2639         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2640         return false;
2641 }
2642
2643 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2644 {
2645         char *bracket;
2646         struct ref_reloc_sym *ref;
2647         struct kmap *kmap;
2648
2649         ref = zalloc(sizeof(struct ref_reloc_sym));
2650         if (ref == NULL)
2651                 return -ENOMEM;
2652
2653         ref->name = strdup(symbol_name);
2654         if (ref->name == NULL) {
2655                 free(ref);
2656                 return -ENOMEM;
2657         }
2658
2659         bracket = strchr(ref->name, ']');
2660         if (bracket)
2661                 *bracket = '\0';
2662
2663         ref->addr = addr;
2664
2665         kmap = map__kmap(map);
2666         if (kmap)
2667                 kmap->ref_reloc_sym = ref;
2668
2669         return 0;
2670 }
2671
2672 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2673 {
2674         return machines__fprintf_dsos(&session->machines, fp);
2675 }
2676
2677 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2678                                           bool (skip)(struct dso *dso, int parm), int parm)
2679 {
2680         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2681 }
2682
2683 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
2684                                        bool skip_empty)
2685 {
2686         size_t ret;
2687         const char *msg = "";
2688
2689         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2690                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2691
2692         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2693
2694         ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
2695         return ret;
2696 }
2697
2698 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2699 {
2700         /*
2701          * FIXME: Here we have to actually print all the machines in this
2702          * session, not just the host...
2703          */
2704         return machine__fprintf(&session->machines.host, fp);
2705 }
2706
2707 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2708                                               unsigned int type)
2709 {
2710         struct evsel *pos;
2711
2712         evlist__for_each_entry(session->evlist, pos) {
2713                 if (pos->core.attr.type == type)
2714                         return pos;
2715         }
2716         return NULL;
2717 }
2718
2719 int perf_session__cpu_bitmap(struct perf_session *session,
2720                              const char *cpu_list, unsigned long *cpu_bitmap)
2721 {
2722         int i, err = -1;
2723         struct perf_cpu_map *map;
2724         int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
2725
2726         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2727                 struct evsel *evsel;
2728
2729                 evsel = perf_session__find_first_evtype(session, i);
2730                 if (!evsel)
2731                         continue;
2732
2733                 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2734                         pr_err("File does not contain CPU events. "
2735                                "Remove -C option to proceed.\n");
2736                         return -1;
2737                 }
2738         }
2739
2740         map = perf_cpu_map__new(cpu_list);
2741         if (map == NULL) {
2742                 pr_err("Invalid cpu_list\n");
2743                 return -1;
2744         }
2745
2746         for (i = 0; i < perf_cpu_map__nr(map); i++) {
2747                 struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
2748
2749                 if (cpu.cpu >= nr_cpus) {
2750                         pr_err("Requested CPU %d too large. "
2751                                "Consider raising MAX_NR_CPUS\n", cpu.cpu);
2752                         goto out_delete_map;
2753                 }
2754
2755                 __set_bit(cpu.cpu, cpu_bitmap);
2756         }
2757
2758         err = 0;
2759
2760 out_delete_map:
2761         perf_cpu_map__put(map);
2762         return err;
2763 }
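
/*
 * Usage sketch (cpu_list and consumer are assumed), as done by tools
 * implementing -C:
 *
 *	static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 *
 *	if (perf_session__cpu_bitmap(session, "0-3", cpu_bitmap) == 0)
 *		use_bitmap(cpu_bitmap);	// bits 0-3 set; use_bitmap() is hypothetical
 */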
2764
2765 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2766                                 bool full)
2767 {
2768         if (session == NULL || fp == NULL)
2769                 return;
2770
2771         fprintf(fp, "# ========\n");
2772         perf_header__fprintf_info(session, fp, full);
2773         fprintf(fp, "# ========\n#\n");
2774 }
2775
2776 static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid)
2777 {
2778         struct machine *machine = machines__findnew(&session->machines, machine_pid);
2779         struct thread *thread;
2780
2781         if (!machine)
2782                 return -ENOMEM;
2783
2784         machine->single_address_space = session->machines.host.single_address_space;
2785
2786         thread = machine__idle_thread(machine);
2787         if (!thread)
2788                 return -ENOMEM;
2789         thread__put(thread);
2790
2791         machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid);
2792
2793         return 0;
2794 }
2795
2796 static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
2797                                        pid_t tid, int guest_cpu)
2798 {
2799         struct machine *machine = &session->machines.host;
2800         struct thread *thread = machine__findnew_thread(machine, pid, tid);
2801
2802         if (!thread)
2803                 return -ENOMEM;
2804         thread__set_guest_cpu(thread, guest_cpu);
2805         thread__put(thread);
2806
2807         return 0;
2808 }
2809
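/*
 * Process a PERF_RECORD_ID_INDEX event: map each sample id back to its
 * evsel index, CPU and tid, and, when the extended entries are present,
 * register any guest machines they reference.
 */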
2810 int perf_event__process_id_index(struct perf_session *session,
2811                                  union perf_event *event)
2812 {
2813         struct evlist *evlist = session->evlist;
2814         struct perf_record_id_index *ie = &event->id_index;
2815         size_t sz = ie->header.size - sizeof(*ie);
2816         size_t i, nr, max_nr;
2817         size_t e1_sz = sizeof(struct id_index_entry);
2818         size_t e2_sz = sizeof(struct id_index_entry_2);
2819         size_t etot_sz = e1_sz + e2_sz;
2820         struct id_index_entry_2 *e2;
2821         pid_t last_pid = 0;
2822
2823         max_nr = sz / e1_sz;
2824         nr = ie->nr;
2825         if (nr > max_nr) {
2826                 printf("Too big: nr %zu max_nr %zu\n", nr, max_nr);
2827                 return -EINVAL;
2828         }
2829
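        /*
         * An optional second array of id_index_entry_2 (machine_pid and
         * vcpu per id) may follow the primary entries; its presence is
         * inferred from the record size.
         */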
2830         if (sz >= nr * etot_sz) {
2831                 max_nr = sz / etot_sz;
2832                 if (nr > max_nr) {
2833                         printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr);
2834                         return -EINVAL;
2835                 }
2836                 e2 = (void *)ie + sizeof(*ie) + nr * e1_sz;
2837         } else {
2838                 e2 = NULL;
2839         }
2840
2841         if (dump_trace)
2842                 fprintf(stdout, " nr: %zu\n", nr);
2843
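        /* e and e2 walk their arrays in lockstep; e2 advances only when present. */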
2844         for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) {
2845                 struct id_index_entry *e = &ie->entries[i];
2846                 struct perf_sample_id *sid;
2847                 int ret;
2848
2849                 if (dump_trace) {
2850                         fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2851                         fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2852                         fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2853                         fprintf(stdout, "  tid: %"PRI_ld64, e->tid);
2854                         if (e2) {
2855                                 fprintf(stdout, "  machine_pid: %"PRI_ld64, e2->machine_pid);
2856                                 fprintf(stdout, "  vcpu: %"PRI_lu64"\n", e2->vcpu);
2857                         } else {
2858                                 fprintf(stdout, "\n");
2859                         }
2860                 }
2861
2862                 sid = evlist__id2sid(evlist, e->id);
2863                 if (!sid)
2864                         return -ENOENT;
2865
2866                 sid->idx = e->idx;
2867                 sid->cpu.cpu = e->cpu;
2868                 sid->tid = e->tid;
2869
2870                 if (!e2)
2871                         continue;
2872
2873                 sid->machine_pid = e2->machine_pid;
2874                 sid->vcpu.cpu = e2->vcpu;
2875
2876                 if (!sid->machine_pid)
2877                         continue;
2878
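                /*
                 * A non-zero machine_pid denotes a guest machine;
                 * register each distinct guest only once.
                 */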
2879                 if (sid->machine_pid != last_pid) {
2880                         ret = perf_session__register_guest(session, sid->machine_pid);
2881                         if (ret)
2882                                 return ret;
2883                         last_pid = sid->machine_pid;
2884                         perf_guest = true;
2885                 }
2886
2887                 ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu);
2888                 if (ret)
2889                         return ret;
2890         }
2891         return 0;
2892 }