tools/perf/util/session.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <errno.h>
3 #include <inttypes.h>
4 #include <linux/err.h>
5 #include <linux/kernel.h>
6 #include <linux/zalloc.h>
7 #include <api/fs/fs.h>
8
9 #include <byteswap.h>
10 #include <unistd.h>
11 #include <sys/types.h>
12 #include <sys/mman.h>
13 #include <perf/cpumap.h>
14
15 #include "map_symbol.h"
16 #include "branch.h"
17 #include "debug.h"
18 #include "env.h"
19 #include "evlist.h"
20 #include "evsel.h"
21 #include "memswap.h"
22 #include "map.h"
23 #include "symbol.h"
24 #include "session.h"
25 #include "tool.h"
26 #include "perf_regs.h"
27 #include "asm/bug.h"
28 #include "auxtrace.h"
29 #include "thread.h"
30 #include "thread-stack.h"
31 #include "sample-raw.h"
32 #include "stat.h"
33 #include "tsc.h"
34 #include "ui/progress.h"
35 #include "../perf.h"
36 #include "arch/common.h"
37 #include "units.h"
38 #include <internal/lib.h>
39
40 #ifdef HAVE_ZSTD_SUPPORT
41 static int perf_session__process_compressed_event(struct perf_session *session,
42                                                   union perf_event *event, u64 file_offset,
43                                                   const char *file_path)
44 {
45         void *src;
46         size_t decomp_size, src_size;
47         u64 decomp_last_rem = 0;
48         size_t mmap_len, decomp_len = session->header.env.comp_mmap_len;
49         struct decomp *decomp, *decomp_last = session->active_decomp->decomp_last;
50
51         if (decomp_last) {
52                 decomp_last_rem = decomp_last->size - decomp_last->head;
53                 decomp_len += decomp_last_rem;
54         }
55
56         mmap_len = sizeof(struct decomp) + decomp_len;
57         decomp = mmap(NULL, mmap_len, PROT_READ|PROT_WRITE,
58                       MAP_ANONYMOUS|MAP_PRIVATE, -1, 0);
59         if (decomp == MAP_FAILED) {
60                 pr_err("Couldn't allocate memory for decompression\n");
61                 return -1;
62         }
63
64         decomp->file_pos = file_offset;
65         decomp->file_path = file_path;
66         decomp->mmap_len = mmap_len;
67         decomp->head = 0;
68
69         if (decomp_last_rem) {
70                 memcpy(decomp->data, &(decomp_last->data[decomp_last->head]), decomp_last_rem);
71                 decomp->size = decomp_last_rem;
72         }
73
74         src = (void *)event + sizeof(struct perf_record_compressed);
75         src_size = event->pack.header.size - sizeof(struct perf_record_compressed);
76
77         decomp_size = zstd_decompress_stream(session->active_decomp->zstd_decomp, src, src_size,
78                                 &(decomp->data[decomp_last_rem]), decomp_len - decomp_last_rem);
79         if (!decomp_size) {
80                 munmap(decomp, mmap_len);
81                 pr_err("Couldn't decompress data\n");
82                 return -1;
83         }
84
85         decomp->size += decomp_size;
86
87         if (session->active_decomp->decomp == NULL)
88                 session->active_decomp->decomp = decomp;
89         else
90                 session->active_decomp->decomp_last->next = decomp;
91
92         session->active_decomp->decomp_last = decomp;
93
94         pr_debug("decomp (B): %zd to %zd\n", src_size, decomp_size);
95
96         return 0;
97 }
98 #else /* !HAVE_ZSTD_SUPPORT */
99 #define perf_session__process_compressed_event perf_session__process_compressed_event_stub
100 #endif
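/*
 * Note on the zstd path above: any bytes left unconsumed in the previous
 * decomp buffer (size - head) are copied to the front of the new mapping
 * before the fresh COMPRESSED payload is inflated after them, so an event
 * record that straddles two COMPRESSED records can still be parsed in one
 * piece.  A minimal sketch of that carry-over, with hypothetical sizes:
 *
 *	prev->size = 4096, prev->head = 4000         => rem = 96
 *	mmap_len   = sizeof(struct decomp) + comp_mmap_len + 96
 *	memcpy(new->data, &prev->data[4000], 96);       new->size = 96
 *	zstd_decompress_stream(..., &new->data[96], comp_mmap_len);
 */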
101
102 static int perf_session__deliver_event(struct perf_session *session,
103                                        union perf_event *event,
104                                        struct perf_tool *tool,
105                                        u64 file_offset,
106                                        const char *file_path);
107
108 static int perf_session__open(struct perf_session *session, int repipe_fd)
109 {
110         struct perf_data *data = session->data;
111
112         if (perf_session__read_header(session, repipe_fd) < 0) {
113                 pr_err("incompatible file format (rerun with -v to learn more)\n");
114                 return -1;
115         }
116
117         if (perf_data__is_pipe(data))
118                 return 0;
119
120         if (perf_header__has_feat(&session->header, HEADER_STAT))
121                 return 0;
122
123         if (!evlist__valid_sample_type(session->evlist)) {
124                 pr_err("non matching sample_type\n");
125                 return -1;
126         }
127
128         if (!evlist__valid_sample_id_all(session->evlist)) {
129                 pr_err("non matching sample_id_all\n");
130                 return -1;
131         }
132
133         if (!evlist__valid_read_format(session->evlist)) {
134                 pr_err("non matching read_format\n");
135                 return -1;
136         }
137
138         return 0;
139 }
140
141 void perf_session__set_id_hdr_size(struct perf_session *session)
142 {
143         u16 id_hdr_size = evlist__id_hdr_size(session->evlist);
144
145         machines__set_id_hdr_size(&session->machines, id_hdr_size);
146 }
147
148 int perf_session__create_kernel_maps(struct perf_session *session)
149 {
150         int ret = machine__create_kernel_maps(&session->machines.host);
151
152         if (ret >= 0)
153                 ret = machines__create_guest_kernel_maps(&session->machines);
154         return ret;
155 }
156
157 static void perf_session__destroy_kernel_maps(struct perf_session *session)
158 {
159         machines__destroy_kernel_maps(&session->machines);
160 }
161
162 static bool perf_session__has_comm_exec(struct perf_session *session)
163 {
164         struct evsel *evsel;
165
166         evlist__for_each_entry(session->evlist, evsel) {
167                 if (evsel->core.attr.comm_exec)
168                         return true;
169         }
170
171         return false;
172 }
173
174 static void perf_session__set_comm_exec(struct perf_session *session)
175 {
176         bool comm_exec = perf_session__has_comm_exec(session);
177
178         machines__set_comm_exec(&session->machines, comm_exec);
179 }
180
181 static int ordered_events__deliver_event(struct ordered_events *oe,
182                                          struct ordered_event *event)
183 {
184         struct perf_session *session = container_of(oe, struct perf_session,
185                                                     ordered_events);
186
187         return perf_session__deliver_event(session, event->event,
188                                            session->tool, event->file_offset,
189                                            event->file_path);
190 }
191
192 struct perf_session *__perf_session__new(struct perf_data *data,
193                                          bool repipe, int repipe_fd,
194                                          struct perf_tool *tool)
195 {
196         int ret = -ENOMEM;
197         struct perf_session *session = zalloc(sizeof(*session));
198
199         if (!session)
200                 goto out;
201
202         session->repipe = repipe;
203         session->tool   = tool;
204         session->decomp_data.zstd_decomp = &session->zstd_data;
205         session->active_decomp = &session->decomp_data;
206         INIT_LIST_HEAD(&session->auxtrace_index);
207         machines__init(&session->machines);
208         ordered_events__init(&session->ordered_events,
209                              ordered_events__deliver_event, NULL);
210
211         perf_env__init(&session->header.env);
212         if (data) {
213                 ret = perf_data__open(data);
214                 if (ret < 0)
215                         goto out_delete;
216
217                 session->data = data;
218
219                 if (perf_data__is_read(data)) {
220                         ret = perf_session__open(session, repipe_fd);
221                         if (ret < 0)
222                                 goto out_delete;
223
224                         /*
225                          * set session attributes that are present in perf.data
226                          * but not in pipe-mode.
227                          */
228                         if (!data->is_pipe) {
229                                 perf_session__set_id_hdr_size(session);
230                                 perf_session__set_comm_exec(session);
231                         }
232
233                         evlist__init_trace_event_sample_raw(session->evlist);
234
235                         /* Open the directory data. */
236                         if (data->is_dir) {
237                                 ret = perf_data__open_dir(data);
238                                 if (ret)
239                                         goto out_delete;
240                         }
241
242                         if (!symbol_conf.kallsyms_name &&
243                             !symbol_conf.vmlinux_name)
244                                 symbol_conf.kallsyms_name = perf_data__kallsyms_name(data);
245                 }
246         } else  {
247                 session->machines.host.env = &perf_env;
248         }
249
250         session->machines.host.single_address_space =
251                 perf_env__single_address_space(session->machines.host.env);
252
253         if (!data || perf_data__is_write(data)) {
254                 /*
255                  * In O_RDONLY mode this will be performed when reading the
256                  * kernel MMAP event, in perf_event__process_mmap().
257                  */
258                 if (perf_session__create_kernel_maps(session) < 0)
259                         pr_warning("Cannot read kernel map\n");
260         }
261
262         /*
263          * In pipe-mode, evlist is empty until PERF_RECORD_HEADER_ATTR is
264          * processed, so evlist__sample_id_all is not meaningful here.
265          */
266         if ((!data || !data->is_pipe) && tool && tool->ordering_requires_timestamps &&
267             tool->ordered_events && !evlist__sample_id_all(session->evlist)) {
268                 dump_printf("WARNING: No sample_id_all support, falling back to unordered processing\n");
269                 tool->ordered_events = false;
270         }
271
272         return session;
273
274  out_delete:
275         perf_session__delete(session);
276  out:
277         return ERR_PTR(ret);
278 }
279
280 static void perf_session__delete_threads(struct perf_session *session)
281 {
282         machine__delete_threads(&session->machines.host);
283 }
284
285 static void perf_decomp__release_events(struct decomp *next)
286 {
287         struct decomp *decomp;
288         size_t mmap_len;
289
290         do {
291                 decomp = next;
292                 if (decomp == NULL)
293                         break;
294                 next = decomp->next;
295                 mmap_len = decomp->mmap_len;
296                 munmap(decomp, mmap_len);
297         } while (1);
298 }
299
300 void perf_session__delete(struct perf_session *session)
301 {
302         if (session == NULL)
303                 return;
304         auxtrace__free(session);
305         auxtrace_index__free(&session->auxtrace_index);
306         perf_session__destroy_kernel_maps(session);
307         perf_session__delete_threads(session);
308         perf_decomp__release_events(session->decomp_data.decomp);
309         perf_env__exit(&session->header.env);
310         machines__exit(&session->machines);
311         if (session->data) {
312                 if (perf_data__is_read(session->data))
313                         evlist__delete(session->evlist);
314                 perf_data__close(session->data);
315         }
316         trace_event__cleanup(&session->tevent);
317         free(session);
318 }
319
320 static int process_event_synth_tracing_data_stub(struct perf_session *session
321                                                  __maybe_unused,
322                                                  union perf_event *event
323                                                  __maybe_unused)
324 {
325         dump_printf(": unhandled!\n");
326         return 0;
327 }
328
329 static int process_event_synth_attr_stub(struct perf_tool *tool __maybe_unused,
330                                          union perf_event *event __maybe_unused,
331                                          struct evlist **pevlist
332                                          __maybe_unused)
333 {
334         dump_printf(": unhandled!\n");
335         return 0;
336 }
337
338 static int process_event_synth_event_update_stub(struct perf_tool *tool __maybe_unused,
339                                                  union perf_event *event __maybe_unused,
340                                                  struct evlist **pevlist
341                                                  __maybe_unused)
342 {
343         if (dump_trace)
344                 perf_event__fprintf_event_update(event, stdout);
345
346         dump_printf(": unhandled!\n");
347         return 0;
348 }
349
350 static int process_event_sample_stub(struct perf_tool *tool __maybe_unused,
351                                      union perf_event *event __maybe_unused,
352                                      struct perf_sample *sample __maybe_unused,
353                                      struct evsel *evsel __maybe_unused,
354                                      struct machine *machine __maybe_unused)
355 {
356         dump_printf(": unhandled!\n");
357         return 0;
358 }
359
360 static int process_event_stub(struct perf_tool *tool __maybe_unused,
361                               union perf_event *event __maybe_unused,
362                               struct perf_sample *sample __maybe_unused,
363                               struct machine *machine __maybe_unused)
364 {
365         dump_printf(": unhandled!\n");
366         return 0;
367 }
368
369 static int process_finished_round_stub(struct perf_tool *tool __maybe_unused,
370                                        union perf_event *event __maybe_unused,
371                                        struct ordered_events *oe __maybe_unused)
372 {
373         dump_printf(": unhandled!\n");
374         return 0;
375 }
376
377 static int skipn(int fd, off_t n)
378 {
379         char buf[4096];
380         ssize_t ret;
381
382         while (n > 0) {
383                 ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
384                 if (ret <= 0)
385                         return ret;
386                 n -= ret;
387         }
388
389         return 0;
390 }
391
392 static s64 process_event_auxtrace_stub(struct perf_session *session __maybe_unused,
393                                        union perf_event *event)
394 {
395         dump_printf(": unhandled!\n");
396         if (perf_data__is_pipe(session->data))
397                 skipn(perf_data__fd(session->data), event->auxtrace.size);
398         return event->auxtrace.size;
399 }
400
401 static int process_event_op2_stub(struct perf_session *session __maybe_unused,
402                                   union perf_event *event __maybe_unused)
403 {
404         dump_printf(": unhandled!\n");
405         return 0;
406 }
407
408
409 static
410 int process_event_thread_map_stub(struct perf_session *session __maybe_unused,
411                                   union perf_event *event __maybe_unused)
412 {
413         if (dump_trace)
414                 perf_event__fprintf_thread_map(event, stdout);
415
416         dump_printf(": unhandled!\n");
417         return 0;
418 }
419
420 static
421 int process_event_cpu_map_stub(struct perf_session *session __maybe_unused,
422                                union perf_event *event __maybe_unused)
423 {
424         if (dump_trace)
425                 perf_event__fprintf_cpu_map(event, stdout);
426
427         dump_printf(": unhandled!\n");
428         return 0;
429 }
430
431 static
432 int process_event_stat_config_stub(struct perf_session *session __maybe_unused,
433                                    union perf_event *event __maybe_unused)
434 {
435         if (dump_trace)
436                 perf_event__fprintf_stat_config(event, stdout);
437
438         dump_printf(": unhandled!\n");
439         return 0;
440 }
441
442 static int process_stat_stub(struct perf_session *perf_session __maybe_unused,
443                              union perf_event *event)
444 {
445         if (dump_trace)
446                 perf_event__fprintf_stat(event, stdout);
447
448         dump_printf(": unhandled!\n");
449         return 0;
450 }
451
452 static int process_stat_round_stub(struct perf_session *perf_session __maybe_unused,
453                                    union perf_event *event)
454 {
455         if (dump_trace)
456                 perf_event__fprintf_stat_round(event, stdout);
457
458         dump_printf(": unhandled!\n");
459         return 0;
460 }
461
462 static int process_event_time_conv_stub(struct perf_session *perf_session __maybe_unused,
463                                         union perf_event *event)
464 {
465         if (dump_trace)
466                 perf_event__fprintf_time_conv(event, stdout);
467
468         dump_printf(": unhandled!\n");
469         return 0;
470 }
471
472 static int perf_session__process_compressed_event_stub(struct perf_session *session __maybe_unused,
473                                                        union perf_event *event __maybe_unused,
474                                                        u64 file_offset __maybe_unused,
475                                                        const char *file_path __maybe_unused)
476 {
477        dump_printf(": unhandled!\n");
478        return 0;
479 }
480
481 void perf_tool__fill_defaults(struct perf_tool *tool)
482 {
483         if (tool->sample == NULL)
484                 tool->sample = process_event_sample_stub;
485         if (tool->mmap == NULL)
486                 tool->mmap = process_event_stub;
487         if (tool->mmap2 == NULL)
488                 tool->mmap2 = process_event_stub;
489         if (tool->comm == NULL)
490                 tool->comm = process_event_stub;
491         if (tool->namespaces == NULL)
492                 tool->namespaces = process_event_stub;
493         if (tool->cgroup == NULL)
494                 tool->cgroup = process_event_stub;
495         if (tool->fork == NULL)
496                 tool->fork = process_event_stub;
497         if (tool->exit == NULL)
498                 tool->exit = process_event_stub;
499         if (tool->lost == NULL)
500                 tool->lost = perf_event__process_lost;
501         if (tool->lost_samples == NULL)
502                 tool->lost_samples = perf_event__process_lost_samples;
503         if (tool->aux == NULL)
504                 tool->aux = perf_event__process_aux;
505         if (tool->itrace_start == NULL)
506                 tool->itrace_start = perf_event__process_itrace_start;
507         if (tool->context_switch == NULL)
508                 tool->context_switch = perf_event__process_switch;
509         if (tool->ksymbol == NULL)
510                 tool->ksymbol = perf_event__process_ksymbol;
511         if (tool->bpf == NULL)
512                 tool->bpf = perf_event__process_bpf;
513         if (tool->text_poke == NULL)
514                 tool->text_poke = perf_event__process_text_poke;
515         if (tool->aux_output_hw_id == NULL)
516                 tool->aux_output_hw_id = perf_event__process_aux_output_hw_id;
517         if (tool->read == NULL)
518                 tool->read = process_event_sample_stub;
519         if (tool->throttle == NULL)
520                 tool->throttle = process_event_stub;
521         if (tool->unthrottle == NULL)
522                 tool->unthrottle = process_event_stub;
523         if (tool->attr == NULL)
524                 tool->attr = process_event_synth_attr_stub;
525         if (tool->event_update == NULL)
526                 tool->event_update = process_event_synth_event_update_stub;
527         if (tool->tracing_data == NULL)
528                 tool->tracing_data = process_event_synth_tracing_data_stub;
529         if (tool->build_id == NULL)
530                 tool->build_id = process_event_op2_stub;
531         if (tool->finished_round == NULL) {
532                 if (tool->ordered_events)
533                         tool->finished_round = perf_event__process_finished_round;
534                 else
535                         tool->finished_round = process_finished_round_stub;
536         }
537         if (tool->id_index == NULL)
538                 tool->id_index = process_event_op2_stub;
539         if (tool->auxtrace_info == NULL)
540                 tool->auxtrace_info = process_event_op2_stub;
541         if (tool->auxtrace == NULL)
542                 tool->auxtrace = process_event_auxtrace_stub;
543         if (tool->auxtrace_error == NULL)
544                 tool->auxtrace_error = process_event_op2_stub;
545         if (tool->thread_map == NULL)
546                 tool->thread_map = process_event_thread_map_stub;
547         if (tool->cpu_map == NULL)
548                 tool->cpu_map = process_event_cpu_map_stub;
549         if (tool->stat_config == NULL)
550                 tool->stat_config = process_event_stat_config_stub;
551         if (tool->stat == NULL)
552                 tool->stat = process_stat_stub;
553         if (tool->stat_round == NULL)
554                 tool->stat_round = process_stat_round_stub;
555         if (tool->time_conv == NULL)
556                 tool->time_conv = process_event_time_conv_stub;
557         if (tool->feature == NULL)
558                 tool->feature = process_event_op2_stub;
559         if (tool->compressed == NULL)
560                 tool->compressed = perf_session__process_compressed_event;
561         if (tool->finished_init == NULL)
562                 tool->finished_init = process_event_op2_stub;
563 }
564
565 static void swap_sample_id_all(union perf_event *event, void *data)
566 {
567         void *end = (void *) event + event->header.size;
568         int size = end - data;
569
570         BUG_ON(size % sizeof(u64));
571         mem_bswap_64(data, size);
572 }
573
574 static void perf_event__all64_swap(union perf_event *event,
575                                    bool sample_id_all __maybe_unused)
576 {
577         struct perf_event_header *hdr = &event->header;
578         mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
579 }
580
581 static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
582 {
583         event->comm.pid = bswap_32(event->comm.pid);
584         event->comm.tid = bswap_32(event->comm.tid);
585
586         if (sample_id_all) {
587                 void *data = &event->comm.comm;
588
589                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
590                 swap_sample_id_all(event, data);
591         }
592 }
593
594 static void perf_event__mmap_swap(union perf_event *event,
595                                   bool sample_id_all)
596 {
597         event->mmap.pid   = bswap_32(event->mmap.pid);
598         event->mmap.tid   = bswap_32(event->mmap.tid);
599         event->mmap.start = bswap_64(event->mmap.start);
600         event->mmap.len   = bswap_64(event->mmap.len);
601         event->mmap.pgoff = bswap_64(event->mmap.pgoff);
602
603         if (sample_id_all) {
604                 void *data = &event->mmap.filename;
605
606                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
607                 swap_sample_id_all(event, data);
608         }
609 }
610
611 static void perf_event__mmap2_swap(union perf_event *event,
612                                   bool sample_id_all)
613 {
614         event->mmap2.pid   = bswap_32(event->mmap2.pid);
615         event->mmap2.tid   = bswap_32(event->mmap2.tid);
616         event->mmap2.start = bswap_64(event->mmap2.start);
617         event->mmap2.len   = bswap_64(event->mmap2.len);
618         event->mmap2.pgoff = bswap_64(event->mmap2.pgoff);
619
620         if (!(event->header.misc & PERF_RECORD_MISC_MMAP_BUILD_ID)) {
621                 event->mmap2.maj   = bswap_32(event->mmap2.maj);
622                 event->mmap2.min   = bswap_32(event->mmap2.min);
623                 event->mmap2.ino   = bswap_64(event->mmap2.ino);
624                 event->mmap2.ino_generation = bswap_64(event->mmap2.ino_generation);
625         }
626
627         if (sample_id_all) {
628                 void *data = &event->mmap2.filename;
629
630                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
631                 swap_sample_id_all(event, data);
632         }
633 }
634 static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
635 {
636         event->fork.pid  = bswap_32(event->fork.pid);
637         event->fork.tid  = bswap_32(event->fork.tid);
638         event->fork.ppid = bswap_32(event->fork.ppid);
639         event->fork.ptid = bswap_32(event->fork.ptid);
640         event->fork.time = bswap_64(event->fork.time);
641
642         if (sample_id_all)
643                 swap_sample_id_all(event, &event->fork + 1);
644 }
645
646 static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
647 {
648         event->read.pid          = bswap_32(event->read.pid);
649         event->read.tid          = bswap_32(event->read.tid);
650         event->read.value        = bswap_64(event->read.value);
651         event->read.time_enabled = bswap_64(event->read.time_enabled);
652         event->read.time_running = bswap_64(event->read.time_running);
653         event->read.id           = bswap_64(event->read.id);
654
655         if (sample_id_all)
656                 swap_sample_id_all(event, &event->read + 1);
657 }
658
659 static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
660 {
661         event->aux.aux_offset = bswap_64(event->aux.aux_offset);
662         event->aux.aux_size   = bswap_64(event->aux.aux_size);
663         event->aux.flags      = bswap_64(event->aux.flags);
664
665         if (sample_id_all)
666                 swap_sample_id_all(event, &event->aux + 1);
667 }
668
669 static void perf_event__itrace_start_swap(union perf_event *event,
670                                           bool sample_id_all)
671 {
672         event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
673         event->itrace_start.tid  = bswap_32(event->itrace_start.tid);
674
675         if (sample_id_all)
676                 swap_sample_id_all(event, &event->itrace_start + 1);
677 }
678
679 static void perf_event__switch_swap(union perf_event *event, bool sample_id_all)
680 {
681         if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE) {
682                 event->context_switch.next_prev_pid =
683                                 bswap_32(event->context_switch.next_prev_pid);
684                 event->context_switch.next_prev_tid =
685                                 bswap_32(event->context_switch.next_prev_tid);
686         }
687
688         if (sample_id_all)
689                 swap_sample_id_all(event, &event->context_switch + 1);
690 }
691
692 static void perf_event__text_poke_swap(union perf_event *event, bool sample_id_all)
693 {
694         event->text_poke.addr    = bswap_64(event->text_poke.addr);
695         event->text_poke.old_len = bswap_16(event->text_poke.old_len);
696         event->text_poke.new_len = bswap_16(event->text_poke.new_len);
697
698         if (sample_id_all) {
699                 size_t len = sizeof(event->text_poke.old_len) +
700                              sizeof(event->text_poke.new_len) +
701                              event->text_poke.old_len +
702                              event->text_poke.new_len;
703                 void *data = &event->text_poke.old_len;
704
705                 data += PERF_ALIGN(len, sizeof(u64));
706                 swap_sample_id_all(event, data);
707         }
708 }
709
710 static void perf_event__throttle_swap(union perf_event *event,
711                                       bool sample_id_all)
712 {
713         event->throttle.time      = bswap_64(event->throttle.time);
714         event->throttle.id        = bswap_64(event->throttle.id);
715         event->throttle.stream_id = bswap_64(event->throttle.stream_id);
716
717         if (sample_id_all)
718                 swap_sample_id_all(event, &event->throttle + 1);
719 }
720
721 static void perf_event__namespaces_swap(union perf_event *event,
722                                         bool sample_id_all)
723 {
724         u64 i;
725
726         event->namespaces.pid           = bswap_32(event->namespaces.pid);
727         event->namespaces.tid           = bswap_32(event->namespaces.tid);
728         event->namespaces.nr_namespaces = bswap_64(event->namespaces.nr_namespaces);
729
730         for (i = 0; i < event->namespaces.nr_namespaces; i++) {
731                 struct perf_ns_link_info *ns = &event->namespaces.link_info[i];
732
733                 ns->dev = bswap_64(ns->dev);
734                 ns->ino = bswap_64(ns->ino);
735         }
736
737         if (sample_id_all)
738                 swap_sample_id_all(event, &event->namespaces.link_info[i]);
739 }
740
741 static void perf_event__cgroup_swap(union perf_event *event, bool sample_id_all)
742 {
743         event->cgroup.id = bswap_64(event->cgroup.id);
744
745         if (sample_id_all) {
746                 void *data = &event->cgroup.path;
747
748                 data += PERF_ALIGN(strlen(data) + 1, sizeof(u64));
749                 swap_sample_id_all(event, data);
750         }
751 }
752
753 static u8 revbyte(u8 b)
754 {
755         int rev = (b >> 4) | ((b & 0xf) << 4);
756         rev = ((rev & 0xcc) >> 2) | ((rev & 0x33) << 2);
757         rev = ((rev & 0xaa) >> 1) | ((rev & 0x55) << 1);
758         return (u8) rev;
759 }
760
761 /*
762  * XXX this is a hack in an attempt to carry the flags bitfield
763  * through the endianness conversion. ABI says:
764  *
765  * Bit-fields are allocated from right to left (least to most significant)
766  * on little-endian implementations and from left to right (most to least
767  * significant) on big-endian implementations.
768  *
769  * The above seems to be byte specific, so we need to reverse each
770  * byte of the bitfield. 'Internet' also says this might be implementation
771  * specific and we probably need a proper fix: carry the perf_event_attr
772  * bitfield flags in a separate data file FEAT_ section. Though this seems
773  * to work for now.
774  */
775 static void swap_bitfield(u8 *p, unsigned len)
776 {
777         unsigned i;
778
779         for (i = 0; i < len; i++) {
780                 *p = revbyte(*p);
781                 p++;
782         }
783 }
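/*
 * For example, revbyte() mirrors the bit order within a single byte
 * (nibbles, then bit pairs, then neighbouring bits), so revbyte(0x01) == 0x80
 * and revbyte(0x6c) == 0x36 (0110 1100 -> 0011 0110); swap_bitfield() simply
 * applies that to every byte of the flags area.
 */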
784
785 /* exported for swapping attributes in file header */
786 void perf_event__attr_swap(struct perf_event_attr *attr)
787 {
788         attr->type              = bswap_32(attr->type);
789         attr->size              = bswap_32(attr->size);
790
791 #define bswap_safe(f, n)                                        \
792         (attr->size > (offsetof(struct perf_event_attr, f) +    \
793                        sizeof(attr->f) * (n)))
794 #define bswap_field(f, sz)                      \
795 do {                                            \
796         if (bswap_safe(f, 0))                   \
797                 attr->f = bswap_##sz(attr->f);  \
798 } while(0)
799 #define bswap_field_16(f) bswap_field(f, 16)
800 #define bswap_field_32(f) bswap_field(f, 32)
801 #define bswap_field_64(f) bswap_field(f, 64)
802
803         bswap_field_64(config);
804         bswap_field_64(sample_period);
805         bswap_field_64(sample_type);
806         bswap_field_64(read_format);
807         bswap_field_32(wakeup_events);
808         bswap_field_32(bp_type);
809         bswap_field_64(bp_addr);
810         bswap_field_64(bp_len);
811         bswap_field_64(branch_sample_type);
812         bswap_field_64(sample_regs_user);
813         bswap_field_32(sample_stack_user);
814         bswap_field_32(aux_watermark);
815         bswap_field_16(sample_max_stack);
816         bswap_field_32(aux_sample_size);
817
818         /*
819          * The fields after read_format are bitfields. Check against
820          * read_format because offsetof() cannot be used on a bitfield.
821          */
822         if (bswap_safe(read_format, 1))
823                 swap_bitfield((u8 *) (&attr->read_format + 1),
824                               sizeof(u64));
825 #undef bswap_field_64
826 #undef bswap_field_32
827 #undef bswap_field
828 #undef bswap_safe
829 }
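/*
 * The bswap_safe() guard above is what keeps perf_event__attr_swap() usable
 * on perf.data files written by older tools: attr->size is swapped first,
 * and a field is only byte-swapped when attr->size shows it was actually
 * recorded.  For instance, a hypothetical attr->size of 96 would leave
 * aux_sample_size untouched rather than swapping bytes that were never
 * written.
 */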
830
831 static void perf_event__hdr_attr_swap(union perf_event *event,
832                                       bool sample_id_all __maybe_unused)
833 {
834         size_t size;
835
836         perf_event__attr_swap(&event->attr.attr);
837
838         size = event->header.size;
839         size -= (void *)&event->attr.id - (void *)event;
840         mem_bswap_64(event->attr.id, size);
841 }
842
843 static void perf_event__event_update_swap(union perf_event *event,
844                                           bool sample_id_all __maybe_unused)
845 {
846         event->event_update.type = bswap_64(event->event_update.type);
847         event->event_update.id   = bswap_64(event->event_update.id);
848 }
849
850 static void perf_event__event_type_swap(union perf_event *event,
851                                         bool sample_id_all __maybe_unused)
852 {
853         event->event_type.event_type.event_id =
854                 bswap_64(event->event_type.event_type.event_id);
855 }
856
857 static void perf_event__tracing_data_swap(union perf_event *event,
858                                           bool sample_id_all __maybe_unused)
859 {
860         event->tracing_data.size = bswap_32(event->tracing_data.size);
861 }
862
863 static void perf_event__auxtrace_info_swap(union perf_event *event,
864                                            bool sample_id_all __maybe_unused)
865 {
866         size_t size;
867
868         event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
869
870         size = event->header.size;
871         size -= (void *)&event->auxtrace_info.priv - (void *)event;
872         mem_bswap_64(event->auxtrace_info.priv, size);
873 }
874
875 static void perf_event__auxtrace_swap(union perf_event *event,
876                                       bool sample_id_all __maybe_unused)
877 {
878         event->auxtrace.size      = bswap_64(event->auxtrace.size);
879         event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
880         event->auxtrace.reference = bswap_64(event->auxtrace.reference);
881         event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
882         event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
883         event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
884 }
885
886 static void perf_event__auxtrace_error_swap(union perf_event *event,
887                                             bool sample_id_all __maybe_unused)
888 {
889         event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
890         event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
891         event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
892         event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
893         event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
894         event->auxtrace_error.fmt  = bswap_32(event->auxtrace_error.fmt);
895         event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
896         if (event->auxtrace_error.fmt)
897                 event->auxtrace_error.time = bswap_64(event->auxtrace_error.time);
898         if (event->auxtrace_error.fmt >= 2) {
899                 event->auxtrace_error.machine_pid = bswap_32(event->auxtrace_error.machine_pid);
900                 event->auxtrace_error.vcpu = bswap_32(event->auxtrace_error.vcpu);
901         }
902 }
903
904 static void perf_event__thread_map_swap(union perf_event *event,
905                                         bool sample_id_all __maybe_unused)
906 {
907         unsigned i;
908
909         event->thread_map.nr = bswap_64(event->thread_map.nr);
910
911         for (i = 0; i < event->thread_map.nr; i++)
912                 event->thread_map.entries[i].pid = bswap_64(event->thread_map.entries[i].pid);
913 }
914
915 static void perf_event__cpu_map_swap(union perf_event *event,
916                                      bool sample_id_all __maybe_unused)
917 {
918         struct perf_record_cpu_map_data *data = &event->cpu_map.data;
919
920         data->type = bswap_16(data->type);
921
922         switch (data->type) {
923         case PERF_CPU_MAP__CPUS:
924                 data->cpus_data.nr = bswap_16(data->cpus_data.nr);
925
926                 for (unsigned i = 0; i < data->cpus_data.nr; i++)
927                         data->cpus_data.cpu[i] = bswap_16(data->cpus_data.cpu[i]);
928                 break;
929         case PERF_CPU_MAP__MASK:
930                 data->mask32_data.long_size = bswap_16(data->mask32_data.long_size);
931
932                 switch (data->mask32_data.long_size) {
933                 case 4:
934                         data->mask32_data.nr = bswap_16(data->mask32_data.nr);
935                         for (unsigned i = 0; i < data->mask32_data.nr; i++)
936                                 data->mask32_data.mask[i] = bswap_32(data->mask32_data.mask[i]);
937                         break;
938                 case 8:
939                         data->mask64_data.nr = bswap_16(data->mask64_data.nr);
940                         for (unsigned i = 0; i < data->mask64_data.nr; i++)
941                                 data->mask64_data.mask[i] = bswap_64(data->mask64_data.mask[i]);
942                         break;
943                 default:
944                         pr_err("cpu_map swap: unsupported long size\n");
945                 }
946                 break;
947         case PERF_CPU_MAP__RANGE_CPUS:
948                 data->range_cpu_data.start_cpu = bswap_16(data->range_cpu_data.start_cpu);
949                 data->range_cpu_data.end_cpu = bswap_16(data->range_cpu_data.end_cpu);
950                 break;
951         default:
952                 break;
953         }
954 }
955
956 static void perf_event__stat_config_swap(union perf_event *event,
957                                          bool sample_id_all __maybe_unused)
958 {
959         u64 size;
960
961         size  = bswap_64(event->stat_config.nr) * sizeof(event->stat_config.data[0]);
962         size += 1; /* nr item itself */
963         mem_bswap_64(&event->stat_config.nr, size);
964 }
965
966 static void perf_event__stat_swap(union perf_event *event,
967                                   bool sample_id_all __maybe_unused)
968 {
969         event->stat.id     = bswap_64(event->stat.id);
970         event->stat.thread = bswap_32(event->stat.thread);
971         event->stat.cpu    = bswap_32(event->stat.cpu);
972         event->stat.val    = bswap_64(event->stat.val);
973         event->stat.ena    = bswap_64(event->stat.ena);
974         event->stat.run    = bswap_64(event->stat.run);
975 }
976
977 static void perf_event__stat_round_swap(union perf_event *event,
978                                         bool sample_id_all __maybe_unused)
979 {
980         event->stat_round.type = bswap_64(event->stat_round.type);
981         event->stat_round.time = bswap_64(event->stat_round.time);
982 }
983
984 static void perf_event__time_conv_swap(union perf_event *event,
985                                        bool sample_id_all __maybe_unused)
986 {
987         event->time_conv.time_shift = bswap_64(event->time_conv.time_shift);
988         event->time_conv.time_mult  = bswap_64(event->time_conv.time_mult);
989         event->time_conv.time_zero  = bswap_64(event->time_conv.time_zero);
990
991         if (event_contains(event->time_conv, time_cycles)) {
992                 event->time_conv.time_cycles = bswap_64(event->time_conv.time_cycles);
993                 event->time_conv.time_mask = bswap_64(event->time_conv.time_mask);
994         }
995 }
996
997 typedef void (*perf_event__swap_op)(union perf_event *event,
998                                     bool sample_id_all);
999
1000 static perf_event__swap_op perf_event__swap_ops[] = {
1001         [PERF_RECORD_MMAP]                = perf_event__mmap_swap,
1002         [PERF_RECORD_MMAP2]               = perf_event__mmap2_swap,
1003         [PERF_RECORD_COMM]                = perf_event__comm_swap,
1004         [PERF_RECORD_FORK]                = perf_event__task_swap,
1005         [PERF_RECORD_EXIT]                = perf_event__task_swap,
1006         [PERF_RECORD_LOST]                = perf_event__all64_swap,
1007         [PERF_RECORD_READ]                = perf_event__read_swap,
1008         [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
1009         [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
1010         [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
1011         [PERF_RECORD_AUX]                 = perf_event__aux_swap,
1012         [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
1013         [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
1014         [PERF_RECORD_SWITCH]              = perf_event__switch_swap,
1015         [PERF_RECORD_SWITCH_CPU_WIDE]     = perf_event__switch_swap,
1016         [PERF_RECORD_NAMESPACES]          = perf_event__namespaces_swap,
1017         [PERF_RECORD_CGROUP]              = perf_event__cgroup_swap,
1018         [PERF_RECORD_TEXT_POKE]           = perf_event__text_poke_swap,
1019         [PERF_RECORD_AUX_OUTPUT_HW_ID]    = perf_event__all64_swap,
1020         [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
1021         [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
1022         [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
1023         [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
1024         [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
1025         [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
1026         [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
1027         [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
1028         [PERF_RECORD_THREAD_MAP]          = perf_event__thread_map_swap,
1029         [PERF_RECORD_CPU_MAP]             = perf_event__cpu_map_swap,
1030         [PERF_RECORD_STAT_CONFIG]         = perf_event__stat_config_swap,
1031         [PERF_RECORD_STAT]                = perf_event__stat_swap,
1032         [PERF_RECORD_STAT_ROUND]          = perf_event__stat_round_swap,
1033         [PERF_RECORD_EVENT_UPDATE]        = perf_event__event_update_swap,
1034         [PERF_RECORD_TIME_CONV]           = perf_event__time_conv_swap,
1035         [PERF_RECORD_HEADER_MAX]          = NULL,
1036 };
1037
1038 /*
1039  * When perf record finishes a pass over every buffer, it records this pseudo
1040  * event.
1041  * We record the max timestamp t found in pass n.
1042  * Assuming these timestamps are monotonic across cpus, we know that if
1043  * a buffer still has events with timestamps below t, they will all be
1044  * available and read in pass n + 1.
1045  * Hence when we start to read pass n + 2, we can safely flush all
1046  * events with timestamps below t.
1047  *
1048  *    ============ PASS n =================
1049  *       CPU 0         |   CPU 1
1050  *                     |
1051  *    cnt1 timestamps  |   cnt2 timestamps
1052  *          1          |         2
1053  *          2          |         3
1054  *          -          |         4  <--- max recorded
1055  *
1056  *    ============ PASS n + 1 ==============
1057  *       CPU 0         |   CPU 1
1058  *                     |
1059  *    cnt1 timestamps  |   cnt2 timestamps
1060  *          3          |         5
1061  *          4          |         6
1062  *          5          |         7 <---- max recorded
1063  *
1064  *      Flush all events below timestamp 4
1065  *
1066  *    ============ PASS n + 2 ==============
1067  *       CPU 0         |   CPU 1
1068  *                     |
1069  *    cnt1 timestamps  |   cnt2 timestamps
1070  *          6          |         8
1071  *          7          |         9
1072  *          -          |         10
1073  *
1074  *      Flush all events below timestamp 7
1075  *      etc...
1076  */
1077 int perf_event__process_finished_round(struct perf_tool *tool __maybe_unused,
1078                                        union perf_event *event __maybe_unused,
1079                                        struct ordered_events *oe)
1080 {
1081         if (dump_trace)
1082                 fprintf(stdout, "\n");
1083         return ordered_events__flush(oe, OE_FLUSH__ROUND);
1084 }
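/*
 * A minimal sketch of the two-round rule described above, using hypothetical
 * names (the real queueing and delivery lives in util/ordered-events.c, and
 * toy_rounds__flush_up_to() stands in for the actual delivery step): events
 * queued while round n is being read are only known to be complete once
 * round n + 1 has finished, so each FINISHED_ROUND flushes everything up to
 * the maximum timestamp of the *previous* round.
 */
#if 0
struct toy_rounds {
	u64 prev_round_max;	/* max timestamp seen in the previous round */
	u64 cur_round_max;	/* max timestamp seen in the current round */
};

static void toy_rounds__queue(struct toy_rounds *r, u64 timestamp)
{
	if (timestamp > r->cur_round_max)
		r->cur_round_max = timestamp;
	/* ...insert the event into a time-ordered queue here... */
}

static void toy_rounds__finished_round(struct toy_rounds *r)
{
	/*
	 * Deliver every queued event with timestamp <= prev_round_max;
	 * anything newer may still have siblings in unread buffers.
	 */
	toy_rounds__flush_up_to(r, r->prev_round_max);
	r->prev_round_max = r->cur_round_max;
}
#endif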
1085
1086 int perf_session__queue_event(struct perf_session *s, union perf_event *event,
1087                               u64 timestamp, u64 file_offset, const char *file_path)
1088 {
1089         return ordered_events__queue(&s->ordered_events, event, timestamp, file_offset, file_path);
1090 }
1091
1092 static void callchain__lbr_callstack_printf(struct perf_sample *sample)
1093 {
1094         struct ip_callchain *callchain = sample->callchain;
1095         struct branch_stack *lbr_stack = sample->branch_stack;
1096         struct branch_entry *entries = perf_sample__branch_entries(sample);
1097         u64 kernel_callchain_nr = callchain->nr;
1098         unsigned int i;
1099
1100         for (i = 0; i < kernel_callchain_nr; i++) {
1101                 if (callchain->ips[i] == PERF_CONTEXT_USER)
1102                         break;
1103         }
1104
1105         if ((i != kernel_callchain_nr) && lbr_stack->nr) {
1106                 u64 total_nr;
1107                 /*
1108                  * The LBR call stack captures only the user call chain;
1109                  * i is the number of kernel call chain entries,
1110                  * and 1 accounts for PERF_CONTEXT_USER.
1111                  *
1112                  * The user call chain is stored in the LBR registers,
1113                  * which come in pairs: the caller is stored in the
1114                  * "from" register, while the callee is stored in the
1115                  * "to" register.
1116                  * For example, there is a call stack
1117                  * "A"->"B"->"C"->"D".
1118                  * The LBR registers will be recorded like
1119                  * "C"->"D", "B"->"C", "A"->"B".
1120                  * So only the first "to" register and all "from"
1121                  * registers are needed to construct the whole stack.
1122                  */
1123                 total_nr = i + 1 + lbr_stack->nr + 1;
1124                 kernel_callchain_nr = i + 1;
1125
1126                 printf("... LBR call chain: nr:%" PRIu64 "\n", total_nr);
1127
1128                 for (i = 0; i < kernel_callchain_nr; i++)
1129                         printf("..... %2d: %016" PRIx64 "\n",
1130                                i, callchain->ips[i]);
1131
1132                 printf("..... %2d: %016" PRIx64 "\n",
1133                        (int)(kernel_callchain_nr), entries[0].to);
1134                 for (i = 0; i < lbr_stack->nr; i++)
1135                         printf("..... %2d: %016" PRIx64 "\n",
1136                                (int)(i + kernel_callchain_nr + 1), entries[i].from);
1137         }
1138 }
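/*
 * Worked example for the reconstruction above: for a user call stack
 * A() -> B() -> C() -> D(), the LBR records the pairs
 *
 *	entries[0] = { from = C, to = D }
 *	entries[1] = { from = B, to = C }
 *	entries[2] = { from = A, to = B }
 *
 * so the user part printed above is entries[0].to (D) followed by every
 * .from (C, B, A), appended after the kernel ips and the PERF_CONTEXT_USER
 * marker.
 */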
1139
1140 static void callchain__printf(struct evsel *evsel,
1141                               struct perf_sample *sample)
1142 {
1143         unsigned int i;
1144         struct ip_callchain *callchain = sample->callchain;
1145
1146         if (evsel__has_branch_callstack(evsel))
1147                 callchain__lbr_callstack_printf(sample);
1148
1149         printf("... FP chain: nr:%" PRIu64 "\n", callchain->nr);
1150
1151         for (i = 0; i < callchain->nr; i++)
1152                 printf("..... %2d: %016" PRIx64 "\n",
1153                        i, callchain->ips[i]);
1154 }
1155
1156 static void branch_stack__printf(struct perf_sample *sample, bool callstack)
1157 {
1158         struct branch_entry *entries = perf_sample__branch_entries(sample);
1159         uint64_t i;
1160
1161         if (!callstack) {
1162                 printf("%s: nr:%" PRIu64 "\n", "... branch stack", sample->branch_stack->nr);
1163         } else {
1164                 /* 1 is added to nr because expanding the branch
1165                  * stack generates nr + 1 call stack records. e.g.,
1166                  *         B()->C()
1167                  *         A()->B()
1168                  * the final callstack should be:
1169                  *         C()
1170                  *         B()
1171                  *         A()
1172                  */
1173                 printf("%s: nr:%" PRIu64 "\n", "... branch callstack", sample->branch_stack->nr+1);
1174         }
1175
1176         for (i = 0; i < sample->branch_stack->nr; i++) {
1177                 struct branch_entry *e = &entries[i];
1178
1179                 if (!callstack) {
1180                         printf("..... %2"PRIu64": %016" PRIx64 " -> %016" PRIx64 " %hu cycles %s%s%s%s %x %s\n",
1181                                 i, e->from, e->to,
1182                                 (unsigned short)e->flags.cycles,
1183                                 e->flags.mispred ? "M" : " ",
1184                                 e->flags.predicted ? "P" : " ",
1185                                 e->flags.abort ? "A" : " ",
1186                                 e->flags.in_tx ? "T" : " ",
1187                                 (unsigned)e->flags.reserved,
1188                                 get_branch_type(e));
1189                 } else {
1190                         if (i == 0) {
1191                                 printf("..... %2"PRIu64": %016" PRIx64 "\n"
1192                                        "..... %2"PRIu64": %016" PRIx64 "\n",
1193                                                 i, e->to, i+1, e->from);
1194                         } else {
1195                                 printf("..... %2"PRIu64": %016" PRIx64 "\n", i+1, e->from);
1196                         }
1197                 }
1198         }
1199 }
1200
1201 static void regs_dump__printf(u64 mask, u64 *regs, const char *arch)
1202 {
1203         unsigned rid, i = 0;
1204
1205         for_each_set_bit(rid, (unsigned long *) &mask, sizeof(mask) * 8) {
1206                 u64 val = regs[i++];
1207
1208                 printf(".... %-5s 0x%016" PRIx64 "\n",
1209                        perf_reg_name(rid, arch), val);
1210         }
1211 }
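/*
 * The sampled register values are packed: regs[] holds one u64 per set bit
 * of mask, in ascending bit order, which is why regs_dump__printf() pairs
 * the i-th set bit with regs[i++].  With a hypothetical mask of 0x30 (bits
 * 4 and 5), regs[0] is the value of register id 4 and regs[1] the value of
 * register id 5.
 */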
1212
1213 static const char *regs_abi[] = {
1214         [PERF_SAMPLE_REGS_ABI_NONE] = "none",
1215         [PERF_SAMPLE_REGS_ABI_32] = "32-bit",
1216         [PERF_SAMPLE_REGS_ABI_64] = "64-bit",
1217 };
1218
1219 static inline const char *regs_dump_abi(struct regs_dump *d)
1220 {
1221         if (d->abi > PERF_SAMPLE_REGS_ABI_64)
1222                 return "unknown";
1223
1224         return regs_abi[d->abi];
1225 }
1226
1227 static void regs__printf(const char *type, struct regs_dump *regs, const char *arch)
1228 {
1229         u64 mask = regs->mask;
1230
1231         printf("... %s regs: mask 0x%" PRIx64 " ABI %s\n",
1232                type,
1233                mask,
1234                regs_dump_abi(regs));
1235
1236         regs_dump__printf(mask, regs->regs, arch);
1237 }
1238
1239 static void regs_user__printf(struct perf_sample *sample, const char *arch)
1240 {
1241         struct regs_dump *user_regs = &sample->user_regs;
1242
1243         if (user_regs->regs)
1244                 regs__printf("user", user_regs, arch);
1245 }
1246
1247 static void regs_intr__printf(struct perf_sample *sample, const char *arch)
1248 {
1249         struct regs_dump *intr_regs = &sample->intr_regs;
1250
1251         if (intr_regs->regs)
1252                 regs__printf("intr", intr_regs, arch);
1253 }
1254
1255 static void stack_user__printf(struct stack_dump *dump)
1256 {
1257         printf("... ustack: size %" PRIu64 ", offset 0x%x\n",
1258                dump->size, dump->offset);
1259 }
1260
1261 static void evlist__print_tstamp(struct evlist *evlist, union perf_event *event, struct perf_sample *sample)
1262 {
1263         u64 sample_type = __evlist__combined_sample_type(evlist);
1264
1265         if (event->header.type != PERF_RECORD_SAMPLE &&
1266             !evlist__sample_id_all(evlist)) {
1267                 fputs("-1 -1 ", stdout);
1268                 return;
1269         }
1270
1271         if ((sample_type & PERF_SAMPLE_CPU))
1272                 printf("%u ", sample->cpu);
1273
1274         if (sample_type & PERF_SAMPLE_TIME)
1275                 printf("%" PRIu64 " ", sample->time);
1276 }
1277
1278 static void sample_read__printf(struct perf_sample *sample, u64 read_format)
1279 {
1280         printf("... sample_read:\n");
1281
1282         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1283                 printf("...... time enabled %016" PRIx64 "\n",
1284                        sample->read.time_enabled);
1285
1286         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1287                 printf("...... time running %016" PRIx64 "\n",
1288                        sample->read.time_running);
1289
1290         if (read_format & PERF_FORMAT_GROUP) {
1291                 struct sample_read_value *value = sample->read.group.values;
1292
1293                 printf(".... group nr %" PRIu64 "\n", sample->read.group.nr);
1294
1295                 sample_read_group__for_each(value, sample->read.group.nr, read_format) {
1296                         printf("..... id %016" PRIx64
1297                                ", value %016" PRIx64,
1298                                value->id, value->value);
1299                         if (read_format & PERF_FORMAT_LOST)
1300                                 printf(", lost %" PRIu64, value->lost);
1301                         printf("\n");
1302                 }
1303         } else {
1304                 printf("..... id %016" PRIx64 ", value %016" PRIx64,
1305                         sample->read.one.id, sample->read.one.value);
1306                 if (read_format & PERF_FORMAT_LOST)
1307                         printf(", lost %" PRIu64, sample->read.one.lost);
1308                 printf("\n");
1309         }
1310 }
1311
1312 static void dump_event(struct evlist *evlist, union perf_event *event,
1313                        u64 file_offset, struct perf_sample *sample,
1314                        const char *file_path)
1315 {
1316         if (!dump_trace)
1317                 return;
1318
1319         printf("\n%#" PRIx64 "@%s [%#x]: event: %d\n",
1320                file_offset, file_path, event->header.size, event->header.type);
1321
1322         trace_event(event);
1323         if (event->header.type == PERF_RECORD_SAMPLE && evlist->trace_event_sample_raw)
1324                 evlist->trace_event_sample_raw(evlist, event, sample);
1325
1326         if (sample)
1327                 evlist__print_tstamp(evlist, event, sample);
1328
1329         printf("%#" PRIx64 " [%#x]: PERF_RECORD_%s", file_offset,
1330                event->header.size, perf_event__name(event->header.type));
1331 }
1332
1333 char *get_page_size_name(u64 size, char *str)
1334 {
1335         if (!size || !unit_number__scnprintf(str, PAGE_SIZE_NAME_LEN, size))
1336                 snprintf(str, PAGE_SIZE_NAME_LEN, "%s", "N/A");
1337
1338         return str;
1339 }
1340
1341 static void dump_sample(struct evsel *evsel, union perf_event *event,
1342                         struct perf_sample *sample, const char *arch)
1343 {
1344         u64 sample_type;
1345         char str[PAGE_SIZE_NAME_LEN];
1346
1347         if (!dump_trace)
1348                 return;
1349
1350         printf("(IP, 0x%x): %d/%d: %#" PRIx64 " period: %" PRIu64 " addr: %#" PRIx64 "\n",
1351                event->header.misc, sample->pid, sample->tid, sample->ip,
1352                sample->period, sample->addr);
1353
1354         sample_type = evsel->core.attr.sample_type;
1355
1356         if (evsel__has_callchain(evsel))
1357                 callchain__printf(evsel, sample);
1358
1359         if (evsel__has_br_stack(evsel))
1360                 branch_stack__printf(sample, evsel__has_branch_callstack(evsel));
1361
1362         if (sample_type & PERF_SAMPLE_REGS_USER)
1363                 regs_user__printf(sample, arch);
1364
1365         if (sample_type & PERF_SAMPLE_REGS_INTR)
1366                 regs_intr__printf(sample, arch);
1367
1368         if (sample_type & PERF_SAMPLE_STACK_USER)
1369                 stack_user__printf(&sample->user_stack);
1370
1371         if (sample_type & PERF_SAMPLE_WEIGHT_TYPE) {
1372                 printf("... weight: %" PRIu64 "", sample->weight);
1373                 if (sample_type & PERF_SAMPLE_WEIGHT_STRUCT) {
1374                         printf(",0x%"PRIx16"", sample->ins_lat);
1375                         printf(",0x%"PRIx16"", sample->p_stage_cyc);
1376                 }
1377                 printf("\n");
1378         }
1379
1380         if (sample_type & PERF_SAMPLE_DATA_SRC)
1381                 printf(" . data_src: 0x%"PRIx64"\n", sample->data_src);
1382
1383         if (sample_type & PERF_SAMPLE_PHYS_ADDR)
1384                 printf(" .. phys_addr: 0x%"PRIx64"\n", sample->phys_addr);
1385
1386         if (sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)
1387                 printf(" .. data page size: %s\n", get_page_size_name(sample->data_page_size, str));
1388
1389         if (sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)
1390                 printf(" .. code page size: %s\n", get_page_size_name(sample->code_page_size, str));
1391
1392         if (sample_type & PERF_SAMPLE_TRANSACTION)
1393                 printf("... transaction: %" PRIx64 "\n", sample->transaction);
1394
1395         if (sample_type & PERF_SAMPLE_READ)
1396                 sample_read__printf(sample, evsel->core.attr.read_format);
1397 }
1398
1399 static void dump_read(struct evsel *evsel, union perf_event *event)
1400 {
1401         struct perf_record_read *read_event = &event->read;
1402         u64 read_format;
1403
1404         if (!dump_trace)
1405                 return;
1406
1407         printf(": %d %d %s %" PRI_lu64 "\n", event->read.pid, event->read.tid,
1408                evsel__name(evsel), event->read.value);
1409
1410         if (!evsel)
1411                 return;
1412
1413         read_format = evsel->core.attr.read_format;
1414
1415         if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
1416                 printf("... time enabled : %" PRI_lu64 "\n", read_event->time_enabled);
1417
1418         if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
1419                 printf("... time running : %" PRI_lu64 "\n", read_event->time_running);
1420
1421         if (read_format & PERF_FORMAT_ID)
1422                 printf("... id           : %" PRI_lu64 "\n", read_event->id);
1423
1424         if (read_format & PERF_FORMAT_LOST)
1425                 printf("... lost         : %" PRI_lu64 "\n", read_event->lost);
1426 }
1427
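/*
 * Pick the machine that should own this event: a guest machine for guest
 * kernel/user samples (found by pid, or created on demand when guest code
 * handling is enabled), otherwise the host machine.
 */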
1428 static struct machine *machines__find_for_cpumode(struct machines *machines,
1429                                                union perf_event *event,
1430                                                struct perf_sample *sample)
1431 {
1432         if (perf_guest &&
1433             ((sample->cpumode == PERF_RECORD_MISC_GUEST_KERNEL) ||
1434              (sample->cpumode == PERF_RECORD_MISC_GUEST_USER))) {
1435                 u32 pid;
1436
1437                 if (sample->machine_pid)
1438                         pid = sample->machine_pid;
1439                 else if (event->header.type == PERF_RECORD_MMAP
1440                     || event->header.type == PERF_RECORD_MMAP2)
1441                         pid = event->mmap.pid;
1442                 else
1443                         pid = sample->pid;
1444
1445                 /*
1446                  * Guest code machine is created as needed and does not use
1447                  * DEFAULT_GUEST_KERNEL_ID.
1448                  */
1449                 if (symbol_conf.guest_code)
1450                         return machines__findnew(machines, pid);
1451
1452                 return machines__find_guest(machines, pid);
1453         }
1454
1455         return &machines->host;
1456 }
1457
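/*
 * Deliver one PERF_SAMPLE_READ value: turn the running counter value into
 * a period delta relative to the last value seen for this sample id, then
 * hand the sample to the tool's sample callback.
 */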
1458 static int deliver_sample_value(struct evlist *evlist,
1459                                 struct perf_tool *tool,
1460                                 union perf_event *event,
1461                                 struct perf_sample *sample,
1462                                 struct sample_read_value *v,
1463                                 struct machine *machine)
1464 {
1465         struct perf_sample_id *sid = evlist__id2sid(evlist, v->id);
1466         struct evsel *evsel;
1467
1468         if (sid) {
1469                 sample->id     = v->id;
1470                 sample->period = v->value - sid->period;
1471                 sid->period    = v->value;
1472         }
1473
1474         if (!sid || sid->evsel == NULL) {
1475                 ++evlist->stats.nr_unknown_id;
1476                 return 0;
1477         }
1478
1479         /*
1480          * There's no reason to deliver a sample
1481          * with a zero period, so bail out.
1482          */
1483         if (!sample->period)
1484                 return 0;
1485
1486         evsel = container_of(sid->evsel, struct evsel, core);
1487         return tool->sample(tool, event, sample, evsel, machine);
1488 }
1489
1490 static int deliver_sample_group(struct evlist *evlist,
1491                                 struct perf_tool *tool,
1492                                 union  perf_event *event,
1493                                 struct perf_sample *sample,
1494                                 struct machine *machine,
1495                                 u64 read_format)
1496 {
1497         int ret = -EINVAL;
1498         struct sample_read_value *v = sample->read.group.values;
1499
1500         sample_read_group__for_each(v, sample->read.group.nr, read_format) {
1501                 ret = deliver_sample_value(evlist, tool, event, sample, v,
1502                                            machine);
1503                 if (ret)
1504                         break;
1505         }
1506
1507         return ret;
1508 }
1509
1510 static int evlist__deliver_sample(struct evlist *evlist, struct perf_tool *tool,
1511                                   union  perf_event *event, struct perf_sample *sample,
1512                                   struct evsel *evsel, struct machine *machine)
1513 {
1514         /* We know evsel != NULL. */
1515         u64 sample_type = evsel->core.attr.sample_type;
1516         u64 read_format = evsel->core.attr.read_format;
1517
1518         /* Standard sample delivery. */
1519         if (!(sample_type & PERF_SAMPLE_READ))
1520                 return tool->sample(tool, event, sample, evsel, machine);
1521
1522         /* For PERF_SAMPLE_READ we have either single or group mode. */
1523         if (read_format & PERF_FORMAT_GROUP)
1524                 return deliver_sample_group(evlist, tool, event, sample,
1525                                             machine, read_format);
1526         else
1527                 return deliver_sample_value(evlist, tool, event, sample,
1528                                             &sample->read.one, machine);
1529 }
1530
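/*
 * Dispatch an event to the tool callback matching its PERF_RECORD_* type,
 * updating the event statistics along the way.
 */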
1531 static int machines__deliver_event(struct machines *machines,
1532                                    struct evlist *evlist,
1533                                    union perf_event *event,
1534                                    struct perf_sample *sample,
1535                                    struct perf_tool *tool, u64 file_offset,
1536                                    const char *file_path)
1537 {
1538         struct evsel *evsel;
1539         struct machine *machine;
1540
1541         dump_event(evlist, event, file_offset, sample, file_path);
1542
1543         evsel = evlist__id2evsel(evlist, sample->id);
1544
1545         machine = machines__find_for_cpumode(machines, event, sample);
1546
1547         switch (event->header.type) {
1548         case PERF_RECORD_SAMPLE:
1549                 if (evsel == NULL) {
1550                         ++evlist->stats.nr_unknown_id;
1551                         return 0;
1552                 }
1553                 if (machine == NULL) {
1554                         ++evlist->stats.nr_unprocessable_samples;
1555                         dump_sample(evsel, event, sample, perf_env__arch(NULL));
1556                         return 0;
1557                 }
1558                 dump_sample(evsel, event, sample, perf_env__arch(machine->env));
1559                 return evlist__deliver_sample(evlist, tool, event, sample, evsel, machine);
1560         case PERF_RECORD_MMAP:
1561                 return tool->mmap(tool, event, sample, machine);
1562         case PERF_RECORD_MMAP2:
1563                 if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
1564                         ++evlist->stats.nr_proc_map_timeout;
1565                 return tool->mmap2(tool, event, sample, machine);
1566         case PERF_RECORD_COMM:
1567                 return tool->comm(tool, event, sample, machine);
1568         case PERF_RECORD_NAMESPACES:
1569                 return tool->namespaces(tool, event, sample, machine);
1570         case PERF_RECORD_CGROUP:
1571                 return tool->cgroup(tool, event, sample, machine);
1572         case PERF_RECORD_FORK:
1573                 return tool->fork(tool, event, sample, machine);
1574         case PERF_RECORD_EXIT:
1575                 return tool->exit(tool, event, sample, machine);
1576         case PERF_RECORD_LOST:
1577                 if (tool->lost == perf_event__process_lost)
1578                         evlist->stats.total_lost += event->lost.lost;
1579                 return tool->lost(tool, event, sample, machine);
1580         case PERF_RECORD_LOST_SAMPLES:
1581                 if (tool->lost_samples == perf_event__process_lost_samples)
1582                         evlist->stats.total_lost_samples += event->lost_samples.lost;
1583                 return tool->lost_samples(tool, event, sample, machine);
1584         case PERF_RECORD_READ:
1585                 dump_read(evsel, event);
1586                 return tool->read(tool, event, sample, evsel, machine);
1587         case PERF_RECORD_THROTTLE:
1588                 return tool->throttle(tool, event, sample, machine);
1589         case PERF_RECORD_UNTHROTTLE:
1590                 return tool->unthrottle(tool, event, sample, machine);
1591         case PERF_RECORD_AUX:
1592                 if (tool->aux == perf_event__process_aux) {
1593                         if (event->aux.flags & PERF_AUX_FLAG_TRUNCATED)
1594                                 evlist->stats.total_aux_lost += 1;
1595                         if (event->aux.flags & PERF_AUX_FLAG_PARTIAL)
1596                                 evlist->stats.total_aux_partial += 1;
1597                         if (event->aux.flags & PERF_AUX_FLAG_COLLISION)
1598                                 evlist->stats.total_aux_collision += 1;
1599                 }
1600                 return tool->aux(tool, event, sample, machine);
1601         case PERF_RECORD_ITRACE_START:
1602                 return tool->itrace_start(tool, event, sample, machine);
1603         case PERF_RECORD_SWITCH:
1604         case PERF_RECORD_SWITCH_CPU_WIDE:
1605                 return tool->context_switch(tool, event, sample, machine);
1606         case PERF_RECORD_KSYMBOL:
1607                 return tool->ksymbol(tool, event, sample, machine);
1608         case PERF_RECORD_BPF_EVENT:
1609                 return tool->bpf(tool, event, sample, machine);
1610         case PERF_RECORD_TEXT_POKE:
1611                 return tool->text_poke(tool, event, sample, machine);
1612         case PERF_RECORD_AUX_OUTPUT_HW_ID:
1613                 return tool->aux_output_hw_id(tool, event, sample, machine);
1614         default:
1615                 ++evlist->stats.nr_unknown_events;
1616                 return -1;
1617         }
1618 }
1619
1620 static int perf_session__deliver_event(struct perf_session *session,
1621                                        union perf_event *event,
1622                                        struct perf_tool *tool,
1623                                        u64 file_offset,
1624                                        const char *file_path)
1625 {
1626         struct perf_sample sample;
1627         int ret = evlist__parse_sample(session->evlist, event, &sample);
1628
1629         if (ret) {
1630                 pr_err("Can't parse sample, err = %d\n", ret);
1631                 return ret;
1632         }
1633
1634         ret = auxtrace__process_event(session, event, &sample, tool);
1635         if (ret < 0)
1636                 return ret;
1637         if (ret > 0)
1638                 return 0;
1639
1640         ret = machines__deliver_event(&session->machines, session->evlist,
1641                                       event, &sample, tool, file_offset, file_path);
1642
1643         if (dump_trace && sample.aux_sample.size)
1644                 auxtrace__dump_auxtrace_sample(session, &sample);
1645
1646         return ret;
1647 }
1648
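/*
 * User/synthetic events (PERF_RECORD_HEADER_* and friends) are generated
 * by the tools rather than the kernel and are handled right away instead
 * of being queued for ordered delivery.
 */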
1649 static s64 perf_session__process_user_event(struct perf_session *session,
1650                                             union perf_event *event,
1651                                             u64 file_offset,
1652                                             const char *file_path)
1653 {
1654         struct ordered_events *oe = &session->ordered_events;
1655         struct perf_tool *tool = session->tool;
1656         struct perf_sample sample = { .time = 0, };
1657         int fd = perf_data__fd(session->data);
1658         int err;
1659
1660         if (event->header.type != PERF_RECORD_COMPRESSED ||
1661             tool->compressed == perf_session__process_compressed_event_stub)
1662                 dump_event(session->evlist, event, file_offset, &sample, file_path);
1663
1664         /* These events are processed right away */
1665         switch (event->header.type) {
1666         case PERF_RECORD_HEADER_ATTR:
1667                 err = tool->attr(tool, event, &session->evlist);
1668                 if (err == 0) {
1669                         perf_session__set_id_hdr_size(session);
1670                         perf_session__set_comm_exec(session);
1671                 }
1672                 return err;
1673         case PERF_RECORD_EVENT_UPDATE:
1674                 return tool->event_update(tool, event, &session->evlist);
1675         case PERF_RECORD_HEADER_EVENT_TYPE:
1676                 /*
1677                  * Deprecated, but we need to handle it for the sake
1678                  * of old data files created in pipe mode.
1679                  */
1680                 return 0;
1681         case PERF_RECORD_HEADER_TRACING_DATA:
1682                 /*
1683                  * Set up for reading amidst mmap, but only when we
1684                  * are in 'file' mode. The 'pipe' fd is already in
1685                  * the proper place.
1686                  */
1687                 if (!perf_data__is_pipe(session->data))
1688                         lseek(fd, file_offset, SEEK_SET);
1689                 return tool->tracing_data(session, event);
1690         case PERF_RECORD_HEADER_BUILD_ID:
1691                 return tool->build_id(session, event);
1692         case PERF_RECORD_FINISHED_ROUND:
1693                 return tool->finished_round(tool, event, oe);
1694         case PERF_RECORD_ID_INDEX:
1695                 return tool->id_index(session, event);
1696         case PERF_RECORD_AUXTRACE_INFO:
1697                 return tool->auxtrace_info(session, event);
1698         case PERF_RECORD_AUXTRACE:
1699                 /* set up for reading amidst mmap */
1700                 lseek(fd, file_offset + event->header.size, SEEK_SET);
1701                 return tool->auxtrace(session, event);
1702         case PERF_RECORD_AUXTRACE_ERROR:
1703                 perf_session__auxtrace_error_inc(session, event);
1704                 return tool->auxtrace_error(session, event);
1705         case PERF_RECORD_THREAD_MAP:
1706                 return tool->thread_map(session, event);
1707         case PERF_RECORD_CPU_MAP:
1708                 return tool->cpu_map(session, event);
1709         case PERF_RECORD_STAT_CONFIG:
1710                 return tool->stat_config(session, event);
1711         case PERF_RECORD_STAT:
1712                 return tool->stat(session, event);
1713         case PERF_RECORD_STAT_ROUND:
1714                 return tool->stat_round(session, event);
1715         case PERF_RECORD_TIME_CONV:
1716                 session->time_conv = event->time_conv;
1717                 return tool->time_conv(session, event);
1718         case PERF_RECORD_HEADER_FEATURE:
1719                 return tool->feature(session, event);
1720         case PERF_RECORD_COMPRESSED:
1721                 err = tool->compressed(session, event, file_offset, file_path);
1722                 if (err)
1723                         dump_event(session->evlist, event, file_offset, &sample, file_path);
1724                 return err;
1725         case PERF_RECORD_FINISHED_INIT:
1726                 return tool->finished_init(session, event);
1727         default:
1728                 return -EINVAL;
1729         }
1730 }
1731
1732 int perf_session__deliver_synth_event(struct perf_session *session,
1733                                       union perf_event *event,
1734                                       struct perf_sample *sample)
1735 {
1736         struct evlist *evlist = session->evlist;
1737         struct perf_tool *tool = session->tool;
1738
1739         events_stats__inc(&evlist->stats, event->header.type);
1740
1741         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1742                 return perf_session__process_user_event(session, event, 0, NULL);
1743
1744         return machines__deliver_event(&session->machines, evlist, event, sample, tool, 0, NULL);
1745 }
1746
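/* Byte swap an event recorded on a machine with the opposite endianness. */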
1747 static void event_swap(union perf_event *event, bool sample_id_all)
1748 {
1749         perf_event__swap_op swap;
1750
1751         swap = perf_event__swap_ops[event->header.type];
1752         if (swap)
1753                 swap(event, sample_id_all);
1754 }
1755
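/*
 * Read a single event at the given file offset into the caller's buffer
 * (or reference it directly from the single mmap when available), byte
 * swapping it and optionally parsing its sample.
 */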
1756 int perf_session__peek_event(struct perf_session *session, off_t file_offset,
1757                              void *buf, size_t buf_sz,
1758                              union perf_event **event_ptr,
1759                              struct perf_sample *sample)
1760 {
1761         union perf_event *event;
1762         size_t hdr_sz, rest;
1763         int fd;
1764
1765         if (session->one_mmap && !session->header.needs_swap) {
1766                 event = file_offset - session->one_mmap_offset +
1767                         session->one_mmap_addr;
1768                 goto out_parse_sample;
1769         }
1770
1771         if (perf_data__is_pipe(session->data))
1772                 return -1;
1773
1774         fd = perf_data__fd(session->data);
1775         hdr_sz = sizeof(struct perf_event_header);
1776
1777         if (buf_sz < hdr_sz)
1778                 return -1;
1779
1780         if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
1781             readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
1782                 return -1;
1783
1784         event = (union perf_event *)buf;
1785
1786         if (session->header.needs_swap)
1787                 perf_event_header__bswap(&event->header);
1788
1789         if (event->header.size < hdr_sz || event->header.size > buf_sz)
1790                 return -1;
1791
1792         buf += hdr_sz;
1793         rest = event->header.size - hdr_sz;
1794
1795         if (readn(fd, buf, rest) != (ssize_t)rest)
1796                 return -1;
1797
1798         if (session->header.needs_swap)
1799                 event_swap(event, evlist__sample_id_all(session->evlist));
1800
1801 out_parse_sample:
1802
1803         if (sample && event->header.type < PERF_RECORD_USER_TYPE_START &&
1804             evlist__parse_sample(session->evlist, event, sample))
1805                 return -1;
1806
1807         *event_ptr = event;
1808
1809         return 0;
1810 }
1811
1812 int perf_session__peek_events(struct perf_session *session, u64 offset,
1813                               u64 size, peek_events_cb_t cb, void *data)
1814 {
1815         u64 max_offset = offset + size;
1816         char buf[PERF_SAMPLE_MAX_SIZE];
1817         union perf_event *event;
1818         int err;
1819
1820         do {
1821                 err = perf_session__peek_event(session, offset, buf,
1822                                                PERF_SAMPLE_MAX_SIZE, &event,
1823                                                NULL);
1824                 if (err)
1825                         return err;
1826
1827                 err = cb(session, event, offset, data);
1828                 if (err)
1829                         return err;
1830
1831                 offset += event->header.size;
1832                 if (event->header.type == PERF_RECORD_AUXTRACE)
1833                         offset += event->auxtrace.size;
1834
1835         } while (offset < max_offset);
1836
1837         return err;
1838 }
1839
1840 static s64 perf_session__process_event(struct perf_session *session,
1841                                        union perf_event *event, u64 file_offset,
1842                                        const char *file_path)
1843 {
1844         struct evlist *evlist = session->evlist;
1845         struct perf_tool *tool = session->tool;
1846         int ret;
1847
1848         if (session->header.needs_swap)
1849                 event_swap(event, evlist__sample_id_all(evlist));
1850
1851         if (event->header.type >= PERF_RECORD_HEADER_MAX)
1852                 return -EINVAL;
1853
1854         events_stats__inc(&evlist->stats, event->header.type);
1855
1856         if (event->header.type >= PERF_RECORD_USER_TYPE_START)
1857                 return perf_session__process_user_event(session, event, file_offset, file_path);
1858
1859         if (tool->ordered_events) {
1860                 u64 timestamp = -1ULL;
1861
1862                 ret = evlist__parse_sample_timestamp(evlist, event, &timestamp);
1863                 if (ret && ret != -1)
1864                         return ret;
1865
1866                 ret = perf_session__queue_event(session, event, timestamp, file_offset, file_path);
1867                 if (ret != -ETIME)
1868                         return ret;
1869         }
1870
1871         return perf_session__deliver_event(session, event, tool, file_offset, file_path);
1872 }
1873
1874 void perf_event_header__bswap(struct perf_event_header *hdr)
1875 {
1876         hdr->type = bswap_32(hdr->type);
1877         hdr->misc = bswap_16(hdr->misc);
1878         hdr->size = bswap_16(hdr->size);
1879 }
1880
1881 struct thread *perf_session__findnew(struct perf_session *session, pid_t pid)
1882 {
1883         return machine__findnew_thread(&session->machines.host, -1, pid);
1884 }
1885
1886 int perf_session__register_idle_thread(struct perf_session *session)
1887 {
1888         struct thread *thread = machine__idle_thread(&session->machines.host);
1889
1890         /* machine__idle_thread() got the thread, so put it */
1891         thread__put(thread);
1892         return thread ? 0 : -1;
1893 }
1894
1895 static void
1896 perf_session__warn_order(const struct perf_session *session)
1897 {
1898         const struct ordered_events *oe = &session->ordered_events;
1899         struct evsel *evsel;
1900         bool should_warn = true;
1901
1902         evlist__for_each_entry(session->evlist, evsel) {
1903                 if (evsel->core.attr.write_backward)
1904                         should_warn = false;
1905         }
1906
1907         if (!should_warn)
1908                 return;
1909         if (oe->nr_unordered_events != 0)
1910                 ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
1911 }
1912
1913 static void perf_session__warn_about_errors(const struct perf_session *session)
1914 {
1915         const struct events_stats *stats = &session->evlist->stats;
1916
1917         if (session->tool->lost == perf_event__process_lost &&
1918             stats->nr_events[PERF_RECORD_LOST] != 0) {
1919                 ui__warning("Processed %d events and lost %d chunks!\n\n"
1920                             "Check IO/CPU overload!\n\n",
1921                             stats->nr_events[0],
1922                             stats->nr_events[PERF_RECORD_LOST]);
1923         }
1924
1925         if (session->tool->lost_samples == perf_event__process_lost_samples) {
1926                 double drop_rate;
1927
1928                 drop_rate = (double)stats->total_lost_samples /
1929                             (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
1930                 if (drop_rate > 0.05) {
1931                         ui__warning("Processed %" PRIu64 " samples and lost %3.2f%%!\n\n",
1932                                     stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
1933                                     drop_rate * 100.0);
1934                 }
1935         }
1936
1937         if (session->tool->aux == perf_event__process_aux &&
1938             stats->total_aux_lost != 0) {
1939                 ui__warning("AUX data lost %" PRIu64 " times out of %u!\n\n",
1940                             stats->total_aux_lost,
1941                             stats->nr_events[PERF_RECORD_AUX]);
1942         }
1943
1944         if (session->tool->aux == perf_event__process_aux &&
1945             stats->total_aux_partial != 0) {
1946                 bool vmm_exclusive = false;
1947
1948                 (void)sysfs__read_bool("module/kvm_intel/parameters/vmm_exclusive",
1949                                        &vmm_exclusive);
1950
1951                 ui__warning("AUX data had gaps in it %" PRIu64 " times out of %u!\n\n"
1952                             "Are you running a KVM guest in the background?%s\n\n",
1953                             stats->total_aux_partial,
1954                             stats->nr_events[PERF_RECORD_AUX],
1955                             vmm_exclusive ?
1956                             "\nReloading the kvm_intel module with vmm_exclusive=0\n"
1957                             "will reduce the gaps to only the guest's timeslices." :
1958                             "");
1959         }
1960
1961         if (session->tool->aux == perf_event__process_aux &&
1962             stats->total_aux_collision != 0) {
1963                 ui__warning("AUX data collision detected %" PRIu64 " times out of %u!\n\n",
1964                             stats->total_aux_collision,
1965                             stats->nr_events[PERF_RECORD_AUX]);
1966         }
1967
1968         if (stats->nr_unknown_events != 0) {
1969                 ui__warning("Found %u unknown events!\n\n"
1970                             "Is this an older tool processing a perf.data "
1971                             "file generated by a more recent tool?\n\n"
1972                             "If that is not the case, consider "
1973                             "reporting to linux-kernel@vger.kernel.org.\n\n",
1974                             stats->nr_unknown_events);
1975         }
1976
1977         if (stats->nr_unknown_id != 0) {
1978                 ui__warning("%u samples with id not present in the header\n",
1979                             stats->nr_unknown_id);
1980         }
1981
1982         if (stats->nr_invalid_chains != 0) {
1983                 ui__warning("Found invalid callchains!\n\n"
1984                             "%u out of %u events were discarded for this reason.\n\n"
1985                             "Consider reporting to linux-kernel@vger.kernel.org.\n\n",
1986                             stats->nr_invalid_chains,
1987                             stats->nr_events[PERF_RECORD_SAMPLE]);
1988         }
1989
1990         if (stats->nr_unprocessable_samples != 0) {
1991                 ui__warning("%u unprocessable samples recorded.\n"
1992                             "Do you have a KVM guest running and not using 'perf kvm'?\n",
1993                             stats->nr_unprocessable_samples);
1994         }
1995
1996         perf_session__warn_order(session);
1997
1998         events_stats__auxtrace_error_warn(stats);
1999
2000         if (stats->nr_proc_map_timeout != 0) {
2001                 ui__warning("%d map information files for pre-existing threads were\n"
2002                             "not processed; if there are samples for those addresses they\n"
2003                             "will not be resolved. You may find out which threads these\n"
2004                             "are by running with -v and redirecting the output\n"
2005                             "to a file.\n"
2006                             "Is the time limit to process the proc map too short?\n"
2007                             "Increase it with --proc-map-timeout.\n",
2008                             stats->nr_proc_map_timeout);
2009         }
2010 }
2011
2012 static int perf_session__flush_thread_stack(struct thread *thread,
2013                                             void *p __maybe_unused)
2014 {
2015         return thread_stack__flush(thread);
2016 }
2017
2018 static int perf_session__flush_thread_stacks(struct perf_session *session)
2019 {
2020         return machines__for_each_thread(&session->machines,
2021                                          perf_session__flush_thread_stack,
2022                                          NULL);
2023 }
2024
2025 volatile int session_done;
2026
2027 static int __perf_session__process_decomp_events(struct perf_session *session);
2028
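/*
 * Pipe mode: events arrive as a stream, so read one header at a time,
 * grow the buffer to fit the payload and process each event as soon as it
 * is complete.
 */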
2029 static int __perf_session__process_pipe_events(struct perf_session *session)
2030 {
2031         struct ordered_events *oe = &session->ordered_events;
2032         struct perf_tool *tool = session->tool;
2033         union perf_event *event;
2034         uint32_t size, cur_size = 0;
2035         void *buf = NULL;
2036         s64 skip = 0;
2037         u64 head;
2038         ssize_t err;
2039         void *p;
2040
2041         perf_tool__fill_defaults(tool);
2042
2043         head = 0;
2044         cur_size = sizeof(union perf_event);
2045
2046         buf = malloc(cur_size);
2047         if (!buf)
2048                 return -errno;
2049         ordered_events__set_copy_on_queue(oe, true);
2050 more:
2051         event = buf;
2052         err = perf_data__read(session->data, event,
2053                               sizeof(struct perf_event_header));
2054         if (err <= 0) {
2055                 if (err == 0)
2056                         goto done;
2057
2058                 pr_err("failed to read event header\n");
2059                 goto out_err;
2060         }
2061
2062         if (session->header.needs_swap)
2063                 perf_event_header__bswap(&event->header);
2064
2065         size = event->header.size;
2066         if (size < sizeof(struct perf_event_header)) {
2067                 pr_err("bad event header size\n");
2068                 goto out_err;
2069         }
2070
2071         if (size > cur_size) {
2072                 void *new = realloc(buf, size);
2073                 if (!new) {
2074                         pr_err("failed to allocate memory to read event\n");
2075                         goto out_err;
2076                 }
2077                 buf = new;
2078                 cur_size = size;
2079                 event = buf;
2080         }
2081         p = event;
2082         p += sizeof(struct perf_event_header);
2083
2084         if (size - sizeof(struct perf_event_header)) {
2085                 err = perf_data__read(session->data, p,
2086                                       size - sizeof(struct perf_event_header));
2087                 if (err <= 0) {
2088                         if (err == 0) {
2089                                 pr_err("unexpected end of event stream\n");
2090                                 goto done;
2091                         }
2092
2093                         pr_err("failed to read event data\n");
2094                         goto out_err;
2095                 }
2096         }
2097
2098         if ((skip = perf_session__process_event(session, event, head, "pipe")) < 0) {
2099                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2100                        head, event->header.size, event->header.type);
2101                 err = -EINVAL;
2102                 goto out_err;
2103         }
2104
2105         head += size;
2106
2107         if (skip > 0)
2108                 head += skip;
2109
2110         err = __perf_session__process_decomp_events(session);
2111         if (err)
2112                 goto out_err;
2113
2114         if (!session_done())
2115                 goto more;
2116 done:
2117         /* do the final flush for ordered samples */
2118         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2119         if (err)
2120                 goto out_err;
2121         err = auxtrace__flush_events(session, tool);
2122         if (err)
2123                 goto out_err;
2124         err = perf_session__flush_thread_stacks(session);
2125 out_err:
2126         free(buf);
2127         if (!tool->no_warn)
2128                 perf_session__warn_about_errors(session);
2129         ordered_events__free(&session->ordered_events);
2130         auxtrace__free_events(session);
2131         return err;
2132 }
2133
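/*
 * Return the event at 'head' if it lies entirely within the current mmap
 * window, NULL if the window needs to be remapped first, or 'error' if the
 * recorded size can never fit (fuzzed or compressed data).
 */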
2134 static union perf_event *
2135 prefetch_event(char *buf, u64 head, size_t mmap_size,
2136                bool needs_swap, union perf_event *error)
2137 {
2138         union perf_event *event;
2139         u16 event_size;
2140
2141         /*
2142          * Ensure we have enough space remaining to read
2143          * the size of the event in the header.
2144          */
2145         if (head + sizeof(event->header) > mmap_size)
2146                 return NULL;
2147
2148         event = (union perf_event *)(buf + head);
2149         if (needs_swap)
2150                 perf_event_header__bswap(&event->header);
2151
2152         event_size = event->header.size;
2153         if (head + event_size <= mmap_size)
2154                 return event;
2155
2156         /* We're not fetching the event so swap back again */
2157         if (needs_swap)
2158                 perf_event_header__bswap(&event->header);
2159
2160         /* Check if the event fits into the next mmapped buf. */
2161         if (event_size <= mmap_size - head % page_size) {
2162                 /* Remap buf and fetch again. */
2163                 return NULL;
2164         }
2165
2166         /* Invalid input. Event size should never exceed mmap_size. */
2167         pr_debug("%s: head=%#" PRIx64 " event->header.size=%#x, mmap_size=%#zx:"
2168                  " fuzzed or compressed perf.data?\n", __func__, head, event_size, mmap_size);
2169
2170         return error;
2171 }
2172
2173 static union perf_event *
2174 fetch_mmaped_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2175 {
2176         return prefetch_event(buf, head, mmap_size, needs_swap, ERR_PTR(-EINVAL));
2177 }
2178
2179 static union perf_event *
2180 fetch_decomp_event(u64 head, size_t mmap_size, char *buf, bool needs_swap)
2181 {
2182         return prefetch_event(buf, head, mmap_size, needs_swap, NULL);
2183 }
2184
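/*
 * Drain events sitting in the active decompression buffer, processing them
 * the same way as events read directly from the file.
 */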
2185 static int __perf_session__process_decomp_events(struct perf_session *session)
2186 {
2187         s64 skip;
2188         u64 size;
2189         struct decomp *decomp = session->active_decomp->decomp_last;
2190
2191         if (!decomp)
2192                 return 0;
2193
2194         while (decomp->head < decomp->size && !session_done()) {
2195                 union perf_event *event = fetch_decomp_event(decomp->head, decomp->size, decomp->data,
2196                                                              session->header.needs_swap);
2197
2198                 if (!event)
2199                         break;
2200
2201                 size = event->header.size;
2202
2203                 if (size < sizeof(struct perf_event_header) ||
2204                     (skip = perf_session__process_event(session, event, decomp->file_pos,
2205                                                         decomp->file_path)) < 0) {
2206                         pr_err("%#" PRIx64 " [%#x]: failed to process type: %d\n",
2207                                 decomp->file_pos + decomp->head, event->header.size, event->header.type);
2208                         return -EINVAL;
2209                 }
2210
2211                 if (skip)
2212                         size += skip;
2213
2214                 decomp->head += size;
2215         }
2216
2217         return 0;
2218 }
2219
2220 /*
2221  * On 64bit we can mmap the data file in one go. No need for tiny mmap
2222  * slices. On 32bit we use 32MB.
2223  */
2224 #if BITS_PER_LONG == 64
2225 #define MMAP_SIZE ULLONG_MAX
2226 #define NUM_MMAPS 1
2227 #else
2228 #define MMAP_SIZE (32 * 1024 * 1024ULL)
2229 #define NUM_MMAPS 128
2230 #endif
2231
2232 struct reader;
2233
2234 typedef s64 (*reader_cb_t)(struct perf_session *session,
2235                            union perf_event *event,
2236                            u64 file_offset,
2237                            const char *file_path);
2238
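/*
 * Per-data-file reading state: file descriptor and offsets, the rotating
 * set of mmap windows used to walk the file, and per-reader
 * (de)compression state.
 */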
2239 struct reader {
2240         int              fd;
2241         const char       *path;
2242         u64              data_size;
2243         u64              data_offset;
2244         reader_cb_t      process;
2245         bool             in_place_update;
2246         char             *mmaps[NUM_MMAPS];
2247         size_t           mmap_size;
2248         int              mmap_idx;
2249         char             *mmap_cur;
2250         u64              file_pos;
2251         u64              file_offset;
2252         u64              head;
2253         u64              size;
2254         bool             done;
2255         struct zstd_data   zstd_data;
2256         struct decomp_data decomp_data;
2257 };
2258
2259 static int
2260 reader__init(struct reader *rd, bool *one_mmap)
2261 {
2262         u64 data_size = rd->data_size;
2263         char **mmaps = rd->mmaps;
2264
2265         rd->head = rd->data_offset;
2266         data_size += rd->data_offset;
2267
2268         rd->mmap_size = MMAP_SIZE;
2269         if (rd->mmap_size > data_size) {
2270                 rd->mmap_size = data_size;
2271                 if (one_mmap)
2272                         *one_mmap = true;
2273         }
2274
2275         memset(mmaps, 0, sizeof(rd->mmaps));
2276
2277         if (zstd_init(&rd->zstd_data, 0))
2278                 return -1;
2279         rd->decomp_data.zstd_decomp = &rd->zstd_data;
2280
2281         return 0;
2282 }
2283
2284 static void
2285 reader__release_decomp(struct reader *rd)
2286 {
2287         perf_decomp__release_events(rd->decomp_data.decomp);
2288         zstd_fini(&rd->zstd_data);
2289 }
2290
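/*
 * Map the next window of the data file at a page-aligned offset around
 * rd->head, replacing the mapping previously used at this index.
 */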
2291 static int
2292 reader__mmap(struct reader *rd, struct perf_session *session)
2293 {
2294         int mmap_prot, mmap_flags;
2295         char *buf, **mmaps = rd->mmaps;
2296         u64 page_offset;
2297
2298         mmap_prot  = PROT_READ;
2299         mmap_flags = MAP_SHARED;
2300
2301         if (rd->in_place_update) {
2302                 mmap_prot  |= PROT_WRITE;
2303         } else if (session->header.needs_swap) {
2304                 mmap_prot  |= PROT_WRITE;
2305                 mmap_flags = MAP_PRIVATE;
2306         }
2307
2308         if (mmaps[rd->mmap_idx]) {
2309                 munmap(mmaps[rd->mmap_idx], rd->mmap_size);
2310                 mmaps[rd->mmap_idx] = NULL;
2311         }
2312
2313         page_offset = page_size * (rd->head / page_size);
2314         rd->file_offset += page_offset;
2315         rd->head -= page_offset;
2316
2317         buf = mmap(NULL, rd->mmap_size, mmap_prot, mmap_flags, rd->fd,
2318                    rd->file_offset);
2319         if (buf == MAP_FAILED) {
2320                 pr_err("failed to mmap file\n");
2321                 return -errno;
2322         }
2323         mmaps[rd->mmap_idx] = rd->mmap_cur = buf;
2324         rd->mmap_idx = (rd->mmap_idx + 1) & (ARRAY_SIZE(rd->mmaps) - 1);
2325         rd->file_pos = rd->file_offset + rd->head;
2326         if (session->one_mmap) {
2327                 session->one_mmap_addr = buf;
2328                 session->one_mmap_offset = rd->file_offset;
2329         }
2330
2331         return 0;
2332 }
2333
2334 enum {
2335         READER_OK,
2336         READER_NODATA,
2337 };
2338
2339 static int
2340 reader__read_event(struct reader *rd, struct perf_session *session,
2341                    struct ui_progress *prog)
2342 {
2343         u64 size;
2344         int err = READER_OK;
2345         union perf_event *event;
2346         s64 skip;
2347
2348         event = fetch_mmaped_event(rd->head, rd->mmap_size, rd->mmap_cur,
2349                                    session->header.needs_swap);
2350         if (IS_ERR(event))
2351                 return PTR_ERR(event);
2352
2353         if (!event)
2354                 return READER_NODATA;
2355
2356         size = event->header.size;
2357
2358         skip = -EINVAL;
2359
2360         if (size < sizeof(struct perf_event_header) ||
2361             (skip = rd->process(session, event, rd->file_pos, rd->path)) < 0) {
2362                 pr_err("%#" PRIx64 " [%#x]: failed to process type: %d [%s]\n",
2363                        rd->file_offset + rd->head, event->header.size,
2364                        event->header.type, strerror(-skip));
2365                 err = skip;
2366                 goto out;
2367         }
2368
2369         if (skip)
2370                 size += skip;
2371
2372         rd->size += size;
2373         rd->head += size;
2374         rd->file_pos += size;
2375
2376         err = __perf_session__process_decomp_events(session);
2377         if (err)
2378                 goto out;
2379
2380         ui_progress__update(prog, size);
2381
2382 out:
2383         return err;
2384 }
2385
2386 static inline bool
2387 reader__eof(struct reader *rd)
2388 {
2389         return (rd->file_pos >= rd->data_size + rd->data_offset);
2390 }
2391
2392 static int
2393 reader__process_events(struct reader *rd, struct perf_session *session,
2394                        struct ui_progress *prog)
2395 {
2396         int err;
2397
2398         err = reader__init(rd, &session->one_mmap);
2399         if (err)
2400                 goto out;
2401
2402         session->active_decomp = &rd->decomp_data;
2403
2404 remap:
2405         err = reader__mmap(rd, session);
2406         if (err)
2407                 goto out;
2408
2409 more:
2410         err = reader__read_event(rd, session, prog);
2411         if (err < 0)
2412                 goto out;
2413         else if (err == READER_NODATA)
2414                 goto remap;
2415
2416         if (session_done())
2417                 goto out;
2418
2419         if (!reader__eof(rd))
2420                 goto more;
2421
2422 out:
2423         session->active_decomp = &session->decomp_data;
2424         return err;
2425 }
2426
2427 static s64 process_simple(struct perf_session *session,
2428                           union perf_event *event,
2429                           u64 file_offset,
2430                           const char *file_path)
2431 {
2432         return perf_session__process_event(session, event, file_offset, file_path);
2433 }
2434
2435 static int __perf_session__process_events(struct perf_session *session)
2436 {
2437         struct reader rd = {
2438                 .fd             = perf_data__fd(session->data),
2439                 .path           = session->data->file.path,
2440                 .data_size      = session->header.data_size,
2441                 .data_offset    = session->header.data_offset,
2442                 .process        = process_simple,
2443                 .in_place_update = session->data->in_place_update,
2444         };
2445         struct ordered_events *oe = &session->ordered_events;
2446         struct perf_tool *tool = session->tool;
2447         struct ui_progress prog;
2448         int err;
2449
2450         perf_tool__fill_defaults(tool);
2451
2452         if (rd.data_size == 0)
2453                 return -1;
2454
2455         ui_progress__init_size(&prog, rd.data_size, "Processing events...");
2456
2457         err = reader__process_events(&rd, session, &prog);
2458         if (err)
2459                 goto out_err;
2460         /* do the final flush for ordered samples */
2461         err = ordered_events__flush(oe, OE_FLUSH__FINAL);
2462         if (err)
2463                 goto out_err;
2464         err = auxtrace__flush_events(session, tool);
2465         if (err)
2466                 goto out_err;
2467         err = perf_session__flush_thread_stacks(session);
2468 out_err:
2469         ui_progress__finish();
2470         if (!tool->no_warn)
2471                 perf_session__warn_about_errors(session);
2472         /*
2473          * We may be switching perf.data output; make ordered_events
2474          * reusable.
2475          */
2476         ordered_events__reinit(&session->ordered_events);
2477         auxtrace__free_events(session);
2478         reader__release_decomp(&rd);
2479         session->one_mmap = false;
2480         return err;
2481 }
2482
2483 /*
2484  * Processing 2 MB of data from each reader in sequence,
2485  * because that's the way the ordered events sorting works
2486  * most efficiently.
2487  */
2488 #define READER_MAX_SIZE (2 * 1024 * 1024)
2489
2490 /*
2491  * This function reads, merges and processes directory data.
2492  * It assumes version 1 of the directory data, where each
2493  * data file holds per-cpu data, already sorted by the kernel.
2494  */
2495 static int __perf_session__process_dir_events(struct perf_session *session)
2496 {
2497         struct perf_data *data = session->data;
2498         struct perf_tool *tool = session->tool;
2499         int i, ret, readers, nr_readers;
2500         struct ui_progress prog;
2501         u64 total_size = perf_data__size(session->data);
2502         struct reader *rd;
2503
2504         perf_tool__fill_defaults(tool);
2505
2506         ui_progress__init_size(&prog, total_size, "Sorting events...");
2507
2508         nr_readers = 1;
2509         for (i = 0; i < data->dir.nr; i++) {
2510                 if (data->dir.files[i].size)
2511                         nr_readers++;
2512         }
2513
2514         rd = zalloc(nr_readers * sizeof(struct reader));
2515         if (!rd)
2516                 return -ENOMEM;
2517
2518         rd[0] = (struct reader) {
2519                 .fd              = perf_data__fd(session->data),
2520                 .path            = session->data->file.path,
2521                 .data_size       = session->header.data_size,
2522                 .data_offset     = session->header.data_offset,
2523                 .process         = process_simple,
2524                 .in_place_update = session->data->in_place_update,
2525         };
2526         ret = reader__init(&rd[0], NULL);
2527         if (ret)
2528                 goto out_err;
2529         ret = reader__mmap(&rd[0], session);
2530         if (ret)
2531                 goto out_err;
2532         readers = 1;
2533
2534         for (i = 0; i < data->dir.nr; i++) {
2535                 if (!data->dir.files[i].size)
2536                         continue;
2537                 rd[readers] = (struct reader) {
2538                         .fd              = data->dir.files[i].fd,
2539                         .path            = data->dir.files[i].path,
2540                         .data_size       = data->dir.files[i].size,
2541                         .data_offset     = 0,
2542                         .process         = process_simple,
2543                         .in_place_update = session->data->in_place_update,
2544                 };
2545                 ret = reader__init(&rd[readers], NULL);
2546                 if (ret)
2547                         goto out_err;
2548                 ret = reader__mmap(&rd[readers], session);
2549                 if (ret)
2550                         goto out_err;
2551                 readers++;
2552         }
2553
2554         i = 0;
2555         while (readers) {
2556                 if (session_done())
2557                         break;
2558
2559                 if (rd[i].done) {
2560                         i = (i + 1) % nr_readers;
2561                         continue;
2562                 }
2563                 if (reader__eof(&rd[i])) {
2564                         rd[i].done = true;
2565                         readers--;
2566                         continue;
2567                 }
2568
2569                 session->active_decomp = &rd[i].decomp_data;
2570                 ret = reader__read_event(&rd[i], session, &prog);
2571                 if (ret < 0) {
2572                         goto out_err;
2573                 } else if (ret == READER_NODATA) {
2574                         ret = reader__mmap(&rd[i], session);
2575                         if (ret)
2576                                 goto out_err;
2577                 }
2578
2579                 if (rd[i].size >= READER_MAX_SIZE) {
2580                         rd[i].size = 0;
2581                         i = (i + 1) % nr_readers;
2582                 }
2583         }
2584
2585         ret = ordered_events__flush(&session->ordered_events, OE_FLUSH__FINAL);
2586         if (ret)
2587                 goto out_err;
2588
2589         ret = perf_session__flush_thread_stacks(session);
2590 out_err:
2591         ui_progress__finish();
2592
2593         if (!tool->no_warn)
2594                 perf_session__warn_about_errors(session);
2595
2596         /*
2597          * We may be switching perf.data output; make ordered_events
2598          * reusable.
2599          */
2600         ordered_events__reinit(&session->ordered_events);
2601
2602         session->one_mmap = false;
2603
2604         session->active_decomp = &session->decomp_data;
2605         for (i = 0; i < nr_readers; i++)
2606                 reader__release_decomp(&rd[i]);
2607         zfree(&rd);
2608
2609         return ret;
2610 }
2611
2612 int perf_session__process_events(struct perf_session *session)
2613 {
2614         if (perf_session__register_idle_thread(session) < 0)
2615                 return -ENOMEM;
2616
2617         if (perf_data__is_pipe(session->data))
2618                 return __perf_session__process_pipe_events(session);
2619
2620         if (perf_data__is_dir(session->data) && session->data->dir.nr)
2621                 return __perf_session__process_dir_events(session);
2622
2623         return __perf_session__process_events(session);
2624 }
2625
2626 bool perf_session__has_traces(struct perf_session *session, const char *msg)
2627 {
2628         struct evsel *evsel;
2629
2630         evlist__for_each_entry(session->evlist, evsel) {
2631                 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT)
2632                         return true;
2633         }
2634
2635         pr_err("No trace sample to read. Did you call 'perf %s'?\n", msg);
2636         return false;
2637 }
2638
2639 int map__set_kallsyms_ref_reloc_sym(struct map *map, const char *symbol_name, u64 addr)
2640 {
2641         char *bracket;
2642         struct ref_reloc_sym *ref;
2643         struct kmap *kmap;
2644
2645         ref = zalloc(sizeof(struct ref_reloc_sym));
2646         if (ref == NULL)
2647                 return -ENOMEM;
2648
2649         ref->name = strdup(symbol_name);
2650         if (ref->name == NULL) {
2651                 free(ref);
2652                 return -ENOMEM;
2653         }
2654
2655         bracket = strchr(ref->name, ']');
2656         if (bracket)
2657                 *bracket = '\0';
2658
2659         ref->addr = addr;
2660
2661         kmap = map__kmap(map);
2662         if (kmap)
2663                 kmap->ref_reloc_sym = ref;
2664
2665         return 0;
2666 }
2667
2668 size_t perf_session__fprintf_dsos(struct perf_session *session, FILE *fp)
2669 {
2670         return machines__fprintf_dsos(&session->machines, fp);
2671 }
2672
2673 size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp,
2674                                           bool (skip)(struct dso *dso, int parm), int parm)
2675 {
2676         return machines__fprintf_dsos_buildid(&session->machines, fp, skip, parm);
2677 }
2678
2679 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp,
2680                                        bool skip_empty)
2681 {
2682         size_t ret;
2683         const char *msg = "";
2684
2685         if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
2686                 msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
2687
2688         ret = fprintf(fp, "\nAggregated stats:%s\n", msg);
2689
2690         ret += events_stats__fprintf(&session->evlist->stats, fp, skip_empty);
2691         return ret;
2692 }
2693
2694 size_t perf_session__fprintf(struct perf_session *session, FILE *fp)
2695 {
2696         /*
2697          * FIXME: Here we have to actually print all the machines in this
2698          * session, not just the host...
2699          */
2700         return machine__fprintf(&session->machines.host, fp);
2701 }
2702
2703 struct evsel *perf_session__find_first_evtype(struct perf_session *session,
2704                                               unsigned int type)
2705 {
2706         struct evsel *pos;
2707
2708         evlist__for_each_entry(session->evlist, pos) {
2709                 if (pos->core.attr.type == type)
2710                         return pos;
2711         }
2712         return NULL;
2713 }
2714
2715 int perf_session__cpu_bitmap(struct perf_session *session,
2716                              const char *cpu_list, unsigned long *cpu_bitmap)
2717 {
2718         int i, err = -1;
2719         struct perf_cpu_map *map;
2720         int nr_cpus = min(session->header.env.nr_cpus_avail, MAX_NR_CPUS);
2721
2722         for (i = 0; i < PERF_TYPE_MAX; ++i) {
2723                 struct evsel *evsel;
2724
2725                 evsel = perf_session__find_first_evtype(session, i);
2726                 if (!evsel)
2727                         continue;
2728
2729                 if (!(evsel->core.attr.sample_type & PERF_SAMPLE_CPU)) {
2730                         pr_err("File does not contain CPU events. "
2731                                "Remove -C option to proceed.\n");
2732                         return -1;
2733                 }
2734         }
2735
2736         map = perf_cpu_map__new(cpu_list);
2737         if (map == NULL) {
2738                 pr_err("Invalid cpu_list\n");
2739                 return -1;
2740         }
2741
2742         for (i = 0; i < perf_cpu_map__nr(map); i++) {
2743                 struct perf_cpu cpu = perf_cpu_map__cpu(map, i);
2744
2745                 if (cpu.cpu >= nr_cpus) {
2746                         pr_err("Requested CPU %d too large. "
2747                                "Consider raising MAX_NR_CPUS\n", cpu.cpu);
2748                         goto out_delete_map;
2749                 }
2750
2751                 set_bit(cpu.cpu, cpu_bitmap);
2752         }
2753
2754         err = 0;
2755
2756 out_delete_map:
2757         perf_cpu_map__put(map);
2758         return err;
2759 }
2760
2761 void perf_session__fprintf_info(struct perf_session *session, FILE *fp,
2762                                 bool full)
2763 {
2764         if (session == NULL || fp == NULL)
2765                 return;
2766
2767         fprintf(fp, "# ========\n");
2768         perf_header__fprintf_info(session, fp, full);
2769         fprintf(fp, "# ========\n#\n");
2770 }
2771
2772 static int perf_session__register_guest(struct perf_session *session, pid_t machine_pid)
2773 {
2774         struct machine *machine = machines__findnew(&session->machines, machine_pid);
2775         struct thread *thread;
2776
2777         if (!machine)
2778                 return -ENOMEM;
2779
2780         machine->single_address_space = session->machines.host.single_address_space;
2781
2782         thread = machine__idle_thread(machine);
2783         if (!thread)
2784                 return -ENOMEM;
2785         thread__put(thread);
2786
2787         machine->kallsyms_filename = perf_data__guest_kallsyms_name(session->data, machine_pid);
2788
2789         return 0;
2790 }
2791
2792 static int perf_session__set_guest_cpu(struct perf_session *session, pid_t pid,
2793                                        pid_t tid, int guest_cpu)
2794 {
2795         struct machine *machine = &session->machines.host;
2796         struct thread *thread = machine__findnew_thread(machine, pid, tid);
2797
2798         if (!thread)
2799                 return -ENOMEM;
2800         thread->guest_cpu = guest_cpu;
2801         thread__put(thread);
2802
2803         return 0;
2804 }
2805
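/*
 * Apply an ID index event: record idx/cpu/tid (and, when the extended
 * entry format is present, machine_pid/vcpu) for each sample id, creating
 * guest machines for any machine_pid seen for the first time.
 */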
2806 int perf_event__process_id_index(struct perf_session *session,
2807                                  union perf_event *event)
2808 {
2809         struct evlist *evlist = session->evlist;
2810         struct perf_record_id_index *ie = &event->id_index;
2811         size_t sz = ie->header.size - sizeof(*ie);
2812         size_t i, nr, max_nr;
2813         size_t e1_sz = sizeof(struct id_index_entry);
2814         size_t e2_sz = sizeof(struct id_index_entry_2);
2815         size_t etot_sz = e1_sz + e2_sz;
2816         struct id_index_entry_2 *e2;
2817         pid_t last_pid = 0;
2818
2819         max_nr = sz / e1_sz;
2820         nr = ie->nr;
2821         if (nr > max_nr) {
2822                 printf("Too big: nr %zu max_nr %zu\n", nr, max_nr);
2823                 return -EINVAL;
2824         }
2825
2826         if (sz >= nr * etot_sz) {
2827                 max_nr = sz / etot_sz;
2828                 if (nr > max_nr) {
2829                         printf("Too big2: nr %zu max_nr %zu\n", nr, max_nr);
2830                         return -EINVAL;
2831                 }
2832                 e2 = (void *)ie + sizeof(*ie) + nr * e1_sz;
2833         } else {
2834                 e2 = NULL;
2835         }
2836
2837         if (dump_trace)
2838                 fprintf(stdout, " nr: %zu\n", nr);
2839
2840         for (i = 0; i < nr; i++, (e2 ? e2++ : 0)) {
2841                 struct id_index_entry *e = &ie->entries[i];
2842                 struct perf_sample_id *sid;
2843                 int ret;
2844
2845                 if (dump_trace) {
2846                         fprintf(stdout, " ... id: %"PRI_lu64, e->id);
2847                         fprintf(stdout, "  idx: %"PRI_lu64, e->idx);
2848                         fprintf(stdout, "  cpu: %"PRI_ld64, e->cpu);
2849                         fprintf(stdout, "  tid: %"PRI_ld64, e->tid);
2850                         if (e2) {
2851                                 fprintf(stdout, "  machine_pid: %"PRI_ld64, e2->machine_pid);
2852                                 fprintf(stdout, "  vcpu: %"PRI_lu64"\n", e2->vcpu);
2853                         } else {
2854                                 fprintf(stdout, "\n");
2855                         }
2856                 }
2857
2858                 sid = evlist__id2sid(evlist, e->id);
2859                 if (!sid)
2860                         return -ENOENT;
2861
2862                 sid->idx = e->idx;
2863                 sid->cpu.cpu = e->cpu;
2864                 sid->tid = e->tid;
2865
2866                 if (!e2)
2867                         continue;
2868
2869                 sid->machine_pid = e2->machine_pid;
2870                 sid->vcpu.cpu = e2->vcpu;
2871
2872                 if (!sid->machine_pid)
2873                         continue;
2874
2875                 if (sid->machine_pid != last_pid) {
2876                         ret = perf_session__register_guest(session, sid->machine_pid);
2877                         if (ret)
2878                                 return ret;
2879                         last_pid = sid->machine_pid;
2880                         perf_guest = true;
2881                 }
2882
2883                 ret = perf_session__set_guest_cpu(session, sid->machine_pid, e->tid, e2->vcpu);
2884                 if (ret)
2885                         return ret;
2886         }
2887         return 0;
2888 }