1 // SPDX-License-Identifier: GPL-2.0
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
11 #include "util/build-id.h"
12 #include <subcmd/parse-options.h>
13 #include <internal/xyarray.h>
14 #include "util/parse-events.h"
15 #include "util/config.h"
17 #include "util/callchain.h"
18 #include "util/cgroup.h"
19 #include "util/header.h"
20 #include "util/event.h"
21 #include "util/evlist.h"
22 #include "util/evsel.h"
23 #include "util/debug.h"
24 #include "util/mmap.h"
25 #include "util/mutex.h"
26 #include "util/target.h"
27 #include "util/session.h"
28 #include "util/tool.h"
29 #include "util/symbol.h"
30 #include "util/record.h"
31 #include "util/cpumap.h"
32 #include "util/thread_map.h"
33 #include "util/data.h"
34 #include "util/perf_regs.h"
35 #include "util/auxtrace.h"
37 #include "util/parse-branch-options.h"
38 #include "util/parse-regs-options.h"
39 #include "util/perf_api_probe.h"
40 #include "util/trigger.h"
41 #include "util/perf-hooks.h"
42 #include "util/cpu-set-sched.h"
43 #include "util/synthetic-events.h"
44 #include "util/time-utils.h"
45 #include "util/units.h"
46 #include "util/bpf-event.h"
47 #include "util/util.h"
50 #include "util/pmus.h"
51 #include "util/clockid.h"
52 #include "util/off_cpu.h"
53 #include "util/bpf-filter.h"
69 #ifdef HAVE_EVENTFD_SUPPORT
70 #include <sys/eventfd.h>
74 #include <sys/types.h>
77 #include <linux/err.h>
78 #include <linux/string.h>
79 #include <linux/time64.h>
80 #include <linux/zalloc.h>
81 #include <linux/bitmap.h>
84 struct switch_output {
97 struct mmap_cpu_mask maps;
98 struct mmap_cpu_mask affinity;
101 struct record_thread {
103 struct thread_mask *mask;
108 struct fdarray pollfd;
112 struct mmap **overwrite_maps;
114 unsigned long long samples;
115 unsigned long waking;
117 u64 bytes_transferred;
118 u64 bytes_compressed;
121 static __thread struct record_thread *thread;
124 THREAD_MSG__UNDEFINED = 0,
129 static const char *thread_msg_tags[THREAD_MSG__MAX] = {
134 THREAD_SPEC__UNDEFINED = 0,
137 THREAD_SPEC__PACKAGE,
143 static const char *thread_spec_tags[THREAD_SPEC__MAX] = {
144 "undefined", "cpu", "core", "package", "numa", "user"
147 struct pollfd_index_map {
148 int evlist_pollfd_index;
149 int thread_pollfd_index;
153 struct perf_tool tool;
154 struct record_opts opts;
156 u64 thread_bytes_written;
157 struct perf_data data;
158 struct auxtrace_record *itr;
159 struct evlist *evlist;
160 struct perf_session *session;
161 struct evlist *sb_evlist;
164 bool switch_output_event_set;
167 bool no_buildid_cache;
168 bool no_buildid_cache_set;
171 bool timestamp_filename;
172 bool timestamp_boundary;
174 struct switch_output switch_output;
175 unsigned long long samples;
176 unsigned long output_max_size; /* = 0: unlimited */
177 struct perf_debuginfod debuginfod;
179 struct thread_mask *thread_masks;
180 struct record_thread *thread_data;
181 struct pollfd_index_map *index_map;
183 size_t index_map_cnt;
186 static volatile int done;
188 static volatile int auxtrace_record__snapshot_started;
189 static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
190 static DEFINE_TRIGGER(switch_output_trigger);
192 static const char *affinity_tags[PERF_AFFINITY_MAX] = {
197 static inline pid_t gettid(void)
199 return (pid_t)syscall(__NR_gettid);
203 static int record__threads_enabled(struct record *rec)
205 return rec->opts.threads_spec;
208 static bool switch_output_signal(struct record *rec)
210 return rec->switch_output.signal &&
211 trigger_is_ready(&switch_output_trigger);
214 static bool switch_output_size(struct record *rec)
216 return rec->switch_output.size &&
217 trigger_is_ready(&switch_output_trigger) &&
218 (rec->bytes_written >= rec->switch_output.size);
221 static bool switch_output_time(struct record *rec)
223 return rec->switch_output.time &&
224 trigger_is_ready(&switch_output_trigger);
227 static u64 record__bytes_written(struct record *rec)
229 return rec->bytes_written + rec->thread_bytes_written;
232 static bool record__output_max_size_exceeded(struct record *rec)
234 return rec->output_max_size &&
235 (record__bytes_written(rec) >= rec->output_max_size);
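/*
 * record__write() below funnels all trace output: when a map with a
 * per-thread file is passed (threaded mode), the data and byte accounting go
 * to that thread's file; otherwise they go to the main perf.data file via
 * rec->bytes_written. The accumulated counts feed the output size limit check
 * and the switch-output size trigger.
 */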
238 static int record__write(struct record *rec, struct mmap *map __maybe_unused,
239 void *bf, size_t size)
241 struct perf_data_file *file = &rec->session->data->file;
243 if (map && map->file)
246 if (perf_data_file__write(file, bf, size) < 0) {
247 pr_err("failed to write perf data, error: %m\n");
251 if (map && map->file) {
252 thread->bytes_written += size;
253 rec->thread_bytes_written += size;
255 rec->bytes_written += size;
258 if (record__output_max_size_exceeded(rec) && !done) {
259 fprintf(stderr, "[ perf record: perf size limit reached (%" PRIu64 " KB),"
260 " stopping session ]\n",
261 record__bytes_written(rec) >> 10);
265 if (switch_output_size(rec))
266 trigger_hit(&switch_output_trigger);
271 static int record__aio_enabled(struct record *rec);
272 static int record__comp_enabled(struct record *rec);
273 static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
274 void *dst, size_t dst_size, void *src, size_t src_size);
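/*
 * With asynchronous trace writing (HAVE_AIO_SUPPORT), mmap data is first
 * copied (and optionally zstd-compressed) into a per-map aio buffer and then
 * queued with aio_write(). record__aio_sync() reaps completed requests and,
 * when not syncing all of them, returns the index of a free control block to
 * reuse; record__aio_complete() restarts partially written requests and drops
 * the map reference taken while the request was in flight.
 */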
276 #ifdef HAVE_AIO_SUPPORT
277 static int record__aio_write(struct aiocb *cblock, int trace_fd,
278 void *buf, size_t size, off_t off)
282 cblock->aio_fildes = trace_fd;
283 cblock->aio_buf = buf;
284 cblock->aio_nbytes = size;
285 cblock->aio_offset = off;
286 cblock->aio_sigevent.sigev_notify = SIGEV_NONE;
289 rc = aio_write(cblock);
292 } else if (errno != EAGAIN) {
293 cblock->aio_fildes = -1;
294 pr_err("failed to queue perf data, error: %m\n");
302 static int record__aio_complete(struct mmap *md, struct aiocb *cblock)
308 ssize_t aio_ret, written;
310 aio_errno = aio_error(cblock);
311 if (aio_errno == EINPROGRESS)
314 written = aio_ret = aio_return(cblock);
316 if (aio_errno != EINTR)
317 pr_err("failed to write perf data, error: %m\n");
321 rem_size = cblock->aio_nbytes - written;
324 cblock->aio_fildes = -1;
326	 * md->refcount is incremented in record__aio_pushfn() for
327	 * every aio write request started in record__aio_push(), so
328	 * decrement it now that the request is complete.
330 perf_mmap__put(&md->core);
334 * aio write request may require restart with the
335 * remainder if the kernel didn't write whole
338 rem_off = cblock->aio_offset + written;
339 rem_buf = (void *)(cblock->aio_buf + written);
340 record__aio_write(cblock, cblock->aio_fildes,
341 rem_buf, rem_size, rem_off);
348 static int record__aio_sync(struct mmap *md, bool sync_all)
350 struct aiocb **aiocb = md->aio.aiocb;
351 struct aiocb *cblocks = md->aio.cblocks;
352 struct timespec timeout = { 0, 1000 * 1000 * 1 }; /* 1ms */
357 for (i = 0; i < md->aio.nr_cblocks; ++i) {
358 if (cblocks[i].aio_fildes == -1 || record__aio_complete(md, &cblocks[i])) {
365	 * The started aio write is not complete yet,
366	 * so it has to be waited for before the
369 aiocb[i] = &cblocks[i];
376 while (aio_suspend((const struct aiocb **)aiocb, md->aio.nr_cblocks, &timeout)) {
377 if (!(errno == EAGAIN || errno == EINTR))
378 pr_err("failed to sync perf data, error: %m\n");
389 static int record__aio_pushfn(struct mmap *map, void *to, void *buf, size_t size)
391 struct record_aio *aio = to;
394	 * The map->core.base data pointed to by buf is copied into a free map->aio.data[] buffer
395	 * to release space in the kernel buffer as fast as possible, by calling
396	 * perf_mmap__consume() from the perf_mmap__push() function.
398	 * That lets the kernel proceed with storing more profiling data into
399	 * the kernel buffer earlier than the other per-cpu kernel buffers are handled.
401	 * Copying can be done in two steps in case the chunk of profiling data
402	 * crosses the upper bound of the kernel buffer. In this case we first move
403	 * the part of the data from map->start to the upper bound and then the remainder
404	 * from the beginning of the kernel buffer to the end of the data chunk.
407 if (record__comp_enabled(aio->rec)) {
408 ssize_t compressed = zstd_compress(aio->rec->session, NULL, aio->data + aio->size,
409 mmap__mmap_len(map) - aio->size,
412 return (int)compressed;
416 memcpy(aio->data + aio->size, buf, size);
421	 * Increment map->refcount to guard the map->aio.data[] buffer
422	 * from premature deallocation, because the map object can be
423	 * released before the aio write request started on the
424	 * map->aio.data[] buffer completes.
426	 * perf_mmap__put() is done at record__aio_complete()
427	 * after the started aio request completes, or at record__aio_push()
428	 * if the request failed to start.
430 perf_mmap__get(&map->core);
438 static int record__aio_push(struct record *rec, struct mmap *map, off_t *off)
441 int trace_fd = rec->session->data->file.fd;
442 struct record_aio aio = { .rec = rec, .size = 0 };
445	 * Call record__aio_sync() to wait until a map->aio.data[] buffer
446	 * becomes available after the previous aio write operation.
449 idx = record__aio_sync(map, false);
450 aio.data = map->aio.data[idx];
451 ret = perf_mmap__push(map, &aio, record__aio_pushfn);
452 if (ret != 0) /* ret > 0 - no data, ret < 0 - error */
456 ret = record__aio_write(&(map->aio.cblocks[idx]), trace_fd, aio.data, aio.size, *off);
459 rec->bytes_written += aio.size;
460 if (switch_output_size(rec))
461 trigger_hit(&switch_output_trigger);
464	 * Decrement the map->refcount that was incremented in record__aio_pushfn()
465	 * if the record__aio_write() operation failed to start; otherwise
466	 * map->refcount is decremented in record__aio_complete() after
467	 * the aio write operation finishes successfully.
469 perf_mmap__put(&map->core);
475 static off_t record__aio_get_pos(int trace_fd)
477 return lseek(trace_fd, 0, SEEK_CUR);
480 static void record__aio_set_pos(int trace_fd, off_t pos)
482 lseek(trace_fd, pos, SEEK_SET);
485 static void record__aio_mmap_read_sync(struct record *rec)
488 struct evlist *evlist = rec->evlist;
489 struct mmap *maps = evlist->mmap;
491 if (!record__aio_enabled(rec))
494 for (i = 0; i < evlist->core.nr_mmaps; i++) {
495 struct mmap *map = &maps[i];
498 record__aio_sync(map, true);
502 static int nr_cblocks_default = 1;
503 static int nr_cblocks_max = 4;
505 static int record__aio_parse(const struct option *opt,
509 struct record_opts *opts = (struct record_opts *)opt->value;
512 opts->nr_cblocks = 0;
515 opts->nr_cblocks = strtol(str, NULL, 0);
516 if (!opts->nr_cblocks)
517 opts->nr_cblocks = nr_cblocks_default;
522 #else /* HAVE_AIO_SUPPORT */
523 static int nr_cblocks_max = 0;
525 static int record__aio_push(struct record *rec __maybe_unused, struct mmap *map __maybe_unused,
526 off_t *off __maybe_unused)
531 static off_t record__aio_get_pos(int trace_fd __maybe_unused)
536 static void record__aio_set_pos(int trace_fd __maybe_unused, off_t pos __maybe_unused)
540 static void record__aio_mmap_read_sync(struct record *rec __maybe_unused)
545 static int record__aio_enabled(struct record *rec)
547 return rec->opts.nr_cblocks > 0;
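/*
 * Sketch of intended usage, assuming the option spelling used by perf record:
 * 'perf record --aio[=n] ...' routes through record__aio_parse() above, with n
 * defaulting to nr_cblocks_default and presumably capped at nr_cblocks_max in
 * the option handling that is not shown here.
 */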
550 #define MMAP_FLUSH_DEFAULT 1
551 static int record__mmap_flush_parse(const struct option *opt,
556 struct record_opts *opts = (struct record_opts *)opt->value;
557 static struct parse_tag tags[] = {
558 { .tag = 'B', .mult = 1 },
559 { .tag = 'K', .mult = 1 << 10 },
560 { .tag = 'M', .mult = 1 << 20 },
561 { .tag = 'G', .mult = 1 << 30 },
569 opts->mmap_flush = parse_tag_value(str, tags);
570 if (opts->mmap_flush == (int)-1)
571 opts->mmap_flush = strtol(str, NULL, 0);
574 if (!opts->mmap_flush)
575 opts->mmap_flush = MMAP_FLUSH_DEFAULT;
577 flush_max = evlist__mmap_size(opts->mmap_pages);
579 if (opts->mmap_flush > flush_max)
580 opts->mmap_flush = flush_max;
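/*
 * The flush threshold parsed above accepts B/K/M/G suffixes (presumably e.g.
 * 'perf record --mmap-flush=48K ...'), falls back to MMAP_FLUSH_DEFAULT when
 * zero, and is clamped to a maximum derived from the mmap buffer size so the
 * threshold can always be reached.
 */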
585 #ifdef HAVE_ZSTD_SUPPORT
586 static unsigned int comp_level_default = 1;
588 static int record__parse_comp_level(const struct option *opt, const char *str, int unset)
590 struct record_opts *opts = opt->value;
593 opts->comp_level = 0;
596 opts->comp_level = strtol(str, NULL, 0);
597 if (!opts->comp_level)
598 opts->comp_level = comp_level_default;
604 static unsigned int comp_level_max = 22;
606 static int record__comp_enabled(struct record *rec)
608 return rec->opts.comp_level > 0;
611 static int process_synthesized_event(struct perf_tool *tool,
612 union perf_event *event,
613 struct perf_sample *sample __maybe_unused,
614 struct machine *machine __maybe_unused)
616 struct record *rec = container_of(tool, struct record, tool);
617 return record__write(rec, NULL, event, event->header.size);
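/*
 * Synthesized events may be emitted from several threads when
 * opts->nr_threads_synthesize is greater than one (see record__synthesize()),
 * so the locked variant below serializes the writes with synth_lock.
 */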
620 static struct mutex synth_lock;
622 static int process_locked_synthesized_event(struct perf_tool *tool,
623 union perf_event *event,
624 struct perf_sample *sample __maybe_unused,
625 struct machine *machine __maybe_unused)
629 mutex_lock(&synth_lock);
630 ret = process_synthesized_event(tool, event, sample, machine);
631 mutex_unlock(&synth_lock);
635 static int record__pushfn(struct mmap *map, void *to, void *bf, size_t size)
637 struct record *rec = to;
639 if (record__comp_enabled(rec)) {
640 ssize_t compressed = zstd_compress(rec->session, map, map->data,
641 mmap__mmap_len(map), bf, size);
644 return (int)compressed;
651 return record__write(rec, map, bf, size);
654 static volatile sig_atomic_t signr = -1;
655 static volatile sig_atomic_t child_finished;
656 #ifdef HAVE_EVENTFD_SUPPORT
657 static volatile sig_atomic_t done_fd = -1;
660 static void sig_handler(int sig)
668 #ifdef HAVE_EVENTFD_SUPPORT
671 int orig_errno = errno;
674 * It is possible for this signal handler to run after done is
675 * checked in the main loop, but before the perf counter fds are
676 * polled. If this happens, the poll() will continue to wait
677 * even though done is set, and will only break out if either
678 * another signal is received, or the counters are ready for
679 * read. To ensure the poll() doesn't sleep when done is set,
680 * use an eventfd (done_fd) to wake up the poll().
682 if (write(done_fd, &tmp, sizeof(tmp)) < 0)
683 pr_err("failed to signal wakeup fd, error: %m\n");
687 #endif // HAVE_EVENTFD_SUPPORT
690 static void sigsegv_handler(int sig)
692 perf_hooks__recover();
693 sighandler_dump_stack(sig);
696 static void record__sig_exit(void)
701 signal(signr, SIG_DFL);
705 #ifdef HAVE_AUXTRACE_SUPPORT
707 static int record__process_auxtrace(struct perf_tool *tool,
709 union perf_event *event, void *data1,
710 size_t len1, void *data2, size_t len2)
712 struct record *rec = container_of(tool, struct record, tool);
713 struct perf_data *data = &rec->data;
717 if (!perf_data__is_pipe(data) && perf_data__is_single_file(data)) {
719 int fd = perf_data__fd(data);
722 file_offset = lseek(fd, 0, SEEK_CUR);
723 if (file_offset == -1)
725 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
731 /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
732 padding = (len1 + len2) & 7;
734 padding = 8 - padding;
736 record__write(rec, map, event, event->header.size);
737 record__write(rec, map, data1, len1);
739 record__write(rec, map, data2, len2);
740 record__write(rec, map, &pad, padding);
745 static int record__auxtrace_mmap_read(struct record *rec,
750 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
751 record__process_auxtrace);
761 static int record__auxtrace_mmap_read_snapshot(struct record *rec,
766 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
767 record__process_auxtrace,
768 rec->opts.auxtrace_snapshot_size);
778 static int record__auxtrace_read_snapshot_all(struct record *rec)
783 for (i = 0; i < rec->evlist->core.nr_mmaps; i++) {
784 struct mmap *map = &rec->evlist->mmap[i];
786 if (!map->auxtrace_mmap.base)
789 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
798 static void record__read_auxtrace_snapshot(struct record *rec, bool on_exit)
800 pr_debug("Recording AUX area tracing snapshot\n");
801 if (record__auxtrace_read_snapshot_all(rec) < 0) {
802 trigger_error(&auxtrace_snapshot_trigger);
804 if (auxtrace_record__snapshot_finish(rec->itr, on_exit))
805 trigger_error(&auxtrace_snapshot_trigger);
807 trigger_ready(&auxtrace_snapshot_trigger);
811 static int record__auxtrace_snapshot_exit(struct record *rec)
813 if (trigger_is_error(&auxtrace_snapshot_trigger))
816 if (!auxtrace_record__snapshot_started &&
817 auxtrace_record__snapshot_start(rec->itr))
820 record__read_auxtrace_snapshot(rec, true);
821 if (trigger_is_error(&auxtrace_snapshot_trigger))
827 static int record__auxtrace_init(struct record *rec)
831 if ((rec->opts.auxtrace_snapshot_opts || rec->opts.auxtrace_sample_opts)
832 && record__threads_enabled(rec)) {
833 pr_err("AUX area tracing options are not available in parallel streaming mode.\n");
838 rec->itr = auxtrace_record__init(rec->evlist, &err);
843 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
844 rec->opts.auxtrace_snapshot_opts);
848 err = auxtrace_parse_sample_options(rec->itr, rec->evlist, &rec->opts,
849 rec->opts.auxtrace_sample_opts);
853 auxtrace_regroup_aux_output(rec->evlist);
855 return auxtrace_parse_filters(rec->evlist);
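/*
 * The definitions below are the no-op stubs used when perf is built without
 * AUX area tracing support (the !HAVE_AUXTRACE_SUPPORT side of the
 * conditional compilation).
 */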
861 int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
862 struct mmap *map __maybe_unused)
868 void record__read_auxtrace_snapshot(struct record *rec __maybe_unused,
869 bool on_exit __maybe_unused)
874 int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
880 int record__auxtrace_snapshot_exit(struct record *rec __maybe_unused)
885 static int record__auxtrace_init(struct record *rec __maybe_unused)
892 static int record__config_text_poke(struct evlist *evlist)
896 /* Nothing to do if text poke is already configured */
897 evlist__for_each_entry(evlist, evsel) {
898 if (evsel->core.attr.text_poke)
902 evsel = evlist__add_dummy_on_all_cpus(evlist);
906 evsel->core.attr.text_poke = 1;
907 evsel->core.attr.ksymbol = 1;
908 evsel->immediate = true;
909 evsel__set_sample_bit(evsel, TIME);
914 static int record__config_off_cpu(struct record *rec)
916 return off_cpu_prepare(rec->evlist, &rec->opts.target, &rec->opts);
919 static bool record__tracking_system_wide(struct record *rec)
921 struct evlist *evlist = rec->evlist;
925	 * If a non-dummy evsel exists, system_wide sideband is needed to
926	 * help parse sample information.
927	 * For example, a PERF_RECORD_MMAP event helps parse symbols,
928	 * and a PERF_RECORD_COMM event helps parse the task executable name.
930 evlist__for_each_entry(evlist, evsel) {
931 if (!evsel__is_dummy_event(evsel))
938 static int record__config_tracking_events(struct record *rec)
940 struct record_opts *opts = &rec->opts;
941 struct evlist *evlist = rec->evlist;
942 bool system_wide = false;
946	 * For initial_delay, system wide, or a hybrid system, we need to add
947	 * a tracking event so that we can track PERF_RECORD_MMAP to cover the
948	 * delay of waiting or event synthesis.
950 if (opts->target.initial_delay || target__has_cpu(&opts->target) ||
951 perf_pmus__num_core_pmus() > 1) {
954 * User space tasks can migrate between CPUs, so when tracing
955 * selected CPUs, sideband for all CPUs is still needed.
957 if (!!opts->target.cpu_list && record__tracking_system_wide(rec))
960 evsel = evlist__findnew_tracking_event(evlist, system_wide);
965	 * Enable the tracking event when the process is forked for
966	 * initial_delay, or immediately for system wide.
968 if (opts->target.initial_delay && !evsel->immediate &&
969 !target__has_cpu(&opts->target))
970 evsel->core.attr.enable_on_exec = 1;
972 evsel->immediate = 1;
978 static bool record__kcore_readable(struct machine *machine)
980 char kcore[PATH_MAX];
983 scnprintf(kcore, sizeof(kcore), "%s/proc/kcore", machine->root_dir);
985 fd = open(kcore, O_RDONLY);
994 static int record__kcore_copy(struct machine *machine, struct perf_data *data)
996 char from_dir[PATH_MAX];
997 char kcore_dir[PATH_MAX];
1000 snprintf(from_dir, sizeof(from_dir), "%s/proc", machine->root_dir);
1002 ret = perf_data__make_kcore_dir(data, kcore_dir, sizeof(kcore_dir));
1006 return kcore_copy(from_dir, kcore_dir);
1009 static void record__thread_data_init_pipes(struct record_thread *thread_data)
1011 thread_data->pipes.msg[0] = -1;
1012 thread_data->pipes.msg[1] = -1;
1013 thread_data->pipes.ack[0] = -1;
1014 thread_data->pipes.ack[1] = -1;
1017 static int record__thread_data_open_pipes(struct record_thread *thread_data)
1019 if (pipe(thread_data->pipes.msg))
1022 if (pipe(thread_data->pipes.ack)) {
1023 close(thread_data->pipes.msg[0]);
1024 thread_data->pipes.msg[0] = -1;
1025 close(thread_data->pipes.msg[1]);
1026 thread_data->pipes.msg[1] = -1;
1030 pr_debug2("thread_data[%p]: msg=[%d,%d], ack=[%d,%d]\n", thread_data,
1031 thread_data->pipes.msg[0], thread_data->pipes.msg[1],
1032 thread_data->pipes.ack[0], thread_data->pipes.ack[1]);
1037 static void record__thread_data_close_pipes(struct record_thread *thread_data)
1039 if (thread_data->pipes.msg[0] != -1) {
1040 close(thread_data->pipes.msg[0]);
1041 thread_data->pipes.msg[0] = -1;
1043 if (thread_data->pipes.msg[1] != -1) {
1044 close(thread_data->pipes.msg[1]);
1045 thread_data->pipes.msg[1] = -1;
1047 if (thread_data->pipes.ack[0] != -1) {
1048 close(thread_data->pipes.ack[0]);
1049 thread_data->pipes.ack[0] = -1;
1051 if (thread_data->pipes.ack[1] != -1) {
1052 close(thread_data->pipes.ack[1]);
1053 thread_data->pipes.ack[1] = -1;
1057 static bool evlist__per_thread(struct evlist *evlist)
1059 return cpu_map__is_dummy(evlist->core.user_requested_cpus);
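/*
 * Each worker thread gets the subset of the evlist's mmaps whose CPUs fall
 * into its thread mask; in per-thread mode (dummy CPU map) a thread simply
 * takes all of the maps. Overwrite maps are mirrored the same way.
 */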
1062 static int record__thread_data_init_maps(struct record_thread *thread_data, struct evlist *evlist)
1064 int m, tm, nr_mmaps = evlist->core.nr_mmaps;
1065 struct mmap *mmap = evlist->mmap;
1066 struct mmap *overwrite_mmap = evlist->overwrite_mmap;
1067 struct perf_cpu_map *cpus = evlist->core.all_cpus;
1068 bool per_thread = evlist__per_thread(evlist);
1071 thread_data->nr_mmaps = nr_mmaps;
1073 thread_data->nr_mmaps = bitmap_weight(thread_data->mask->maps.bits,
1074 thread_data->mask->maps.nbits);
1076 thread_data->maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
1077 if (!thread_data->maps)
1080 if (overwrite_mmap) {
1081 thread_data->overwrite_maps = zalloc(thread_data->nr_mmaps * sizeof(struct mmap *));
1082 if (!thread_data->overwrite_maps) {
1083 zfree(&thread_data->maps);
1087 pr_debug2("thread_data[%p]: nr_mmaps=%d, maps=%p, ow_maps=%p\n", thread_data,
1088 thread_data->nr_mmaps, thread_data->maps, thread_data->overwrite_maps);
1090 for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
1092 test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
1093 if (thread_data->maps) {
1094 thread_data->maps[tm] = &mmap[m];
1095 pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
1096 thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
1098 if (thread_data->overwrite_maps) {
1099 thread_data->overwrite_maps[tm] = &overwrite_mmap[m];
1100 pr_debug2("thread_data[%p]: cpu%d: ow_maps[%d] -> ow_mmap[%d]\n",
1101 thread_data, perf_cpu_map__cpu(cpus, m).cpu, tm, m);
1110 static int record__thread_data_init_pollfd(struct record_thread *thread_data, struct evlist *evlist)
1113 struct mmap *map, *overwrite_map;
1115 fdarray__init(&thread_data->pollfd, 64);
1117 for (tm = 0; tm < thread_data->nr_mmaps; tm++) {
1118 map = thread_data->maps ? thread_data->maps[tm] : NULL;
1119 overwrite_map = thread_data->overwrite_maps ?
1120 thread_data->overwrite_maps[tm] : NULL;
1122 for (f = 0; f < evlist->core.pollfd.nr; f++) {
1123 void *ptr = evlist->core.pollfd.priv[f].ptr;
1125 if ((map && ptr == map) || (overwrite_map && ptr == overwrite_map)) {
1126 pos = fdarray__dup_entry_from(&thread_data->pollfd, f,
1127 &evlist->core.pollfd);
1130 pr_debug2("thread_data[%p]: pollfd[%d] <- event_fd=%d\n",
1131 thread_data, pos, evlist->core.pollfd.entries[f].fd);
1139 static void record__free_thread_data(struct record *rec)
1142 struct record_thread *thread_data = rec->thread_data;
1144 if (thread_data == NULL)
1147 for (t = 0; t < rec->nr_threads; t++) {
1148 record__thread_data_close_pipes(&thread_data[t]);
1149 zfree(&thread_data[t].maps);
1150 zfree(&thread_data[t].overwrite_maps);
1151 fdarray__exit(&thread_data[t].pollfd);
1154 zfree(&rec->thread_data);
1157 static int record__map_thread_evlist_pollfd_indexes(struct record *rec,
1158 int evlist_pollfd_index,
1159 int thread_pollfd_index)
1161 size_t x = rec->index_map_cnt;
1163 if (realloc_array_as_needed(rec->index_map, rec->index_map_sz, x, NULL))
1165 rec->index_map[x].evlist_pollfd_index = evlist_pollfd_index;
1166 rec->index_map[x].thread_pollfd_index = thread_pollfd_index;
1167 rec->index_map_cnt += 1;
1171 static int record__update_evlist_pollfd_from_thread(struct record *rec,
1172 struct evlist *evlist,
1173 struct record_thread *thread_data)
1175 struct pollfd *e_entries = evlist->core.pollfd.entries;
1176 struct pollfd *t_entries = thread_data->pollfd.entries;
1180 for (i = 0; i < rec->index_map_cnt; i++) {
1181 int e_pos = rec->index_map[i].evlist_pollfd_index;
1182 int t_pos = rec->index_map[i].thread_pollfd_index;
1184 if (e_entries[e_pos].fd != t_entries[t_pos].fd ||
1185 e_entries[e_pos].events != t_entries[t_pos].events) {
1186 pr_err("Thread and evlist pollfd index mismatch\n");
1190 e_entries[e_pos].revents = t_entries[t_pos].revents;
1195 static int record__dup_non_perf_events(struct record *rec,
1196 struct evlist *evlist,
1197 struct record_thread *thread_data)
1199 struct fdarray *fda = &evlist->core.pollfd;
1202 for (i = 0; i < fda->nr; i++) {
1203 if (!(fda->priv[i].flags & fdarray_flag__non_perf_event))
1205 ret = fdarray__dup_entry_from(&thread_data->pollfd, i, fda);
1207 pr_err("Failed to duplicate descriptor in main thread pollfd\n");
1210 pr_debug2("thread_data[%p]: pollfd[%d] <- non_perf_event fd=%d\n",
1211 thread_data, ret, fda->entries[i].fd);
1212 ret = record__map_thread_evlist_pollfd_indexes(rec, i, ret);
1214 pr_err("Failed to map thread and evlist pollfd indexes\n");
1221 static int record__alloc_thread_data(struct record *rec, struct evlist *evlist)
1224 struct record_thread *thread_data;
1226 rec->thread_data = zalloc(rec->nr_threads * sizeof(*(rec->thread_data)));
1227 if (!rec->thread_data) {
1228 pr_err("Failed to allocate thread data\n");
1231 thread_data = rec->thread_data;
1233 for (t = 0; t < rec->nr_threads; t++)
1234 record__thread_data_init_pipes(&thread_data[t]);
1236 for (t = 0; t < rec->nr_threads; t++) {
1237 thread_data[t].rec = rec;
1238 thread_data[t].mask = &rec->thread_masks[t];
1239 ret = record__thread_data_init_maps(&thread_data[t], evlist);
1241 pr_err("Failed to initialize thread[%d] maps\n", t);
1244 ret = record__thread_data_init_pollfd(&thread_data[t], evlist);
1246 pr_err("Failed to initialize thread[%d] pollfd\n", t);
1250 thread_data[t].tid = -1;
1251 ret = record__thread_data_open_pipes(&thread_data[t]);
1253 pr_err("Failed to open thread[%d] communication pipes\n", t);
1256 ret = fdarray__add(&thread_data[t].pollfd, thread_data[t].pipes.msg[0],
1257 POLLIN | POLLERR | POLLHUP, fdarray_flag__nonfilterable);
1259 pr_err("Failed to add descriptor to thread[%d] pollfd\n", t);
1262 thread_data[t].ctlfd_pos = ret;
1263 pr_debug2("thread_data[%p]: pollfd[%d] <- ctl_fd=%d\n",
1264 thread_data, thread_data[t].ctlfd_pos,
1265 thread_data[t].pipes.msg[0]);
1267 thread_data[t].tid = gettid();
1269 ret = record__dup_non_perf_events(rec, evlist, &thread_data[t]);
1273 thread_data[t].ctlfd_pos = -1; /* Not used */
1280 record__free_thread_data(rec);
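/*
 * record__mmap_evlist() maps the ring buffers with the requested mmap/AUX
 * sizes, affinity, flush and compression settings, sets up the control fd,
 * allocates the per-thread data, and in parallel streaming mode creates a
 * data directory with one file per mmap.
 */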
1285 static int record__mmap_evlist(struct record *rec,
1286 struct evlist *evlist)
1289 struct record_opts *opts = &rec->opts;
1290 bool auxtrace_overwrite = opts->auxtrace_snapshot_mode ||
1291 opts->auxtrace_sample_mode;
1294 if (opts->affinity != PERF_AFFINITY_SYS)
1295 cpu__setup_cpunode_map();
1297 if (evlist__mmap_ex(evlist, opts->mmap_pages,
1298 opts->auxtrace_mmap_pages,
1300 opts->nr_cblocks, opts->affinity,
1301 opts->mmap_flush, opts->comp_level) < 0) {
1302 if (errno == EPERM) {
1303 pr_err("Permission error mapping pages.\n"
1304 "Consider increasing "
1305 "/proc/sys/kernel/perf_event_mlock_kb,\n"
1306 "or try again with a smaller value of -m/--mmap_pages.\n"
1307 "(current value: %u,%u)\n",
1308 opts->mmap_pages, opts->auxtrace_mmap_pages);
1311 pr_err("failed to mmap with %d (%s)\n", errno,
1312 str_error_r(errno, msg, sizeof(msg)));
1320 if (evlist__initialize_ctlfd(evlist, opts->ctl_fd, opts->ctl_fd_ack))
1323 ret = record__alloc_thread_data(rec, evlist);
1327 if (record__threads_enabled(rec)) {
1328 ret = perf_data__create_dir(&rec->data, evlist->core.nr_mmaps);
1330 pr_err("Failed to create data directory: %s\n", strerror(-ret));
1333 for (i = 0; i < evlist->core.nr_mmaps; i++) {
1335 evlist->mmap[i].file = &rec->data.dir.files[i];
1336 if (evlist->overwrite_mmap)
1337 evlist->overwrite_mmap[i].file = &rec->data.dir.files[i];
1344 static int record__mmap(struct record *rec)
1346 return record__mmap_evlist(rec, rec->evlist);
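/*
 * record__open() opens every event, applying evsel__fallback() on failure and
 * breaking up weak groups where needed, warns when kptr_restrict hides kernel
 * symbols, applies event filters, and finally mmaps the evlist.
 */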
1349 static int record__open(struct record *rec)
1353 struct evlist *evlist = rec->evlist;
1354 struct perf_session *session = rec->session;
1355 struct record_opts *opts = &rec->opts;
1358 evlist__for_each_entry(evlist, pos) {
1360 if (evsel__open(pos, pos->core.cpus, pos->core.threads) < 0) {
1361 if (evsel__fallback(pos, &opts->target, errno, msg, sizeof(msg))) {
1363 ui__warning("%s\n", msg);
1366 if ((errno == EINVAL || errno == EBADF) &&
1367 pos->core.leader != &pos->core &&
1369 pos = evlist__reset_weak_group(evlist, pos, true);
1373 evsel__open_strerror(pos, &opts->target, errno, msg, sizeof(msg));
1374 ui__error("%s\n", msg);
1378 pos->supported = true;
1381 if (symbol_conf.kptr_restrict && !evlist__exclude_kernel(evlist)) {
1383 "WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1384 "check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
1385 "Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1386 "file is not found in the buildid cache or in the vmlinux path.\n\n"
1387 "Samples in kernel modules won't be resolved at all.\n\n"
1388 "If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1389 "even with a suitable vmlinux or kallsyms file.\n\n");
1392 if (evlist__apply_filters(evlist, &pos)) {
1393 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
1394 pos->filter ?: "BPF", evsel__name(pos), errno,
1395 str_error_r(errno, msg, sizeof(msg)));
1400 rc = record__mmap(rec);
1404 session->evlist = evlist;
1405 perf_session__set_id_hdr_size(session);
1410 static void set_timestamp_boundary(struct record *rec, u64 sample_time)
1412 if (rec->evlist->first_sample_time == 0)
1413 rec->evlist->first_sample_time = sample_time;
1416 rec->evlist->last_sample_time = sample_time;
1419 static int process_sample_event(struct perf_tool *tool,
1420 union perf_event *event,
1421 struct perf_sample *sample,
1422 struct evsel *evsel,
1423 struct machine *machine)
1425 struct record *rec = container_of(tool, struct record, tool);
1427 set_timestamp_boundary(rec, sample->time);
1429 if (rec->buildid_all)
1433 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
1436 static int process_buildids(struct record *rec)
1438 struct perf_session *session = rec->session;
1440 if (perf_data__size(&rec->data) == 0)
1444	 * During this process, it'll load the kernel map and replace
1445	 * dso->long_name with the real pathname it found. In this case
1446	 * we prefer the vmlinux path like
1447	 * /lib/modules/3.16.4/build/vmlinux
1449	 * rather than the build-id path (in the debug directory), e.g.
1450	 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
1452 symbol_conf.ignore_vmlinux_buildid = true;
1455	 * If --buildid-all is given, it marks all DSOs regardless of hits,
1456	 * so there is no need to process samples. But if timestamp_boundary is enabled,
1457	 * it still needs to walk all samples to get the timestamps of the
1458	 * first/last samples.
1460 if (rec->buildid_all && !rec->timestamp_boundary)
1461 rec->tool.sample = NULL;
1463 return perf_session__process_events(session);
1466 static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
1469 struct perf_tool *tool = data;
1471	 * As for the guest kernel, when processing the record & report subcommands
1472	 * we arrange the module mmaps prior to the guest kernel mmap and trigger
1473	 * a dso preload, because by default guest module symbols are loaded
1474	 * from guest kallsyms instead of /lib/modules/XXX/XXX. This
1475	 * method is used to avoid missing symbols when the first address is
1476	 * in a module instead of in the guest kernel.
1478 err = perf_event__synthesize_modules(tool, process_synthesized_event,
1481 pr_err("Couldn't record guest kernel [%d]'s reference"
1482 " relocation symbol.\n", machine->pid);
1485	 * We use _stext for the guest kernel because the guest kernel's /proc/kallsyms
1486	 * sometimes has no _text.
1488 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
1491 pr_err("Couldn't record guest kernel [%d]'s reference"
1492 " relocation symbol.\n", machine->pid);
1495 static struct perf_event_header finished_round_event = {
1496 .size = sizeof(struct perf_event_header),
1497 .type = PERF_RECORD_FINISHED_ROUND,
1500 static struct perf_event_header finished_init_event = {
1501 .size = sizeof(struct perf_event_header),
1502 .type = PERF_RECORD_FINISHED_INIT,
1505 static void record__adjust_affinity(struct record *rec, struct mmap *map)
1507 if (rec->opts.affinity != PERF_AFFINITY_SYS &&
1508 !bitmap_equal(thread->mask->affinity.bits, map->affinity_mask.bits,
1509 thread->mask->affinity.nbits)) {
1510 bitmap_zero(thread->mask->affinity.bits, thread->mask->affinity.nbits);
1511 bitmap_or(thread->mask->affinity.bits, thread->mask->affinity.bits,
1512 map->affinity_mask.bits, thread->mask->affinity.nbits);
1513 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
1514 (cpu_set_t *)thread->mask->affinity.bits);
1516 pr_debug("threads[%d]: running on cpu%d: ", thread->tid, sched_getcpu());
1517 mmap_cpu_mask__scnprintf(&thread->mask->affinity, "affinity");
1522 static size_t process_comp_header(void *record, size_t increment)
1524 struct perf_record_compressed *event = record;
1525 size_t size = sizeof(*event);
1528 event->header.size += increment;
1532 event->header.type = PERF_RECORD_COMPRESSED;
1533 event->header.size = size;
1538 static ssize_t zstd_compress(struct perf_session *session, struct mmap *map,
1539 void *dst, size_t dst_size, void *src, size_t src_size)
1542 size_t max_record_size = PERF_SAMPLE_MAX_SIZE - sizeof(struct perf_record_compressed) - 1;
1543 struct zstd_data *zstd_data = &session->zstd_data;
1545 if (map && map->file)
1546 zstd_data = &map->zstd_data;
1548 compressed = zstd_compress_stream_to_records(zstd_data, dst, dst_size, src, src_size,
1549 max_record_size, process_comp_header);
1553 if (map && map->file) {
1554 thread->bytes_transferred += src_size;
1555 thread->bytes_compressed += compressed;
1557 session->bytes_transferred += src_size;
1558 session->bytes_compressed += compressed;
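/*
 * record__mmap_read_evlist() drains either the regular or the overwrite maps
 * owned by the current thread, pushing data synchronously or via AIO, reading
 * the AUX area when not in snapshot/sample mode, and emitting a
 * PERF_RECORD_FINISHED_ROUND event when anything was written in non-threaded
 * mode.
 */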
1564 static int record__mmap_read_evlist(struct record *rec, struct evlist *evlist,
1565 bool overwrite, bool synch)
1567 u64 bytes_written = rec->bytes_written;
1572 int trace_fd = rec->data.file.fd;
1578 nr_mmaps = thread->nr_mmaps;
1579 maps = overwrite ? thread->overwrite_maps : thread->maps;
1584 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
1587 if (record__aio_enabled(rec))
1588 off = record__aio_get_pos(trace_fd);
1590 for (i = 0; i < nr_mmaps; i++) {
1592 struct mmap *map = maps[i];
1594 if (map->core.base) {
1595 record__adjust_affinity(rec, map);
1597 flush = map->core.flush;
1598 map->core.flush = 1;
1600 if (!record__aio_enabled(rec)) {
1601 if (perf_mmap__push(map, rec, record__pushfn) < 0) {
1603 map->core.flush = flush;
1608 if (record__aio_push(rec, map, &off) < 0) {
1609 record__aio_set_pos(trace_fd, off);
1611 map->core.flush = flush;
1617 map->core.flush = flush;
1620 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
1621 !rec->opts.auxtrace_sample_mode &&
1622 record__auxtrace_mmap_read(rec, map) != 0) {
1628 if (record__aio_enabled(rec))
1629 record__aio_set_pos(trace_fd, off);
1632 * Mark the round finished in case we wrote
1633 * at least one event.
1635 * No need for round events in directory mode,
1636 * because per-cpu maps and files have data
1639 if (!record__threads_enabled(rec) && bytes_written != rec->bytes_written)
1640 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
1643 evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
1648 static int record__mmap_read_all(struct record *rec, bool synch)
1652 err = record__mmap_read_evlist(rec, rec->evlist, false, synch);
1656 return record__mmap_read_evlist(rec, rec->evlist, true, synch);
1659 static void record__thread_munmap_filtered(struct fdarray *fda, int fd,
1660 void *arg __maybe_unused)
1662 struct perf_mmap *map = fda->priv[fd].ptr;
1665 perf_mmap__put(map);
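/*
 * Worker thread body: acknowledge start over the ack pipe, then loop reading
 * the thread's maps and polling its fds; a POLLHUP on the message pipe asks
 * the thread to terminate, after which it drains the maps once more and sends
 * a final acknowledgement.
 */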
1668 static void *record__thread(void *arg)
1670 enum thread_msg msg = THREAD_MSG__READY;
1671 bool terminate = false;
1672 struct fdarray *pollfd;
1676 thread->tid = gettid();
1678 err = write(thread->pipes.ack[1], &msg, sizeof(msg));
1680 pr_warning("threads[%d]: failed to notify on start: %s\n",
1681 thread->tid, strerror(errno));
1683 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
1685 pollfd = &thread->pollfd;
1686 ctlfd_pos = thread->ctlfd_pos;
1689 unsigned long long hits = thread->samples;
1691 if (record__mmap_read_all(thread->rec, false) < 0 || terminate)
1694 if (hits == thread->samples) {
1696 err = fdarray__poll(pollfd, -1);
1698	 * Propagate the error only if there is one. Ignore a positive
1699	 * number of returned events and interrupt errors.
1701 if (err > 0 || (err < 0 && errno == EINTR))
1705 if (fdarray__filter(pollfd, POLLERR | POLLHUP,
1706 record__thread_munmap_filtered, NULL) == 0)
1710 if (pollfd->entries[ctlfd_pos].revents & POLLHUP) {
1712 close(thread->pipes.msg[0]);
1713 thread->pipes.msg[0] = -1;
1714 pollfd->entries[ctlfd_pos].fd = -1;
1715 pollfd->entries[ctlfd_pos].events = 0;
1718 pollfd->entries[ctlfd_pos].revents = 0;
1720 record__mmap_read_all(thread->rec, true);
1722 err = write(thread->pipes.ack[1], &msg, sizeof(msg));
1724 pr_warning("threads[%d]: failed to notify on termination: %s\n",
1725 thread->tid, strerror(errno));
1730 static void record__init_features(struct record *rec)
1732 struct perf_session *session = rec->session;
1735 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
1736 perf_header__set_feat(&session->header, feat);
1738 if (rec->no_buildid)
1739 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
1741 #ifdef HAVE_LIBTRACEEVENT
1742 if (!have_tracepoints(&rec->evlist->core.entries))
1743 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
1746 if (!rec->opts.branch_stack)
1747 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
1749 if (!rec->opts.full_auxtrace)
1750 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
1752 if (!(rec->opts.use_clockid && rec->opts.clockid_res_ns))
1753 perf_header__clear_feat(&session->header, HEADER_CLOCKID);
1755 if (!rec->opts.use_clockid)
1756 perf_header__clear_feat(&session->header, HEADER_CLOCK_DATA);
1758 if (!record__threads_enabled(rec))
1759 perf_header__clear_feat(&session->header, HEADER_DIR_FORMAT);
1761 if (!record__comp_enabled(rec))
1762 perf_header__clear_feat(&session->header, HEADER_COMPRESSED);
1764 perf_header__clear_feat(&session->header, HEADER_STAT);
1768 record__finish_output(struct record *rec)
1771 struct perf_data *data = &rec->data;
1772 int fd = perf_data__fd(data);
1774 if (data->is_pipe) {
1775 /* Just to display approx. size */
1776 data->file.size = rec->bytes_written;
1780 rec->session->header.data_size += rec->bytes_written;
1781 data->file.size = lseek(perf_data__fd(data), 0, SEEK_CUR);
1782 if (record__threads_enabled(rec)) {
1783 for (i = 0; i < data->dir.nr; i++)
1784 data->dir.files[i].size = lseek(data->dir.files[i].fd, 0, SEEK_CUR);
1787 if (!rec->no_buildid) {
1788 process_buildids(rec);
1790 if (rec->buildid_all)
1791 perf_session__dsos_hit_all(rec->session);
1793 perf_session__write_header(rec->session, rec->evlist, fd, true);
1798 static int record__synthesize_workload(struct record *rec, bool tail)
1801 struct perf_thread_map *thread_map;
1802 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
1804 if (rec->opts.tail_synthesize != tail)
1807 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
1808 if (thread_map == NULL)
1811 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
1812 process_synthesized_event,
1813 &rec->session->machines.host,
1815 rec->opts.sample_address);
1816 perf_thread_map__put(thread_map);
1820 static int write_finished_init(struct record *rec, bool tail)
1822 if (rec->opts.tail_synthesize != tail)
1825 return record__write(rec, NULL, &finished_init_event, sizeof(finished_init_event));
1828 static int record__synthesize(struct record *rec, bool tail);
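/*
 * record__switch_output() rotates the output: it syncs outstanding AIO,
 * synthesizes tail events, finalizes the current file and switches perf.data
 * to a timestamped file, recycling old files when a switch-output file limit
 * is configured.
 */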
1831 record__switch_output(struct record *rec, bool at_exit)
1833 struct perf_data *data = &rec->data;
1834 char *new_filename = NULL;
1837	/* Same size: "2015122520103046" */
1838 char timestamp[] = "InvalidTimestamp";
1840 record__aio_mmap_read_sync(rec);
1842 write_finished_init(rec, true);
1844 record__synthesize(rec, true);
1845 if (target__none(&rec->opts.target))
1846 record__synthesize_workload(rec, true);
1849 record__finish_output(rec);
1850 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
1852 pr_err("Failed to get current timestamp\n");
1856 fd = perf_data__switch(data, timestamp,
1857 rec->session->header.data_offset,
1858 at_exit, &new_filename);
1859 if (fd >= 0 && !at_exit) {
1860 rec->bytes_written = 0;
1861 rec->session->header.data_size = 0;
1865 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
1866 data->path, timestamp);
1869 if (rec->switch_output.num_files) {
1870 int n = rec->switch_output.cur_file + 1;
1872 if (n >= rec->switch_output.num_files)
1874 rec->switch_output.cur_file = n;
1875 if (rec->switch_output.filenames[n]) {
1876 remove(rec->switch_output.filenames[n]);
1877 zfree(&rec->switch_output.filenames[n]);
1879 rec->switch_output.filenames[n] = new_filename;
1884 /* Output tracking events */
1886 record__synthesize(rec, false);
1889 * In 'perf record --switch-output' without -a,
1890 * record__synthesize() in record__switch_output() won't
1891 * generate tracking events because there's no thread_map
1892	 * in the evlist. As a result, the newly created perf.data
1893	 * doesn't contain map and comm information.
1894 * Create a fake thread_map and directly call
1895 * perf_event__synthesize_thread_map() for those events.
1897 if (target__none(&rec->opts.target))
1898 record__synthesize_workload(rec, false);
1899 write_finished_init(rec, false);
1904 static void __record__save_lost_samples(struct record *rec, struct evsel *evsel,
1905 struct perf_record_lost_samples *lost,
1906 int cpu_idx, int thread_idx, u64 lost_count,
1909 struct perf_sample_id *sid;
1910 struct perf_sample sample = {};
1913 lost->lost = lost_count;
1914 if (evsel->core.ids) {
1915 sid = xyarray__entry(evsel->core.sample_id, cpu_idx, thread_idx);
1916 sample.id = sid->id;
1919 id_hdr_size = perf_event__synthesize_id_sample((void *)(lost + 1),
1920 evsel->core.attr.sample_type, &sample);
1921 lost->header.size = sizeof(*lost) + id_hdr_size;
1922 lost->header.misc = misc_flag;
1923 record__write(rec, NULL, lost, lost->header.size);
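/*
 * At the end of the session, read the lost-sample counters for every event fd
 * and synthesize PERF_RECORD_LOST_SAMPLES records for them, including samples
 * dropped by a BPF filter (flagged with PERF_RECORD_MISC_LOST_SAMPLES_BPF).
 */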
1926 static void record__read_lost_samples(struct record *rec)
1928 struct perf_session *session = rec->session;
1929 struct perf_record_lost_samples *lost = NULL;
1930 struct evsel *evsel;
1932 /* there was an error during record__open */
1933 if (session->evlist == NULL)
1936 evlist__for_each_entry(session->evlist, evsel) {
1937 struct xyarray *xy = evsel->core.sample_id;
1940 if (xy == NULL || evsel->core.fd == NULL)
1942 if (xyarray__max_x(evsel->core.fd) != xyarray__max_x(xy) ||
1943 xyarray__max_y(evsel->core.fd) != xyarray__max_y(xy)) {
1944 pr_debug("Unmatched FD vs. sample ID: skip reading LOST count\n");
1948 for (int x = 0; x < xyarray__max_x(xy); x++) {
1949 for (int y = 0; y < xyarray__max_y(xy); y++) {
1950 struct perf_counts_values count;
1952 if (perf_evsel__read(&evsel->core, x, y, &count) < 0) {
1953 pr_debug("read LOST count failed\n");
1959 lost = zalloc(sizeof(*lost) +
1960 session->machines.host.id_hdr_size);
1962 pr_debug("Memory allocation failed\n");
1965 lost->header.type = PERF_RECORD_LOST_SAMPLES;
1967 __record__save_lost_samples(rec, evsel, lost,
1968 x, y, count.lost, 0);
1973 lost_count = perf_bpf_filter__lost_count(evsel);
1976 lost = zalloc(sizeof(*lost) +
1977 session->machines.host.id_hdr_size);
1979 pr_debug("Memory allocation failed\n");
1982 lost->header.type = PERF_RECORD_LOST_SAMPLES;
1984 __record__save_lost_samples(rec, evsel, lost, 0, 0, lost_count,
1985 PERF_RECORD_MISC_LOST_SAMPLES_BPF);
1992 static volatile sig_atomic_t workload_exec_errno;
1995 * evlist__prepare_workload will send a SIGUSR1
1996	 * if the fork fails, since we asked for it by setting its
1997	 * want_signal to true.
1999 static void workload_exec_failed_signal(int signo __maybe_unused,
2001 void *ucontext __maybe_unused)
2003 workload_exec_errno = info->si_value.sival_int;
2008 static void snapshot_sig_handler(int sig);
2009 static void alarm_sig_handler(int sig);
2011 static const struct perf_event_mmap_page *evlist__pick_pc(struct evlist *evlist)
2014 if (evlist->mmap && evlist->mmap[0].core.base)
2015 return evlist->mmap[0].core.base;
2016 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].core.base)
2017 return evlist->overwrite_mmap[0].core.base;
2022 static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
2024 const struct perf_event_mmap_page *pc = evlist__pick_pc(rec->evlist);
2030 static int record__synthesize(struct record *rec, bool tail)
2032 struct perf_session *session = rec->session;
2033 struct machine *machine = &session->machines.host;
2034 struct perf_data *data = &rec->data;
2035 struct record_opts *opts = &rec->opts;
2036 struct perf_tool *tool = &rec->tool;
2038 event_op f = process_synthesized_event;
2040 if (rec->opts.tail_synthesize != tail)
2043 if (data->is_pipe) {
2044 err = perf_event__synthesize_for_pipe(tool, session, data,
2045 process_synthesized_event);
2049 rec->bytes_written += err;
2052 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
2053 process_synthesized_event, machine);
2057 /* Synthesize id_index before auxtrace_info */
2058 err = perf_event__synthesize_id_index(tool,
2059 process_synthesized_event,
2060 session->evlist, machine);
2064 if (rec->opts.full_auxtrace) {
2065 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
2066 session, process_synthesized_event);
2071 if (!evlist__exclude_kernel(rec->evlist)) {
2072 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
2074 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
2075 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
2076 "Check /proc/kallsyms permission or run as root.\n");
2078 err = perf_event__synthesize_modules(tool, process_synthesized_event,
2080 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
2081 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
2082 "Check /proc/modules permission or run as root.\n");
2086 machines__process_guests(&session->machines,
2087 perf_event__synthesize_guest_os, tool);
2090 err = perf_event__synthesize_extra_attr(&rec->tool,
2092 process_synthesized_event,
2097 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->core.threads,
2098 process_synthesized_event,
2101 pr_err("Couldn't synthesize thread map.\n");
2105 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->core.all_cpus,
2106 process_synthesized_event, NULL);
2108 pr_err("Couldn't synthesize cpu map.\n");
2112 err = perf_event__synthesize_bpf_events(session, process_synthesized_event,
2115 pr_warning("Couldn't synthesize bpf events.\n");
2119 if (rec->opts.synth & PERF_SYNTH_CGROUP) {
2120 err = perf_event__synthesize_cgroups(tool, process_synthesized_event,
2123 pr_warning("Couldn't synthesize cgroup events.\n");
2128 if (rec->opts.nr_threads_synthesize > 1) {
2129 mutex_init(&synth_lock);
2130 perf_set_multithreaded();
2131 f = process_locked_synthesized_event;
2134 if (rec->opts.synth & PERF_SYNTH_TASK) {
2135 bool needs_mmap = rec->opts.synth & PERF_SYNTH_MMAP;
2137 err = __machine__synthesize_threads(machine, tool, &opts->target,
2138 rec->evlist->core.threads,
2139 f, needs_mmap, opts->sample_address,
2140 rec->opts.nr_threads_synthesize);
2143 if (rec->opts.nr_threads_synthesize > 1) {
2144 perf_set_singlethreaded();
2145 mutex_destroy(&synth_lock);
2152 static int record__process_signal_event(union perf_event *event __maybe_unused, void *data)
2154 struct record *rec = data;
2155 pthread_kill(rec->thread_id, SIGUSR2);
2159 static int record__setup_sb_evlist(struct record *rec)
2161 struct record_opts *opts = &rec->opts;
2163 if (rec->sb_evlist != NULL) {
2165 * We get here if --switch-output-event populated the
2166 * sb_evlist, so associate a callback that will send a SIGUSR2
2167 * to the main thread.
2169 evlist__set_cb(rec->sb_evlist, record__process_signal_event, rec);
2170 rec->thread_id = pthread_self();
2172 #ifdef HAVE_LIBBPF_SUPPORT
2173 if (!opts->no_bpf_event) {
2174 if (rec->sb_evlist == NULL) {
2175 rec->sb_evlist = evlist__new();
2177 if (rec->sb_evlist == NULL) {
2178	pr_err("Couldn't create side band evlist.\n");
2183 if (evlist__add_bpf_sb_event(rec->sb_evlist, &rec->session->header.env)) {
2184	pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n");
2189 if (evlist__start_sb_thread(rec->sb_evlist, &rec->opts.target)) {
2190 pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
2191 opts->no_bpf_event = true;
2197 static int record__init_clock(struct record *rec)
2199 struct perf_session *session = rec->session;
2200 struct timespec ref_clockid;
2201 struct timeval ref_tod;
2204 if (!rec->opts.use_clockid)
2207 if (rec->opts.use_clockid && rec->opts.clockid_res_ns)
2208 session->header.env.clock.clockid_res_ns = rec->opts.clockid_res_ns;
2210 session->header.env.clock.clockid = rec->opts.clockid;
2212 if (gettimeofday(&ref_tod, NULL) != 0) {
2213 pr_err("gettimeofday failed, cannot set reference time.\n");
2217 if (clock_gettime(rec->opts.clockid, &ref_clockid)) {
2218 pr_err("clock_gettime failed, cannot set reference time.\n");
2222 ref = (u64) ref_tod.tv_sec * NSEC_PER_SEC +
2223 (u64) ref_tod.tv_usec * NSEC_PER_USEC;
2225 session->header.env.clock.tod_ns = ref;
2227 ref = (u64) ref_clockid.tv_sec * NSEC_PER_SEC +
2228 (u64) ref_clockid.tv_nsec;
2230 session->header.env.clock.clockid_ns = ref;
2234 static void hit_auxtrace_snapshot_trigger(struct record *rec)
2236 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
2237 trigger_hit(&auxtrace_snapshot_trigger);
2238 auxtrace_record__snapshot_started = 1;
2239 if (auxtrace_record__snapshot_start(rec->itr))
2240 trigger_error(&auxtrace_snapshot_trigger);
2244 static int record__terminate_thread(struct record_thread *thread_data)
2247 enum thread_msg ack = THREAD_MSG__UNDEFINED;
2248 pid_t tid = thread_data->tid;
2250 close(thread_data->pipes.msg[1]);
2251 thread_data->pipes.msg[1] = -1;
2252 err = read(thread_data->pipes.ack[0], &ack, sizeof(ack));
2254 pr_debug2("threads[%d]: sent %s\n", tid, thread_msg_tags[ack]);
2256 pr_warning("threads[%d]: failed to receive termination notification from %d\n",
2262 static int record__start_threads(struct record *rec)
2264 int t, tt, err, ret = 0, nr_threads = rec->nr_threads;
2265 struct record_thread *thread_data = rec->thread_data;
2266 sigset_t full, mask;
2268 pthread_attr_t attrs;
2270 thread = &thread_data[0];
2272 if (!record__threads_enabled(rec))
2276 if (sigprocmask(SIG_SETMASK, &full, &mask)) {
2277 pr_err("Failed to block signals on threads start: %s\n", strerror(errno));
2281 pthread_attr_init(&attrs);
2282 pthread_attr_setdetachstate(&attrs, PTHREAD_CREATE_DETACHED);
2284 for (t = 1; t < nr_threads; t++) {
2285 enum thread_msg msg = THREAD_MSG__UNDEFINED;
2287 #ifdef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
2288 pthread_attr_setaffinity_np(&attrs,
2289 MMAP_CPU_MASK_BYTES(&(thread_data[t].mask->affinity)),
2290 (cpu_set_t *)(thread_data[t].mask->affinity.bits));
2292 if (pthread_create(&handle, &attrs, record__thread, &thread_data[t])) {
2293 for (tt = 1; tt < t; tt++)
2294 record__terminate_thread(&thread_data[t]);
2295 pr_err("Failed to start threads: %s\n", strerror(errno));
2300 err = read(thread_data[t].pipes.ack[0], &msg, sizeof(msg));
2302 pr_debug2("threads[%d]: sent %s\n", rec->thread_data[t].tid,
2303 thread_msg_tags[msg]);
2305 pr_warning("threads[%d]: failed to receive start notification from %d\n",
2306 thread->tid, rec->thread_data[t].tid);
2309 sched_setaffinity(0, MMAP_CPU_MASK_BYTES(&thread->mask->affinity),
2310 (cpu_set_t *)thread->mask->affinity.bits);
2312 pr_debug("threads[%d]: started on cpu%d\n", thread->tid, sched_getcpu());
2315 pthread_attr_destroy(&attrs);
2317 if (sigprocmask(SIG_SETMASK, &mask, NULL)) {
2318 pr_err("Failed to unblock signals on threads start: %s\n", strerror(errno));
2325 static int record__stop_threads(struct record *rec)
2328 struct record_thread *thread_data = rec->thread_data;
2330 for (t = 1; t < rec->nr_threads; t++)
2331 record__terminate_thread(&thread_data[t]);
2333 for (t = 0; t < rec->nr_threads; t++) {
2334 rec->samples += thread_data[t].samples;
2335 if (!record__threads_enabled(rec))
2337 rec->session->bytes_transferred += thread_data[t].bytes_transferred;
2338 rec->session->bytes_compressed += thread_data[t].bytes_compressed;
2339 pr_debug("threads[%d]: samples=%lld, wakes=%ld, ", thread_data[t].tid,
2340 thread_data[t].samples, thread_data[t].waking);
2341 if (thread_data[t].bytes_transferred && thread_data[t].bytes_compressed)
2342 pr_debug("transferred=%" PRIu64 ", compressed=%" PRIu64 "\n",
2343 thread_data[t].bytes_transferred, thread_data[t].bytes_compressed);
2345 pr_debug("written=%" PRIu64 "\n", thread_data[t].bytes_written);
2351 static unsigned long record__waking(struct record *rec)
2354 unsigned long waking = 0;
2355 struct record_thread *thread_data = rec->thread_data;
2357 for (t = 0; t < rec->nr_threads; t++)
2358 waking += thread_data[t].waking;
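/*
 * __cmd_record() is the main driver: it installs signal handlers, creates the
 * session, initializes compression, the wakeup eventfd, the reference clock
 * and header features, prepares the workload, opens and mmaps the events,
 * writes the file or pipe header, synthesizes the initial metadata, starts
 * the worker threads and enables the events before entering its read loop.
 */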
2363 static int __cmd_record(struct record *rec, int argc, const char **argv)
2367 const bool forks = argc > 0;
2368 struct perf_tool *tool = &rec->tool;
2369 struct record_opts *opts = &rec->opts;
2370 struct perf_data *data = &rec->data;
2371 struct perf_session *session;
2372 bool disabled = false, draining = false;
2375 enum evlist_ctl_cmd cmd = EVLIST_CTL_CMD_UNSUPPORTED;
2377 atexit(record__sig_exit);
2378 signal(SIGCHLD, sig_handler);
2379 signal(SIGINT, sig_handler);
2380 signal(SIGTERM, sig_handler);
2381 signal(SIGSEGV, sigsegv_handler);
2383 if (rec->opts.record_namespaces)
2384 tool->namespace_events = true;
2386 if (rec->opts.record_cgroup) {
2387 #ifdef HAVE_FILE_HANDLE
2388 tool->cgroup_events = true;
2390 pr_err("cgroup tracking is not supported\n");
2395 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
2396 signal(SIGUSR2, snapshot_sig_handler);
2397 if (rec->opts.auxtrace_snapshot_mode)
2398 trigger_on(&auxtrace_snapshot_trigger);
2399 if (rec->switch_output.enabled)
2400 trigger_on(&switch_output_trigger);
2402 signal(SIGUSR2, SIG_IGN);
2405 session = perf_session__new(data, tool);
2406 if (IS_ERR(session)) {
2407 pr_err("Perf session creation failed.\n");
2408 return PTR_ERR(session);
2411 if (record__threads_enabled(rec)) {
2412 if (perf_data__is_pipe(&rec->data)) {
2413 pr_err("Parallel trace streaming is not available in pipe mode.\n");
2416 if (rec->opts.full_auxtrace) {
2417 pr_err("Parallel trace streaming is not available in AUX area tracing mode.\n");
2422 fd = perf_data__fd(data);
2423 rec->session = session;
2425 if (zstd_init(&session->zstd_data, rec->opts.comp_level) < 0) {
2426 pr_err("Compression initialization failed.\n");
2429 #ifdef HAVE_EVENTFD_SUPPORT
2430 done_fd = eventfd(0, EFD_NONBLOCK);
2432 pr_err("Failed to create wakeup eventfd, error: %m\n");
2434 goto out_delete_session;
2436 err = evlist__add_wakeup_eventfd(rec->evlist, done_fd);
2438 pr_err("Failed to add wakeup eventfd to poll list\n");
2440 goto out_delete_session;
2442 #endif // HAVE_EVENTFD_SUPPORT
2444 session->header.env.comp_type = PERF_COMP_ZSTD;
2445 session->header.env.comp_level = rec->opts.comp_level;
2447 if (rec->opts.kcore &&
2448 !record__kcore_readable(&session->machines.host)) {
2449 pr_err("ERROR: kcore is not readable.\n");
2453 if (record__init_clock(rec))
2456 record__init_features(rec);
2459 err = evlist__prepare_workload(rec->evlist, &opts->target, argv, data->is_pipe,
2460 workload_exec_failed_signal);
2462 pr_err("Couldn't run the workload!\n");
2464 goto out_delete_session;
2469 * If we have just a single event and are sending data
2470 * through a pipe, we need to force ID allocation,
2471 * because we synthesize the event name through the pipe
2472 * and need the ID for that.
2474 if (data->is_pipe && rec->evlist->core.nr_entries == 1)
2475 rec->opts.sample_id = true;
2477 if (rec->timestamp_filename && perf_data__is_pipe(data)) {
2478 rec->timestamp_filename = false;
2479 pr_warning("WARNING: --timestamp-filename option is not available in pipe mode.\n");
2482 evlist__uniquify_name(rec->evlist);
2484 evlist__config(rec->evlist, opts, &callchain_param);
2486 /* Debug message used by test scripts */
2487 pr_debug3("perf record opening and mmapping events\n");
2488 if (record__open(rec) != 0) {
2490 goto out_free_threads;
2492 /* Debug message used by test scripts */
2493 pr_debug3("perf record done opening and mmapping events\n");
2494 session->header.env.comp_mmap_len = session->evlist->core.mmap_len;
2496 if (rec->opts.kcore) {
2497 err = record__kcore_copy(&session->machines.host, data);
2499 pr_err("ERROR: Failed to copy kcore\n");
2500 goto out_free_threads;
2505 * Normally perf_session__new() would do this, but it doesn't have the evlist.
2508 if (rec->tool.ordered_events && !evlist__sample_id_all(rec->evlist)) {
2509 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
2510 rec->tool.ordered_events = false;
2513 if (evlist__nr_groups(rec->evlist) == 0)
2514 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
2516 if (data->is_pipe) {
2517 err = perf_header__write_pipe(fd);
2519 goto out_free_threads;
2521 err = perf_session__write_header(session, rec->evlist, fd, false);
2523 goto out_free_threads;
2527 if (!rec->no_buildid
2528 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
2529 pr_err("Couldn't generate buildids. "
2530 "Use --no-buildid to profile anyway.\n");
2531 goto out_free_threads;
2534 err = record__setup_sb_evlist(rec);
2536 goto out_free_threads;
2538 err = record__synthesize(rec, false);
2540 goto out_free_threads;
2542 if (rec->realtime_prio) {
2543 struct sched_param param;
2545 param.sched_priority = rec->realtime_prio;
2546 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
2547 pr_err("Could not set realtime priority.\n");
2549 goto out_free_threads;
2553 if (record__start_threads(rec))
2554 goto out_free_threads;
2557 * When perf is starting the traced process, all the events
2558 * (apart from group members) have enable_on_exec=1 set,
2559 * so don't spoil it by prematurely enabling them.
2561 if (!target__none(&opts->target) && !opts->target.initial_delay)
2562 evlist__enable(rec->evlist);
2568 struct machine *machine = &session->machines.host;
2569 union perf_event *event;
2572 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
2573 if (event == NULL) {
2579 * Some H/W events are generated before the COMM event,
2580 * which is emitted during exec(), so perf script
2581 * cannot see a correct process name for those events.
2582 * Synthesize a COMM event to prevent that.
2584 tgid = perf_event__synthesize_comm(tool, event,
2585 rec->evlist->workload.pid,
2586 process_synthesized_event,
2593 event = malloc(sizeof(event->namespaces) +
2594 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
2595 machine->id_hdr_size);
2596 if (event == NULL) {
2602 * Synthesize NAMESPACES event for the command specified.
2604 perf_event__synthesize_namespaces(tool, event,
2605 rec->evlist->workload.pid,
2606 tgid, process_synthesized_event,
2610 evlist__start_workload(rec->evlist);
2613 if (opts->target.initial_delay) {
2614 pr_info(EVLIST_DISABLED_MSG);
2615 if (opts->target.initial_delay > 0) {
2616 usleep(opts->target.initial_delay * USEC_PER_MSEC);
2617 evlist__enable(rec->evlist);
2618 pr_info(EVLIST_ENABLED_MSG);
2622 err = event_enable_timer__start(rec->evlist->eet);
2626 /* Debug message used by test scripts */
2627 pr_debug3("perf record has started\n");
2630 trigger_ready(&auxtrace_snapshot_trigger);
2631 trigger_ready(&switch_output_trigger);
2632 perf_hooks__invoke_record_start();
2635 * Must write FINISHED_INIT so it will be seen after all other
2636 * synthesized user events, but before any regular events.
2638 err = write_finished_init(rec, false);
2643 unsigned long long hits = thread->samples;
2646 * rec->evlist->bkw_mmap_state may be
2647 * BKW_MMAP_EMPTY here: when done == true and
2648 * hits != rec->samples in the previous round.
2650 * evlist__toggle_bkw_mmap() ensures we never
2651 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
2653 if (trigger_is_hit(&switch_output_trigger) || done || draining)
2654 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
2656 if (record__mmap_read_all(rec, false) < 0) {
2657 trigger_error(&auxtrace_snapshot_trigger);
2658 trigger_error(&switch_output_trigger);
2663 if (auxtrace_record__snapshot_started) {
2664 auxtrace_record__snapshot_started = 0;
2665 if (!trigger_is_error(&auxtrace_snapshot_trigger))
2666 record__read_auxtrace_snapshot(rec, false);
2667 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
2668 pr_err("AUX area tracing snapshot failed\n");
2674 if (trigger_is_hit(&switch_output_trigger)) {
2676 * If switch_output_trigger is hit, the data in the
2677 * overwritable ring buffer should have been collected,
2678 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
2680 * If SIGUSR2 was raised after or during record__mmap_read_all(),
2681 * record__mmap_read_all() didn't collect data from the
2682 * overwritable ring buffer. Read again.
2684 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
2686 trigger_ready(&switch_output_trigger);
2689 * Re-enable events in the overwrite ring buffer after
2690 * record__mmap_read_all(): we should have collected data from it by now.
2693 evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
2696 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
2697 record__waking(rec));
2699 fd = record__switch_output(rec, false);
2701 pr_err("Failed to switch to new file\n");
2702 trigger_error(&switch_output_trigger);
2707 /* re-arm the alarm */
2708 if (rec->switch_output.time)
2709 alarm(rec->switch_output.time);
2712 if (hits == thread->samples) {
2713 if (done || draining)
2715 err = fdarray__poll(&thread->pollfd, -1);
2717 * Propagate the error, only if there is one. Ignore a positive
2718 * number of returned events and interrupt (EINTR) errors.
2720 if (err > 0 || (err < 0 && errno == EINTR))
2724 if (fdarray__filter(&thread->pollfd, POLLERR | POLLHUP,
2725 record__thread_munmap_filtered, NULL) == 0)
2728 err = record__update_evlist_pollfd_from_thread(rec, rec->evlist, thread);
2733 if (evlist__ctlfd_process(rec->evlist, &cmd) > 0) {
2735 case EVLIST_CTL_CMD_SNAPSHOT:
2736 hit_auxtrace_snapshot_trigger(rec);
2737 evlist__ctlfd_ack(rec->evlist);
2739 case EVLIST_CTL_CMD_STOP:
2742 case EVLIST_CTL_CMD_ACK:
2743 case EVLIST_CTL_CMD_UNSUPPORTED:
2744 case EVLIST_CTL_CMD_ENABLE:
2745 case EVLIST_CTL_CMD_DISABLE:
2746 case EVLIST_CTL_CMD_EVLIST:
2747 case EVLIST_CTL_CMD_PING:
2753 err = event_enable_timer__process(rec->evlist->eet);
2762 * When perf is starting the traced process, at the end events
2763 * die with the process and we wait for that. Thus no need to
2764 * disable events in this case.
2766 if (done && !disabled && !target__none(&opts->target)) {
2767 trigger_off(&auxtrace_snapshot_trigger);
2768 evlist__disable(rec->evlist);
2773 trigger_off(&auxtrace_snapshot_trigger);
2774 trigger_off(&switch_output_trigger);
2776 if (opts->auxtrace_snapshot_on_exit)
2777 record__auxtrace_snapshot_exit(rec);
2779 if (forks && workload_exec_errno) {
2780 char msg[STRERR_BUFSIZE], strevsels[2048];
2781 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
2783 evlist__scnprintf_evsels(rec->evlist, sizeof(strevsels), strevsels);
2785 pr_err("Failed to collect '%s' for the '%s' workload: %s\n",
2786 strevsels, argv[0], emsg);
2792 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n",
2793 record__waking(rec));
2795 write_finished_init(rec, true);
2797 if (target__none(&rec->opts.target))
2798 record__synthesize_workload(rec, true);
2801 record__stop_threads(rec);
2802 record__mmap_read_all(rec, true);
2804 record__free_thread_data(rec);
2805 evlist__finalize_ctlfd(rec->evlist);
2806 record__aio_mmap_read_sync(rec);
2808 if (rec->session->bytes_transferred && rec->session->bytes_compressed) {
2809 ratio = (float)rec->session->bytes_transferred/(float)rec->session->bytes_compressed;
2810 session->header.env.comp_ratio = ratio + 0.5;
2816 if (!child_finished)
2817 kill(rec->evlist->workload.pid, SIGTERM);
2823 else if (WIFEXITED(exit_status))
2824 status = WEXITSTATUS(exit_status);
2825 else if (WIFSIGNALED(exit_status))
2826 signr = WTERMSIG(exit_status);
2831 rec->bytes_written += off_cpu_write(rec->session);
2833 record__read_lost_samples(rec);
2834 record__synthesize(rec, true);
2835 /* this will be recalculated during process_buildids() */
2839 if (!rec->timestamp_filename) {
2840 record__finish_output(rec);
2842 fd = record__switch_output(rec, true);
2845 goto out_delete_session;
2850 perf_hooks__invoke_record_end();
2852 if (!err && !quiet) {
2854 const char *postfix = rec->timestamp_filename ?
2855 ".<timestamp>" : "";
2857 if (rec->samples && !rec->opts.full_auxtrace)
2858 scnprintf(samples, sizeof(samples),
2859 " (%" PRIu64 " samples)", rec->samples);
2863 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s",
2864 perf_data__size(data) / 1024.0 / 1024.0,
2865 data->path, postfix, samples);
2867 fprintf(stderr, ", compressed (original %.3f MB, ratio is %.3f)",
2868 rec->session->bytes_transferred / 1024.0 / 1024.0,
2871 fprintf(stderr, " ]\n");
2875 #ifdef HAVE_EVENTFD_SUPPORT
2883 zstd_fini(&session->zstd_data);
2884 if (!opts->no_bpf_event)
2885 evlist__stop_sb_thread(rec->sb_evlist);
2887 perf_session__delete(session);
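/* Print the configured callchain record mode (and DWARF stack dump size) for debugging. */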
2891 static void callchain_debug(struct callchain_param *callchain)
2893 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
2895 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
2897 if (callchain->record_mode == CALLCHAIN_DWARF)
2898 pr_debug("callchain: stack dump size %d\n",
2899 callchain->dump_size);
2902 int record_opts__parse_callchain(struct record_opts *record,
2903 struct callchain_param *callchain,
2904 const char *arg, bool unset)
2907 callchain->enabled = !unset;
2909 /* --no-call-graph */
2911 callchain->record_mode = CALLCHAIN_NONE;
2912 pr_debug("callchain: disabled\n");
2916 ret = parse_callchain_record_opt(arg, callchain);
2918 /* Enable data address sampling for DWARF unwind. */
2919 if (callchain->record_mode == CALLCHAIN_DWARF)
2920 record->sample_address = true;
2921 callchain_debug(callchain);
2927 int record_parse_callchain_opt(const struct option *opt,
2931 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
2934 int record_callchain_opt(const struct option *opt,
2935 const char *arg __maybe_unused,
2936 int unset __maybe_unused)
2938 struct callchain_param *callchain = opt->value;
2940 callchain->enabled = true;
2942 if (callchain->record_mode == CALLCHAIN_NONE)
2943 callchain->record_mode = CALLCHAIN_FP;
2945 callchain_debug(callchain);
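/* Apply 'record.*' settings from the perf config file (build-id, call-graph, aio, debuginfod). */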
2949 static int perf_record_config(const char *var, const char *value, void *cb)
2951 struct record *rec = cb;
2953 if (!strcmp(var, "record.build-id")) {
2954 if (!strcmp(value, "cache"))
2955 rec->no_buildid_cache = false;
2956 else if (!strcmp(value, "no-cache"))
2957 rec->no_buildid_cache = true;
2958 else if (!strcmp(value, "skip"))
2959 rec->no_buildid = true;
2960 else if (!strcmp(value, "mmap"))
2961 rec->buildid_mmap = true;
2966 if (!strcmp(var, "record.call-graph")) {
2967 var = "call-graph.record-mode";
2968 return perf_default_config(var, value, cb);
2970 #ifdef HAVE_AIO_SUPPORT
2971 if (!strcmp(var, "record.aio")) {
2972 rec->opts.nr_cblocks = strtol(value, NULL, 0);
2973 if (!rec->opts.nr_cblocks)
2974 rec->opts.nr_cblocks = nr_cblocks_default;
2977 if (!strcmp(var, "record.debuginfod")) {
2978 rec->debuginfod.urls = strdup(value);
2979 if (!rec->debuginfod.urls)
2981 rec->debuginfod.set = true;
2987 static int record__parse_event_enable_time(const struct option *opt, const char *str, int unset)
2989 struct record *rec = (struct record *)opt->value;
2991 return evlist__parse_event_enable_time(rec->evlist, &rec->opts, str, unset);
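/* Parse --affinity=node|cpu into the corresponding PERF_AFFINITY_* mode. */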
2994 static int record__parse_affinity(const struct option *opt, const char *str, int unset)
2996 struct record_opts *opts = (struct record_opts *)opt->value;
3001 if (!strcasecmp(str, "node"))
3002 opts->affinity = PERF_AFFINITY_NODE;
3003 else if (!strcasecmp(str, "cpu"))
3004 opts->affinity = PERF_AFFINITY_CPU;
3009 static int record__mmap_cpu_mask_alloc(struct mmap_cpu_mask *mask, int nr_bits)
3011 mask->nbits = nr_bits;
3012 mask->bits = bitmap_zalloc(mask->nbits);
3019 static void record__mmap_cpu_mask_free(struct mmap_cpu_mask *mask)
3021 bitmap_free(mask->bits);
3025 static int record__thread_mask_alloc(struct thread_mask *mask, int nr_bits)
3029 ret = record__mmap_cpu_mask_alloc(&mask->maps, nr_bits);
3031 mask->affinity.bits = NULL;
3035 ret = record__mmap_cpu_mask_alloc(&mask->affinity, nr_bits);
3037 record__mmap_cpu_mask_free(&mask->maps);
3038 mask->maps.bits = NULL;
3044 static void record__thread_mask_free(struct thread_mask *mask)
3046 record__mmap_cpu_mask_free(&mask->maps);
3047 record__mmap_cpu_mask_free(&mask->affinity);
3050 static int record__parse_threads(const struct option *opt, const char *str, int unset)
3053 struct record_opts *opts = opt->value;
3055 if (unset || !str || !strlen(str)) {
3056 opts->threads_spec = THREAD_SPEC__CPU;
3058 for (s = 1; s < THREAD_SPEC__MAX; s++) {
3059 if (s == THREAD_SPEC__USER) {
3060 opts->threads_user_spec = strdup(str);
3061 if (!opts->threads_user_spec)
3063 opts->threads_spec = THREAD_SPEC__USER;
3066 if (!strncasecmp(str, thread_spec_tags[s], strlen(thread_spec_tags[s]))) {
3067 opts->threads_spec = s;
3073 if (opts->threads_spec == THREAD_SPEC__USER)
3074 pr_debug("threads_spec: %s\n", opts->threads_user_spec);
3076 pr_debug("threads_spec: %s\n", thread_spec_tags[opts->threads_spec]);
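/* Parse the --max-size output limit, accepting B/K/M/G suffixes. */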
3081 static int parse_output_max_size(const struct option *opt,
3082 const char *str, int unset)
3084 unsigned long *s = (unsigned long *)opt->value;
3085 static struct parse_tag tags_size[] = {
3086 { .tag = 'B', .mult = 1 },
3087 { .tag = 'K', .mult = 1 << 10 },
3088 { .tag = 'M', .mult = 1 << 20 },
3089 { .tag = 'G', .mult = 1 << 30 },
3099 val = parse_tag_value(str, tags_size);
3100 if (val != (unsigned long) -1) {
3108 static int record__parse_mmap_pages(const struct option *opt,
3110 int unset __maybe_unused)
3112 struct record_opts *opts = opt->value;
3114 unsigned int mmap_pages;
3129 ret = __evlist__parse_mmap_pages(&mmap_pages, s);
3132 opts->mmap_pages = mmap_pages;
3140 ret = __evlist__parse_mmap_pages(&mmap_pages, p + 1);
3144 opts->auxtrace_mmap_pages = mmap_pages;
3151 void __weak arch__add_leaf_frame_record_opts(struct record_opts *opts __maybe_unused)
3155 static int parse_control_option(const struct option *opt,
3157 int unset __maybe_unused)
3159 struct record_opts *opts = opt->value;
3161 return evlist__parse_control(str, &opts->ctl_fd, &opts->ctl_fd_ack, &opts->ctl_fd_close);
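/* Warn when the --switch-output size threshold is smaller than the ring buffer wakeup size. */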
3164 static void switch_output_size_warn(struct record *rec)
3166 u64 wakeup_size = evlist__mmap_size(rec->opts.mmap_pages);
3167 struct switch_output *s = &rec->switch_output;
3171 if (s->size < wakeup_size) {
3174 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
3175 pr_warning("WARNING: switch-output data size lower than "
3176 "wakeup kernel buffer size (%s), "
3177 "expect bigger perf.data sizes\n", buf);
3181 static int switch_output_setup(struct record *rec)
3183 struct switch_output *s = &rec->switch_output;
3184 static struct parse_tag tags_size[] = {
3185 { .tag = 'B', .mult = 1 },
3186 { .tag = 'K', .mult = 1 << 10 },
3187 { .tag = 'M', .mult = 1 << 20 },
3188 { .tag = 'G', .mult = 1 << 30 },
3191 static struct parse_tag tags_time[] = {
3192 { .tag = 's', .mult = 1 },
3193 { .tag = 'm', .mult = 60 },
3194 { .tag = 'h', .mult = 60*60 },
3195 { .tag = 'd', .mult = 60*60*24 },
3201 * If we're using --switch-output-events, then we imply
3202 * --switch-output=signal, as we'll send a SIGUSR2 from the side band
3203 * thread to its parent.
3205 if (rec->switch_output_event_set) {
3206 if (record__threads_enabled(rec)) {
3207 pr_warning("WARNING: --switch-output-event option is not available in parallel streaming mode.\n");
3216 if (record__threads_enabled(rec)) {
3217 pr_warning("WARNING: --switch-output option is not available in parallel streaming mode.\n");
3221 if (!strcmp(s->str, "signal")) {
3224 pr_debug("switch-output with SIGUSR2 signal\n");
3228 val = parse_tag_value(s->str, tags_size);
3229 if (val != (unsigned long) -1) {
3231 pr_debug("switch-output with %s size threshold\n", s->str);
3235 val = parse_tag_value(s->str, tags_time);
3236 if (val != (unsigned long) -1) {
3238 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
3246 rec->timestamp_filename = true;
3249 if (s->size && !rec->opts.no_buffering)
3250 switch_output_size_warn(rec);
3255 static const char * const __record_usage[] = {
3256 "perf record [<options>] [<command>]",
3257 "perf record [<options>] -- <command> [<options>]",
3260 const char * const *record_usage = __record_usage;
3262 static int build_id__process_mmap(struct perf_tool *tool, union perf_event *event,
3263 struct perf_sample *sample, struct machine *machine)
3266 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
3267 * so there is no need to add them twice.
3269 if (!(event->header.misc & PERF_RECORD_MISC_USER))
3271 return perf_event__process_mmap(tool, event, sample, machine);
3274 static int build_id__process_mmap2(struct perf_tool *tool, union perf_event *event,
3275 struct perf_sample *sample, struct machine *machine)
3278 * We already have the kernel maps, put in place via perf_session__create_kernel_maps(),
3279 * so there is no need to add them twice.
3281 if (!(event->header.misc & PERF_RECORD_MISC_USER))
3284 return perf_event__process_mmap2(tool, event, sample, machine);
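/* Track the earliest/latest sample time for the --timestamp-boundary feature. */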
3287 static int process_timestamp_boundary(struct perf_tool *tool,
3288 union perf_event *event __maybe_unused,
3289 struct perf_sample *sample,
3290 struct machine *machine __maybe_unused)
3292 struct record *rec = container_of(tool, struct record, tool);
3294 set_timestamp_boundary(rec, sample->time);
3298 static int parse_record_synth_option(const struct option *opt,
3300 int unset __maybe_unused)
3302 struct record_opts *opts = opt->value;
3303 char *p = strdup(str);
3308 opts->synth = parse_synth_opt(p);
3311 if (opts->synth < 0) {
3312 pr_err("Invalid synth option: %s\n", str);
3319 * XXX Ideally this would be local to cmd_record() and passed to a record__new,
3320 * because we need access to it in record__exit(), which is called
3321 * after cmd_record() exits, but since record_options needs to be accessible to
3322 * builtin-script, leave it here.
3324 * At least we don't touch it in all the other functions here directly.
3326 * Just say no to tons of global variables, sigh.
3328 static struct record record = {
3330 .sample_time = true,
3331 .mmap_pages = UINT_MAX,
3332 .user_freq = UINT_MAX,
3333 .user_interval = ULLONG_MAX,
3337 .default_per_cpu = true,
3339 .mmap_flush = MMAP_FLUSH_DEFAULT,
3340 .nr_threads_synthesize = 1,
3343 .synth = PERF_SYNTH_ALL,
3346 .sample = process_sample_event,
3347 .fork = perf_event__process_fork,
3348 .exit = perf_event__process_exit,
3349 .comm = perf_event__process_comm,
3350 .namespaces = perf_event__process_namespaces,
3351 .mmap = build_id__process_mmap,
3352 .mmap2 = build_id__process_mmap2,
3353 .itrace_start = process_timestamp_boundary,
3354 .aux = process_timestamp_boundary,
3355 .ordered_events = true,
3359 const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
3360 "\n\t\t\t\tDefault: fp";
3362 static bool dry_run;
3364 static struct parse_events_option_args parse_events_option_args = {
3365 .evlistp = &record.evlist,
3368 static struct parse_events_option_args switch_output_parse_events_option_args = {
3369 .evlistp = &record.sb_evlist,
3373 * XXX Will stay a global variable until we fix builtin-script.c to stop messing
3374 * with it and switch to using the library functions in perf_evlist that came
3375 * from builtin-record.c, i.e. use record_opts,
3376 * evlist__prepare_workload, etc. instead of fork+exec'ing 'perf record'.
3379 static struct option __record_options[] = {
3380 OPT_CALLBACK('e', "event", &parse_events_option_args, "event",
3381 "event selector. use 'perf list' to list available events",
3382 parse_events_option),
3383 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
3384 "event filter", parse_filter),
3385 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
3386 NULL, "don't record events from perf itself",
3388 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
3389 "record events on existing process id"),
3390 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
3391 "record events on existing thread id"),
3392 OPT_INTEGER('r', "realtime", &record.realtime_prio,
3393 "collect data with this RT SCHED_FIFO priority"),
3394 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
3395 "collect data without buffering"),
3396 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
3397 "collect raw sample records from all opened counters"),
3398 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
3399 "system-wide collection from all CPUs"),
3400 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
3401 "list of cpus to monitor"),
3402 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
3403 OPT_STRING('o', "output", &record.data.path, "file",
3404 "output file name"),
3405 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
3406 &record.opts.no_inherit_set,
3407 "child tasks do not inherit counters"),
3408 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
3409 "synthesize non-sample events at the end of output"),
3410 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
3411 OPT_BOOLEAN(0, "no-bpf-event", &record.opts.no_bpf_event, "do not record bpf events"),
3412 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
3413 "Fail if the specified frequency can't be used"),
3414 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
3415 "profile at this frequency",
3416 record__parse_freq),
3417 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
3418 "number of mmap data pages and AUX area tracing mmap pages",
3419 record__parse_mmap_pages),
3420 OPT_CALLBACK(0, "mmap-flush", &record.opts, "number",
3421 "Minimal number of bytes that is extracted from mmap data pages (default: 1)",
3422 record__mmap_flush_parse),
3423 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
3424 NULL, "enables call-graph recording" ,
3425 &record_callchain_opt),
3426 OPT_CALLBACK(0, "call-graph", &record.opts,
3427 "record_mode[,record_size]", record_callchain_help,
3428 &record_parse_callchain_opt),
3429 OPT_INCR('v', "verbose", &verbose,
3430 "be more verbose (show counter open errors, etc)"),
3431 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any warnings or messages"),
3432 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
3433 "per thread counts"),
3434 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
3435 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
3436 "Record the sample physical addresses"),
3437 OPT_BOOLEAN(0, "data-page-size", &record.opts.sample_data_page_size,
3438 "Record the sampled data address data page size"),
3439 OPT_BOOLEAN(0, "code-page-size", &record.opts.sample_code_page_size,
3440 "Record the sampled code address (ip) page size"),
3441 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
3442 OPT_BOOLEAN(0, "sample-identifier", &record.opts.sample_identifier,
3443 "Record the sample identifier"),
3444 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
3445 &record.opts.sample_time_set,
3446 "Record the sample timestamps"),
3447 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
3448 "Record the sample period"),
3449 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
3451 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
3452 &record.no_buildid_cache_set,
3453 "do not update the buildid cache"),
3454 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
3455 &record.no_buildid_set,
3456 "do not collect buildids in perf.data"),
3457 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
3458 "monitor event in cgroup name only",
3460 OPT_CALLBACK('D', "delay", &record, "ms",
3461 "ms to wait before starting measurement after program start (-1: start with events disabled), "
3462 "or ranges of time to enable events e.g. '-D 10-20,30-40'",
3463 record__parse_event_enable_time),
3464 OPT_BOOLEAN(0, "kcore", &record.opts.kcore, "copy /proc/kcore"),
3465 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
3468 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
3469 "branch any", "sample any taken branches",
3470 parse_branch_stack),
3472 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
3473 "branch filter mask", "branch stack filter modes",
3474 parse_branch_stack),
3475 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
3476 "sample by weight (on special events only)"),
3477 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
3478 "sample transaction flags (special events only)"),
3479 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
3480 "use per-thread mmaps"),
3481 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
3482 "sample selected machine registers on interrupt,"
3483 " use '-I?' to list register names", parse_intr_regs),
3484 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
3485 "sample selected machine registers on interrupt,"
3486 " use '--user-regs=?' to list register names", parse_user_regs),
3487 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
3488 "Record running/enabled time of read (:S) events"),
3489 OPT_CALLBACK('k', "clockid", &record.opts,
3490 "clockid", "clockid to use for events, see clock_gettime()",
3492 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
3493 "opts", "AUX area tracing Snapshot Mode", ""),
3494 OPT_STRING_OPTARG(0, "aux-sample", &record.opts.auxtrace_sample_opts,
3495 "opts", "sample AUX area", ""),
3496 OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
3497 "per thread proc mmap processing timeout in ms"),
3498 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
3499 "Record namespaces events"),
3500 OPT_BOOLEAN(0, "all-cgroups", &record.opts.record_cgroup,
3501 "Record cgroup events"),
3502 OPT_BOOLEAN_SET(0, "switch-events", &record.opts.record_switch_events,
3503 &record.opts.record_switch_events_set,
3504 "Record context switch events"),
3505 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
3506 "Configure all used events to run in kernel space.",
3507 PARSE_OPT_EXCLUSIVE),
3508 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
3509 "Configure all used events to run in user space.",
3510 PARSE_OPT_EXCLUSIVE),
3511 OPT_BOOLEAN(0, "kernel-callchains", &record.opts.kernel_callchains,
3512 "collect kernel callchains"),
3513 OPT_BOOLEAN(0, "user-callchains", &record.opts.user_callchains,
3514 "collect user callchains"),
3515 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
3516 "file", "vmlinux pathname"),
3517 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
3518 "Record build-id of all DSOs regardless of hits"),
3519 OPT_BOOLEAN(0, "buildid-mmap", &record.buildid_mmap,
3520 "Record build-id in map events"),
3521 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
3522 "append timestamp to output filename"),
3523 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
3524 "Record timestamp boundary (time of first/last samples)"),
3525 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
3526 &record.switch_output.set, "signal or size[BKMG] or time[smhd]",
3527 "Switch output when receiving SIGUSR2 (signal) or cross a size or time threshold",
3529 OPT_CALLBACK_SET(0, "switch-output-event", &switch_output_parse_events_option_args,
3530 &record.switch_output_event_set, "switch output event",
3531 "switch output event selector. use 'perf list' to list available events",
3532 parse_events_option_new_evlist),
3533 OPT_INTEGER(0, "switch-max-files", &record.switch_output.num_files,
3534 "Limit number of switch output generated files"),
3535 OPT_BOOLEAN(0, "dry-run", &dry_run,
3536 "Parse options then exit"),
3537 #ifdef HAVE_AIO_SUPPORT
3538 OPT_CALLBACK_OPTARG(0, "aio", &record.opts,
3539 &nr_cblocks_default, "n", "Use <n> control blocks in asynchronous trace writing mode (default: 1, max: 4)",
3542 OPT_CALLBACK(0, "affinity", &record.opts, "node|cpu",
3543 "Set affinity mask of trace reading thread to NUMA node cpu mask or cpu of processed mmap buffer",
3544 record__parse_affinity),
3545 #ifdef HAVE_ZSTD_SUPPORT
3546 OPT_CALLBACK_OPTARG('z', "compression-level", &record.opts, &comp_level_default, "n",
3547 "Compress records using specified level (default: 1 - fastest compression, 22 - greatest compression)",
3548 record__parse_comp_level),
3550 OPT_CALLBACK(0, "max-size", &record.output_max_size,
3551 "size", "Limit the maximum size of the output file", parse_output_max_size),
3552 OPT_UINTEGER(0, "num-thread-synthesize",
3553 &record.opts.nr_threads_synthesize,
3554 "number of threads to run for event synthesis"),
3556 OPT_CALLBACK(0, "pfm-events", &record.evlist, "event",
3557 "libpfm4 event selector. use 'perf list' to list available events",
3558 parse_libpfm_events_option),
3560 OPT_CALLBACK(0, "control", &record.opts, "fd:ctl-fd[,ack-fd] or fifo:ctl-fifo[,ack-fifo]",
3561 "Listen on ctl-fd descriptor for command to control measurement ('enable': enable events, 'disable': disable events,\n"
3562 "\t\t\t 'snapshot': AUX area tracing snapshot).\n"
3563 "\t\t\t Optionally send control command completion ('ack\\n') to ack-fd descriptor.\n"
3564 "\t\t\t Alternatively, ctl-fifo / ack-fifo will be opened and used as ctl-fd / ack-fd.",
3565 parse_control_option),
3566 OPT_CALLBACK(0, "synth", &record.opts, "no|all|task|mmap|cgroup",
3567 "Fine-tune event synthesis: default=all", parse_record_synth_option),
3568 OPT_STRING_OPTARG_SET(0, "debuginfod", &record.debuginfod.urls,
3569 &record.debuginfod.set, "debuginfod urls",
3570 "Enable debuginfod data retrieval from DEBUGINFOD_URLS or specified urls",
3572 OPT_CALLBACK_OPTARG(0, "threads", &record.opts, NULL, "spec",
3573 "write collected trace data into several data files using parallel threads",
3574 record__parse_threads),
3575 OPT_BOOLEAN(0, "off-cpu", &record.off_cpu, "Enable off-cpu analysis"),
3579 struct option *record_options = __record_options;
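/* Set a bit in 'mask' for each CPU in 'cpus'; fail if a CPU number exceeds the mask width. */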
3581 static int record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
3583 struct perf_cpu cpu;
3586 if (cpu_map__is_dummy(cpus))
3589 perf_cpu_map__for_each_cpu_skip_any(cpu, idx, cpus) {
3590 /* Return ENODEV if input cpu is greater than max cpu */
3591 if ((unsigned long)cpu.cpu > mask->nbits)
3593 __set_bit(cpu.cpu, mask->bits);
3599 static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
3601 struct perf_cpu_map *cpus;
3603 cpus = perf_cpu_map__new(mask_spec);
3607 bitmap_zero(mask->bits, mask->nbits);
3608 if (record__mmap_cpu_mask_init(mask, cpus))
3611 perf_cpu_map__put(cpus);
3616 static void record__free_thread_masks(struct record *rec, int nr_threads)
3620 if (rec->thread_masks)
3621 for (t = 0; t < nr_threads; t++)
3622 record__thread_mask_free(&rec->thread_masks[t]);
3624 zfree(&rec->thread_masks);
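/* Allocate 'nr_threads' thread masks, each with 'nr_bits' wide maps and affinity bitmaps. */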
3627 static int record__alloc_thread_masks(struct record *rec, int nr_threads, int nr_bits)
3631 rec->thread_masks = zalloc(nr_threads * sizeof(*(rec->thread_masks)));
3632 if (!rec->thread_masks) {
3633 pr_err("Failed to allocate thread masks\n");
3637 for (t = 0; t < nr_threads; t++) {
3638 ret = record__thread_mask_alloc(&rec->thread_masks[t], nr_bits);
3640 pr_err("Failed to allocate thread masks[%d]\n", t);
3648 record__free_thread_masks(rec, nr_threads);
3653 static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map *cpus)
3655 int t, ret, nr_cpus = perf_cpu_map__nr(cpus);
3657 ret = record__alloc_thread_masks(rec, nr_cpus, cpu__max_cpu().cpu);
3661 rec->nr_threads = nr_cpus;
3662 pr_debug("nr_threads: %d\n", rec->nr_threads);
3664 for (t = 0; t < rec->nr_threads; t++) {
3665 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
3666 __set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
3668 pr_debug("thread_masks[%d]: ", t);
3669 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3670 pr_debug("thread_masks[%d]: ", t);
3671 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
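/*
 * Build per-thread masks from user-supplied maps/affinity CPU list specs,
 * rejecting empty masks and masks that intersect with previously seen ones.
 */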
3678 static int record__init_thread_masks_spec(struct record *rec, struct perf_cpu_map *cpus,
3679 const char **maps_spec, const char **affinity_spec,
3684 struct mmap_cpu_mask cpus_mask;
3685 struct thread_mask thread_mask, full_mask, *thread_masks;
3687 ret = record__mmap_cpu_mask_alloc(&cpus_mask, cpu__max_cpu().cpu);
3689 pr_err("Failed to allocate CPUs mask\n");
3693 ret = record__mmap_cpu_mask_init(&cpus_mask, cpus);
3695 pr_err("Failed to init cpu mask\n");
3696 goto out_free_cpu_mask;
3699 ret = record__thread_mask_alloc(&full_mask, cpu__max_cpu().cpu);
3701 pr_err("Failed to allocate full mask\n");
3702 goto out_free_cpu_mask;
3705 ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
3707 pr_err("Failed to allocate thread mask\n");
3708 goto out_free_full_and_cpu_masks;
3711 for (s = 0; s < nr_spec; s++) {
3712 ret = record__mmap_cpu_mask_init_spec(&thread_mask.maps, maps_spec[s]);
3714 pr_err("Failed to initialize maps thread mask\n");
3717 ret = record__mmap_cpu_mask_init_spec(&thread_mask.affinity, affinity_spec[s]);
3719 pr_err("Failed to initialize affinity thread mask\n");
3723 /* ignore invalid CPUs but do not allow empty masks */
3724 if (!bitmap_and(thread_mask.maps.bits, thread_mask.maps.bits,
3725 cpus_mask.bits, thread_mask.maps.nbits)) {
3726 pr_err("Empty maps mask: %s\n", maps_spec[s]);
3730 if (!bitmap_and(thread_mask.affinity.bits, thread_mask.affinity.bits,
3731 cpus_mask.bits, thread_mask.affinity.nbits)) {
3732 pr_err("Empty affinity mask: %s\n", affinity_spec[s]);
3737 /* do not allow intersection with other masks (full_mask) */
3738 if (bitmap_intersects(thread_mask.maps.bits, full_mask.maps.bits,
3739 thread_mask.maps.nbits)) {
3740 pr_err("Intersecting maps mask: %s\n", maps_spec[s]);
3744 if (bitmap_intersects(thread_mask.affinity.bits, full_mask.affinity.bits,
3745 thread_mask.affinity.nbits)) {
3746 pr_err("Intersecting affinity mask: %s\n", affinity_spec[s]);
3751 bitmap_or(full_mask.maps.bits, full_mask.maps.bits,
3752 thread_mask.maps.bits, full_mask.maps.nbits);
3753 bitmap_or(full_mask.affinity.bits, full_mask.affinity.bits,
3754 thread_mask.affinity.bits, full_mask.maps.nbits);
3756 thread_masks = realloc(rec->thread_masks, (t + 1) * sizeof(struct thread_mask));
3757 if (!thread_masks) {
3758 pr_err("Failed to reallocate thread masks\n");
3762 rec->thread_masks = thread_masks;
3763 rec->thread_masks[t] = thread_mask;
3765 pr_debug("thread_masks[%d]: ", t);
3766 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
3767 pr_debug("thread_masks[%d]: ", t);
3768 mmap_cpu_mask__scnprintf(&rec->thread_masks[t].affinity, "affinity");
3771 ret = record__thread_mask_alloc(&thread_mask, cpu__max_cpu().cpu);
3773 pr_err("Failed to allocate thread mask\n");
3774 goto out_free_full_and_cpu_masks;
3777 rec->nr_threads = t;
3778 pr_debug("nr_threads: %d\n", rec->nr_threads);
3779 if (!rec->nr_threads)
3783 record__thread_mask_free(&thread_mask);
3784 out_free_full_and_cpu_masks:
3785 record__thread_mask_free(&full_mask);
3787 record__mmap_cpu_mask_free(&cpus_mask);
3792 static int record__init_thread_core_masks(struct record *rec, struct perf_cpu_map *cpus)
3795 struct cpu_topology *topo;
3797 topo = cpu_topology__new();
3799 pr_err("Failed to allocate CPU topology\n");
3803 ret = record__init_thread_masks_spec(rec, cpus, topo->core_cpus_list,
3804 topo->core_cpus_list, topo->core_cpus_lists);
3805 cpu_topology__delete(topo);
3810 static int record__init_thread_package_masks(struct record *rec, struct perf_cpu_map *cpus)
3813 struct cpu_topology *topo;
3815 topo = cpu_topology__new();
3817 pr_err("Failed to allocate CPU topology\n");
3821 ret = record__init_thread_masks_spec(rec, cpus, topo->package_cpus_list,
3822 topo->package_cpus_list, topo->package_cpus_lists);
3823 cpu_topology__delete(topo);
3828 static int record__init_thread_numa_masks(struct record *rec, struct perf_cpu_map *cpus)
3833 struct numa_topology *topo;
3835 topo = numa_topology__new();
3837 pr_err("Failed to allocate NUMA topology\n");
3841 spec = zalloc(topo->nr * sizeof(char *));
3843 pr_err("Failed to allocate NUMA spec\n");
3845 goto out_delete_topo;
3847 for (s = 0; s < topo->nr; s++)
3848 spec[s] = topo->nodes[s].cpus;
3850 ret = record__init_thread_masks_spec(rec, cpus, spec, spec, topo->nr);
3855 numa_topology__delete(topo);
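/*
 * Parse the user --threads spec: colon separated entries of the form
 * <maps cpu list>/<affinity cpu list>, e.g. --threads=0-3/0:4-7/4.
 */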
3860 static int record__init_thread_user_masks(struct record *rec, struct perf_cpu_map *cpus)
3864 char **maps_spec = NULL, **affinity_spec = NULL, **tmp_spec;
3865 char *user_spec, *spec, *spec_ptr, *mask, *mask_ptr, *dup_mask = NULL;
3867 for (t = 0, user_spec = (char *)rec->opts.threads_user_spec; ; t++, user_spec = NULL) {
3868 spec = strtok_r(user_spec, ":", &spec_ptr);
3871 pr_debug2("threads_spec[%d]: %s\n", t, spec);
3872 mask = strtok_r(spec, "/", &mask_ptr);
3875 pr_debug2(" maps mask: %s\n", mask);
3876 tmp_spec = realloc(maps_spec, (nr_spec + 1) * sizeof(char *));
3878 pr_err("Failed to reallocate maps spec\n");
3882 maps_spec = tmp_spec;
3883 maps_spec[nr_spec] = dup_mask = strdup(mask);
3884 if (!maps_spec[nr_spec]) {
3885 pr_err("Failed to allocate maps spec[%d]\n", nr_spec);
3889 mask = strtok_r(NULL, "/", &mask_ptr);
3891 pr_err("Invalid thread maps or affinity specs\n");
3895 pr_debug2(" affinity mask: %s\n", mask);
3896 tmp_spec = realloc(affinity_spec, (nr_spec + 1) * sizeof(char *));
3898 pr_err("Failed to reallocate affinity spec\n");
3902 affinity_spec = tmp_spec;
3903 affinity_spec[nr_spec] = strdup(mask);
3904 if (!affinity_spec[nr_spec]) {
3905 pr_err("Failed to allocate affinity spec[%d]\n", nr_spec);
3913 ret = record__init_thread_masks_spec(rec, cpus, (const char **)maps_spec,
3914 (const char **)affinity_spec, nr_spec);
3918 for (s = 0; s < nr_spec; s++) {
3922 free(affinity_spec[s]);
3924 free(affinity_spec);
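/* Default (no --threads): a single reader thread whose maps mask covers all mapped CPUs. */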
3930 static int record__init_thread_default_masks(struct record *rec, struct perf_cpu_map *cpus)
3934 ret = record__alloc_thread_masks(rec, 1, cpu__max_cpu().cpu);
3938 if (record__mmap_cpu_mask_init(&rec->thread_masks->maps, cpus))
3941 rec->nr_threads = 1;
3946 static int record__init_thread_masks(struct record *rec)
3949 struct perf_cpu_map *cpus = rec->evlist->core.all_cpus;
3951 if (!record__threads_enabled(rec))
3952 return record__init_thread_default_masks(rec, cpus);
3954 if (evlist__per_thread(rec->evlist)) {
3955 pr_err("--per-thread option is mutually exclusive to parallel streaming mode.\n");
3959 switch (rec->opts.threads_spec) {
3960 case THREAD_SPEC__CPU:
3961 ret = record__init_thread_cpu_masks(rec, cpus);
3963 case THREAD_SPEC__CORE:
3964 ret = record__init_thread_core_masks(rec, cpus);
3966 case THREAD_SPEC__PACKAGE:
3967 ret = record__init_thread_package_masks(rec, cpus);
3969 case THREAD_SPEC__NUMA:
3970 ret = record__init_thread_numa_masks(rec, cpus);
3972 case THREAD_SPEC__USER:
3973 ret = record__init_thread_user_masks(rec, cpus);
3982 int cmd_record(int argc, const char **argv)
3985 struct record *rec = &record;
3986 char errbuf[BUFSIZ];
3988 setlocale(LC_ALL, "");
3990 #ifndef HAVE_BPF_SKEL
3991 # define set_nobuild(s, l, m, c) set_option_nobuild(record_options, s, l, m, c)
3992 set_nobuild('\0', "off-cpu", "no BUILD_BPF_SKEL=1", true);
3996 /* Disable eager loading of kernel symbols that adds overhead to perf record. */
3997 symbol_conf.lazy_load_kernel_maps = true;
3998 rec->opts.affinity = PERF_AFFINITY_SYS;
4000 rec->evlist = evlist__new();
4001 if (rec->evlist == NULL)
4004 err = perf_config(perf_record_config, rec);
4008 argc = parse_options(argc, argv, record_options, record_usage,
4009 PARSE_OPT_STOP_AT_NON_OPTION);
4011 perf_quiet_option();
4013 err = symbol__validate_sym_arguments();
4017 perf_debuginfod_setup(&record.debuginfod);
4019 /* Make system wide (-a) the default target. */
4020 if (!argc && target__none(&rec->opts.target))
4021 rec->opts.target.system_wide = true;
4023 if (nr_cgroups && !rec->opts.target.system_wide) {
4024 usage_with_options_msg(record_usage, record_options,
4025 "cgroup monitoring only available in system-wide mode");
4029 if (rec->buildid_mmap) {
4030 if (!perf_can_record_build_id()) {
4031 pr_err("Failed: no support to record build id in mmap events, update your kernel.\n");
4035 pr_debug("Enabling build id in mmap2 events.\n");
4036 /* Enable mmap build id synthesizing. */
4037 symbol_conf.buildid_mmap2 = true;
4038 /* Enable perf_event_attr::build_id bit. */
4039 rec->opts.build_id = true;
4040 /* Disable build id cache. */
4041 rec->no_buildid = true;
4044 if (rec->opts.record_cgroup && !perf_can_record_cgroup()) {
4045 pr_err("Kernel has no cgroup sampling support.\n");
4050 if (rec->opts.kcore)
4051 rec->opts.text_poke = true;
4053 if (rec->opts.kcore || record__threads_enabled(rec))
4054 rec->data.is_dir = true;
4056 if (record__threads_enabled(rec)) {
4057 if (rec->opts.affinity != PERF_AFFINITY_SYS) {
4058 pr_err("--affinity option is mutually exclusive to parallel streaming mode.\n");
4061 if (record__aio_enabled(rec)) {
4062 pr_err("Asynchronous streaming mode (--aio) is mutually exclusive to parallel streaming mode.\n");
4067 if (rec->opts.comp_level != 0) {
4068 pr_debug("Compression enabled, disabling build id collection at the end of the session.\n");
4069 rec->no_buildid = true;
4072 if (rec->opts.record_switch_events &&
4073 !perf_can_record_switch_events()) {
4074 ui__error("kernel does not support recording context switch events\n");
4075 parse_options_usage(record_usage, record_options, "switch-events", 0);
4080 if (switch_output_setup(rec)) {
4081 parse_options_usage(record_usage, record_options, "switch-output", 0);
4086 if (rec->switch_output.time) {
4087 signal(SIGALRM, alarm_sig_handler);
4088 alarm(rec->switch_output.time);
4091 if (rec->switch_output.num_files) {
4092 rec->switch_output.filenames = calloc(rec->switch_output.num_files,
4094 if (!rec->switch_output.filenames) {
4100 if (rec->timestamp_filename && record__threads_enabled(rec)) {
4101 rec->timestamp_filename = false;
4102 pr_warning("WARNING: --timestamp-filename option is not available in parallel streaming mode.\n");
4106 * Allow aliases to facilitate the lookup of symbols for address
4107 * filters. Refer to auxtrace_parse_filters().
4109 symbol_conf.allow_aliases = true;
4113 err = record__auxtrace_init(rec);
4122 if (rec->no_buildid_cache || rec->no_buildid) {
4123 disable_buildid_cache();
4124 } else if (rec->switch_output.enabled) {
4126 * In 'perf record --switch-output', disable buildid
4127 * generation by default to reduce data file switching
4128 * overhead. Still generate buildids if they are explicitly required using
4131 * perf record --switch-output --no-no-buildid \
4132 * --no-no-buildid-cache
4134 * The following code is equivalent to:
4136 * if ((rec->no_buildid || !rec->no_buildid_set) &&
4137 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
4138 * disable_buildid_cache();
4140 bool disable = true;
4142 if (rec->no_buildid_set && !rec->no_buildid)
4144 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
4147 rec->no_buildid = true;
4148 rec->no_buildid_cache = true;
4149 disable_buildid_cache();
4153 if (record.opts.overwrite)
4154 record.opts.tail_synthesize = true;
4156 if (rec->evlist->core.nr_entries == 0) {
4157 bool can_profile_kernel = perf_event_paranoid_check(1);
4159 err = parse_event(rec->evlist, can_profile_kernel ? "cycles:P" : "cycles:Pu");
4164 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
4165 rec->opts.no_inherit = true;
4167 err = target__validate(&rec->opts.target);
4169 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
4170 ui__warning("%s\n", errbuf);
4173 err = target__parse_uid(&rec->opts.target);
4175 int saved_errno = errno;
4177 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
4178 ui__error("%s", errbuf);
4184 /* Enable ignoring missing threads when -u/-p option is defined. */
4185 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
4187 evlist__warn_user_requested_cpus(rec->evlist, rec->opts.target.cpu_list);
4189 if (callchain_param.enabled && callchain_param.record_mode == CALLCHAIN_FP)
4190 arch__add_leaf_frame_record_opts(&rec->opts);
4193 if (evlist__create_maps(rec->evlist, &rec->opts.target) < 0) {
4194 if (rec->opts.target.pid != NULL) {
4195 pr_err("Couldn't create thread/CPU maps: %s\n",
4196 errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
4200 usage_with_options(record_usage, record_options);
4203 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
4208 * We take all buildids when the file contains
4209 * AUX area tracing data because we do not decode the
4210 * trace, which would take too long.
4212 if (rec->opts.full_auxtrace)
4213 rec->buildid_all = true;
4215 if (rec->opts.text_poke) {
4216 err = record__config_text_poke(rec->evlist);
4218 pr_err("record__config_text_poke failed, error %d\n", err);
4224 err = record__config_off_cpu(rec);
4226 pr_err("record__config_off_cpu failed, error %d\n", err);
4231 if (record_opts__config(&rec->opts)) {
4236 err = record__config_tracking_events(rec);
4238 pr_err("record__config_tracking_events failed, error %d\n", err);
4242 err = record__init_thread_masks(rec);
4244 pr_err("Failed to initialize parallel data streaming masks\n");
4248 if (rec->opts.nr_cblocks > nr_cblocks_max)
4249 rec->opts.nr_cblocks = nr_cblocks_max;
4250 pr_debug("nr_cblocks: %d\n", rec->opts.nr_cblocks);
4252 pr_debug("affinity: %s\n", affinity_tags[rec->opts.affinity]);
4253 pr_debug("mmap flush: %d\n", rec->opts.mmap_flush);
4255 if (rec->opts.comp_level > comp_level_max)
4256 rec->opts.comp_level = comp_level_max;
4257 pr_debug("comp level: %d\n", rec->opts.comp_level);
4259 err = __cmd_record(&record, argc, argv);
4261 evlist__delete(rec->evlist);
4263 auxtrace_record__free(rec->itr);
4265 record__free_thread_masks(rec, rec->nr_threads);
4266 rec->nr_threads = 0;
4267 evlist__close_control(rec->opts.ctl_fd, rec->opts.ctl_fd_ack, &rec->opts.ctl_fd_close);
4271 static void snapshot_sig_handler(int sig __maybe_unused)
4273 struct record *rec = &record;
4275 hit_auxtrace_snapshot_trigger(rec);
4277 if (switch_output_signal(rec))
4278 trigger_hit(&switch_output_trigger);
4281 static void alarm_sig_handler(int sig __maybe_unused)
4283 struct record *rec = &record;
4285 if (switch_output_time(rec))
4286 trigger_hit(&switch_output_trigger);