// SPDX-License-Identifier: GPL-2.0-only

#include "util/cgroup.h"
#include "util/debug.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map_symbol.h"
#include "util/branch.h"
#include "util/memswap.h"
#include "util/namespaces.h"
#include "util/session.h"
#include "util/stat.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/target.h"
#include "util/time-utils.h"
#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <linux/perf_event.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <internal/lib.h> // page_size
#include <internal/threadmap.h>
#include <perf/threadmap.h>
#include <symbol/kallsyms.h>
#include <uapi/linux/mman.h> /* To get things like MAP_HUGETLB even on older libc headers */
#include <api/fs/fs.h>
#include <sys/types.h>
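
/*
 * Per-file budget, in milliseconds, for parsing a single /proc/<pid>/maps
 * file: perf_event__synthesize_mmap_events() below stops early once it is
 * exceeded and flags the last record with
 * PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT.
 */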
#define DEFAULT_PROC_MAP_PARSE_TIMEOUT 500

unsigned int proc_map_timeout = DEFAULT_PROC_MAP_PARSE_TIMEOUT;

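/*
 * All synthesized records funnel through here: wrap the event in a dummy
 * sample (ids/time unknown, cpumode taken from the event header) and feed
 * it to the tool's handler exactly like a record read from the kernel.
 */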
int perf_tool__process_synth_event(struct perf_tool *tool,
				   union perf_event *event,
				   struct machine *machine,
				   perf_event__handler_t process)
{
	struct perf_sample synth_sample = {
		.pid	   = -1,
		.tid	   = -1,
		.time	   = -1,
		.stream_id = -1,
		.cpu	   = -1,
		.period	   = 1,
		.cpumode   = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK,
	};

	return process(tool, event, &synth_sample, machine);
}

/*
 * Assumes that the first 4095 bytes of /proc/pid/status contain
 * the comm, tgid and ppid.
 */
static int perf_event__get_comm_ids(pid_t pid, pid_t tid, char *comm, size_t len,
				    pid_t *tgid, pid_t *ppid, bool *kernel)
{
	char bf[4096];
	int fd;
	size_t size = 0;
	ssize_t n;
	char *name, *tgids, *ppids, *vmpeak, *threads;

	*tgid = -1;
	*ppid = -1;

	if (pid)
		snprintf(bf, sizeof(bf), "/proc/%d/task/%d/status", pid, tid);
	else
		snprintf(bf, sizeof(bf), "/proc/%d/status", tid);

	fd = open(bf, O_RDONLY);
	if (fd < 0) {
		pr_debug("couldn't open %s\n", bf);
		return -1;
	}

	n = read(fd, bf, sizeof(bf) - 1);
	close(fd);
	if (n <= 0) {
		pr_warning("Couldn't get COMM, tgid and ppid for pid %d\n",
			   tid);
		return -1;
	}
	bf[n] = '\0';

	name = strstr(bf, "Name:");
	tgids = strstr(name ?: bf, "Tgid:");
	ppids = strstr(tgids ?: bf, "PPid:");
	vmpeak = strstr(ppids ?: bf, "VmPeak:");

	if (vmpeak)
		threads = NULL;
	else
		threads = strstr(ppids ?: bf, "Threads:");

	if (name) {
		char *nl;

		name = skip_spaces(name + 5); /* strlen("Name:") */
		nl = strchr(name, '\n');
		if (nl)
			*nl = '\0';

		size = strlen(name);
		if (size >= len)
			size = len - 1;
		memcpy(comm, name, size);
		comm[size] = '\0';
	} else {
		pr_debug("Name: string not found for pid %d\n", tid);
	}

	if (tgids) {
		tgids += 5; /* strlen("Tgid:") */
		*tgid = atoi(tgids);
	} else {
		pr_debug("Tgid: string not found for pid %d\n", tid);
	}

	if (ppids) {
		ppids += 5; /* strlen("PPid:") */
		*ppid = atoi(ppids);
	} else {
		pr_debug("PPid: string not found for pid %d\n", tid);
	}

	if (!vmpeak && threads)
		*kernel = true;
	else
		*kernel = false;

	return 0;
}

static int perf_event__prepare_comm(union perf_event *event, pid_t pid, pid_t tid,
				    struct machine *machine,
				    pid_t *tgid, pid_t *ppid, bool *kernel)
{
	size_t size;

	*ppid = -1;

	memset(&event->comm, 0, sizeof(event->comm));

	if (machine__is_host(machine)) {
		if (perf_event__get_comm_ids(pid, tid, event->comm.comm,
					     sizeof(event->comm.comm),
					     tgid, ppid, kernel) != 0) {
			return -1;
		}
	} else {
		*tgid = machine->pid;
	}

	if (*tgid < 0)
		return -1;

	event->comm.pid = *tgid;
	event->comm.header.type = PERF_RECORD_COMM;

	size = strlen(event->comm.comm) + 1;
	size = PERF_ALIGN(size, sizeof(u64));
	memset(event->comm.comm + size, 0, machine->id_hdr_size);
	event->comm.header.size = (sizeof(event->comm) -
				   (sizeof(event->comm.comm) - size) +
				   machine->id_hdr_size);
	event->comm.tid = tid;

	return 0;
}

pid_t perf_event__synthesize_comm(struct perf_tool *tool,
				  union perf_event *event, pid_t pid,
				  perf_event__handler_t process,
				  struct machine *machine)
{
	pid_t tgid, ppid;
	bool kernel_thread;

	if (perf_event__prepare_comm(event, 0, pid, machine, &tgid, &ppid,
				     &kernel_thread) != 0)
		return -1;

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return tgid;
}

static void perf_event__get_ns_link_info(pid_t pid, const char *ns,
					 struct perf_ns_link_info *ns_link_info)
{
	struct stat64 st;
	char proc_ns[256];

	sprintf(proc_ns, "/proc/%u/ns/%s", pid, ns);
	if (stat64(proc_ns, &st) == 0) {
		ns_link_info->dev = st.st_dev;
		ns_link_info->ino = st.st_ino;
	}
}

int perf_event__synthesize_namespaces(struct perf_tool *tool,
				      union perf_event *event,
				      pid_t pid, pid_t tgid,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	u32 idx;
	struct perf_ns_link_info *ns_link_info;

	if (!tool || !tool->namespace_events)
		return 0;

	memset(&event->namespaces, 0, (sizeof(event->namespaces) +
	       (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
	       machine->id_hdr_size));

	event->namespaces.pid = tgid;
	event->namespaces.tid = pid;

	event->namespaces.nr_namespaces = NR_NAMESPACES;

	ns_link_info = event->namespaces.link_info;

	for (idx = 0; idx < event->namespaces.nr_namespaces; idx++)
		perf_event__get_ns_link_info(pid, perf_ns__name(idx),
					     &ns_link_info[idx]);

	event->namespaces.header.type = PERF_RECORD_NAMESPACES;

	event->namespaces.header.size = (sizeof(event->namespaces) +
					 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
					 machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

static int perf_event__synthesize_fork(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid, pid_t ppid,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	memset(&event->fork, 0, sizeof(event->fork) + machine->id_hdr_size);

	/*
	 * For the main thread, set the parent to the ppid from the status
	 * file. For other threads, set the parent pid to the main thread,
	 * i.e. assume the main thread spawns all threads in a process.
	 */
	if (tgid == pid) {
		event->fork.ppid = ppid;
		event->fork.ptid = ppid;
	} else {
		event->fork.ppid = tgid;
		event->fork.ptid = tgid;
	}
	event->fork.pid = tgid;
	event->fork.tid = pid;
	event->fork.header.type = PERF_RECORD_FORK;
	event->fork.header.misc = PERF_RECORD_MISC_FORK_EXEC;

	event->fork.header.size = (sizeof(event->fork) + machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) != 0)
		return -1;

	return 0;
}

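/*
 * Parse one line of /proc/<pid>/maps, e.g.:
 *
 *	00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat
 *
 * i.e. start-end addresses, rwxp protection/sharing bits, file offset,
 * major:minor device, inode, and an optional pathname. Returns false on
 * malformed input.
 */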
static bool read_proc_maps_line(struct io *io, __u64 *start, __u64 *end,
				u32 *prot, u32 *flags, __u64 *offset,
				u32 *maj, u32 *min, __u64 *inode,
				ssize_t pathname_size, char *pathname)
{
	__u64 temp;
	int ch;
	char *start_pathname = pathname;

	if (io__get_hex(io, start) != '-')
		return false;
	if (io__get_hex(io, end) != ' ')
		return false;

	/* map protection and flags bits */
	*prot = 0;
	ch = io__get_char(io);
	if (ch == 'r')
		*prot |= PROT_READ;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 'w')
		*prot |= PROT_WRITE;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 'x')
		*prot |= PROT_EXEC;
	else if (ch != '-')
		return false;
	ch = io__get_char(io);
	if (ch == 's')
		*flags = MAP_SHARED;
	else if (ch == 'p')
		*flags = MAP_PRIVATE;
	else
		return false;
	if (io__get_char(io) != ' ')
		return false;

	if (io__get_hex(io, offset) != ' ')
		return false;

	if (io__get_hex(io, &temp) != ':')
		return false;
	*maj = temp;
	if (io__get_hex(io, &temp) != ' ')
		return false;
	*min = temp;

	ch = io__get_dec(io, inode);
	if (ch != ' ') {
		*pathname = '\0';
		return ch == '\n';
	}

	/* skip spaces */
	do {
		ch = io__get_char(io);
	} while (ch == ' ');
	while (true) {
		if (ch < 0)
			return false;
		if (ch == '\0' || ch == '\n' ||
		    (pathname + 1 - start_pathname) >= pathname_size) {
			*pathname = '\0';
			return true;
		}
		*pathname++ = ch;
		ch = io__get_char(io);
	}
}

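/*
 * Fill in the build ID for an MMAP2 record: the kernel's comes from
 * /sys/kernel/notes, anything else is looked up in the machine's dso cache
 * first and, failing that, read from the file itself inside the target
 * task's mount namespace.
 */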
static void perf_record_mmap2__read_build_id(struct perf_record_mmap2 *event,
					     struct machine *machine,
					     bool is_kernel)
{
	struct build_id bid;
	struct nsinfo *nsi;
	struct nscookie nc;
	struct dso *dso = NULL;
	struct dso_id id;
	int rc;

	if (is_kernel) {
		rc = sysfs__read_build_id("/sys/kernel/notes", &bid);
		goto out;
	}

	id.maj = event->maj;
	id.min = event->min;
	id.ino = event->ino;
	id.ino_generation = event->ino_generation;

	dso = dsos__findnew_id(&machine->dsos, event->filename, &id);
	if (dso && dso->has_build_id) {
		bid = dso->bid;
		rc = 0;
		goto out;
	}

	nsi = nsinfo__new(event->pid);
	nsinfo__mountns_enter(nsi, &nc);

	rc = filename__read_build_id(event->filename, &bid) > 0 ? 0 : -1;

	nsinfo__mountns_exit(&nc);
	nsinfo__put(nsi);

out:
	if (rc == 0) {
		memcpy(event->build_id, bid.data, sizeof(bid.data));
		event->build_id_size = (u8) bid.size;
		event->header.misc |= PERF_RECORD_MISC_MMAP_BUILD_ID;
		event->__reserved_1 = 0;
		event->__reserved_2 = 0;

		if (dso && !dso->has_build_id)
			dso__set_build_id(dso, &bid);
	} else {
		if (event->filename[0] == '/') {
			pr_debug2("Failed to read build ID for %s\n",
				  event->filename);
		}
	}
	dso__put(dso);
}

int perf_event__synthesize_mmap_events(struct perf_tool *tool,
				       union perf_event *event,
				       pid_t pid, pid_t tgid,
				       perf_event__handler_t process,
				       struct machine *machine,
				       bool mmap_data)
{
	unsigned long long t;
	char bf[BUFSIZ];
	struct io io;
	bool truncation = false;
	unsigned long long timeout = proc_map_timeout * 1000000ULL;
	int rc = 0;
	const char *hugetlbfs_mnt = hugetlbfs__mountpoint();
	int hugetlbfs_mnt_len = hugetlbfs_mnt ? strlen(hugetlbfs_mnt) : 0;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(bf, sizeof(bf), "%s/proc/%d/task/%d/maps",
		 machine->root_dir, pid, pid);

	io.fd = open(bf, O_RDONLY, 0);
	if (io.fd < 0) {
		/*
		 * We raced with a task exiting - just return:
		 */
		pr_debug("couldn't open %s\n", bf);
		return -1;
	}
	io__init(&io, io.fd, bf, sizeof(bf));

	event->header.type = PERF_RECORD_MMAP2;
	t = rdclock();

	while (!io.eof) {
		static const char anonstr[] = "//anon";
		size_t size, aligned_size;

		/* ensure null termination since stack will be reused. */
		event->mmap2.filename[0] = '\0';

		/* 00400000-0040c000 r-xp 00000000 fd:01 41038 /bin/cat */
		if (!read_proc_maps_line(&io,
					 &event->mmap2.start,
					 &event->mmap2.len,
					 &event->mmap2.prot,
					 &event->mmap2.flags,
					 &event->mmap2.pgoff,
					 &event->mmap2.maj,
					 &event->mmap2.min,
					 &event->mmap2.ino,
					 sizeof(event->mmap2.filename),
					 event->mmap2.filename))
			continue;

		if ((rdclock() - t) > timeout) {
			pr_warning("Reading %s/proc/%d/task/%d/maps time out. "
				   "You may want to increase "
				   "the time limit by --proc-map-timeout\n",
				   machine->root_dir, pid, pid);
			truncation = true;
			goto out;
		}

		event->mmap2.ino_generation = 0;

		/*
		 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
		 */
		if (machine__is_host(machine))
			event->header.misc = PERF_RECORD_MISC_USER;
		else
			event->header.misc = PERF_RECORD_MISC_GUEST_USER;

		if ((event->mmap2.prot & PROT_EXEC) == 0) {
			if (!mmap_data || (event->mmap2.prot & PROT_READ) == 0)
				continue;

			event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
		}

out:
		if (truncation)
			event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;

		if (!strcmp(event->mmap2.filename, ""))
			strcpy(event->mmap2.filename, anonstr);

		if (hugetlbfs_mnt_len &&
		    !strncmp(event->mmap2.filename, hugetlbfs_mnt,
			     hugetlbfs_mnt_len)) {
			strcpy(event->mmap2.filename, anonstr);
			event->mmap2.flags |= MAP_HUGETLB;
		}

		size = strlen(event->mmap2.filename) + 1;
		aligned_size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.len -= event->mmap2.start;
		event->mmap2.header.size = (sizeof(event->mmap2) -
					    (sizeof(event->mmap2.filename) - aligned_size));
		memset(event->mmap2.filename + size, 0, machine->id_hdr_size +
		       (aligned_size - size));
		event->mmap2.header.size += machine->id_hdr_size;
		event->mmap2.pid = tgid;
		event->mmap2.tid = pid;

		if (symbol_conf.buildid_mmap2)
			perf_record_mmap2__read_build_id(&event->mmap2, machine, false);

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}

		if (truncation)
			break;
	}

	close(io.fd);
	return rc;
}

#ifdef HAVE_FILE_HANDLE
static int perf_event__synthesize_cgroup(struct perf_tool *tool,
					 union perf_event *event,
					 char *path, size_t mount_len,
					 perf_event__handler_t process,
					 struct machine *machine)
{
	size_t event_size = sizeof(event->cgroup) - sizeof(event->cgroup.path);
	size_t path_len = strlen(path) - mount_len + 1;
	struct {
		struct file_handle fh;
		uint64_t cgroup_id;
	} handle;
	int mount_id;

	while (path_len % sizeof(u64))
		path[mount_len + path_len++] = '\0';

	memset(&event->cgroup, 0, event_size);

	event->cgroup.header.type = PERF_RECORD_CGROUP;
	event->cgroup.header.size = event_size + path_len + machine->id_hdr_size;

	handle.fh.handle_bytes = sizeof(handle.cgroup_id);
	if (name_to_handle_at(AT_FDCWD, path, &handle.fh, &mount_id, 0) < 0) {
		pr_debug("stat failed: %s\n", path);
		return -1;
	}

	event->cgroup.id = handle.cgroup_id;
	strncpy(event->cgroup.path, path + mount_len, path_len);
	memset(event->cgroup.path + path_len, 0, machine->id_hdr_size);

	if (perf_tool__process_synth_event(tool, event, machine, process) < 0) {
		pr_debug("process synth event failed\n");
		return -1;
	}

	return 0;
}

static int perf_event__walk_cgroup_tree(struct perf_tool *tool,
					union perf_event *event,
					char *path, size_t mount_len,
					perf_event__handler_t process,
					struct machine *machine)
{
	size_t pos = strlen(path);
	DIR *d;
	struct dirent *dent;
	int ret = 0;

	if (perf_event__synthesize_cgroup(tool, event, path, mount_len,
					  process, machine) < 0)
		return -1;

	d = opendir(path);
	if (d == NULL) {
		pr_debug("failed to open directory: %s\n", path);
		return -1;
	}

	while ((dent = readdir(d)) != NULL) {
		if (dent->d_type != DT_DIR)
			continue;
		if (!strcmp(dent->d_name, ".") ||
		    !strcmp(dent->d_name, ".."))
			continue;

		/* any sane path should be less than PATH_MAX */
		if (strlen(path) + strlen(dent->d_name) + 1 >= PATH_MAX)
			continue;

		if (path[pos - 1] != '/')
			strcat(path, "/");
		strcat(path, dent->d_name);

		ret = perf_event__walk_cgroup_tree(tool, event, path,
						   mount_len, process, machine);
		if (ret < 0)
			break;

		path[pos] = '\0';
	}

	closedir(d);
	return ret;
}

int perf_event__synthesize_cgroups(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	union perf_event event;
	char cgrp_root[PATH_MAX];
	size_t mount_len; /* length of mount point in the path */

	if (!tool || !tool->cgroup_events)
		return 0;

	if (cgroupfs_find_mountpoint(cgrp_root, PATH_MAX, "perf_event") < 0) {
		pr_debug("cannot find cgroup mount point\n");
		return -1;
	}

	mount_len = strlen(cgrp_root);
	/* make sure the path starts with a slash (after mount point) */
	strcat(cgrp_root, "/");

	if (perf_event__walk_cgroup_tree(tool, &event, cgrp_root, mount_len,
					 process, machine) < 0)
		return -1;

	return 0;
}
#else
int perf_event__synthesize_cgroups(struct perf_tool *tool __maybe_unused,
				   perf_event__handler_t process __maybe_unused,
				   struct machine *machine __maybe_unused)
{
	return -1;
}
#endif

int perf_event__synthesize_modules(struct perf_tool *tool, perf_event__handler_t process,
				   struct machine *machine)
{
	int rc = 0;
	struct map_rb_node *pos;
	struct maps *maps = machine__kernel_maps(machine);
	union perf_event *event;
	size_t size = symbol_conf.buildid_mmap2 ?
			sizeof(event->mmap2) : sizeof(event->mmap);

	event = zalloc(size + machine->id_hdr_size);
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	/*
	 * kernel uses 0 for user space maps, see kernel/perf_event.c
	 * __perf_event_mmap
	 */
	if (machine__is_host(machine))
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	else
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;

	maps__for_each_entry(maps, pos) {
		struct map *map = pos->map;
		struct dso *dso;

		if (!__map__is_kmodule(map))
			continue;

		dso = map__dso(map);
		if (symbol_conf.buildid_mmap2) {
			size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
			event->mmap2.header.type = PERF_RECORD_MMAP2;
			event->mmap2.header.size = (sizeof(event->mmap2) -
						    (sizeof(event->mmap2.filename) - size));
			memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
			event->mmap2.header.size += machine->id_hdr_size;
			event->mmap2.start = map__start(map);
			event->mmap2.len   = map__size(map);
			event->mmap2.pid   = machine->pid;

			memcpy(event->mmap2.filename, dso->long_name, dso->long_name_len + 1);

			perf_record_mmap2__read_build_id(&event->mmap2, machine, false);
		} else {
			size = PERF_ALIGN(dso->long_name_len + 1, sizeof(u64));
			event->mmap.header.type = PERF_RECORD_MMAP;
			event->mmap.header.size = (sizeof(event->mmap) -
						   (sizeof(event->mmap.filename) - size));
			memset(event->mmap.filename + size, 0, machine->id_hdr_size);
			event->mmap.header.size += machine->id_hdr_size;
			event->mmap.start = map__start(map);
			event->mmap.len   = map__size(map);
			event->mmap.pid   = machine->pid;

			memcpy(event->mmap.filename, dso->long_name, dso->long_name_len + 1);
		}

		if (perf_tool__process_synth_event(tool, event, machine, process) != 0) {
			rc = -1;
			break;
		}
	}

	free(event);
	return rc;
}

static int filter_task(const struct dirent *dirent)
{
	return isdigit(dirent->d_name[0]);
}

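/*
 * Synthesize COMM/FORK/MMAP (and namespace) records for one task: with
 * full == 0 only the task itself (a single COMM plus, for a thread group
 * leader, its maps), with full != 0 every thread listed under
 * /proc/<pid>/task.
 */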
static int __event__synthesize_thread(union perf_event *comm_event,
				      union perf_event *mmap_event,
				      union perf_event *fork_event,
				      union perf_event *namespaces_event,
				      pid_t pid, int full, perf_event__handler_t process,
				      struct perf_tool *tool, struct machine *machine,
				      bool needs_mmap, bool mmap_data)
{
	char filename[PATH_MAX];
	struct dirent **dirent;
	pid_t tgid, ppid;
	int rc = 0;
	int i, n;

	/* special case: only send one comm event using passed in pid */
	if (!full) {
		tgid = perf_event__synthesize_comm(tool, comm_event, pid,
						   process, machine);

		if (tgid == -1)
			return -1;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, pid,
						      tgid, process, machine) < 0)
			return -1;

		/*
		 * send mmap only for thread group leader
		 * see thread__init_maps()
		 */
		if (pid == tgid && needs_mmap &&
		    perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
						       process, machine, mmap_data))
			return -1;

		return 0;
	}

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(filename, sizeof(filename), "%s/proc/%d/task",
		 machine->root_dir, pid);

	n = scandir(filename, &dirent, filter_task, NULL);
	if (n < 0)
		return n;

	for (i = 0; i < n; i++) {
		char *end;
		pid_t _pid;
		bool kernel_thread = false;

		_pid = strtol(dirent[i]->d_name, &end, 10);
		if (*end)
			continue;

		/* some threads may exit just after scan, ignore it */
		if (perf_event__prepare_comm(comm_event, pid, _pid, machine,
					     &tgid, &ppid, &kernel_thread) != 0)
			continue;

		rc = -1;
		if (perf_event__synthesize_fork(tool, fork_event, _pid, tgid,
						ppid, process, machine) < 0)
			break;

		if (perf_event__synthesize_namespaces(tool, namespaces_event, _pid,
						      tgid, process, machine) < 0)
			break;

		/*
		 * Send the prepared comm event
		 */
		if (perf_tool__process_synth_event(tool, comm_event, machine, process) != 0)
			break;

		rc = 0;
		if (_pid == pid && !kernel_thread && needs_mmap) {
			/* process the parent's maps too */
			rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
								process, machine, mmap_data);
			if (rc)
				break;
		}
	}

	for (i = 0; i < n; i++)
		zfree(&dirent[i]);
	free(dirent);

	return rc;
}

int perf_event__synthesize_thread_map(struct perf_tool *tool,
				      struct perf_thread_map *threads,
				      perf_event__handler_t process,
				      struct machine *machine,
				      bool needs_mmap, bool mmap_data)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1, thread, j;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	err = 0;
	for (thread = 0; thread < threads->nr; ++thread) {
		if (__event__synthesize_thread(comm_event, mmap_event,
					       fork_event, namespaces_event,
					       perf_thread_map__pid(threads, thread), 0,
					       process, tool, machine,
					       needs_mmap, mmap_data)) {
			err = -1;
			break;
		}

		/*
		 * comm.pid is set to thread group id by
		 * perf_event__synthesize_comm
		 */
		if ((int) comm_event->comm.pid != perf_thread_map__pid(threads, thread)) {
			bool need_leader = true;

			/* is thread group leader in thread_map? */
			for (j = 0; j < threads->nr; ++j) {
				if ((int) comm_event->comm.pid == perf_thread_map__pid(threads, j)) {
					need_leader = false;
					break;
				}
			}

			/* if not, generate events for it */
			if (need_leader &&
			    __event__synthesize_thread(comm_event, mmap_event,
						       fork_event, namespaces_event,
						       comm_event->comm.pid, 0,
						       process, tool, machine,
						       needs_mmap, mmap_data)) {
				err = -1;
				break;
			}
		}
	}
	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

static int __perf_event__synthesize_threads(struct perf_tool *tool,
					    perf_event__handler_t process,
					    struct machine *machine,
					    bool needs_mmap,
					    bool mmap_data,
					    struct dirent **dirent,
					    int start,
					    int num)
{
	union perf_event *comm_event, *mmap_event, *fork_event;
	union perf_event *namespaces_event;
	int err = -1;
	char *end;
	pid_t pid;
	int i;

	comm_event = malloc(sizeof(comm_event->comm) + machine->id_hdr_size);
	if (comm_event == NULL)
		goto out;

	mmap_event = malloc(sizeof(mmap_event->mmap2) + machine->id_hdr_size);
	if (mmap_event == NULL)
		goto out_free_comm;

	fork_event = malloc(sizeof(fork_event->fork) + machine->id_hdr_size);
	if (fork_event == NULL)
		goto out_free_mmap;

	namespaces_event = malloc(sizeof(namespaces_event->namespaces) +
				  (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
				  machine->id_hdr_size);
	if (namespaces_event == NULL)
		goto out_free_fork;

	for (i = start; i < start + num; i++) {
		if (!isdigit(dirent[i]->d_name[0]))
			continue;

		pid = (pid_t)strtol(dirent[i]->d_name, &end, 10);
		/* only interested in proper numerical dirents */
		if (*end)
			continue;
		/*
		 * We may race with exiting thread, so don't stop just because
		 * one thread couldn't be synthesized.
		 */
		__event__synthesize_thread(comm_event, mmap_event, fork_event,
					   namespaces_event, pid, 1, process,
					   tool, machine, needs_mmap, mmap_data);
	}
	err = 0;

	free(namespaces_event);
out_free_fork:
	free(fork_event);
out_free_mmap:
	free(mmap_event);
out_free_comm:
	free(comm_event);
out:
	return err;
}

struct synthesize_threads_arg {
	struct perf_tool *tool;
	perf_event__handler_t process;
	struct machine *machine;
	bool needs_mmap;
	bool mmap_data;
	struct dirent **dirent;
	int num;
	int start;
};

static void *synthesize_threads_worker(void *arg)
{
	struct synthesize_threads_arg *args = arg;

	__perf_event__synthesize_threads(args->tool, args->process,
					 args->machine,
					 args->needs_mmap, args->mmap_data,
					 args->dirent,
					 args->start, args->num);
	return NULL;
}

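/*
 * Synthesize events for every task in /proc, optionally spreading the
 * scandir() result across nr_threads_synthesize worker threads: the first
 * n % thread_nr workers each take one extra entry, so the whole range is
 * covered without overlap.
 */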
int perf_event__synthesize_threads(struct perf_tool *tool,
				   perf_event__handler_t process,
				   struct machine *machine,
				   bool needs_mmap, bool mmap_data,
				   unsigned int nr_threads_synthesize)
{
	struct synthesize_threads_arg *args = NULL;
	pthread_t *synthesize_threads = NULL;
	char proc_path[PATH_MAX];
	struct dirent **dirent;
	int num_per_thread;
	int m, n, i, j;
	int thread_nr;
	int base = 0;
	int err = -1;

	if (machine__is_default_guest(machine))
		return 0;

	snprintf(proc_path, sizeof(proc_path), "%s/proc", machine->root_dir);
	n = scandir(proc_path, &dirent, filter_task, NULL);
	if (n < 0)
		return err;

	if (nr_threads_synthesize == UINT_MAX)
		thread_nr = sysconf(_SC_NPROCESSORS_ONLN);
	else
		thread_nr = nr_threads_synthesize;

	if (thread_nr <= 1) {
		err = __perf_event__synthesize_threads(tool, process,
						       machine,
						       needs_mmap, mmap_data,
						       dirent, base, n);
		goto free_dirent;
	}
	if (thread_nr > n)
		thread_nr = n;

	synthesize_threads = calloc(thread_nr, sizeof(pthread_t));
	if (synthesize_threads == NULL)
		goto free_dirent;

	args = calloc(thread_nr, sizeof(*args));
	if (args == NULL)
		goto free_threads;

	num_per_thread = n / thread_nr;
	m = n % thread_nr;
	for (i = 0; i < thread_nr; i++) {
		args[i].tool = tool;
		args[i].process = process;
		args[i].machine = machine;
		args[i].needs_mmap = needs_mmap;
		args[i].mmap_data = mmap_data;
		args[i].dirent = dirent;
	}
	for (i = 0; i < m; i++) {
		args[i].num = num_per_thread + 1;
		args[i].start = i * args[i].num;
	}
	if (i != 0)
		base = args[i-1].start + args[i-1].num;
	for (j = i; j < thread_nr; j++) {
		args[j].num = num_per_thread;
		args[j].start = base + (j - i) * args[i].num;
	}

	for (i = 0; i < thread_nr; i++) {
		if (pthread_create(&synthesize_threads[i], NULL,
				   synthesize_threads_worker, &args[i]))
			goto out_join;
	}
	err = 0;
out_join:
	for (i = 0; i < thread_nr; i++)
		pthread_join(synthesize_threads[i], NULL);
	free(args);
free_threads:
	free(synthesize_threads);
free_dirent:
	for (i = 0; i < n; i++)
		zfree(&dirent[i]);
	free(dirent);

	return err;
}

int __weak perf_event__synthesize_extra_kmaps(struct perf_tool *tool __maybe_unused,
					      perf_event__handler_t process __maybe_unused,
					      struct machine *machine __maybe_unused)
{
	return 0;
}

static int __perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
						perf_event__handler_t process,
						struct machine *machine)
{
	union perf_event *event;
	size_t size = symbol_conf.buildid_mmap2 ?
			sizeof(event->mmap2) : sizeof(event->mmap);
	struct map *map = machine__kernel_map(machine);
	struct kmap *kmap;
	int err;

	if (map == NULL)
		return -1;

	kmap = map__kmap(map);
	if (!kmap->ref_reloc_sym)
		return -1;

	/*
	 * We should get this from /sys/kernel/sections/.text, but until
	 * that is available use this, and after it is available use it as
	 * a fallback for older kernels.
	 */
	event = zalloc(size + machine->id_hdr_size);
	if (event == NULL) {
		pr_debug("Not enough memory synthesizing mmap event "
			 "for kernel modules\n");
		return -1;
	}

	if (machine__is_host(machine)) {
		/*
		 * kernel uses PERF_RECORD_MISC_USER for user space maps,
		 * see kernel/perf_event.c __perf_event_mmap
		 */
		event->header.misc = PERF_RECORD_MISC_KERNEL;
	} else {
		event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
	}

	if (symbol_conf.buildid_mmap2) {
		size = snprintf(event->mmap2.filename, sizeof(event->mmap2.filename),
				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap2.header.type = PERF_RECORD_MMAP2;
		event->mmap2.header.size = (sizeof(event->mmap2) -
				(sizeof(event->mmap2.filename) - size) + machine->id_hdr_size);
		event->mmap2.pgoff = kmap->ref_reloc_sym->addr;
		event->mmap2.start = map__start(map);
		event->mmap2.len   = map__end(map) - event->mmap2.start;
		event->mmap2.pid   = machine->pid;

		perf_record_mmap2__read_build_id(&event->mmap2, machine, true);
	} else {
		size = snprintf(event->mmap.filename, sizeof(event->mmap.filename),
				"%s%s", machine->mmap_name, kmap->ref_reloc_sym->name) + 1;
		size = PERF_ALIGN(size, sizeof(u64));
		event->mmap.header.type = PERF_RECORD_MMAP;
		event->mmap.header.size = (sizeof(event->mmap) -
				(sizeof(event->mmap.filename) - size) + machine->id_hdr_size);
		event->mmap.pgoff = kmap->ref_reloc_sym->addr;
		event->mmap.start = map__start(map);
		event->mmap.len   = map__end(map) - event->mmap.start;
		event->mmap.pid   = machine->pid;
	}

	err = perf_tool__process_synth_event(tool, event, machine, process);
	free(event);

	return err;
}

int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	int err;

	err = __perf_event__synthesize_kernel_mmap(tool, process, machine);
	if (err < 0)
		return err;

	return perf_event__synthesize_extra_kmaps(tool, process, machine);
}

int perf_event__synthesize_thread_map2(struct perf_tool *tool,
				       struct perf_thread_map *threads,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	union perf_event *event;
	int i, err, size;

	size  = sizeof(event->thread_map);
	size += threads->nr * sizeof(event->thread_map.entries[0]);

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_THREAD_MAP;
	event->header.size = size;
	event->thread_map.nr = threads->nr;

	for (i = 0; i < threads->nr; i++) {
		struct perf_record_thread_map_entry *entry = &event->thread_map.entries[i];
		char *comm = perf_thread_map__comm(threads, i);

		if (!comm)
			comm = (char *) "";

		entry->pid = perf_thread_map__pid(threads, i);
		strncpy((char *) &entry->comm, comm, sizeof(entry->comm));
	}

	err = process(tool, event, NULL, machine);

	free(event);
	return err;
}

struct synthesize_cpu_map_data {
	const struct perf_cpu_map *map;
	int nr;
	int min_cpu;
	int max_cpu;
	int has_any_cpu;
	int type;
	size_t size;
	struct perf_record_cpu_map_data *data;
};

static void synthesize_cpus(struct synthesize_cpu_map_data *data)
{
	data->data->type = PERF_CPU_MAP__CPUS;
	data->data->cpus_data.nr = data->nr;
	for (int i = 0; i < data->nr; i++)
		data->data->cpus_data.cpu[i] = perf_cpu_map__cpu(data->map, i).cpu;
}

static void synthesize_mask(struct synthesize_cpu_map_data *data)
{
	int idx;
	struct perf_cpu cpu;

	/* Due to padding, the 4bytes per entry mask variant is always smaller. */
	data->data->type = PERF_CPU_MAP__MASK;
	data->data->mask32_data.nr = BITS_TO_U32(data->max_cpu);
	data->data->mask32_data.long_size = 4;

	perf_cpu_map__for_each_cpu(cpu, idx, data->map) {
		int bit_word = cpu.cpu / 32;
		u32 bit_mask = 1U << (cpu.cpu & 31);

		data->data->mask32_data.mask[bit_word] |= bit_mask;
	}
}

static void synthesize_range_cpus(struct synthesize_cpu_map_data *data)
{
	data->data->type = PERF_CPU_MAP__RANGE_CPUS;
	data->data->range_cpu_data.any_cpu = data->has_any_cpu;
	data->data->range_cpu_data.start_cpu = data->min_cpu;
	data->data->range_cpu_data.end_cpu = data->max_cpu;
}

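/*
 * Pick the most compact of the three CPU map encodings: a consecutive
 * range collapses to PERF_CPU_MAP__RANGE_CPUS; otherwise use whichever of
 * the CPU list and the 32-bit mask variants is smaller (the list is also
 * forced when the "any CPU" entry is present).
 */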
static void *cpu_map_data__alloc(struct synthesize_cpu_map_data *syn_data,
				 size_t header_size)
{
	size_t size_cpus, size_mask;

	syn_data->nr = perf_cpu_map__nr(syn_data->map);
	syn_data->has_any_cpu = (perf_cpu_map__cpu(syn_data->map, 0).cpu == -1) ? 1 : 0;

	syn_data->min_cpu = perf_cpu_map__cpu(syn_data->map, syn_data->has_any_cpu).cpu;
	syn_data->max_cpu = perf_cpu_map__max(syn_data->map).cpu;
	if (syn_data->max_cpu - syn_data->min_cpu + 1 == syn_data->nr - syn_data->has_any_cpu) {
		/* A consecutive range of CPUs can be encoded using a range. */
		assert(sizeof(u16) + sizeof(struct perf_record_range_cpu_map) == sizeof(u64));
		syn_data->type = PERF_CPU_MAP__RANGE_CPUS;
		syn_data->size = header_size + sizeof(u64);
		return zalloc(syn_data->size);
	}

	size_cpus = sizeof(u16) + sizeof(struct cpu_map_entries) + syn_data->nr * sizeof(u16);
	/* Due to padding, the 4bytes per entry mask variant is always smaller. */
	size_mask = sizeof(u16) + sizeof(struct perf_record_mask_cpu_map32) +
		BITS_TO_U32(syn_data->max_cpu) * sizeof(__u32);
	if (syn_data->has_any_cpu || size_cpus < size_mask) {
		/* Follow the CPU map encoding. */
		syn_data->type = PERF_CPU_MAP__CPUS;
		syn_data->size = header_size + PERF_ALIGN(size_cpus, sizeof(u64));
		return zalloc(syn_data->size);
	}
	/* Encode using a bitmask. */
	syn_data->type = PERF_CPU_MAP__MASK;
	syn_data->size = header_size + PERF_ALIGN(size_mask, sizeof(u64));
	return zalloc(syn_data->size);
}

static void cpu_map_data__synthesize(struct synthesize_cpu_map_data *data)
{
	switch (data->type) {
	case PERF_CPU_MAP__CPUS:
		synthesize_cpus(data);
		break;
	case PERF_CPU_MAP__MASK:
		synthesize_mask(data);
		break;
	case PERF_CPU_MAP__RANGE_CPUS:
		synthesize_range_cpus(data);
		break;
	default:
		break;
	}
}

static struct perf_record_cpu_map *cpu_map_event__new(const struct perf_cpu_map *map)
{
	struct synthesize_cpu_map_data syn_data = { .map = map };
	struct perf_record_cpu_map *event;

	event = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header));
	if (!event)
		return NULL;

	syn_data.data = &event->data;
	event->header.type = PERF_RECORD_CPU_MAP;
	event->header.size = syn_data.size;
	cpu_map_data__synthesize(&syn_data);

	return event;
}

int perf_event__synthesize_cpu_map(struct perf_tool *tool,
				   const struct perf_cpu_map *map,
				   perf_event__handler_t process,
				   struct machine *machine)
{
	struct perf_record_cpu_map *event;
	int err;

	event = cpu_map_event__new(map);
	if (!event)
		return -ENOMEM;

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat_config(struct perf_tool *tool,
				       struct perf_stat_config *config,
				       perf_event__handler_t process,
				       struct machine *machine)
{
	struct perf_record_stat_config *event;
	int size, i = 0, err;

	size  = sizeof(*event);
	size += (PERF_STAT_CONFIG_TERM__MAX * sizeof(event->data[0]));

	event = zalloc(size);
	if (!event)
		return -ENOMEM;

	event->header.type = PERF_RECORD_STAT_CONFIG;
	event->header.size = size;
	event->nr          = PERF_STAT_CONFIG_TERM__MAX;

#define ADD(__term, __val)					\
	event->data[i].tag = PERF_STAT_CONFIG_TERM__##__term;	\
	event->data[i].val = __val;				\
	i++;

	ADD(AGGR_MODE,	config->aggr_mode)
	ADD(INTERVAL,	config->interval)
	ADD(SCALE,	config->scale)
	ADD(AGGR_LEVEL,	config->aggr_level)

	WARN_ONCE(i != PERF_STAT_CONFIG_TERM__MAX,
		  "stat config terms unbalanced\n");
#undef ADD

	err = process(tool, (union perf_event *) event, NULL, machine);

	free(event);
	return err;
}

int perf_event__synthesize_stat(struct perf_tool *tool,
				struct perf_cpu cpu, u32 thread, u64 id,
				struct perf_counts_values *count,
				perf_event__handler_t process,
				struct machine *machine)
{
	struct perf_record_stat event;

	event.header.type = PERF_RECORD_STAT;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.id     = id;
	event.cpu    = cpu.cpu;
	event.thread = thread;
	event.val    = count->val;
	event.ena    = count->ena;
	event.run    = count->run;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

int perf_event__synthesize_stat_round(struct perf_tool *tool,
				      u64 evtime, u64 type,
				      perf_event__handler_t process,
				      struct machine *machine)
{
	struct perf_record_stat_round event;

	event.header.type = PERF_RECORD_STAT_ROUND;
	event.header.size = sizeof(event);
	event.header.misc = 0;

	event.time = evtime;
	event.type = type;

	return process(tool, (union perf_event *) &event, NULL, machine);
}

size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type, u64 read_format)
{
	size_t sz, result = sizeof(struct perf_record_sample);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample_read_value_size(read_format);
			result += sz * sample->read.group.nr;
		} else {
			result += sizeof(u64);
			if (read_format & PERF_FORMAT_LOST)
				result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		/* nr, hw_idx */
		sz += 2 * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT_TYPE)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			result += sizeof(u64);
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CGROUP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_PAGE_SIZE)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CODE_PAGE_SIZE)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_AUX) {
		result += sizeof(u64);
		result += sample->aux_sample.size;
	}

	return result;
}

void __weak arch_perf_synthesize_sample_weight(const struct perf_sample *data,
					       __u64 *array, u64 type __maybe_unused)
{
	*array = data->weight;
}

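/*
 * Serialize the PERF_FORMAT_GROUP read values: one sample_read_value-sized
 * blob per group member, laid out back to back in the same order as
 * sample->read.group.values.
 */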
static __u64 *copy_read_group_values(__u64 *array, __u64 read_format,
				     const struct perf_sample *sample)
{
	size_t sz = sample_read_value_size(read_format);
	struct sample_read_value *v = sample->read.group.values;

	sample_read_group__for_each(v, sample->read.group.nr, read_format) {
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		memcpy(array, v, sz);
		array = (void *)array + sz;
	}
	return array;
}

int perf_event__synthesize_sample(union perf_event *event, u64 type, u64 read_format,
				  const struct perf_sample *sample)
{
	__u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			array = copy_read_group_values(array, read_format,
						       sample);
		} else {
			*array = sample->read.one.id;
			array++;

			if (read_format & PERF_FORMAT_LOST) {
				*array = sample->read.one.lost;
				array++;
			}
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		/* nr, hw_idx */
		sz += 2 * sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight64(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT_TYPE) {
		arch_perf_synthesize_sample_weight(sample, array, type);
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	if (type & PERF_SAMPLE_REGS_INTR) {
		if (sample->intr_regs.abi) {
			*array++ = sample->intr_regs.abi;
			sz = hweight64(sample->intr_regs.mask) * sizeof(u64);
			memcpy(array, sample->intr_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_PHYS_ADDR) {
		*array = sample->phys_addr;
		array++;
	}

	if (type & PERF_SAMPLE_CGROUP) {
		*array = sample->cgroup;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
		*array = sample->data_page_size;
		array++;
	}

	if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
		*array = sample->code_page_size;
		array++;
	}

	if (type & PERF_SAMPLE_AUX) {
		sz = sample->aux_sample.size;
		*array++ = sz;
		memcpy(array, sample->aux_sample.data, sz);
		array = (void *)array + sz;
	}

	return 0;
}

int perf_event__synthesize_id_sample(__u64 *array, u64 type, const struct perf_sample *sample)
{
	__u64 *start = array;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		u.val32[1] = 0;
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	return (void *)array - (void *)start;
}

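/*
 * perf_event_header.size is only a u16, so a large evlist cannot be
 * described by a single PERF_RECORD_ID_INDEX: emit as many records as
 * needed, each holding at most max_nr entries, starting from the 'from'-th
 * evsel.
 */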
int __perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
				      struct evlist *evlist, struct machine *machine, size_t from)
{
	union perf_event *ev;
	struct evsel *evsel;
	size_t nr = 0, i = 0, sz, max_nr, n, pos;
	size_t e1_sz = sizeof(struct id_index_entry);
	size_t e2_sz = sizeof(struct id_index_entry_2);
	size_t etot_sz = e1_sz + e2_sz;
	bool e2_needed = false;
	int err;

	max_nr = (UINT16_MAX - sizeof(struct perf_record_id_index)) / etot_sz;

	pos = 0;
	evlist__for_each_entry(evlist, evsel) {
		if (pos++ < from)
			continue;
		nr += evsel->core.ids;
	}

	if (!nr)
		return 0;

	pr_debug2("Synthesizing id index\n");

	n = nr > max_nr ? max_nr : nr;
	sz = sizeof(struct perf_record_id_index) + n * etot_sz;
	ev = zalloc(sz);
	if (!ev)
		return -ENOMEM;

	sz = sizeof(struct perf_record_id_index) + n * e1_sz;

	ev->id_index.header.type = PERF_RECORD_ID_INDEX;
	ev->id_index.nr = n;

	pos = 0;
	evlist__for_each_entry(evlist, evsel) {
		u32 j;

		if (pos++ < from)
			continue;
		for (j = 0; j < evsel->core.ids; j++, i++) {
			struct id_index_entry *e;
			struct id_index_entry_2 *e2;
			struct perf_sample_id *sid;

			if (i >= n) {
				ev->id_index.header.size = sz + (e2_needed ? n * e2_sz : 0);
				err = process(tool, ev, NULL, machine);
				if (err)
					goto out_err;
				nr -= n;
				i = 0;
				e2_needed = false;
			}

			e = &ev->id_index.entries[i];

			e->id = evsel->core.id[j];

			sid = evlist__id2sid(evlist, e->id);
			if (!sid) {
				free(ev);
				return -ENOENT;
			}

			e->idx = sid->idx;
			e->cpu = sid->cpu.cpu;
			e->tid = sid->tid;

			if (sid->machine_pid)
				e2_needed = true;

			e2 = (void *)ev + sz;
			e2[i].machine_pid = sid->machine_pid;
			e2[i].vcpu        = sid->vcpu.cpu;
		}
	}

	sz = sizeof(struct perf_record_id_index) + nr * e1_sz;
	ev->id_index.header.size = sz + (e2_needed ? nr * e2_sz : 0);
	ev->id_index.nr = nr;

	err = process(tool, ev, NULL, machine);
out_err:
	free(ev);

	return err;
}

int perf_event__synthesize_id_index(struct perf_tool *tool, perf_event__handler_t process,
				    struct evlist *evlist, struct machine *machine)
{
	return __perf_event__synthesize_id_index(tool, process, evlist, machine, 0);
}

int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
				  struct target *target, struct perf_thread_map *threads,
				  perf_event__handler_t process, bool needs_mmap,
				  bool data_mmap, unsigned int nr_threads_synthesize)
{
	/*
	 * When perf runs in a non-root PID namespace and the namespace's
	 * proc FS is not mounted, nsinfo__is_in_root_namespace() returns
	 * false. In this case, the proc FS comes from the parent namespace,
	 * so perf would wrongly gather process info from its parent PID
	 * namespace.
	 *
	 * To avoid the confusion of running in a child PID namespace while
	 * synthesizing thread info from the parent PID namespace, return
	 * failure with a warning.
	 */
	if (!nsinfo__is_in_root_namespace()) {
		pr_err("Perf runs in non-root PID namespace but it tries to ");
		pr_err("gather process info from its parent PID namespace.\n");
		pr_err("Please mount the proc file system properly, e.g. ");
		pr_err("add the option '--mount-proc' for unshare command.\n");
		return -EPERM;
	}

	if (target__has_task(target))
		return perf_event__synthesize_thread_map(tool, threads, process, machine,
							 needs_mmap, data_mmap);
	else if (target__has_cpu(target))
		return perf_event__synthesize_threads(tool, process, machine,
						      needs_mmap, data_mmap,
						      nr_threads_synthesize);

	/* command specified */
	return 0;
}

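/*
 * Convenience wrapper using the stock perf_event__process callback and a
 * NULL tool. A caller-side sketch (the opts/evlist names are illustrative,
 * not taken from this file):
 *
 *	err = machine__synthesize_threads(machine, &opts->target,
 *					  evlist->core.threads, true,
 *					  opts->sample_address,
 *					  opts->nr_threads_synthesize);
 */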
int machine__synthesize_threads(struct machine *machine, struct target *target,
				struct perf_thread_map *threads, bool needs_mmap,
				bool data_mmap, unsigned int nr_threads_synthesize)
{
	return __machine__synthesize_threads(machine, NULL, target, threads,
					     perf_event__process, needs_mmap,
					     data_mmap, nr_threads_synthesize);
}

static struct perf_record_event_update *event_update_event__new(size_t size, u64 type, u64 id)
{
	struct perf_record_event_update *ev;

	size += sizeof(*ev);
	size  = PERF_ALIGN(size, sizeof(u64));

	ev = zalloc(size);
	if (ev) {
		ev->header.type = PERF_RECORD_EVENT_UPDATE;
		ev->header.size = (u16)size;
		ev->type = type;
		ev->id = id;
	}
	return ev;
}

int perf_event__synthesize_event_update_unit(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	size_t size = strlen(evsel->unit);
	struct perf_record_event_update *ev;
	int err;

	ev = event_update_event__new(size + 1, PERF_EVENT_UPDATE__UNIT, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->unit, evsel->unit, size + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int perf_event__synthesize_event_update_scale(struct perf_tool *tool, struct evsel *evsel,
					      perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	struct perf_record_event_update_scale *ev_data;
	int err;

	ev = event_update_event__new(sizeof(*ev_data), PERF_EVENT_UPDATE__SCALE, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	ev->scale.scale = evsel->scale;
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int perf_event__synthesize_event_update_name(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	struct perf_record_event_update *ev;
	size_t len = strlen(evsel__name(evsel));
	int err;

	ev = event_update_event__new(len + 1, PERF_EVENT_UPDATE__NAME, evsel->core.id[0]);
	if (ev == NULL)
		return -ENOMEM;

	strlcpy(ev->name, evsel->name, len + 1);
	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int perf_event__synthesize_event_update_cpus(struct perf_tool *tool, struct evsel *evsel,
					     perf_event__handler_t process)
{
	struct synthesize_cpu_map_data syn_data = { .map = evsel->core.own_cpus };
	struct perf_record_event_update *ev;
	int err;

	ev = cpu_map_data__alloc(&syn_data, sizeof(struct perf_event_header) + 2 * sizeof(u64));
	if (!ev)
		return -ENOMEM;

	syn_data.data = &ev->cpus.cpus;
	ev->header.type = PERF_RECORD_EVENT_UPDATE;
	ev->header.size = (u16)syn_data.size;
	ev->type = PERF_EVENT_UPDATE__CPUS;
	ev->id = evsel->core.id[0];
	cpu_map_data__synthesize(&syn_data);

	err = process(tool, (union perf_event *)ev, NULL, NULL);
	free(ev);
	return err;
}

int perf_event__synthesize_attrs(struct perf_tool *tool, struct evlist *evlist,
				 perf_event__handler_t process)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		err = perf_event__synthesize_attr(tool, &evsel->core.attr, evsel->core.ids,
						  evsel->core.id, process);
		if (err) {
			pr_debug("failed to create perf header attribute\n");
			return err;
		}
	}

	return err;
}

static bool has_unit(struct evsel *evsel)
{
	return evsel->unit && *evsel->unit;
}

static bool has_scale(struct evsel *evsel)
{
	return evsel->scale != 1;
}

int perf_event__synthesize_extra_attr(struct perf_tool *tool, struct evlist *evsel_list,
				      perf_event__handler_t process, bool is_pipe)
{
	struct evsel *evsel;
	int err;

	/*
	 * Synthesize other events stuff not carried within
	 * attr event - unit, scale, name
	 */
	evlist__for_each_entry(evsel_list, evsel) {
		if (!evsel->supported)
			continue;

		/*
		 * Synthesize unit and scale only if it's defined.
		 */
		if (has_unit(evsel)) {
			err = perf_event__synthesize_event_update_unit(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel unit.\n");
				return err;
			}
		}

		if (has_scale(evsel)) {
			err = perf_event__synthesize_event_update_scale(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel scale.\n");
				return err;
			}
		}

		if (evsel->core.own_cpus) {
			err = perf_event__synthesize_event_update_cpus(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel cpus.\n");
				return err;
			}
		}

		/*
		 * Name is needed only for pipe output,
		 * perf.data carries event names.
		 */
		if (is_pipe) {
			err = perf_event__synthesize_event_update_name(tool, evsel, process);
			if (err < 0) {
				pr_err("Couldn't synthesize evsel name.\n");
				return err;
			}
		}
	}
	return 0;
}

int perf_event__synthesize_attr(struct perf_tool *tool, struct perf_event_attr *attr,
				u32 ids, u64 *id, perf_event__handler_t process)
{
	union perf_event *ev;
	size_t size;
	int err;

	size = sizeof(struct perf_event_attr);
	size = PERF_ALIGN(size, sizeof(u64));
	size += sizeof(struct perf_event_header);
	size += ids * sizeof(u64);

	ev = zalloc(size);
	if (ev == NULL)
		return -ENOMEM;

	ev->attr.attr = *attr;
	memcpy(ev->attr.id, id, ids * sizeof(u64));

	ev->attr.header.type = PERF_RECORD_HEADER_ATTR;
	ev->attr.header.size = (u16)size;

	if (ev->attr.header.size == size)
		err = process(tool, ev, NULL, NULL);
	else
		err = -E2BIG;

	free(ev);

	return err;
}

#ifdef HAVE_LIBTRACEEVENT
int perf_event__synthesize_tracing_data(struct perf_tool *tool, int fd, struct evlist *evlist,
					perf_event__handler_t process)
{
	union perf_event ev;
	struct tracing_data *tdata;
	ssize_t size = 0, aligned_size = 0, padding;
	struct feat_fd ff;

	/*
	 * We are going to store the size of the data followed
	 * by the data contents. Since the fd descriptor is a pipe,
	 * we cannot seek back to store the size of the data once
	 * we know it. Instead we:
	 *
	 * - write the tracing data to the temp file
	 * - get/write the data size to pipe
	 * - write the tracing data from the temp file
	 *   to the pipe
	 */
	tdata = tracing_data_get(&evlist->core.entries, fd, true);
	if (!tdata)
		return -1;

	memset(&ev, 0, sizeof(ev));

	ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
	size = tdata->size;
	aligned_size = PERF_ALIGN(size, sizeof(u64));
	padding = aligned_size - size;
	ev.tracing_data.header.size = sizeof(ev.tracing_data);
	ev.tracing_data.size = aligned_size;

	process(tool, &ev, NULL, NULL);

	/*
	 * The put function will copy all the tracing data
	 * stored in temp file to the pipe.
	 */
	tracing_data_put(tdata);

	ff = (struct feat_fd){ .fd = fd };
	if (write_padded(&ff, NULL, 0, padding))
		return -1;

	return aligned_size;
}
#endif

int perf_event__synthesize_build_id(struct perf_tool *tool, struct dso *pos, u16 misc,
				    perf_event__handler_t process, struct machine *machine)
{
	union perf_event ev;
	size_t len;

	if (!pos->hit)
		return 0;

	memset(&ev, 0, sizeof(ev));

	len = pos->long_name_len + 1;
	len = PERF_ALIGN(len, NAME_ALIGN);
	ev.build_id.size = min(pos->bid.size, sizeof(pos->bid.data));
	memcpy(&ev.build_id.build_id, pos->bid.data, ev.build_id.size);
	ev.build_id.header.type = PERF_RECORD_HEADER_BUILD_ID;
	ev.build_id.header.misc = misc | PERF_RECORD_MISC_BUILD_ID_SIZE;
	ev.build_id.pid = machine->pid;
	ev.build_id.header.size = sizeof(ev.build_id) + len;
	memcpy(&ev.build_id.filename, pos->long_name, pos->long_name_len);

	return process(tool, &ev, NULL, machine);
}

int perf_event__synthesize_stat_events(struct perf_stat_config *config, struct perf_tool *tool,
				       struct evlist *evlist, perf_event__handler_t process, bool attrs)
{
	int err;

	if (attrs) {
		err = perf_event__synthesize_attrs(tool, evlist, process);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			return err;
		}
	}

	err = perf_event__synthesize_extra_attr(tool, evlist, process, attrs);
	if (err < 0)
		return err;

	err = perf_event__synthesize_thread_map2(tool, evlist->core.threads, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(tool, evlist->core.user_requested_cpus, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_stat_config(tool, config, process, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize config.\n");
		return err;
	}

	return 0;
}

extern const struct perf_header_feature_ops feat_ops[HEADER_LAST_FEATURE];

int perf_event__synthesize_features(struct perf_tool *tool, struct perf_session *session,
				    struct evlist *evlist, perf_event__handler_t process)
{
	struct perf_header *header = &session->header;
	struct perf_record_header_feature *fe;
	struct feat_fd ff;
	size_t sz, sz_hdr;
	int feat, ret;

	sz_hdr = sizeof(fe->header);
	sz = sizeof(union perf_event);
	/* get a nice alignment */
	sz = PERF_ALIGN(sz, page_size);

	memset(&ff, 0, sizeof(ff));

	ff.buf = malloc(sz);
	if (!ff.buf)
		return -ENOMEM;

	ff.size = sz - sz_hdr;
	ff.ph = &session->header;

	for_each_set_bit(feat, header->adds_features, HEADER_FEAT_BITS) {
		if (!feat_ops[feat].synthesize) {
			pr_debug("No record header feature for header :%d\n", feat);
			continue;
		}

		ff.offset = sizeof(*fe);

		ret = feat_ops[feat].write(&ff, evlist);
		if (ret || ff.offset <= (ssize_t)sizeof(*fe)) {
			pr_debug("Error writing feature\n");
			continue;
		}
		/* ff.buf may have changed due to realloc in do_write() */
		fe = ff.buf;
		memset(fe, 0, sizeof(*fe));

		fe->feat_id = feat;
		fe->header.type = PERF_RECORD_HEADER_FEATURE;
		fe->header.size = ff.offset;

		ret = process(tool, ff.buf, NULL, NULL);
		if (ret) {
			free(ff.buf);
			return ret;
		}
	}

	/* Send HEADER_LAST_FEATURE mark. */
	fe = ff.buf;
	fe->feat_id = HEADER_LAST_FEATURE;
	fe->header.type = PERF_RECORD_HEADER_FEATURE;
	fe->header.size = sizeof(*fe);

	ret = process(tool, ff.buf, NULL, NULL);

	free(ff.buf);
	return ret;
}

int perf_event__synthesize_for_pipe(struct perf_tool *tool,
				    struct perf_session *session,
				    struct perf_data *data,
				    perf_event__handler_t process)
{
	int ret = 0;
	int err;
	struct evlist *evlist = session->evlist;

	/*
	 * We need to synthesize events first, because some
	 * features work on top of them (on report side).
	 */
	err = perf_event__synthesize_attrs(tool, evlist, process);
	if (err < 0) {
		pr_err("Couldn't synthesize attrs.\n");
		return err;
	}
	ret += err;

	err = perf_event__synthesize_features(tool, session, evlist, process);
	if (err < 0) {
		pr_err("Couldn't synthesize features.\n");
		return err;
	}
	ret += err;

#ifdef HAVE_LIBTRACEEVENT
	if (have_tracepoints(&evlist->core.entries)) {
		int fd = perf_data__fd(data);

		/*
		 * FIXME err <= 0 here actually means that
		 * there were no tracepoints so it's not really
		 * an error, just that we don't need to
		 * synthesize anything. We really have to
		 * return this more properly and also
		 * propagate errors that now are calling die()
		 */
		err = perf_event__synthesize_tracing_data(tool, fd, evlist,
							  process);
		if (err <= 0) {
			pr_err("Couldn't record tracing data.\n");
			return err;
		}
		ret += err;
	}
#endif

	return ret;
}

int parse_synth_opt(char *synth)
{
	char *p, *q;
	int ret = 0;

	if (synth == NULL)
		return -1;

	for (q = synth; (p = strsep(&q, ",")); p = q) {
		if (!strcasecmp(p, "no") || !strcasecmp(p, "none"))
			return 0;

		if (!strcasecmp(p, "all"))
			return PERF_SYNTH_ALL;

		if (!strcasecmp(p, "task"))
			ret |= PERF_SYNTH_TASK;
		else if (!strcasecmp(p, "mmap"))
			ret |= PERF_SYNTH_TASK | PERF_SYNTH_MMAP;
		else if (!strcasecmp(p, "cgroup"))
			ret |= PERF_SYNTH_CGROUP;
		else
			return -1;
	}

	return ret;
}