1 // SPDX-License-Identifier: GPL-2.0
2 #include <sys/sysmacros.h>
15 #include <linux/stringify.h>
21 #include "namespaces.h"
32 #include <linux/ctype.h>
33 #include <linux/zalloc.h>
/*
 * Fragment of struct jit_buf_desc fields (the struct header and several
 * members are not visible in this chunk) — per-jitdump parsing state.
 */
36 struct perf_data *output;
37 struct perf_session *session;
38 struct machine *machine;
/* scratch pointer for the record currently being processed */
40 union jr_entry *entry;
45 bool needs_bswap; /* handles cross-endianness */
/* set from JITDUMP_FLAGS_ARCH_TIMESTAMP in the file header */
46 bool use_arch_timestamp;
/* unwinding/eh_frame payload sizes stashed by the UNWINDING_INFO handler */
49 uint64_t unwinding_size;
50 uint64_t unwinding_mapped_size;
51 uint64_t eh_frame_hdr_size;
52 size_t nr_debug_entries;
/* number of JIT_CODE_LOAD records seen so far */
53 uint32_t code_load_count;
55 struct rb_root code_root;
/*
 * Fragment of struct jit_tool (header not visible in this chunk):
 * a perf_tool plus its input/output data files, recoverable from the
 * embedded tool member via get_jit_tool().
 */
60 struct perf_tool tool;
61 struct perf_data output;
62 struct perf_data input;
/*
 * Max of two values. NOTE: evaluates each argument twice — do not pass
 * expressions with side effects (current callers pass sizeof() only).
 */
66 #define hmax(a, b) ((a) > (b) ? (a) : (b))
/*
 * Recover the enclosing struct jit_tool from its embedded perf_tool member.
 * Fixed to use the macro parameter (t): the original expanded `tool`, silently
 * capturing whatever variable of that name existed at the call site.
 */
67 #define get_jit_tool(t) (container_of((t), struct jit_tool, tool))
/*
 * jit_emit_elf(): write one jitted function out as a standalone ELF image
 * via jit_write_elf(). (Fragment — parts of the parameter list and body are
 * not visible in this chunk.) The file is created inside the target process's
 * mount namespace so the path is resolved in its view of the filesystem.
 */
70 jit_emit_elf(struct jit_buf_desc *jd,
79 uint32_t unwinding_header_size,
80 uint32_t unwinding_size)
82 int ret, fd, saved_errno;
86 fprintf(stderr, "write ELF image %s\n", filename);
88 nsinfo__mountns_enter(jd->nsi, &nsc);
89 fd = open(filename, O_CREAT|O_TRUNC|O_WRONLY, 0644);
91 nsinfo__mountns_exit(&nsc);
/* saved_errno preserves open(2)'s errno across the namespace exit above */
93 pr_warning("cannot create jit ELF %s: %s\n", filename, strerror(saved_errno));
97 ret = jit_write_elf(fd, code_addr, sym, (const void *)code, csize, debug, nr_debug_entries,
98 unwinding, unwinding_header_size, unwinding_size);
/* NOTE(review): re-enters the mount namespace after the write — presumably to
 * unlink the file on failure; the intervening lines are not visible here. */
103 nsinfo__mountns_enter(jd->nsi, &nsc);
105 nsinfo__mountns_exit(&nsc);
/* jit_close(): tear down a jit_buf_desc (body not visible in this chunk). */
112 jit_close(struct jit_buf_desc *jd)
/*
 * jit_validate_events(): verify every event in the session was recorded with
 * an explicit clockid of CLOCK_MONOTONIC (perf record -k 1), which jitdump
 * timestamps are taken against when arch timestamps are not in use.
 * (Fragment — the failure return is not visible in this chunk.)
 */
122 jit_validate_events(struct perf_session *session)
127 * check that all events use CLOCK_MONOTONIC
129 evlist__for_each_entry(session->evlist, evsel) {
130 if (evsel->core.attr.use_clockid == 0 || evsel->core.attr.clockid != CLOCK_MONOTONIC)
/*
 * jit_open(): open a jitdump file inside the target mount namespace, read and
 * validate its header (magic, version, flags, timestamp clock requirements),
 * byte-swapping if the file was produced on the other endianness, then slurp
 * any header extension bytes and remember the directory for later use.
 * (Fragment — error paths and the function tail are not visible in this chunk.)
 */
137 jit_open(struct jit_buf_desc *jd, const char *name)
139 struct jitheader header;
141 struct jr_prefix *prefix;
143 void *n, *buf = NULL;
144 int ret, retval = -1;
146 nsinfo__mountns_enter(jd->nsi, &nsc);
147 jd->in = fopen(name, "r");
148 nsinfo__mountns_exit(&nsc);
/* scratch buffer must hold either the file header or a record prefix */
152 bsz = hmax(sizeof(header), sizeof(*prefix));
159 * protect from writer modifying the file while we are reading it
163 ret = fread(buf, sizeof(header), 1, jd->in);
167 memcpy(&header, buf, sizeof(header));
/* JITHEADER_MAGIC_SW means the writer had the opposite endianness */
169 if (header.magic != JITHEADER_MAGIC) {
170 if (header.magic != JITHEADER_MAGIC_SW)
172 jd->needs_bswap = true;
175 if (jd->needs_bswap) {
176 header.version = bswap_32(header.version);
177 header.total_size = bswap_32(header.total_size);
178 header.pid = bswap_32(header.pid);
179 header.elf_mach = bswap_32(header.elf_mach);
180 header.timestamp = bswap_64(header.timestamp);
181 header.flags = bswap_64(header.flags);
184 jd->use_arch_timestamp = header.flags & JITDUMP_FLAGS_ARCH_TIMESTAMP;
187 pr_debug("version=%u\nhdr.size=%u\nts=0x%llx\npid=%d\nelf_mach=%d\nuse_arch_timestamp=%d\n",
190 (unsigned long long)header.timestamp,
193 jd->use_arch_timestamp);
195 if (header.version > JITHEADER_VERSION) {
196 pr_err("wrong jitdump version %u, expected " __stringify(JITHEADER_VERSION),
201 if (header.flags & JITDUMP_FLAGS_RESERVED) {
202 pr_err("jitdump file contains invalid or unsupported flags 0x%llx\n",
203 (unsigned long long)header.flags & JITDUMP_FLAGS_RESERVED);
/* arch timestamps are useless without the session's TSC conversion data */
207 if (jd->use_arch_timestamp && !jd->session->time_conv.time_mult) {
208 pr_err("jitdump file uses arch timestamps but there is no timestamp conversion\n");
213 * validate event is using the correct clockid
215 if (!jd->use_arch_timestamp && jit_validate_events(jd->session)) {
216 pr_err("error, jitted code must be sampled with perf record -k 1\n");
/* bytes of header extension beyond the fixed-size struct we know about */
220 bs = header.total_size - sizeof(header);
223 n = realloc(buf, bs);
228 /* read extra we do not know about */
229 ret = fread(buf, bs - bsz, 1, jd->in);
234 * keep dirname for generating files and mmap records
/* NOTE(review): unbounded strcpy — assumes jd->dir is PATH_MAX-sized and
 * name fits; confirm the declaration of jd->dir (not visible in this chunk). */
236 strcpy(jd->dir, name);
/*
 * jit_get_next_entry(): read the next record from the jitdump stream into
 * jd->buf (growing it on demand), byte-swap the prefix and the type-specific
 * fields when the file endianness differs from ours, and return the record.
 * Unknown record ids are reported and skipped. (Fragment — error paths,
 * several case labels and the return are not visible in this chunk.)
 */
246 static union jr_entry *
247 jit_get_next_entry(struct jit_buf_desc *jd)
249 struct jr_prefix *prefix;
/* lazily allocate the record buffer, at least a page / one prefix */
258 if (jd->buf == NULL) {
259 size_t sz = getpagesize();
260 if (sz < sizeof(*prefix))
261 sz = sizeof(*prefix);
263 jd->buf = malloc(sz);
273 * file is still locked at this point
275 ret = fread(prefix, sizeof(*prefix), 1, jd->in);
279 if (jd->needs_bswap) {
280 prefix->id = bswap_32(prefix->id);
281 prefix->total_size = bswap_32(prefix->total_size);
282 prefix->timestamp = bswap_64(prefix->timestamp);
285 size = prefix->total_size;
/* a record can never be smaller than its prefix */
288 if (bs < sizeof(*prefix))
291 if (id >= JIT_CODE_MAX) {
292 pr_warning("next_entry: unknown record type %d, skipping\n", id);
/* grow the buffer to fit this record's payload */
294 if (bs > jd->bufsize) {
296 n = realloc(jd->buf, bs);
/* payload starts right after the already-read prefix */
303 addr = ((void *)jd->buf) + sizeof(*prefix);
305 ret = fread(addr, bs - sizeof(*prefix), 1, jd->in);
309 jr = (union jr_entry *)jd->buf;
/* per-record-type byte swapping of the fixed fields */
312 case JIT_CODE_DEBUG_INFO:
313 if (jd->needs_bswap) {
315 jr->info.code_addr = bswap_64(jr->info.code_addr);
316 jr->info.nr_entry = bswap_64(jr->info.nr_entry);
317 for (n = 0 ; n < jr->info.nr_entry; n++) {
318 jr->info.entries[n].addr = bswap_64(jr->info.entries[n].addr);
319 jr->info.entries[n].lineno = bswap_32(jr->info.entries[n].lineno);
320 jr->info.entries[n].discrim = bswap_32(jr->info.entries[n].discrim);
324 case JIT_CODE_UNWINDING_INFO:
325 if (jd->needs_bswap) {
326 jr->unwinding.unwinding_size = bswap_64(jr->unwinding.unwinding_size);
327 jr->unwinding.eh_frame_hdr_size = bswap_64(jr->unwinding.eh_frame_hdr_size);
328 jr->unwinding.mapped_size = bswap_64(jr->unwinding.mapped_size);
334 if (jd->needs_bswap) {
335 jr->load.pid = bswap_32(jr->load.pid);
336 jr->load.tid = bswap_32(jr->load.tid);
337 jr->load.vma = bswap_64(jr->load.vma);
338 jr->load.code_addr = bswap_64(jr->load.code_addr);
339 jr->load.code_size = bswap_64(jr->load.code_size);
340 jr->load.code_index= bswap_64(jr->load.code_index);
342 jd->code_load_count++;
345 if (jd->needs_bswap) {
346 jr->move.pid = bswap_32(jr->move.pid);
347 jr->move.tid = bswap_32(jr->move.tid);
348 jr->move.vma = bswap_64(jr->move.vma);
349 jr->move.old_code_addr = bswap_64(jr->move.old_code_addr);
350 jr->move.new_code_addr = bswap_64(jr->move.new_code_addr);
351 jr->move.code_size = bswap_64(jr->move.code_size);
352 jr->move.code_index = bswap_64(jr->move.code_index);
357 /* skip unknown record (we have read them) */
/*
 * jit_inject_event(): append a synthesized perf event to the output data
 * file and account the written bytes. (Fragment — error handling and return
 * are not visible in this chunk.)
 */
364 jit_inject_event(struct jit_buf_desc *jd, union perf_event *event)
368 size = perf_data__write(jd->output, event, event->header.size);
372 jd->bytes_written += size;
/*
 * pid to emit for a record: when the jitdump producer ran inside a pid
 * namespace, use the tgid seen from our (root) namespace instead of the
 * in-namespace pid stored in the record. (Fallback path not visible here.)
 */
376 static pid_t jr_entry_pid(struct jit_buf_desc *jd, union jr_entry *jr)
378 if (jd->nsi && nsinfo__in_pidns(jd->nsi))
379 return nsinfo__tgid(jd->nsi);
/*
 * tid counterpart of jr_entry_pid(): inside a pid namespace, substitute the
 * root-namespace pid for the record's tid. (Fallback path not visible here.)
 */
383 static pid_t jr_entry_tid(struct jit_buf_desc *jd, union jr_entry *jr)
385 if (jd->nsi && nsinfo__in_pidns(jd->nsi))
386 return nsinfo__pid(jd->nsi);
/*
 * convert_timestamp(): translate an architecture (TSC) timestamp from the
 * jitdump file into perf time using the session's TIME_CONV parameters.
 * When the file does not use arch timestamps the value is returned as-is
 * (early-return path partially visible). (Fragment — some lines missing.)
 */
390 static uint64_t convert_timestamp(struct jit_buf_desc *jd, uint64_t timestamp)
392 struct perf_tsc_conversion tc = { .time_shift = 0, };
393 struct perf_record_time_conv *time_conv = &jd->session->time_conv;
395 if (!jd->use_arch_timestamp)
398 tc.time_shift = time_conv->time_shift;
399 tc.time_mult = time_conv->time_mult;
400 tc.time_zero = time_conv->time_zero;
403 * The event TIME_CONV was extended for the fields from "time_cycles"
404 * when supported cap_user_time_short, for backward compatibility,
405 * checks the event size and assigns these extended fields if these
406 * fields are contained in the event.
408 if (event_contains(*time_conv, time_cycles)) {
409 tc.time_cycles = time_conv->time_cycles;
410 tc.time_mask = time_conv->time_mask;
411 tc.cap_user_time_zero = time_conv->cap_user_time_zero;
412 tc.cap_user_time_short = time_conv->cap_user_time_short;
/* without cap_user_time_zero the conversion parameters are unusable */
414 if (!tc.cap_user_time_zero)
418 return tsc_to_perf_time(timestamp, &tc);
/*
 * jit_repipe_code_load(): handle a JIT_CODE_LOAD record — emit the jitted
 * code as an ELF image (jitted-<pid>-<index>.so in jd->dir), then synthesize
 * a PERF_RECORD_MMAP2 covering it plus a pseudo sample so the dso is marked
 * hit for build-id generation. Consumes any pending debug/unwinding state
 * stashed by earlier records. (Fragment — error paths and cleanup are not
 * visible in this chunk.)
 */
421 static int jit_repipe_code_load(struct jit_buf_desc *jd, union jr_entry *jr)
423 struct perf_sample sample;
424 union perf_event *event;
425 struct perf_tool *tool = jd->session->tool;
434 int ret, csize, usize;
435 pid_t nspid, pid, tid;
441 nspid = jr->load.pid;
442 pid = jr_entry_pid(jd, jr);
443 tid = jr_entry_tid(jd, jr);
444 csize = jr->load.code_size;
445 usize = jd->unwinding_mapped_size;
446 addr = jr->load.code_addr;
/* symbol name string follows the fixed part of the load record */
447 sym = (void *)((unsigned long)jr + sizeof(jr->load));
/* native code is the last csize bytes of the record */
448 code = (unsigned long)jr + jr->load.p.total_size - csize;
449 count = jr->load.code_index;
450 idr_size = jd->machine->id_hdr_size;
/* room for the trailing sample_id section after the mmap2 event */
452 event = calloc(1, sizeof(*event) + idr_size);
456 filename = event->mmap2.filename;
457 size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
464 size = PERF_ALIGN(size, sizeof(u64));
465 uaddr = (uintptr_t)code;
466 ret = jit_emit_elf(jd, filename, sym, addr, (const void *)uaddr, csize, jd->debug_data, jd->nr_debug_entries,
467 jd->unwinding_data, jd->eh_frame_hdr_size, jd->unwinding_size);
/* debug/unwinding info applies to exactly one code load: drop it now */
469 if (jd->debug_data && jd->nr_debug_entries) {
470 zfree(&jd->debug_data);
471 jd->nr_debug_entries = 0;
474 if (jd->unwinding_data && jd->eh_frame_hdr_size) {
475 zfree(&jd->unwinding_data);
476 jd->eh_frame_hdr_size = 0;
477 jd->unwinding_mapped_size = 0;
478 jd->unwinding_size = 0;
/* stat failure is non-fatal: fall back to zeroed inode/dev fields */
485 if (nsinfo__stat(filename, &st, jd->nsi))
486 memset(&st, 0, sizeof(st));
488 event->mmap2.header.type = PERF_RECORD_MMAP2;
489 event->mmap2.header.misc = PERF_RECORD_MISC_USER;
490 event->mmap2.header.size = (sizeof(event->mmap2) -
491 (sizeof(event->mmap2.filename) - size) + idr_size);
493 event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
494 event->mmap2.start = addr;
/* extend the mapping to cover the unwinding area, if any */
495 event->mmap2.len = usize ? ALIGN_8(csize) + usize : csize;
496 event->mmap2.pid = pid;
497 event->mmap2.tid = tid;
498 event->mmap2.ino = st.st_ino;
499 event->mmap2.maj = major(st.st_dev);
500 event->mmap2.min = minor(st.st_dev);
/* NOTE(review): st_mode stored into prot — looks deliberate upstream, verify */
501 event->mmap2.prot = st.st_mode;
502 event->mmap2.flags = MAP_SHARED;
503 event->mmap2.ino_generation = 1;
/* mmap/mmap2 share the header through the union, so mmap.header is valid */
505 id = (void *)((unsigned long)event + event->mmap.header.size - idr_size);
506 if (jd->sample_type & PERF_SAMPLE_TID) {
510 if (jd->sample_type & PERF_SAMPLE_TIME)
511 id->time = convert_timestamp(jd, jr->load.p.timestamp);
514 * create pseudo sample to induce dso hit increment
515 * use first address as sample address
517 memset(&sample, 0, sizeof(sample));
518 sample.cpumode = PERF_RECORD_MISC_USER;
521 sample.time = id->time;
524 ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
528 ret = jit_inject_event(jd, event);
530 * mark dso as use to generate buildid in the header
533 build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);
/*
 * jit_repipe_code_move(): handle a JIT_CODE_MOVE record — synthesize a new
 * PERF_RECORD_MMAP2 re-mapping the previously emitted jitted-<pid>-<index>.so
 * at its new address. (Fragment — error paths are not visible in this chunk.)
 */
538 static int jit_repipe_code_move(struct jit_buf_desc *jd, union jr_entry *jr)
540 struct perf_sample sample;
541 union perf_event *event;
542 struct perf_tool *tool = jd->session->tool;
549 pid_t nspid, pid, tid;
/* NOTE(review): reads jr->load.pid in the *move* handler — presumably OK
 * because load/move share layout through the jr_entry union; confirm. */
555 nspid = jr->load.pid;
556 pid = jr_entry_pid(jd, jr);
557 tid = jr_entry_tid(jd, jr);
558 usize = jd->unwinding_mapped_size;
559 idr_size = jd->machine->id_hdr_size;
562 * +16 to account for sample_id_all (hack)
564 event = calloc(1, sizeof(*event) + 16);
568 filename = event->mmap2.filename;
569 size = snprintf(filename, PATH_MAX, "%s/jitted-%d-%" PRIu64 ".so",
572 jr->move.code_index);
/* stat failure is non-fatal: fall back to zeroed inode/dev fields */
576 if (nsinfo__stat(filename, &st, jd->nsi))
577 memset(&st, 0, sizeof(st));
579 size = PERF_ALIGN(size, sizeof(u64));
581 event->mmap2.header.type = PERF_RECORD_MMAP2;
582 event->mmap2.header.misc = PERF_RECORD_MISC_USER;
583 event->mmap2.header.size = (sizeof(event->mmap2) -
584 (sizeof(event->mmap2.filename) - size) + idr_size);
585 event->mmap2.pgoff = GEN_ELF_TEXT_OFFSET;
586 event->mmap2.start = jr->move.new_code_addr;
/* extend the mapping to cover the unwinding area, if any */
587 event->mmap2.len = usize ? ALIGN_8(jr->move.code_size) + usize
588 : jr->move.code_size;
589 event->mmap2.pid = pid;
590 event->mmap2.tid = tid;
591 event->mmap2.ino = st.st_ino;
592 event->mmap2.maj = major(st.st_dev);
593 event->mmap2.min = minor(st.st_dev);
/* NOTE(review): st_mode stored into prot — looks deliberate upstream, verify */
594 event->mmap2.prot = st.st_mode;
595 event->mmap2.flags = MAP_SHARED;
596 event->mmap2.ino_generation = 1;
/* mmap/mmap2 share the header through the union, so mmap.header is valid */
598 id = (void *)((unsigned long)event + event->mmap.header.size - idr_size);
599 if (jd->sample_type & PERF_SAMPLE_TID) {
603 if (jd->sample_type & PERF_SAMPLE_TIME)
/* timestamp read via jr->load.p — the prefix is common to all record types */
604 id->time = convert_timestamp(jd, jr->load.p.timestamp);
607 * create pseudo sample to induce dso hit increment
608 * use first address as sample address
610 memset(&sample, 0, sizeof(sample));
611 sample.cpumode = PERF_RECORD_MISC_USER;
614 sample.time = id->time;
615 sample.ip = jr->move.new_code_addr;
617 ret = perf_event__process_mmap2(tool, event, &sample, jd->machine);
621 ret = jit_inject_event(jd, event);
623 build_id__mark_dso_hit(tool, event, &sample, NULL, jd->machine);
/*
 * jit_repipe_debug_info(): copy the debug-entry payload of a
 * JIT_CODE_DEBUG_INFO record and stash it in jd for consumption by the next
 * code-load record. (Fragment — allocation of `data` and the error path are
 * not visible in this chunk.)
 */
628 static int jit_repipe_debug_info(struct jit_buf_desc *jd, union jr_entry *jr)
/* payload size = record size minus the fixed debug-info header */
636 sz = jr->prefix.total_size - sizeof(jr->info);
641 memcpy(data, &jr->info.entries, sz);
643 jd->debug_data = data;
646 * we must use nr_entry instead of size here because
647 * we cannot distinguish actual entry from padding otherwise
649 jd->nr_debug_entries = jr->info.nr_entry;
/*
 * jit_repipe_unwinding_info(): copy the unwinding payload (EH frame data) of
 * a JIT_CODE_UNWINDING_INFO record and stash it plus its sizes in jd for the
 * next code-load record. (Fragment — the malloc-failure path is not visible
 * in this chunk.)
 */
655 jit_repipe_unwinding_info(struct jit_buf_desc *jd, union jr_entry *jr)
657 void *unwinding_data;
658 uint32_t unwinding_data_size;
/* payload size = record size minus the fixed unwinding header */
663 unwinding_data_size = jr->prefix.total_size - sizeof(jr->unwinding);
664 unwinding_data = malloc(unwinding_data_size);
668 memcpy(unwinding_data, &jr->unwinding.unwinding_data,
669 unwinding_data_size);
671 jd->eh_frame_hdr_size = jr->unwinding.eh_frame_hdr_size;
672 jd->unwinding_size = jr->unwinding.unwinding_size;
673 jd->unwinding_mapped_size = jr->unwinding.mapped_size;
674 jd->unwinding_data = unwinding_data;
/*
 * jit_process_dump(): main record loop — pull records off the jitdump stream
 * and dispatch each to its handler by prefix id. (Fragment — some case labels
 * and the loop exit are not visible in this chunk.)
 */
680 jit_process_dump(struct jit_buf_desc *jd)
685 while ((jr = jit_get_next_entry(jd))) {
686 switch(jr->prefix.id) {
688 ret = jit_repipe_code_load(jd, jr);
691 ret = jit_repipe_code_move(jd, jr);
693 case JIT_CODE_DEBUG_INFO:
694 ret = jit_repipe_debug_info(jd, jr);
696 case JIT_CODE_UNWINDING_INFO:
697 ret = jit_repipe_unwinding_info(jd, jr);
/*
 * jit_inject(): open one jitdump file and process all of its records,
 * logging progress to stderr. (Fragment — error handling between the calls
 * is not visible in this chunk.)
 */
708 jit_inject(struct jit_buf_desc *jd, char *path)
713 fprintf(stderr, "injecting: %s\n", path);
715 ret = jit_open(jd, path);
719 ret = jit_process_dump(jd);
724 fprintf(stderr, "injected: %s (%d)\n", path, ret);
/*
 * jit_detect(): decide whether an mmap filename is a jitdump marker.
 * File must be with pattern .../jit-XXXX.dump
 * where XXXX is the PID of the process which did the mmap()
 * as captured in the RECORD_MMAP record
 * (Fragment — the mismatch/early-return lines are not visible in this chunk.)
 */
735 jit_detect(char *mmap_name, pid_t pid, struct nsinfo *nsi)
742 fprintf(stderr, "jit marker trying : %s\n", mmap_name);
/* locate last path component */
746 p = strrchr(mmap_name, '/');
753 if (strncmp(p, "/jit-", 5))
762 * must be followed by a pid
767 pid2 = (int)strtol(p, &end, 10);
772 * pid does not match mmap pid
773 * pid==0 in system-wide mode (synthesized)
775 if (pid && pid2 != nsinfo__nstgid(nsi))
/* filename must end exactly in ".dump" right after the pid */
780 if (strcmp(end, ".dump"))
784 fprintf(stderr, "jit marker found: %s\n", mmap_name);
/*
 * jit_add_pid(): remember that a jitdump was processed for this pid by
 * tagging the thread's priv pointer. (Fragment — the thread__put/return
 * tail is not visible in this chunk.)
 */
789 static void jit_add_pid(struct machine *machine, pid_t pid)
791 struct thread *thread = machine__findnew_thread(machine, pid, pid);
794 pr_err("%s: thread %d not found or created\n", __func__, pid);
/* priv used as a boolean flag, see jit_has_pid() */
798 thread->priv = (void *)1;
/*
 * jit_has_pid(): was a jitdump already processed for this pid?
 * Reads back the priv flag set by jit_add_pid(). (Fragment — the
 * thread==NULL early return is not visible in this chunk.)
 */
801 static bool jit_has_pid(struct machine *machine, pid_t pid)
803 struct thread *thread = machine__find_thread(machine, pid, pid);
808 return (bool)thread->priv;
/*
 * jit_process(): entry point called per mmap filename — detect a jitdump
 * marker mmap and, if found, inject the jitted-code events into the output;
 * otherwise optionally filter anonymous/memfd mappings for pids whose
 * jitdump was already processed. (Fragment — several parameters, returns
 * and cleanup are not visible in this chunk.)
 */
812 jit_process(struct perf_session *session,
813 struct perf_data *output,
814 struct machine *machine,
820 struct thread *thread;
823 struct jit_buf_desc jd;
826 thread = machine__findnew_thread(machine, pid, tid);
827 if (thread == NULL) {
828 pr_err("problem processing JIT mmap event, skipping it.\n");
/* take a reference on the thread's namespace info for later stat/open */
832 nsi = nsinfo__get(thread->nsinfo);
836 * first, detect marker mmap (i.e., the jitdump mmap)
838 if (jit_detect(filename, pid, nsi)) {
842 * Strip //anon*, [anon:* and /memfd:* mmaps if we processed a jitdump for this pid
844 if (jit_has_pid(machine, pid) &&
845 ((strncmp(filename, "//anon", 6) == 0) ||
846 (strncmp(filename, "[anon:", 6) == 0) ||
847 (strncmp(filename, "/memfd:", 7) == 0)))
853 memset(&jd, 0, sizeof(jd));
855 jd.session = session;
857 jd.machine = machine;
861 * track sample_type to compute id_all layout
862 * perf sets the same sample type to all events as of now
864 first = evlist__first(session->evlist);
865 jd.sample_type = first->core.attr.sample_type;
869 ret = jit_inject(&jd, filename);
/* remember this pid so later anon/memfd mmaps can be stripped */
871 jit_add_pid(machine, pid);
872 *nbytes = jd.bytes_written;