// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include "trace_helpers.h"
#include <linux/limits.h>
#include <gelf.h>
#define TRACEFS_PIPE "/sys/kernel/tracing/trace_pipe"
#define DEBUGFS_PIPE "/sys/kernel/debug/tracing/trace_pipe"

#define MAX_SYMS 300000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;
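/* qsort() comparator: orders the cached symbols by ascending address */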
static int ksym_cmp(const void *p1, const void *p2)
{
	return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}
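/*
 * (Re)load the symbol cache: parse each line of /proc/kallsyms into syms[]
 * and sort the array by address so that ksym_search() can binary-search it.
 */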
int load_kallsyms_refresh(void)
	char func[256], buf[256];
	f = fopen("/proc/kallsyms", "r");
	while (fgets(buf, sizeof(buf), f)) {
		if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
		syms[i].addr = (long) addr;
		syms[i].name = strdup(func);
	qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
int load_kallsyms(void)
	/*
	 * This is called/used from multiple places,
	 * load symbols just once.
	 */
	if (sym_cnt)
		return 0;
	return load_kallsyms_refresh();
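/*
 * Binary-search the sorted symbol cache for the symbol containing *key*,
 * i.e. the entry with the largest address that is <= key.
 *
 * A typical caller does something like (sketch):
 *
 *	load_kallsyms();
 *	struct ksym *sym = ksym_search(ip);
 *	if (sym)
 *		printf("0x%lx is in %s\n", ip, sym->name);
 */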
struct ksym *ksym_search(long key)
	int start = 0, end = sym_cnt;
	/* kallsyms not loaded. return NULL */
		size_t mid = start + (end - start) / 2;
		result = key - syms[mid].addr;
	if (start >= 1 && syms[start - 1].addr < key &&
	    key < syms[start].addr)
		return &syms[start - 1];
	/* out of range. return _stext */
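/* Linear scan of the symbol cache by name; returns the symbol's address, or 0 if not found */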
long ksym_get_addr(const char *name)
	for (i = 0; i < sym_cnt; i++) {
		if (strcmp(syms[i].name, name) == 0)
/* Open /proc/kallsyms and scan for the symbol on the fly. For a single
 * lookup, not caching all symbols first makes this faster than
 * load_kallsyms() + ksym_get_addr().
 */
int kallsyms_find(const char *sym, unsigned long long *addr)
	char type, name[500];
	unsigned long long value;
	f = fopen("/proc/kallsyms", "r");
	while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
		if (strcmp(name, sym) == 0) {
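/*
 * Dump the kernel trace pipe to stdout. Prefer the tracefs mount point and
 * fall back to the legacy debugfs path. Note that reading trace_pipe blocks,
 * so this loops forever and never returns.
 */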
void read_trace_pipe(void)
	if (access(TRACEFS_PIPE, F_OK) == 0)
		trace_fd = open(TRACEFS_PIPE, O_RDONLY, 0);
	else
		trace_fd = open(DEBUGFS_PIPE, O_RDONLY, 0);
		static char buf[4096];
		sz = read(trace_fd, buf, sizeof(buf) - 1);
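/*
 * Translate a virtual address in this process into an offset relative to the
 * backing file, as the uprobe API expects: find the executable ('x') mapping
 * in /proc/self/maps that covers *addr* and return
 * addr - mapping start + mapping file offset.
 */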
ssize_t get_uprobe_offset(const void *addr)
	size_t start, end, base;
	f = fopen("/proc/self/maps", "r");
	while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) {
		if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) {
#if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2
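/*
 * Masks/opcodes used to recognise the TOC-setup instructions that make up a
 * PPC64 ELFv2 global entry point (see the comment below).
 */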
#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		0x3c400000UL
#define ADDIS_R2_R12	0x3c4c0000UL
#define ADDI_R2_R2	0x38420000UL
	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form
	 * "addis r2,r12,XXXX; addi r2,r2,XXXX". A linker optimisation
	 * may convert the addis to lis: "lis r2,XXXX; addi r2,r2,XXXX".
	 */
	const u32 *insn = (const u32 *)(uintptr_t)addr;

	if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
	     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
	    ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
		return (uintptr_t)(insn + 2) - start + base;
#endif
	return (uintptr_t)addr - start + base;
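/*
 * Like get_uprobe_offset(), but for any mapping (not just executable ones):
 * return the file-relative offset of *addr* within the mapping containing it.
 */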
ssize_t get_rel_offset(uintptr_t addr)
	size_t start, end, offset;
	f = fopen("/proc/self/maps", "r");
	while (fscanf(f, "%zx-%zx %s %zx %*[^\n]\n", &start, &end, buf, &offset) == 4) {
		if (addr >= start && addr < end) {
			return (size_t)addr - start + offset;
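/*
 * Scan a buffer of ELF notes for the GNU build id note (type NT_GNU_BUILD_ID,
 * i.e. 3, name "GNU") and copy its descriptor, zero-padded to
 * BPF_BUILD_ID_SIZE bytes, into *build_id*. Returns the descriptor size on
 * success.
 */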
static int
parse_build_id_buf(const void *note_start, Elf32_Word note_size, char *build_id)
	Elf32_Word note_offs = 0;
	while (note_offs + sizeof(Elf32_Nhdr) < note_size) {
		Elf32_Nhdr *nhdr = (Elf32_Nhdr *)(note_start + note_offs);
		if (nhdr->n_type == 3 && nhdr->n_namesz == sizeof("GNU") &&
		    !strcmp((char *)(nhdr + 1), "GNU") && nhdr->n_descsz > 0 &&
		    nhdr->n_descsz <= BPF_BUILD_ID_SIZE) {
			memcpy(build_id, note_start + note_offs +
			       ALIGN(sizeof("GNU"), 4) + sizeof(Elf32_Nhdr), nhdr->n_descsz);
			memset(build_id + nhdr->n_descsz, 0, BPF_BUILD_ID_SIZE - nhdr->n_descsz);
			return (int) nhdr->n_descsz;
		note_offs = note_offs + sizeof(Elf32_Nhdr) +
			    ALIGN(nhdr->n_namesz, 4) + ALIGN(nhdr->n_descsz, 4);
/* Reads the GNU build id of the binary at *path* into the *build_id* buffer,
 * whose *size* is expected to be at least BPF_BUILD_ID_SIZE bytes.
 * Returns the size of the build id on success. On error, a negative error
 * value is returned.
 */
int read_build_id(const char *path, char *build_id, size_t size)
	int fd, err = -EINVAL;
	if (size < BPF_BUILD_ID_SIZE)
	fd = open(path, O_RDONLY | O_CLOEXEC);
	(void)elf_version(EV_CURRENT);
	elf = elf_begin(fd, ELF_C_READ_MMAP, NULL);
	if (elf_kind(elf) != ELF_K_ELF)
	if (!gelf_getehdr(elf, &ehdr))
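	/*
	 * Walk the program headers looking for PT_NOTE segments and try to
	 * parse a GNU build id note out of each one.
	 */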
	for (i = 0; i < ehdr.e_phnum; i++) {
		GElf_Phdr mem, *phdr;
		phdr = gelf_getphdr(elf, i, &mem);
		if (phdr->p_type != PT_NOTE)
		data = elf_rawfile(elf, &max);
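		/* make sure the note segment lies within the mapped file before parsing it */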
		if (phdr->p_offset + phdr->p_memsz > max)
		err = parse_build_id_buf(data + phdr->p_offset, phdr->p_memsz, build_id);