1 // SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include "trace_helpers.h"
15 #define DEBUGFS "/sys/kernel/debug/tracing/"
17 #define MAX_SYMS 300000
18 static struct ksym syms[MAX_SYMS];
21 static int ksym_cmp(const void *p1, const void *p2)
23 return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
26 int load_kallsyms_refresh(void)
29 char func[256], buf[256];
36 f = fopen("/proc/kallsyms", "r");
40 while (fgets(buf, sizeof(buf), f)) {
41 if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
45 syms[i].addr = (long) addr;
46 syms[i].name = strdup(func);
51 qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
55 int load_kallsyms(void)
58 * This is called/used from multiplace places,
59 * load symbols just once.
63 return load_kallsyms_refresh();
66 struct ksym *ksym_search(long key)
68 int start = 0, end = sym_cnt;
71 /* kallsyms not loaded. return NULL */
76 size_t mid = start + (end - start) / 2;
78 result = key - syms[mid].addr;
87 if (start >= 1 && syms[start - 1].addr < key &&
88 key < syms[start].addr)
90 return &syms[start - 1];
92 /* out of range. return _stext */
96 long ksym_get_addr(const char *name)
100 for (i = 0; i < sym_cnt; i++) {
101 if (strcmp(syms[i].name, name) == 0)
/* open kallsyms and read symbol addresses on the fly. Without caching all symbols,
 * this is faster than load + find.
 *
 * Returns 0 and stores the address through @addr on success, -EINVAL if
 * /proc/kallsyms cannot be opened, -ENOENT if @sym is not listed.
 */
int kallsyms_find(const char *sym, unsigned long long *addr)
{
	char type, name[500];
	unsigned long long value;
	int err = 0;
	FILE *f;

	f = fopen("/proc/kallsyms", "r");
	if (!f)
		return -EINVAL;

	/* %499s bounds the name; %*[^\n] discards module annotations */
	while (fscanf(f, "%llx %c %499s%*[^\n]\n", &value, &type, name) > 0) {
		if (strcmp(name, sym) == 0) {
			*addr = value;
			goto out;
		}
	}
	err = -ENOENT;

out:
	fclose(f);
	return err;
}
135 void read_trace_pipe(void)
139 trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
144 static char buf[4096];
147 sz = read(trace_fd, buf, sizeof(buf) - 1);
/* Translate a virtual address inside this process into the file offset
 * where a uprobe should attach, by locating the executable mapping in
 * /proc/self/maps that contains @addr.
 *
 * Returns the offset on success, -errno if maps cannot be opened,
 * -ESRCH if no executable mapping covers @addr.
 *
 * Fix: bound the permission-field conversion with %255s so a malformed
 * maps line cannot overflow buf[256].
 */
ssize_t get_uprobe_offset(const void *addr)
{
	size_t start, end, base;
	char buf[256];
	bool found = false;
	FILE *f;

	f = fopen("/proc/self/maps", "r");
	if (!f)
		return -errno;

	while (fscanf(f, "%zx-%zx %255s %zx %*[^\n]\n", &start, &end, buf, &base) == 4) {
		/* buf holds the "rwxp" flags; require an executable mapping */
		if (buf[2] == 'x' && (uintptr_t)addr >= start && (uintptr_t)addr < end) {
			found = true;
			break;
		}
	}

	fclose(f);

	if (!found)
		return -ESRCH;

#if defined(__powerpc64__) && defined(_CALL_ELF) && _CALL_ELF == 2

#define OP_RT_RA_MASK	0xffff0000UL
#define LIS_R2		0x3c400000UL
#define ADDIS_R2_R12	0x3c4c0000UL
#define ADDI_R2_R2	0x38420000UL

	/*
	 * A PPC64 ABIv2 function may have a local and a global entry
	 * point. We need to use the local entry point when patching
	 * functions, so identify and step over the global entry point
	 * sequence.
	 *
	 * The global entry point sequence is always of the form:
	 *
	 * addis r2,r12,XXXX
	 * addi  r2,r2,XXXX
	 *
	 * A linker optimisation may convert the addis to lis:
	 *
	 * lis   r2,XXXX
	 * addi  r2,r2,XXXX
	 */
	{
		const u32 *insn = (const u32 *)(uintptr_t)addr;

		if ((((*insn & OP_RT_RA_MASK) == ADDIS_R2_R12) ||
		     ((*insn & OP_RT_RA_MASK) == LIS_R2)) &&
		    ((*(insn + 1) & OP_RT_RA_MASK) == ADDI_R2_R2))
			return (uintptr_t)(insn + 2) - start + base;
	}
#endif
	return (uintptr_t)addr - start + base;
}
/* Map a virtual address of this process to its file-relative offset by
 * scanning /proc/self/maps; unlike get_uprobe_offset() any mapping
 * (not just executable ones) qualifies.
 *
 * Returns the offset on success, -errno if maps cannot be opened,
 * -EINVAL if no mapping contains @addr.
 *
 * Fix: bound the permission-field conversion with %255s so a malformed
 * maps line cannot overflow buf[256].
 */
ssize_t get_rel_offset(uintptr_t addr)
{
	size_t start, end, offset;
	char buf[256];
	FILE *f;

	f = fopen("/proc/self/maps", "r");
	if (!f)
		return -errno;

	while (fscanf(f, "%zx-%zx %255s %zx %*[^\n]\n", &start, &end, buf, &offset) == 4) {
		if (addr >= start && addr < end) {
			fclose(f);
			return (size_t)addr - start + offset;
		}
	}

	fclose(f);
	return -EINVAL;
}