// SPDX-License-Identifier: GPL-2.0-only
#include <linux/objtool.h>
#include <linux/module.h>
#include <linux/sort.h>
#include <asm/exception.h>
#include <asm/orc_header.h>
#include <asm/orc_lookup.h>
#include <asm/orc_types.h>
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>

ORC_HEADER;

#define orc_warn(fmt, ...)						\
	printk_deferred_once(KERN_WARNING "WARNING: " fmt, ##__VA_ARGS__)

extern int __start_orc_unwind_ip[];
extern int __stop_orc_unwind_ip[];
extern struct orc_entry __start_orc_unwind[];
extern struct orc_entry __stop_orc_unwind[];

static bool orc_init __ro_after_init;
static unsigned int lookup_num_blocks __ro_after_init;

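/*
 * ORC data lives in two parallel, address-sorted tables: .orc_unwind_ip holds
 * 32-bit self-relative instruction addresses (decoded by orc_ip()) and
 * .orc_unwind holds the matching struct orc_entry describing how to recover
 * the previous SP/FP/RA at that address.  orc_lookup[] is a block index built
 * at boot so orc_find() only binary-searches a small slice of the tables.
 */
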
/* Fake frame pointer entry -- used as a fallback for generated code */
static struct orc_entry orc_fp_entry = {
	.sp_reg		= ORC_REG_FP,
	.sp_offset	= 16,
	.fp_reg		= ORC_REG_PREV_SP,
	.fp_offset	= -16,
	.ra_reg		= ORC_REG_PREV_SP,
	.ra_offset	= -8,
	.type		= ORC_TYPE_CALL
};

/*
 * If we crash with IP==0, the last successfully executed instruction
 * was probably an indirect function call with a NULL function pointer,
 * and we don't have unwind information for NULL.
 * This hardcoded ORC entry for IP==0 allows us to unwind from a NULL function
 * pointer into its parent and then continue normally from there.
 */
static struct orc_entry orc_null_entry = {
	.sp_reg		= ORC_REG_SP,
	.sp_offset	= sizeof(long),
	.fp_reg		= ORC_REG_UNDEFINED,
	.type		= ORC_TYPE_CALL
};

static inline unsigned long orc_ip(const int *ip)
{
	return (unsigned long)ip + *ip;
}

static struct orc_entry *__orc_find(int *ip_table, struct orc_entry *u_table,
				    unsigned int num_entries, unsigned long ip)
{
	int *first = ip_table;
	int *mid = first, *found = first;
	int *last = ip_table + num_entries - 1;

	if (!num_entries)
		return NULL;

	/*
	 * Do a binary range search to find the rightmost duplicate of a given
	 * starting address.  Some entries are section terminators which are
	 * "weak" entries for ensuring there are no gaps.  They should be
	 * ignored when they conflict with a real entry.
	 */
	while (first <= last) {
		mid = first + ((last - first) / 2);

		if (orc_ip(mid) <= ip) {
			found = mid;
			first = mid + 1;
		} else
			last = mid - 1;
	}

	return u_table + (found - ip_table);
}

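/*
 * Note: orc_sort_cmp() orders a "weak" terminator before a real entry at the
 * same address, so the rightmost-duplicate search above resolves such a
 * conflict in favor of the real entry.
 */
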
#ifdef CONFIG_MODULES
static struct orc_entry *orc_module_find(unsigned long ip)
{
	struct module *mod;

	mod = __module_address(ip);
	if (!mod || !mod->arch.orc_unwind || !mod->arch.orc_unwind_ip)
		return NULL;

	return __orc_find(mod->arch.orc_unwind_ip, mod->arch.orc_unwind, mod->arch.num_orcs, ip);
}
#else
static struct orc_entry *orc_module_find(unsigned long ip)
{
	return NULL;
}
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
static struct orc_entry *orc_find(unsigned long ip);

/*
 * Ftrace dynamic trampolines do not have orc entries of their own.
 * But they are copies of the ftrace entries that are static and
 * defined in ftrace_*.S, which do have orc entries.
 *
 * If the unwinder comes across a ftrace trampoline, then find the
 * ftrace function that was used to create it, and use that ftrace
 * function's orc entry, as the placement of the return code in
 * the stack will be identical.
 */
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	struct ftrace_ops *ops;
	unsigned long tramp_addr, offset;

	ops = ftrace_ops_trampoline(ip);
	if (!ops)
		return NULL;

	/* Set tramp_addr to the start of the code copied by the trampoline */
	if (ops->flags & FTRACE_OPS_FL_SAVE_REGS)
		tramp_addr = (unsigned long)ftrace_regs_caller;
	else
		tramp_addr = (unsigned long)ftrace_caller;

	/* Now move tramp_addr to the same offset within the trampoline that ip is at */
	offset = ip - ops->trampoline;
	tramp_addr += offset;

	/* Prevent unlikely recursion */
	if (ip == tramp_addr)
		return NULL;

	return orc_find(tramp_addr);
}
#else
static struct orc_entry *orc_ftrace_find(unsigned long ip)
{
	return NULL;
}
#endif

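/*
 * Find the ORC entry for a given instruction address.  Lookup order: the fast
 * block-indexed search for core kernel text, a full-table binary search for
 * .init text, then module tables, and finally ftrace trampolines.
 */
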
static struct orc_entry *orc_find(unsigned long ip)
{
	static struct orc_entry *orc;

	if (ip == 0)
		return &orc_null_entry;

	/* For non-init vmlinux addresses, use the fast lookup table: */
	if (ip >= LOOKUP_START_IP && ip < LOOKUP_STOP_IP) {
		unsigned int idx, start, stop;

		idx = (ip - LOOKUP_START_IP) / LOOKUP_BLOCK_SIZE;

		if (unlikely((idx >= lookup_num_blocks-1))) {
			orc_warn("WARNING: bad lookup idx: idx=%u num=%u ip=%pB\n",
				 idx, lookup_num_blocks, (void *)ip);
			return NULL;
		}

		start = orc_lookup[idx];
		stop = orc_lookup[idx + 1] + 1;

		if (unlikely((__start_orc_unwind + start >= __stop_orc_unwind) ||
			     (__start_orc_unwind + stop > __stop_orc_unwind))) {
			orc_warn("WARNING: bad lookup value: idx=%u num=%u start=%u stop=%u ip=%pB\n",
				 idx, lookup_num_blocks, start, stop, (void *)ip);
			return NULL;
		}

		return __orc_find(__start_orc_unwind_ip + start,
				  __start_orc_unwind + start, stop - start, ip);
	}

	/* vmlinux .init slow lookup: */
	if (is_kernel_inittext(ip))
		return __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				  __stop_orc_unwind_ip - __start_orc_unwind_ip, ip);

	/* Module lookup: */
	orc = orc_module_find(ip);
	if (orc)
		return orc;

	return orc_ftrace_find(ip);
}

#ifdef CONFIG_MODULES

static DEFINE_MUTEX(sort_mutex);
static int *cur_orc_ip_table = __start_orc_unwind_ip;
static struct orc_entry *cur_orc_table = __start_orc_unwind;

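/*
 * Unlike the vmlinux tables, a module's ORC tables are sorted here at load
 * time.  Because .orc_unwind_ip values are self-relative, the swap callback
 * has to rebase each value by the distance between the two slots, and it must
 * also swap the corresponding .orc_unwind entries to keep the tables in sync.
 */
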
static void orc_sort_swap(void *_a, void *_b, int size)
{
	int delta = _b - _a;
	int *a = _a, *b = _b, tmp;
	struct orc_entry *orc_a, *orc_b;

	/* Swap the .orc_unwind_ip entries: */
	tmp = *a;
	*a = *b + delta;
	*b = tmp - delta;

	/* Swap the corresponding .orc_unwind entries: */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);
	orc_b = cur_orc_table + (b - cur_orc_ip_table);
	swap(*orc_a, *orc_b);
}

static int orc_sort_cmp(const void *_a, const void *_b)
{
	const int *a = _a, *b = _b;
	unsigned long a_val = orc_ip(a);
	unsigned long b_val = orc_ip(b);
	struct orc_entry *orc_a;

	if (a_val > b_val)
		return 1;
	if (a_val < b_val)
		return -1;

	/*
	 * The "weak" section terminator entries need to always be first
	 * to ensure the lookup code skips them in favor of real entries.
	 * These terminator entries exist to handle any gaps created by
	 * whitelisted .o files which didn't get objtool generation.
	 */
	orc_a = cur_orc_table + (a - cur_orc_ip_table);

	return orc_a->type == ORC_TYPE_UNDEFINED ? -1 : 1;
}

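/* Called at module load time to register and sort the module's ORC tables. */
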
void unwind_module_init(struct module *mod, void *_orc_ip, size_t orc_ip_size,
			void *_orc, size_t orc_size)
{
	int *orc_ip = _orc_ip;
	struct orc_entry *orc = _orc;
	unsigned int num_entries = orc_ip_size / sizeof(int);

	WARN_ON_ONCE(orc_ip_size % sizeof(int) != 0 ||
		     orc_size % sizeof(*orc) != 0 ||
		     num_entries != orc_size / sizeof(*orc));

	/*
	 * The 'cur_orc_*' globals allow the orc_sort_swap() callback to
	 * associate an .orc_unwind_ip table entry with its corresponding
	 * .orc_unwind entry so they can both be swapped.
	 */
	mutex_lock(&sort_mutex);
	cur_orc_ip_table = orc_ip;
	cur_orc_table = orc;
	sort(orc_ip, num_entries, sizeof(int), orc_sort_cmp, orc_sort_swap);
	mutex_unlock(&sort_mutex);

	mod->arch.orc_unwind_ip = orc_ip;
	mod->arch.orc_unwind = orc;
	mod->arch.num_orcs = num_entries;
}
#endif

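/*
 * Build the fast lookup table at boot: orc_lookup[i] holds the index of the
 * ORC entry covering the start of the i-th LOOKUP_BLOCK_SIZE-sized block of
 * kernel text, so orc_find() only has to search one block's worth of entries.
 */
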
void __init unwind_init(void)
{
	int i;
	size_t orc_size = (void *)__stop_orc_unwind - (void *)__start_orc_unwind;
	size_t orc_ip_size = (void *)__stop_orc_unwind_ip - (void *)__start_orc_unwind_ip;
	size_t num_entries = orc_ip_size / sizeof(int);
	struct orc_entry *orc;

	if (!num_entries || orc_ip_size % sizeof(int) != 0 ||
	    orc_size % sizeof(struct orc_entry) != 0 ||
	    num_entries != orc_size / sizeof(struct orc_entry)) {
		orc_warn("WARNING: Bad or missing .orc_unwind table. Disabling unwinder.\n");
		return;
	}

	/*
	 * Note, the orc_unwind and orc_unwind_ip tables were already
	 * sorted at build time via the 'sorttable' tool.
	 * It's ready for binary search straight away, no need to sort it.
	 */

	/* Initialize the fast lookup table: */
	lookup_num_blocks = orc_lookup_end - orc_lookup;
	for (i = 0; i < lookup_num_blocks-1; i++) {
		orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind,
				 num_entries, LOOKUP_START_IP + (LOOKUP_BLOCK_SIZE * i));
		if (!orc) {
			orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
			return;
		}

		orc_lookup[i] = orc - __start_orc_unwind;
	}

	/* Initialize the ending block: */
	orc = __orc_find(__start_orc_unwind_ip, __start_orc_unwind, num_entries, LOOKUP_STOP_IP);
	if (!orc) {
		orc_warn("WARNING: Corrupt .orc_unwind table. Disabling unwinder.\n");
		return;
	}
	orc_lookup[lookup_num_blocks-1] = orc - __start_orc_unwind;

	orc_init = true;
}

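/*
 * Helpers to check that an address range of 'len' bytes lies entirely within
 * a known stack.  stack_access_ok() may switch state->stack_info to the next
 * stack (via get_stack_info()) when the unwind crosses stacks, e.g. from the
 * IRQ stack back to the task stack.
 */
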
static inline bool on_stack(struct stack_info *info, unsigned long addr, size_t len)
{
	unsigned long begin = info->begin;
	unsigned long end = info->end;

	return (info->type != STACK_TYPE_UNKNOWN &&
		addr >= begin && addr < end && addr + len > begin && addr + len <= end);
}

static bool stack_access_ok(struct unwind_state *state, unsigned long addr, size_t len)
{
	struct stack_info *info = &state->stack_info;

	if (on_stack(info, addr, len))
		return true;

	return !get_stack_info(addr, state->task, info) && on_stack(info, addr, len);
}

unsigned long unwind_get_return_address(struct unwind_state *state)
{
	return __unwind_get_return_address(state);
}
EXPORT_SYMBOL_GPL(unwind_get_return_address);

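/*
 * Begin an unwind: __unwind_start() seeds the state from 'regs' or the given
 * task; if the starting pc is not kernel text (e.g. after a NULL function
 * call), step to the next frame right away so the first address is usable.
 */
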
void unwind_start(struct unwind_state *state, struct task_struct *task,
		  struct pt_regs *regs)
{
	__unwind_start(state, task, regs);
	state->type = UNWINDER_ORC;
	if (!unwind_done(state) && !__kernel_text_address(state->pc))
		unwind_next_frame(state);
}
EXPORT_SYMBOL_GPL(unwind_start);

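/*
 * kernel_entry..kernel_entry_end is the assembly entry code; it has no caller
 * frame, so reaching it terminates the unwind.
 */
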
static bool is_entry_func(unsigned long addr)
{
	extern u32 kernel_entry;
	extern u32 kernel_entry_end;

	return addr >= (unsigned long)&kernel_entry && addr < (unsigned long)&kernel_entry_end;
}

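/*
 * Translate a return address into something symbolizable: an address inside
 * the exception vector area (eentry + vector * VECSIZE) is mapped back to the
 * matching handler (exception_table[], handle_vint or handle_reserved) plus
 * the offset within the vector, so the unwound pc points at real handler text.
 */
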
static inline unsigned long bt_address(unsigned long ra)
{
	extern unsigned long eentry;

	if (__kernel_text_address(ra))
		return ra;

	if (__module_text_address(ra))
		return ra;

	if (ra >= eentry && ra < eentry + EXCCODE_INT_END * VECSIZE) {
		unsigned long func;
		unsigned long type = (ra - eentry) / VECSIZE;
		unsigned long offset = (ra - eentry) % VECSIZE;

		switch (type) {
		case 0 ... EXCCODE_INT_START - 1:
			func = (unsigned long)exception_table[type];
			break;
		case EXCCODE_INT_START ... EXCCODE_INT_END:
			func = (unsigned long)handle_vint;
			break;
		default:
			func = (unsigned long)handle_reserved;
			break;
		}

		return func + offset;
	}

	return ra;
}

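/*
 * Advance the unwind state by one frame: look up the ORC entry for the
 * current pc, use it to recover the previous SP/FP and the return address,
 * and handle ORC_TYPE_REGS frames by reloading sp/ra/fp from the pt_regs
 * saved on the stack.  If no ORC entry exists (e.g. BPF or other generated
 * code), fall back to frame-pointer guessing and mark the state unreliable.
 */
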
bool unwind_next_frame(struct unwind_state *state)
{
	unsigned long *p, pc;
	struct pt_regs *regs;
	struct orc_entry *orc;
	struct stack_info *info = &state->stack_info;

	if (unwind_done(state))
		return false;

	/* Don't let modules unload while we're reading their ORC data. */
	preempt_disable();

	if (is_entry_func(state->pc))
		goto end;

	orc = orc_find(state->pc);
	if (!orc) {
		/*
		 * As a fallback, try to assume this code uses a frame pointer.
		 * This is useful for generated code, like BPF, which ORC
		 * doesn't know about.  This is just a guess, so the rest of
		 * the unwind is no longer considered reliable.
		 */
		orc = &orc_fp_entry;
		state->error = true;
	} else {
		if (orc->type == ORC_TYPE_UNDEFINED)
			goto err;

		if (orc->type == ORC_TYPE_END_OF_STACK)
			goto end;
	}

	switch (orc->sp_reg) {
	case ORC_REG_SP:
		if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
			orc->type = ORC_TYPE_REGS;
		else
			state->sp = state->sp + orc->sp_offset;
		break;
	case ORC_REG_FP:
		state->sp = state->fp;
		break;
	default:
		orc_warn("unknown SP base reg %d at %pB\n", orc->sp_reg, (void *)state->pc);
		goto err;
	}

	switch (orc->fp_reg) {
	case ORC_REG_PREV_SP:
		p = (unsigned long *)(state->sp + orc->fp_offset);
		if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
			goto err;

		state->fp = *p;
		break;
	case ORC_REG_UNDEFINED:
		/* Nothing. */
		break;
	default:
		orc_warn("unknown FP base reg %d at %pB\n", orc->fp_reg, (void *)state->pc);
		goto err;
	}

	switch (orc->type) {
	case ORC_TYPE_CALL:
		if (orc->ra_reg == ORC_REG_PREV_SP) {
			p = (unsigned long *)(state->sp + orc->ra_offset);
			if (!stack_access_ok(state, (unsigned long)p, sizeof(unsigned long)))
				goto err;

			pc = unwind_graph_addr(state, *p, state->sp);
			pc -= LOONGARCH_INSN_SIZE;
		} else if (orc->ra_reg == ORC_REG_UNDEFINED) {
			if (!state->ra || state->ra == state->pc)
				goto err;

			pc = unwind_graph_addr(state, state->ra, state->sp);
			pc -= LOONGARCH_INSN_SIZE;
			state->ra = 0;
		} else {
			orc_warn("unknown ra base reg %d at %pB\n", orc->ra_reg, (void *)state->pc);
			goto err;
		}
		break;
	case ORC_TYPE_REGS:
		if (info->type == STACK_TYPE_IRQ && state->sp == info->end)
			regs = (struct pt_regs *)info->next_sp;
		else
			regs = (struct pt_regs *)state->sp;

		if (!stack_access_ok(state, (unsigned long)regs, sizeof(*regs)))
			goto err;

		if ((info->end == (unsigned long)regs + sizeof(*regs)) &&
		    !regs->regs[3] && !regs->regs[1])
			goto end;

		if (user_mode(regs))
			goto end;

		pc = regs->csr_era;
		if (!__kernel_text_address(pc))
			goto err;

		state->sp = regs->regs[3];
		state->ra = regs->regs[1];
		state->fp = regs->regs[22];
		get_stack_info(state->sp, state->task, info);

		break;
	default:
		orc_warn("unknown .orc_unwind entry type %d at %pB\n", orc->type, (void *)state->pc);
		goto err;
	}

	state->pc = bt_address(pc);
	if (!state->pc) {
		pr_err("cannot find unwind pc at %pK\n", (void *)pc);
		goto err;
	}

	if (!__kernel_text_address(state->pc))
		goto err;

	preempt_enable();
	return true;

err:
	state->error = true;

end:
	preempt_enable();
	state->stack_info.type = STACK_TYPE_UNKNOWN;
	return false;
}
EXPORT_SYMBOL_GPL(unwind_next_frame);