// SPDX-License-Identifier: GPL-2.0
/*
 * Stack trace management functions
 *
 *  Copyright IBM Corp. 2006
 */

#include <linux/perf_event.h>
#include <linux/stacktrace.h>
#include <linux/uaccess.h>
#include <linux/compat.h>
#include <asm/stacktrace.h>
#include <asm/unwind.h>
#include <asm/kprobes.h>
#include <asm/ptrace.h>

void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
		     struct task_struct *task, struct pt_regs *regs)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, regs, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr || !consume_entry(cookie, addr))
			break;
	}
}

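/*
 * Illustrative sketch (not part of the original file): a minimal
 * consume_entry callback as arch_stack_walk() invokes it. The struct and
 * function names below are hypothetical; only the stack_trace_consume_fn
 * contract (return false to stop the walk) is taken from
 * <linux/stacktrace.h>.
 *
 *	struct example_trace {
 *		unsigned long entries[16];
 *		unsigned int nr;
 *	};
 *
 *	static bool example_consume_entry(void *cookie, unsigned long addr)
 *	{
 *		struct example_trace *t = cookie;
 *
 *		if (t->nr >= ARRAY_SIZE(t->entries))
 *			return false;	// buffer full: stop the walk
 *		t->entries[t->nr++] = addr;
 *		return true;		// continue with the next frame
 *	}
 */
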
int arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
			     void *cookie, struct task_struct *task)
{
	struct unwind_state state;
	unsigned long addr;

	unwind_for_each_frame(&state, task, NULL, 0) {
		if (state.stack_info.type != STACK_TYPE_TASK)
			return -EINVAL;

		if (state.regs)
			return -EINVAL;

		addr = unwind_get_return_address(&state);
		if (!addr)
			return -EINVAL;

#ifdef CONFIG_RETHOOK
		/*
		 * Mark stacktraces with krethook functions on them
		 * as unreliable.
		 */
		if (state.ip == (unsigned long)arch_rethook_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, addr))
			return -EINVAL;
	}

	/* Check for stack corruption */
	if (unwind_error(&state))
		return -EINVAL;
	return 0;
}

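/*
 * Illustrative sketch (not part of the original file): callers such as
 * livepatch reach this function through the generic wrapper
 * stack_trace_save_tsk_reliable(), which returns the number of stored
 * entries or a negative error (e.g. the -EINVAL produced above) when the
 * trace must not be trusted. The buffer size here is hypothetical.
 *
 *	unsigned long entries[64];
 *	int nr;
 *
 *	nr = stack_trace_save_tsk_reliable(task, entries, ARRAY_SIZE(entries));
 *	if (nr < 0)
 *		return nr;	// unreliable stack, caller must back off
 */
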
static inline bool store_ip(stack_trace_consume_fn consume_entry, void *cookie,
			    struct perf_callchain_entry_ctx *entry, bool perf,
			    unsigned long ip)
{
#ifdef CONFIG_PERF_EVENTS
	if (perf) {
		if (perf_callchain_store(entry, ip))
			return false;
		return true;
	}
#endif
	return consume_entry(cookie, ip);
}

static inline bool ip_invalid(unsigned long ip)
{
	/*
	 * Perform some basic checks if an instruction address taken
	 * from an unreliable source is invalid.
	 */
	if (ip & 1)
		return true;
	if (ip < mmap_min_addr)
		return true;
	if (ip >= current->mm->context.asce_limit)
		return true;
	return false;
}

static inline bool ip_within_vdso(unsigned long ip)
{
	return in_range(ip, current->mm->context.vdso_base, vdso_text_size());
}

void arch_stack_walk_user_common(stack_trace_consume_fn consume_entry, void *cookie,
				 struct perf_callchain_entry_ctx *entry,
				 const struct pt_regs *regs, bool perf)
{
	struct stack_frame_vdso_wrapper __user *sf_vdso;
	struct stack_frame_user __user *sf;
	unsigned long ip, sp;
	bool first = true;

	if (is_compat_task())
		return;
	if (!current->mm)
		return;
	ip = instruction_pointer(regs);
	if (!store_ip(consume_entry, cookie, entry, perf, ip))
		return;
	sf = (void __user *)user_stack_pointer(regs);
	pagefault_disable();
	while (1) {
		if (__get_user(sp, &sf->back_chain))
			break;
		/*
		 * VDSO entry code has a non-standard stack frame layout.
		 * See VDSO user wrapper code for details.
		 */
		if (!sp && ip_within_vdso(ip)) {
			sf_vdso = (void __user *)sf;
			if (__get_user(ip, &sf_vdso->return_address))
				break;
			sp = (unsigned long)sf + STACK_FRAME_VDSO_OVERHEAD;
			sf = (void __user *)sp;
			if (__get_user(sp, &sf->back_chain))
				break;
		} else {
			sf = (void __user *)sp;
			if (__get_user(ip, &sf->gprs[8]))
				break;
		}
		/* Sanity check: ABI requires SP to be 8 byte aligned. */
		if (!sp || sp & 0x7)
			break;
		if (ip_invalid(ip)) {
			/*
			 * If the instruction address is invalid, and this
			 * is the first stack frame, assume r14 has not
			 * been written to the stack yet. Otherwise exit.
			 */
			if (!first)
				break;
			ip = regs->gprs[14];
			if (ip_invalid(ip))
				break;
		}
		if (!store_ip(consume_entry, cookie, entry, perf, ip))
			break;
		first = false;
	}
	pagefault_enable();
}

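/*
 * Illustrative sketch (not part of the original file): the perf callchain
 * code is expected to reach the common walker with perf == true and a
 * perf_callchain_entry_ctx, while the stacktrace path below passes a
 * consume_entry callback instead. Roughly:
 *
 *	void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
 *				 struct pt_regs *regs)
 *	{
 *		arch_stack_walk_user_common(NULL, NULL, entry, regs, true);
 *	}
 */
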
void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
			  const struct pt_regs *regs)
{
	arch_stack_walk_user_common(consume_entry, cookie, NULL, regs, false);
}

unsigned long return_address(unsigned int n)
{
	struct unwind_state state;
	unsigned long addr;

	/* Increment to skip current stack entry */
	n++;

	unwind_for_each_frame(&state, NULL, NULL, 0) {
		addr = unwind_get_return_address(&state);
		if (!addr)
			break;
		if (!n--)
			return addr;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(return_address);
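
/*
 * Illustrative example (not part of the original file): return_address(n)
 * returns the return address found n frames up the call stack, or 0 once
 * the unwinder runs out of frames. A hypothetical debugging use:
 *
 *	pr_debug("called from %pS\n", (void *)return_address(1));
 */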