/*
 * stacktrace.c : stacktracing APIs needed by rest of kernel
 *	(wrappers over ARC dwarf based unwinder)
 *
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  vineetg: aug 2009
 *  -Implemented CONFIG_STACKTRACE APIs, primarily save_stack_trace_tsk()
 *   for displaying a task's kernel mode call stack in /proc/<pid>/stack
 *  -Iterator based approach to have a single copy of the unwinding core and
 *   the APIs needing unwinding; the iterator implements the logic regarding:
 *      = which frame onwards to start capture
 *      = which frame to stop capturing at (wchan)
 *      = specifics of data structs where trace is saved (CONFIG_STACKTRACE etc)
 *
 *  vineetg: March 2009
 *  -Implemented correct versions of thread_saved_pc() and get_wchan()
 *
 *  rajeshwarr: 2008
 *  -Initial implementation
 */

#include <linux/ptrace.h>
#include <linux/export.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/sched/debug.h>

#include <asm/arcregs.h>
#include <asm/unwind.h>
#include <asm/switch_to.h>

/*-------------------------------------------------------------------------
 *		Unwinder Iterator
 *-------------------------------------------------------------------------
 */

#ifdef CONFIG_ARC_DW2_UNWIND

static void seed_unwind_frame_info(struct task_struct *tsk,
				   struct pt_regs *regs,
				   struct unwind_frame_info *frame_info)
{
	/*
	 * synchronous unwinding (e.g. dump_stack)
	 *  - uses current values of SP and friends
	 */
	if (tsk == NULL && regs == NULL) {
		unsigned long fp, sp, blink, ret;
		frame_info->task = current;

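		/*
		 * Grab the live register state: on ARC, r27 is the frame
		 * pointer, r28 the stack pointer, r31 (blink) the return
		 * address and r63 (pcl) the word-aligned current PC, which
		 * the unwinder uses as the starting PC.
		 */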
		__asm__ __volatile__(
			"mov %0,r27\n\t"
			"mov %1,r28\n\t"
			"mov %2,r31\n\t"
			"mov %3,r63\n\t"
			: "=r"(fp), "=r"(sp), "=r"(blink), "=r"(ret)
		);

		frame_info->regs.r27 = fp;
		frame_info->regs.r28 = sp;
		frame_info->regs.r31 = blink;
		frame_info->regs.r63 = ret;
		frame_info->call_frame = 0;
	} else if (regs == NULL) {
		/*
		 * Asynchronous unwinding of a sleeping task
		 *  - Gets SP etc from the task's pt_regs (saved at the bottom
		 *    of the task's kernel mode stack)
		 */

		frame_info->task = tsk;

		frame_info->regs.r27 = TSK_K_FP(tsk);
		frame_info->regs.r28 = TSK_K_ESP(tsk);
		frame_info->regs.r31 = TSK_K_BLINK(tsk);
		frame_info->regs.r63 = (unsigned int)__switch_to;

		/* In the prologue of __switch_to, FP is first saved on the
		 * stack and then SP is copied to FP. Dwarf assumes the CFA is
		 * FP based, but we didn't save FP; the value retrieved above
		 * is FP's state in the previous frame.
		 * As a workaround, we unwind from the start of __switch_to
		 * and adjust SP accordingly. The other limitation is that
		 * dwarf rules are not generated for the inline assembly in
		 * the __switch_to code.
		 */
		frame_info->regs.r27 = 0;
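		/*
		 * Note: the 60 byte bump below presumably skips past the
		 * callee-saved register area pushed by __switch_to's
		 * prologue, so SP looks the way dwarf expects it at the
		 * start of __switch_to (the interpretation of the constant
		 * is an assumption, not re-derived from the prologue).
		 */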
		frame_info->regs.r28 += 60;
		frame_info->call_frame = 0;

	} else {
		/*
		 * Asynchronous unwinding of intr/exception
		 *  - Just uses the pt_regs passed
		 */
		frame_info->task = tsk;

		frame_info->regs.r27 = regs->fp;
		frame_info->regs.r28 = regs->sp;
		frame_info->regs.r31 = regs->blink;
		frame_info->regs.r63 = regs->ret;
		frame_info->call_frame = 0;
	}
}

#endif

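/*
 * Core unwind iterator: seeds the frame state for the requested context
 * (current, sleeping task, or interrupted pt_regs), then feeds each PC to
 * @consumer_fn and unwinds one frame at a time, stopping when the callback
 * returns -1, the PC leaves kernel text, or arc_unwind() fails.
 */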
notrace noinline unsigned int
arc_unwind_core(struct task_struct *tsk, struct pt_regs *regs,
		int (*consumer_fn) (unsigned int, void *), void *arg)
{
#ifdef CONFIG_ARC_DW2_UNWIND
	int ret = 0;
	unsigned int address;
	struct unwind_frame_info frame_info;

	seed_unwind_frame_info(tsk, regs, &frame_info);

	while (1) {
		address = UNW_PC(&frame_info);

		if (!address || !__kernel_text_address(address))
			break;

		if (consumer_fn(address, arg) == -1)
			break;

		ret = arc_unwind(&frame_info);
		if (ret)
			break;

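		/*
		 * Seed the next iteration's PC from the return address
		 * (blink) just recovered for this frame.
		 */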
		frame_info.regs.r63 = frame_info.regs.r31;
	}

	return address;		/* return the last address it saw */
#else
	/* On ARC, only the dwarf based unwinder works. FP based backtracing
	 * is not possible (even with -fno-omit-frame-pointer) because of the
	 * way the function prologue is set up (callee regs saved first and
	 * FP set afterwards, not the other way around).
	 */
	pr_warn_once("CONFIG_ARC_DW2_UNWIND needs to be enabled\n");
	return 0;

#endif
}

/*-------------------------------------------------------------------------
 * callbacks called by unwinder iterator to implement kernel APIs
 *
 * The callback can return -1 to force the iterator to stop; by default it
 * keeps going till the bottom-most frame.
 *-------------------------------------------------------------------------
 */

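/*
 * Each callback below gets one PC per frame plus an opaque cookie. As a
 * sketch of that contract (hypothetical, not part of this file's API), a
 * consumer that merely counts frames would look like:
 *
 *	static int __count_frames(unsigned int address, void *arg)
 *	{
 *		(*(unsigned int *)arg)++;
 *		return 0;	// 0: keep unwinding, -1: stop
 *	}
 */
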
/* Call-back which plugs into unwinding core to dump the stack in
 * case of panic/OOPs/BUG etc
 */
static int __print_sym(unsigned int address, void *unused)
{
	__print_symbol(" %s\n", address);
	return 0;
}

#ifdef CONFIG_STACKTRACE

/* Call-back which plugs into unwinding core to capture the
 * traces needed by kernel on /proc/<pid>/stack
 */
static int __collect_all(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

static int __collect_all_but_sched(unsigned int address, void *arg)
{
	struct stack_trace *trace = arg;

	if (in_sched_functions(address))
		return 0;

	if (trace->skip > 0)
		trace->skip--;
	else
		trace->entries[trace->nr_entries++] = address;

	if (trace->nr_entries >= trace->max_entries)
		return -1;

	return 0;
}

#endif

static int __get_first_nonsched(unsigned int address, void *unused)
{
	if (in_sched_functions(address))
		return 0;

	return -1;
}

/*-------------------------------------------------------------------------
 *		APIs expected by various kernel sub-systems
 *-------------------------------------------------------------------------
 */

noinline void show_stacktrace(struct task_struct *tsk, struct pt_regs *regs)
{
	pr_info("\nStack Trace:\n");
	arc_unwind_core(tsk, regs, __print_sym, NULL);
}
EXPORT_SYMBOL(show_stacktrace);

/* Expected by sched code */
void show_stack(struct task_struct *tsk, unsigned long *sp)
{
	show_stacktrace(tsk, NULL);
}

/* Another API expected by the scheduler; shows up in "ps" as the Wait Channel.
 * Of course just returning schedule() would be pointless, so unwind until
 * we reach a function that is not in scheduler code.
 */
unsigned int get_wchan(struct task_struct *tsk)
{
	return arc_unwind_core(tsk, NULL, __get_first_nonsched, NULL);
}

#ifdef CONFIG_STACKTRACE

/*
 * API required by CONFIG_STACKTRACE, CONFIG_LATENCYTOP.
 * A typical use is when /proc/<pid>/stack is queried by userland
 */
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	/* Assumes @tsk is sleeping so unwinds from __switch_to */
	arc_unwind_core(tsk, NULL, __collect_all_but_sched, trace);
}

void save_stack_trace(struct stack_trace *trace)
{
	/* Pass NULL for task so it unwinds the current call frame */
	arc_unwind_core(NULL, NULL, __collect_all, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);
#endif