// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <[email protected]>
 *
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
	 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
unsigned stack_trace_index[STACK_TRACE_ENTRIES];
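
/*
 * The two arrays above are parallel: stack_dump_trace[i] holds the
 * i-th return address of the deepest stack seen so far, while
 * stack_trace_index[i] holds how many bytes that frame sits from the
 * top of the stack (the "Depth" column in the output below). Unused
 * slots stay at ULONG_MAX, which also terminates the trace.
 */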

/*
 * Reserve one entry for the passed in ip. This will allow
 * us to remove most or all of the stack size overhead
 * added by the stack tracer itself.
 */
struct stack_trace stack_trace_max = {
	.max_entries		= STACK_TRACE_ENTRIES - 1,
	.entries		= &stack_dump_trace[0],
};
34 | ||
bb99d8cc | 35 | unsigned long stack_trace_max_size; |
d332736d | 36 | arch_spinlock_t stack_trace_max_lock = |
edc35bd7 | 37 | (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; |
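
/*
 * Note: this is a raw arch_spinlock_t rather than a normal spinlock
 * because it is taken from within the function-trace callback, where
 * the debugging and tracing hooks of the higher-level lock types
 * could themselves be traced and recurse. It is only ever taken with
 * interrupts disabled.
 */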

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;
static int last_stack_tracer_enabled;

void stack_trace_print(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_max.nr_entries);

	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ULONG_MAX)
			break;
		if (i+1 == stack_trace_max.nr_entries ||
		    stack_dump_trace[i+1] == ULONG_MAX)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
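
/*
 * For illustration only, the printout roughly looks like this
 * (addresses, symbols and sizes will of course vary):
 *
 *         Depth    Size   Location    (10 entries)
 *         -----    ----   --------
 *   0)     2088      64   update_group_capacity+0x2c/0x2a0
 *   1)     2024     304   update_sd_lb_stats+0xf8/0x4c0
 *   ...
 *
 * "Depth" is how far the entry sits from the top of the stack and
 * "Size" is the stack footprint of that single function.
 */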
67 | ||
bb99d8cc | 68 | /* |
505d3085 | 69 | * When arch-specific code overrides this function, the following |
d332736d | 70 | * data should be filled up, assuming stack_trace_max_lock is held to |
bb99d8cc AT |
71 | * prevent concurrent updates. |
72 | * stack_trace_index[] | |
73 | * stack_trace_max | |
74 | * stack_trace_max_size | |
75 | */ | |
76 | void __weak | |
d4ecbfc4 | 77 | check_stack(unsigned long ip, unsigned long *stack) |
e5a81b62 | 78 | { |
e3172181 | 79 | unsigned long this_size, flags; unsigned long *p, *top, *start; |
4df29712 | 80 | static int tracer_frame; |
6aa7de05 | 81 | int frame_size = READ_ONCE(tracer_frame); |
72ac426a | 82 | int i, x; |

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_max.nr_entries = 0;
	stack_trace_max.skip = 3;

	save_stack_trace(&stack_trace_max);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_max.nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_max.nr_entries)
		i = 0;
130 | ||
1b6cced6 SR |
131 | /* |
132 | * Now find where in the stack these are. | |
133 | */ | |
72ac426a | 134 | x = 0; |
87889501 | 135 | start = stack; |
1b6cced6 SR |
136 | top = (unsigned long *) |
137 | (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE); | |
138 | ||
139 | /* | |
140 | * Loop through all the entries. One of the entries may | |
141 | * for some reason be missed on the stack, so we may | |
142 | * have to account for them. If they are all there, this | |
143 | * loop will only happen once. This code only takes place | |
144 | * on a new max, so it is far from a fast path. | |
145 | */ | |
bb99d8cc | 146 | while (i < stack_trace_max.nr_entries) { |
0a37119d | 147 | int found = 0; |
1b6cced6 | 148 | |
bb99d8cc | 149 | stack_trace_index[x] = this_size; |
1b6cced6 SR |
150 | p = start; |
151 | ||
bb99d8cc | 152 | for (; p < top && i < stack_trace_max.nr_entries; p++) { |
72ac426a SRRH |
153 | if (stack_dump_trace[i] == ULONG_MAX) |
154 | break; | |
6e22c836 YS |
155 | /* |
156 | * The READ_ONCE_NOCHECK is used to let KASAN know that | |
157 | * this is not a stack-out-of-bounds error. | |
158 | */ | |
159 | if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) { | |
72ac426a | 160 | stack_dump_trace[x] = stack_dump_trace[i++]; |
bb99d8cc | 161 | this_size = stack_trace_index[x++] = |
1b6cced6 | 162 | (top - p) * sizeof(unsigned long); |
0a37119d | 163 | found = 1; |
1b6cced6 SR |
164 | /* Start the search from here */ |
165 | start = p + 1; | |
4df29712 SRRH |
166 | /* |
167 | * We do not want to show the overhead | |
168 | * of the stack tracer stack in the | |
169 | * max stack. If we haven't figured | |
170 | * out what that is, then figure it out | |
171 | * now. | |
172 | */ | |
72ac426a | 173 | if (unlikely(!tracer_frame)) { |
4df29712 SRRH |
174 | tracer_frame = (p - stack) * |
175 | sizeof(unsigned long); | |
bb99d8cc | 176 | stack_trace_max_size -= tracer_frame; |
4df29712 | 177 | } |
1b6cced6 SR |
178 | } |
179 | } | |
180 | ||
0a37119d SR |
181 | if (!found) |
182 | i++; | |
1b6cced6 SR |
183 | } |
184 | ||
bb99d8cc | 185 | stack_trace_max.nr_entries = x; |
72ac426a SRRH |
186 | for (; x < i; x++) |
187 | stack_dump_trace[x] = ULONG_MAX; | |
188 | ||
a70857e4 | 189 | if (task_stack_end_corrupted(current)) { |
bb99d8cc | 190 | stack_trace_print(); |
e3172181 MK |
191 | BUG(); |
192 | } | |
193 | ||
e5a81b62 | 194 | out: |
d332736d | 195 | arch_spin_unlock(&stack_trace_max_lock); |
a5e25883 | 196 | local_irq_restore(flags); |
e5a81b62 SR |
197 | } |
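
/*
 * A rough sketch of what the matching loop above does: after
 * save_stack_trace() fills stack_dump_trace[] with return addresses,
 * we scan the actual stack words from the current frame toward the
 * top of the stack. Each time a stack word equals the next expected
 * return address, we record (top - p) * sizeof(unsigned long) as that
 * entry's depth. The per-entry "Size" reported later is simply the
 * difference between two consecutive depths.
 */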
198 | ||
199 | static void | |
a1e2e31d SR |
200 | stack_trace_call(unsigned long ip, unsigned long parent_ip, |
201 | struct ftrace_ops *op, struct pt_regs *pt_regs) | |
e5a81b62 | 202 | { |
87889501 | 203 | unsigned long stack; |
e5a81b62 | 204 | |
5168ae50 | 205 | preempt_disable_notrace(); |
e5a81b62 | 206 | |
e5a81b62 | 207 | /* no atomic needed, we only modify this variable by this cpu */ |
8aaf1ee7 SRV |
208 | __this_cpu_inc(disable_stack_tracer); |
209 | if (__this_cpu_read(disable_stack_tracer) != 1) | |
e5a81b62 SR |
210 | goto out; |
211 | ||
b00d607b SRV |
212 | /* If rcu is not watching, then save stack trace can fail */ |
213 | if (!rcu_is_watching()) | |
214 | goto out; | |
215 | ||
72ac426a | 216 | ip += MCOUNT_INSN_SIZE; |
4df29712 SRRH |
217 | |
218 | check_stack(ip, &stack); | |
e5a81b62 SR |
219 | |
220 | out: | |
8aaf1ee7 | 221 | __this_cpu_dec(disable_stack_tracer); |
e5a81b62 | 222 | /* prevent recursion in schedule */ |
5168ae50 | 223 | preempt_enable_notrace(); |
e5a81b62 SR |
224 | } |
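
/*
 * Two things worth noting above: the per-cpu disable_stack_tracer
 * counter makes any nested invocation of this callback on the same
 * CPU (e.g. from a function called by check_stack() itself) bail out
 * immediately, and passing &stack hands check_stack() the address of
 * a local variable, which is how it locates the current position on
 * the stack.
 */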
225 | ||
226 | static struct ftrace_ops trace_ops __read_mostly = | |
227 | { | |
228 | .func = stack_trace_call, | |
4740974a | 229 | .flags = FTRACE_OPS_FL_RECURSION_SAFE, |
e5a81b62 SR |
230 | }; |
231 | ||
232 | static ssize_t | |
233 | stack_max_size_read(struct file *filp, char __user *ubuf, | |
234 | size_t count, loff_t *ppos) | |
235 | { | |
236 | unsigned long *ptr = filp->private_data; | |
237 | char buf[64]; | |
238 | int r; | |
239 | ||
240 | r = snprintf(buf, sizeof(buf), "%ld\n", *ptr); | |
241 | if (r > sizeof(buf)) | |
242 | r = sizeof(buf); | |
243 | return simple_read_from_buffer(ubuf, count, ppos, buf, r); | |
244 | } | |
245 | ||
246 | static ssize_t | |
247 | stack_max_size_write(struct file *filp, const char __user *ubuf, | |
248 | size_t count, loff_t *ppos) | |
249 | { | |
250 | long *ptr = filp->private_data; | |
251 | unsigned long val, flags; | |
e5a81b62 SR |
252 | int ret; |
253 | ||
22fe9b54 PH |
254 | ret = kstrtoul_from_user(ubuf, count, 10, &val); |
255 | if (ret) | |
e5a81b62 SR |
256 | return ret; |
257 | ||
a5e25883 | 258 | local_irq_save(flags); |
4f48f8b7 LJ |
259 | |
260 | /* | |
261 | * In case we trace inside arch_spin_lock() or after (NMI), | |
262 | * we will cause circular lock, so we also need to increase | |
8aaf1ee7 | 263 | * the percpu disable_stack_tracer here. |
4f48f8b7 | 264 | */ |
8aaf1ee7 | 265 | __this_cpu_inc(disable_stack_tracer); |
4f48f8b7 | 266 | |
d332736d | 267 | arch_spin_lock(&stack_trace_max_lock); |
e5a81b62 | 268 | *ptr = val; |
d332736d | 269 | arch_spin_unlock(&stack_trace_max_lock); |
4f48f8b7 | 270 | |
8aaf1ee7 | 271 | __this_cpu_dec(disable_stack_tracer); |
a5e25883 | 272 | local_irq_restore(flags); |
e5a81b62 SR |
273 | |
274 | return count; | |
275 | } | |
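
/*
 * A typical way to re-arm the tracer is to clear the recorded maximum
 * (the path assumes tracefs is mounted at /sys/kernel/debug/tracing):
 *
 *	# echo 0 > /sys/kernel/debug/tracing/stack_max_size
 *	# cat /sys/kernel/debug/tracing/stack_max_size
 */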
276 | ||
f38f1d2a | 277 | static const struct file_operations stack_max_size_fops = { |
e5a81b62 SR |
278 | .open = tracing_open_generic, |
279 | .read = stack_max_size_read, | |
280 | .write = stack_max_size_write, | |
6038f373 | 281 | .llseek = default_llseek, |
e5a81b62 SR |
282 | }; |
283 | ||
284 | static void * | |
2fc5f0cf | 285 | __next(struct seq_file *m, loff_t *pos) |
e5a81b62 | 286 | { |
2fc5f0cf | 287 | long n = *pos - 1; |
e5a81b62 | 288 | |
bb99d8cc | 289 | if (n > stack_trace_max.nr_entries || stack_dump_trace[n] == ULONG_MAX) |
e5a81b62 SR |
290 | return NULL; |
291 | ||
2fc5f0cf | 292 | m->private = (void *)n; |
1b6cced6 | 293 | return &m->private; |
e5a81b62 SR |
294 | } |
295 | ||
2fc5f0cf LZ |
296 | static void * |
297 | t_next(struct seq_file *m, void *v, loff_t *pos) | |
e5a81b62 | 298 | { |
2fc5f0cf LZ |
299 | (*pos)++; |
300 | return __next(m, pos); | |
301 | } | |
e5a81b62 | 302 | |
2fc5f0cf LZ |
303 | static void *t_start(struct seq_file *m, loff_t *pos) |
304 | { | |
e5a81b62 | 305 | local_irq_disable(); |
4f48f8b7 | 306 | |
8aaf1ee7 | 307 | __this_cpu_inc(disable_stack_tracer); |
4f48f8b7 | 308 | |
d332736d | 309 | arch_spin_lock(&stack_trace_max_lock); |
e5a81b62 | 310 | |
522a110b LW |
311 | if (*pos == 0) |
312 | return SEQ_START_TOKEN; | |
313 | ||
2fc5f0cf | 314 | return __next(m, pos); |
e5a81b62 SR |
315 | } |
316 | ||
317 | static void t_stop(struct seq_file *m, void *p) | |
318 | { | |
d332736d | 319 | arch_spin_unlock(&stack_trace_max_lock); |
4f48f8b7 | 320 | |
8aaf1ee7 | 321 | __this_cpu_dec(disable_stack_tracer); |
4f48f8b7 | 322 | |
e5a81b62 SR |
323 | local_irq_enable(); |
324 | } | |
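
/*
 * t_start() and t_stop() bracket every chunk of a seq_file read, so
 * stack_trace_max_lock is held (with IRQs off and the stack tracer
 * disabled on this CPU) for the whole iteration. That keeps the
 * snapshot consistent: check_stack() cannot rewrite the arrays while
 * userspace is reading them.
 */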
325 | ||
962e3707 | 326 | static void trace_lookup_stack(struct seq_file *m, long i) |
e5a81b62 | 327 | { |
1b6cced6 | 328 | unsigned long addr = stack_dump_trace[i]; |
e5a81b62 | 329 | |
962e3707 | 330 | seq_printf(m, "%pS\n", (void *)addr); |
e5a81b62 SR |
331 | } |
332 | ||
e447e1df SR |
333 | static void print_disabled(struct seq_file *m) |
334 | { | |
335 | seq_puts(m, "#\n" | |
336 | "# Stack tracer disabled\n" | |
337 | "#\n" | |
338 | "# To enable the stack tracer, either add 'stacktrace' to the\n" | |
339 | "# kernel command line\n" | |
340 | "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n" | |
341 | "#\n"); | |
342 | } | |
343 | ||
e5a81b62 SR |
344 | static int t_show(struct seq_file *m, void *v) |
345 | { | |
522a110b | 346 | long i; |
1b6cced6 SR |
347 | int size; |
348 | ||
522a110b | 349 | if (v == SEQ_START_TOKEN) { |
eb1871f3 | 350 | seq_printf(m, " Depth Size Location" |
1b6cced6 | 351 | " (%d entries)\n" |
eb1871f3 | 352 | " ----- ---- --------\n", |
bb99d8cc | 353 | stack_trace_max.nr_entries); |
e447e1df | 354 | |
bb99d8cc | 355 | if (!stack_tracer_enabled && !stack_trace_max_size) |
e447e1df SR |
356 | print_disabled(m); |
357 | ||
1b6cced6 SR |
358 | return 0; |
359 | } | |
e5a81b62 | 360 | |
522a110b LW |
361 | i = *(long *)v; |
362 | ||
bb99d8cc | 363 | if (i >= stack_trace_max.nr_entries || |
1b6cced6 | 364 | stack_dump_trace[i] == ULONG_MAX) |
e5a81b62 SR |
365 | return 0; |
366 | ||
bb99d8cc | 367 | if (i+1 == stack_trace_max.nr_entries || |
1b6cced6 | 368 | stack_dump_trace[i+1] == ULONG_MAX) |
bb99d8cc | 369 | size = stack_trace_index[i]; |
1b6cced6 | 370 | else |
bb99d8cc | 371 | size = stack_trace_index[i] - stack_trace_index[i+1]; |
1b6cced6 | 372 | |
bb99d8cc | 373 | seq_printf(m, "%3ld) %8d %5d ", i, stack_trace_index[i], size); |
1b6cced6 SR |
374 | |
375 | trace_lookup_stack(m, i); | |
e5a81b62 SR |
376 | |
377 | return 0; | |
378 | } | |
379 | ||
f38f1d2a | 380 | static const struct seq_operations stack_trace_seq_ops = { |
e5a81b62 SR |
381 | .start = t_start, |
382 | .next = t_next, | |
383 | .stop = t_stop, | |
384 | .show = t_show, | |
385 | }; | |
386 | ||
387 | static int stack_trace_open(struct inode *inode, struct file *file) | |
388 | { | |
d8cc1ab7 | 389 | return seq_open(file, &stack_trace_seq_ops); |
e5a81b62 SR |
390 | } |
391 | ||
f38f1d2a | 392 | static const struct file_operations stack_trace_fops = { |
e5a81b62 SR |
393 | .open = stack_trace_open, |
394 | .read = seq_read, | |
395 | .llseek = seq_lseek, | |
d8cc1ab7 | 396 | .release = seq_release, |
e5a81b62 SR |
397 | }; |

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos)
{
	int ret;

	mutex_lock(&stack_sysctl_mutex);

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write ||
	    (last_stack_tracer_enabled == !!stack_tracer_enabled))
		goto out;

	last_stack_tracer_enabled = !!stack_tracer_enabled;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);

 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}
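
/*
 * This is the handler behind /proc/sys/kernel/stack_tracer_enabled;
 * flipping the value registers or unregisters the ftrace callback:
 *
 *	# echo 1 > /proc/sys/kernel/stack_tracer_enabled
 *	# echo 0 > /proc/sys/kernel/stack_tracer_enabled
 */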

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	if (strncmp(str, "_filter=", 8) == 0)
		strncpy(stack_trace_filter_buf, str+8, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	last_stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);
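
/*
 * Because __setup() matches on the "stacktrace" prefix, both of these
 * kernel command-line forms land in enable_stacktrace():
 *
 *	stacktrace
 *	stacktrace_filter=vfs_read,vfs_write
 *
 * The second form also seeds the early ftrace filter applied in
 * stack_trace_init() below (the function names above are only
 * examples).
 */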

static __init int stack_trace_init(void)
{
	struct dentry *d_tracer;

	d_tracer = tracing_init_dentry();
	if (IS_ERR(d_tracer))
		return 0;

	trace_create_file("stack_max_size", 0644, d_tracer,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, d_tracer,
			NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, d_tracer,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);