/*
 * Provide a default dump_stack() function for architectures
 * which don't implement their own.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/atomic.h>

static void __dump_stack(void)
{
	dump_stack_print_info(KERN_DEFAULT);
	show_stack(NULL, NULL);
}

/**
 * dump_stack - dump the current task information and its stack trace
 *
 * Architectures can override this implementation by implementing their own.
 */
#ifdef CONFIG_SMP
static atomic_t dump_lock = ATOMIC_INIT(-1);

asmlinkage void dump_stack(void)
{
	int was_locked;
	int old;
	int cpu;

	/*
	 * Permit this CPU to perform nested stack dumps while serialising
	 * against other CPUs.
	 */
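	/*
	 * dump_lock holds -1 when free, or the id of the CPU that currently
	 * owns it. The atomic_cmpxchg() below either takes the lock
	 * (old == -1), detects a nested dump on this CPU (old == cpu), or
	 * spins until the owning CPU releases it.
	 */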
	preempt_disable();

retry:
	cpu = smp_processor_id();
	old = atomic_cmpxchg(&dump_lock, -1, cpu);
	if (old == -1) {
		was_locked = 0;
	} else if (old == cpu) {
		was_locked = 1;
	} else {
		cpu_relax();
		goto retry;
	}

	__dump_stack();

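	/* Only the outermost dump on this CPU releases the lock. */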
	if (!was_locked)
		atomic_set(&dump_lock, -1);

	preempt_enable();
}
#else
asmlinkage void dump_stack(void)
{
	__dump_stack();
}
#endif
EXPORT_SYMBOL(dump_stack);
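
/*
 * Illustrative sketch (not part of this file): a typical caller simply
 * invokes dump_stack() from an unexpected error path to log the current
 * task's backtrace. The helper and message below are hypothetical.
 */
#if 0
static void example_report_bad_state(const char *what)
{
	pr_err("unexpected state: %s\n", what);
	dump_stack();	/* print task info and the current stack trace */
}
#endif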