/*
 * NMI backtrace support
 *
 * Gratuitously copied from arch/x86/kernel/apic/hw_nmi.c by Russell King,
 * with the following header:
 *
 *  HW NMI watchdog support
 *
 *  started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 *  Arch specific calls to support NMI watchdog
 *
 *  Bits copied from original nmi.c file
 */
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/kprobes.h>
#include <linux/nmi.h>
#include <linux/seq_buf.h>

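/*
 * This generic implementation is only built when the architecture opts in
 * by defining arch_trigger_all_cpu_backtrace and supplying an NMI-capable
 * way of interrupting the other CPUs (the raise() callback below).
 */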
#ifdef arch_trigger_all_cpu_backtrace
/* For reliability, we're prepared to waste bits here. */
static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
static cpumask_t printtrace_mask;

#define NMI_BUF_SIZE		4096

struct nmi_seq_buf {
	unsigned char		buffer[NMI_BUF_SIZE];
	struct seq_buf		seq;
};

/* Safe printing in NMI context */
static DEFINE_PER_CPU(struct nmi_seq_buf, nmi_print_seq);

/* "in progress" flag of arch_trigger_all_cpu_backtrace */
static unsigned long backtrace_flag;

static void print_seq_line(struct nmi_seq_buf *s, int start, int end)
{
	const char *buf = s->buffer + start;

	printk("%.*s", (end - start) + 1, buf);
}

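/*
 * Trigger an NMI backtrace on all online CPUs (optionally excluding the
 * calling CPU), then print the collected per-CPU traces once every targeted
 * CPU has responded or a 10 second timeout has expired.  The architecture
 * supplies @raise, which must deliver the backtrace NMI to the CPUs in @mask.
 */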
void nmi_trigger_all_cpu_backtrace(bool include_self,
				   void (*raise)(cpumask_t *mask))
{
	struct nmi_seq_buf *s;
	int i, cpu, this_cpu = get_cpu();

	if (test_and_set_bit(0, &backtrace_flag)) {
		/*
		 * If there is already a trigger_all_cpu_backtrace() in
		 * progress (backtrace_flag == 1), don't print duplicate
		 * CPU dumps.
		 */
		put_cpu();
		return;
	}

	cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
	if (!include_self)
		cpumask_clear_cpu(this_cpu, to_cpumask(backtrace_mask));

	cpumask_copy(&printtrace_mask, to_cpumask(backtrace_mask));

	/*
	 * Set up the per-CPU seq_buf buffers that the NMIs running on the
	 * other CPUs will write to.
	 */
	for_each_cpu(cpu, to_cpumask(backtrace_mask)) {
		s = &per_cpu(nmi_print_seq, cpu);
		seq_buf_init(&s->seq, s->buffer, NMI_BUF_SIZE);
	}

	if (!cpumask_empty(to_cpumask(backtrace_mask))) {
		pr_info("Sending NMI to %s CPUs:\n",
			(include_self ? "all" : "other"));
		raise(to_cpumask(backtrace_mask));
	}

	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
	for (i = 0; i < 10 * 1000; i++) {
		if (cpumask_empty(to_cpumask(backtrace_mask)))
			break;
		mdelay(1);
		touch_softlockup_watchdog();
	}

	/*
	 * Now that all the NMIs have triggered, we can dump out their
	 * back traces safely to the console.
	 */
	for_each_cpu(cpu, &printtrace_mask) {
		int len, last_i = 0;

		s = &per_cpu(nmi_print_seq, cpu);
		len = seq_buf_used(&s->seq);
		if (!len)
			continue;

		/* Print line by line. */
		for (i = 0; i < len; i++) {
			if (s->buffer[i] == '\n') {
				print_seq_line(s, last_i, i);
				last_i = i + 1;
			}
		}
		/* Check if there was a partial line. */
		if (last_i < len) {
			print_seq_line(s, last_i, len - 1);
			pr_cont("\n");
		}
	}

	clear_bit(0, &backtrace_flag);
	smp_mb__after_atomic();
	put_cpu();
}

/*
 * It is not safe to call printk() directly from NMI handlers.  It may be
 * fine when the NMI detected a lockup and we have no choice but to do so,
 * but sending an NMI to all other CPUs for a backtrace can also be
 * requested via sysrq-l, and we don't want that to lock up, which can
 * happen if the NMI interrupts a printk() in progress.
 *
 * Instead, we redirect vprintk() to this nmi_vprintk(), which writes the
 * content into a per-CPU seq_buf buffer.  Once the NMIs are all done, we
 * can safely dump the contents of each seq_buf via printk() from a
 * non-NMI context.
 */
static int nmi_vprintk(const char *fmt, va_list args)
{
	struct nmi_seq_buf *s = this_cpu_ptr(&nmi_print_seq);
	unsigned int len = seq_buf_used(&s->seq);

	seq_buf_vprintf(&s->seq, fmt, args);
	return seq_buf_used(&s->seq) - len;
}

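/*
 * Called from the architecture's NMI handler on each CPU.  If this CPU is a
 * backtrace target, dump its registers through the redirected printk and
 * clear it from backtrace_mask; returns true if the NMI was handled here.
 */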
bool nmi_cpu_backtrace(struct pt_regs *regs)
{
	int cpu = smp_processor_id();

	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
		printk_func_t printk_func_save = this_cpu_read(printk_func);

		/* Replace printk() to write into the NMI seq_buf */
		this_cpu_write(printk_func, nmi_vprintk);
		pr_warn("NMI backtrace for cpu %d\n", cpu);
		show_regs(regs);
		this_cpu_write(printk_func, printk_func_save);

		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
		return true;
	}

	return false;
}
NOKPROBE_SYMBOL(nmi_cpu_backtrace);
#endif
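/*
 * For illustration only: a minimal sketch of the architecture glue this
 * helper expects, modelled on the ARM wire-up.  raise_nmi() and
 * IPI_CPU_BACKTRACE here are assumptions of the sketch, not part of this
 * file; each architecture picks its own NMI/IPI delivery mechanism.
 *
 *	static void raise_nmi(cpumask_t *mask)
 *	{
 *		smp_cross_call(mask, IPI_CPU_BACKTRACE);
 *	}
 *
 *	void arch_trigger_all_cpu_backtrace(bool include_self)
 *	{
 *		nmi_trigger_all_cpu_backtrace(include_self, raise_nmi);
 *	}
 *
 * The arch NMI/IPI handler is then expected to call nmi_cpu_backtrace(regs)
 * on each targeted CPU to capture that CPU's registers and backtrace.
 */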