/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * this code detects hard lockups: incidents where a CPU
 * stops responding to anything except NMI.
 *
 * Note: Most of this code is borrowed heavily from softlockup.c,
 * so thanks to Ingo for the initial implementation.
 * Some chunks also taken from arch/x86/kernel/apic/nmi.c, thanks
 * to those contributors as well.
 */
14 | ||
15 | #include <linux/mm.h> | |
16 | #include <linux/cpu.h> | |
17 | #include <linux/nmi.h> | |
18 | #include <linux/init.h> | |
19 | #include <linux/delay.h> | |
20 | #include <linux/freezer.h> | |
21 | #include <linux/kthread.h> | |
22 | #include <linux/lockdep.h> | |
23 | #include <linux/notifier.h> | |
24 | #include <linux/module.h> | |
25 | #include <linux/sysctl.h> | |
26 | ||
27 | #include <asm/irq_regs.h> | |
28 | #include <linux/perf_event.h> | |
29 | ||
int watchdog_enabled = 1;
int __read_mostly softlockup_thresh = 60;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
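
/*
 * For example, booting with "nmi_watchdog=panic" makes a detected
 * hard lockup panic the machine, "nmi_watchdog=nopanic" only warns,
 * and "nmi_watchdog=0" disables the watchdog entirely (see the
 * parser above).
 */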
66 | ||
67 | unsigned int __read_mostly softlockup_panic = | |
68 | CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE; | |
69 | ||
70 | static int __init softlockup_panic_setup(char *str) | |
71 | { | |
72 | softlockup_panic = simple_strtoul(str, NULL, 0); | |
73 | ||
74 | return 1; | |
75 | } | |
76 | __setup("softlockup_panic=", softlockup_panic_setup); | |
77 | ||
78 | static int __init nowatchdog_setup(char *str) | |
79 | { | |
4135038a | 80 | watchdog_enabled = 0; |
58687acb DZ |
81 | return 1; |
82 | } | |
83 | __setup("nowatchdog", nowatchdog_setup); | |
84 | ||
85 | /* deprecated */ | |
86 | static int __init nosoftlockup_setup(char *str) | |
87 | { | |
4135038a | 88 | watchdog_enabled = 0; |
58687acb DZ |
89 | return 1; |
90 | } | |
91 | __setup("nosoftlockup", nosoftlockup_setup); | |
92 | /* */ | |
93 | ||
94 | ||
/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(int this_cpu)
{
        return cpu_clock(this_cpu) >> 30LL;  /* 2^30 ~= 10^9 */
}
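
/*
 * Worked example: cpu_clock() returning 60,000,000,000 ns (one real
 * minute) shifted right by 30 gives 55, since each unit is
 * 2^30 ns ~= 1.074 s.  The ~7% error is irrelevant for lockup
 * detection and avoids a 64-bit divide in a hot path.
 */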
104 | ||
105 | static unsigned long get_sample_period(void) | |
106 | { | |
107 | /* | |
108 | * convert softlockup_thresh from seconds to ns | |
109 | * the divide by 5 is to give hrtimer 5 chances to | |
110 | * increment before the hardlockup detector generates | |
111 | * a warning | |
112 | */ | |
113 | return softlockup_thresh / 5 * NSEC_PER_SEC; | |
114 | } | |
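
/*
 * With the default softlockup_thresh of 60 this yields a 12 second
 * hrtimer period, i.e. five timer ticks per threshold window.
 */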
115 | ||
116 | /* Commands for resetting the watchdog */ | |
117 | static void __touch_watchdog(void) | |
118 | { | |
26e09c6e | 119 | int this_cpu = smp_processor_id(); |
58687acb | 120 | |
909ea964 | 121 | __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu)); |
58687acb DZ |
122 | } |
123 | ||
332fbdbc | 124 | void touch_softlockup_watchdog(void) |
58687acb | 125 | { |
909ea964 | 126 | __this_cpu_write(watchdog_touch_ts, 0); |
58687acb | 127 | } |
0167c781 | 128 | EXPORT_SYMBOL(touch_softlockup_watchdog); |
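
/*
 * The zero written above is a sentinel: watchdog_timer_fn() treats a
 * zero timestamp as "just touched", reinitializes it and skips the
 * lockup check, so long-running code can call this to declare itself
 * alive.
 */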

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done locklessly
         * do we care if a 0 races with a timestamp?
         * all it means is the softlockup check starts one cycle later
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}
142 | ||
cafcd80d | 143 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
58687acb DZ |
144 | void touch_nmi_watchdog(void) |
145 | { | |
68d3f1d8 DZ |
146 | if (watchdog_enabled) { |
147 | unsigned cpu; | |
148 | ||
149 | for_each_present_cpu(cpu) { | |
150 | if (per_cpu(watchdog_nmi_touch, cpu) != true) | |
151 | per_cpu(watchdog_nmi_touch, cpu) = true; | |
152 | } | |
153 | } | |
332fbdbc | 154 | touch_softlockup_watchdog(); |
58687acb DZ |
155 | } |
156 | EXPORT_SYMBOL(touch_nmi_watchdog); | |
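
/*
 * The per-cpu flag set above is consumed in watchdog_overflow_callback():
 * the next NMI on each CPU clears it and skips one hard-lockup check,
 * letting callers that legitimately stall a CPU suppress false
 * positives.
 */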
157 | ||
cafcd80d DZ |
158 | #endif |
159 | ||
58687acb DZ |
160 | void touch_softlockup_watchdog_sync(void) |
161 | { | |
162 | __raw_get_cpu_var(softlockup_touch_sync) = true; | |
163 | __raw_get_cpu_var(watchdog_touch_ts) = 0; | |
164 | } | |
165 | ||
23637d47 | 166 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
58687acb | 167 | /* watchdog detector functions */ |
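/*
 * Called from NMI context: if the per-cpu hrtimer interrupt count has
 * not advanced since the previous NMI sample, timer interrupts are no
 * longer being serviced and the CPU is considered hard-locked.
 */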
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
#endif
179 | ||
26e09c6e | 180 | static int is_softlockup(unsigned long touch_ts) |
58687acb | 181 | { |
26e09c6e | 182 | unsigned long now = get_timestamp(smp_processor_id()); |
58687acb DZ |
183 | |
184 | /* Warn about unreasonable delays: */ | |
185 | if (time_after(now, touch_ts + softlockup_thresh)) | |
186 | return now - touch_ts; | |
187 | ||
188 | return 0; | |
189 | } | |
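
/*
 * Example: with touch_ts = 100, softlockup_thresh = 60 and now = 165,
 * time_after(165, 160) is true and a 65 second stall is reported;
 * any 'now' up to 160 returns 0.
 */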
190 | ||
23637d47 | 191 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
58687acb DZ |
192 | static struct perf_event_attr wd_hw_attr = { |
193 | .type = PERF_TYPE_HARDWARE, | |
194 | .config = PERF_COUNT_HW_CPU_CYCLES, | |
195 | .size = sizeof(struct perf_event_attr), | |
196 | .pinned = 1, | |
197 | .disabled = 1, | |
198 | }; | |
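
/*
 * The event is pinned and created disabled; watchdog_nmi_enable()
 * fills in ->sample_period before use, sized so that several hrtimer
 * ticks should land between successive counter overflows (see the
 * NMI callback below).
 */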
199 | ||
200 | /* Callback function for perf event subsystem */ | |
277b1998 | 201 | static void watchdog_overflow_callback(struct perf_event *event, int nmi, |
58687acb DZ |
202 | struct perf_sample_data *data, |
203 | struct pt_regs *regs) | |
204 | { | |
c6db67cd PZ |
205 | /* Ensure the watchdog never gets throttled */ |
206 | event->hw.interrupts = 0; | |
207 | ||
909ea964 CL |
208 | if (__this_cpu_read(watchdog_nmi_touch) == true) { |
209 | __this_cpu_write(watchdog_nmi_touch, false); | |
58687acb DZ |
210 | return; |
211 | } | |
212 | ||
213 | /* check for a hardlockup | |
214 | * This is done by making sure our timer interrupt | |
215 | * is incrementing. The timer interrupt should have | |
216 | * fired multiple times before we overflow'd. If it hasn't | |
217 | * then this is a good indication the cpu is stuck | |
218 | */ | |
26e09c6e DZ |
219 | if (is_hardlockup()) { |
220 | int this_cpu = smp_processor_id(); | |
221 | ||
58687acb | 222 | /* only print hardlockups once */ |
909ea964 | 223 | if (__this_cpu_read(hard_watchdog_warn) == true) |
58687acb DZ |
224 | return; |
225 | ||
226 | if (hardlockup_panic) | |
227 | panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu); | |
228 | else | |
229 | WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); | |
230 | ||
909ea964 | 231 | __this_cpu_write(hard_watchdog_warn, true); |
58687acb DZ |
232 | return; |
233 | } | |
234 | ||
909ea964 | 235 | __this_cpu_write(hard_watchdog_warn, false); |
58687acb DZ |
236 | return; |
237 | } | |
238 | static void watchdog_interrupt_count(void) | |
239 | { | |
909ea964 | 240 | __this_cpu_inc(hrtimer_interrupts); |
58687acb DZ |
241 | } |
242 | #else | |
243 | static inline void watchdog_interrupt_count(void) { return; } | |
23637d47 | 244 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ |

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled.  The task touches the watchdog to
         * indicate it is getting cpu time.  If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}


/*
 * The watchdog thread - touches the timestamp.
 */
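/*
 * It runs SCHED_FIFO at MAX_RT_PRIO-1, the highest realtime priority,
 * so it preempts ordinary tasks; if even this thread cannot run for
 * softlockup_thresh seconds, the CPU really is monopolized.
 */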
static int watchdog(void *unused)
{
        static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        sched_setscheduler(current, SCHED_FIFO, &param);

        /* initialize timestamp */
        __touch_watchdog();

        /* kick off the timer for the hardlockup detector */
        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(get_sample_period()),
                      HRTIMER_MODE_REL_PINNED);

        set_current_state(TASK_INTERRUPTIBLE);
        /*
         * Run briefly once per second to reset the softlockup timestamp.
         * If this gets delayed for more than 60 seconds then the
         * debug-printout triggers in watchdog_timer_fn().
         */
        while (!kthread_should_stop()) {
                __touch_watchdog();
                schedule();

                if (kthread_should_stop())
                        break;

                set_current_state(TASK_INTERRUPTIBLE);
        }
        __set_current_state(TASK_RUNNING);

        return 0;
}
345 | ||
23637d47 | 346 | #ifdef CONFIG_HARDLOCKUP_DETECTOR |
58687acb DZ |
347 | static int watchdog_nmi_enable(int cpu) |
348 | { | |
349 | struct perf_event_attr *wd_attr; | |
350 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | |
351 | ||
352 | /* is it already setup and enabled? */ | |
353 | if (event && event->state > PERF_EVENT_STATE_OFF) | |
354 | goto out; | |
355 | ||
356 | /* it is setup but not enabled */ | |
357 | if (event != NULL) | |
358 | goto out_enable; | |
359 | ||
360 | /* Try to register using hardware perf events */ | |
361 | wd_attr = &wd_hw_attr; | |
362 | wd_attr->sample_period = hw_nmi_get_sample_period(); | |
38a81da2 | 363 | event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback); |
58687acb DZ |
364 | if (!IS_ERR(event)) { |
365 | printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); | |
366 | goto out_save; | |
367 | } | |
368 | ||
5651f7f4 DZ |
369 | |
370 | /* vary the KERN level based on the returned errno */ | |
371 | if (PTR_ERR(event) == -EOPNOTSUPP) | |
372 | printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu); | |
373 | else if (PTR_ERR(event) == -ENOENT) | |
374 | printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu); | |
375 | else | |
376 | printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event)); | |
eac24335 | 377 | return PTR_ERR(event); |
58687acb DZ |
378 | |
379 | /* success path */ | |
380 | out_save: | |
381 | per_cpu(watchdog_ev, cpu) = event; | |
382 | out_enable: | |
383 | perf_event_enable(per_cpu(watchdog_ev, cpu)); | |
384 | out: | |
385 | return 0; | |
386 | } | |
387 | ||
388 | static void watchdog_nmi_disable(int cpu) | |
389 | { | |
390 | struct perf_event *event = per_cpu(watchdog_ev, cpu); | |
391 | ||
392 | if (event) { | |
393 | perf_event_disable(event); | |
394 | per_cpu(watchdog_ev, cpu) = NULL; | |
395 | ||
396 | /* should be in cleanup, but blocks oprofile */ | |
397 | perf_event_release_kernel(event); | |
398 | } | |
399 | return; | |
400 | } | |
401 | #else | |
402 | static int watchdog_nmi_enable(int cpu) { return 0; } | |
403 | static void watchdog_nmi_disable(int cpu) { return; } | |
23637d47 | 404 | #endif /* CONFIG_HARDLOCKUP_DETECTOR */ |

/* prepare/enable/disable routines */
static int watchdog_prepare_cpu(int cpu)
{
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        WARN_ON(per_cpu(softlockup_watchdog, cpu));
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        return 0;
}

static int watchdog_enable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        int err = 0;

        /* enable the perf event */
        err = watchdog_nmi_enable(cpu);

        /* Regardless of err above, fall through and start softlockup */

        /* create the watchdog thread */
        if (!p) {
                p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
                if (IS_ERR(p)) {
                        printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
                        if (!err)
                                /* if hardlockup hasn't already set this */
                                err = PTR_ERR(p);
                        goto out;
                }
                kthread_bind(p, cpu);
                per_cpu(watchdog_touch_ts, cpu) = 0;
                per_cpu(softlockup_watchdog, cpu) = p;
                wake_up_process(p);
        }

out:
        return err;
}

static void watchdog_disable(int cpu)
{
        struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
        struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);

        /*
         * cancel the timer first to stop incrementing the stats
         * and waking up the kthread
         */
        hrtimer_cancel(hrtimer);

        /* disable the perf event */
        watchdog_nmi_disable(cpu);

        /* stop the watchdog thread */
        if (p) {
                per_cpu(softlockup_watchdog, cpu) = NULL;
                kthread_stop(p);
        }
}
468 | ||
469 | static void watchdog_enable_all_cpus(void) | |
470 | { | |
471 | int cpu; | |
39735766 MS |
472 | |
473 | watchdog_enabled = 0; | |
58687acb DZ |
474 | |
475 | for_each_online_cpu(cpu) | |
39735766 MS |
476 | if (!watchdog_enable(cpu)) |
477 | /* if any cpu succeeds, watchdog is considered | |
478 | enabled for the system */ | |
479 | watchdog_enabled = 1; | |
58687acb | 480 | |
39735766 | 481 | if (!watchdog_enabled) |
58687acb DZ |
482 | printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n"); |
483 | ||
484 | } | |
485 | ||
486 | static void watchdog_disable_all_cpus(void) | |
487 | { | |
488 | int cpu; | |
489 | ||
490 | for_each_online_cpu(cpu) | |
491 | watchdog_disable(cpu); | |
492 | ||
493 | /* if all watchdogs are disabled, then they are disabled for the system */ | |
494 | watchdog_enabled = 0; | |
495 | } | |


/* sysctl functions */
#ifdef CONFIG_SYSCTL
/*
 * proc handler for /proc/sys/kernel/nmi_watchdog
 */

int proc_dowatchdog_enabled(struct ctl_table *table, int write,
                        void __user *buffer, size_t *length, loff_t *ppos)
{
        proc_dointvec(table, write, buffer, length, ppos);

        if (write) {
                if (watchdog_enabled)
                        watchdog_enable_all_cpus();
                else
                        watchdog_disable_all_cpus();
        }
        return 0;
}
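
/*
 * Example: "echo 0 > /proc/sys/kernel/nmi_watchdog" reaches this
 * handler with write != 0; proc_dointvec() has already cleared
 * watchdog_enabled, so the watchdogs are torn down on every CPU.
 */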

int proc_dowatchdog_thresh(struct ctl_table *table, int write,
                           void __user *buffer,
                           size_t *lenp, loff_t *ppos)
{
        return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
}
#endif /* CONFIG_SYSCTL */


/*
 * Create/destroy watchdog threads as CPUs come and go:
 */
static int __cpuinit
cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
{
        int hotcpu = (unsigned long)hcpu;
        int err = 0;

        switch (action) {
        case CPU_UP_PREPARE:
        case CPU_UP_PREPARE_FROZEN:
                err = watchdog_prepare_cpu(hotcpu);
                break;
        case CPU_ONLINE:
        case CPU_ONLINE_FROZEN:
                if (watchdog_enabled)
                        err = watchdog_enable(hotcpu);
                break;
#ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
                watchdog_disable(hotcpu);
                break;
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                watchdog_disable(hotcpu);
                break;
#endif /* CONFIG_HOTPLUG_CPU */
        }

        /*
         * hardlockup and softlockup are not important enough
         * to block cpu bring up.  Just always succeed and
         * rely on printk output to flag problems.
         */
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cpu_nfb = {
        .notifier_call = cpu_callback
};

void __init lockup_detector_init(void)
{
        void *cpu = (void *)(long)smp_processor_id();
        int err;

        err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
        WARN_ON(notifier_to_errno(err));

        cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
        register_cpu_notifier(&cpu_nfb);

        return;
}
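
/*
 * The boot CPU is already online when this runs, so its UP_PREPARE and
 * ONLINE steps are invoked by hand above; the registered notifier then
 * covers every CPU hotplugged later.
 */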