/*
 * Detect hard and soft lockups on a system
 *
 * started by Don Zickus, Copyright (C) 2010 Red Hat, Inc.
 *
 * Note: Most of this code is borrowed heavily from the original softlockup
 * detector, so thanks to Ingo for the initial implementation.
 * Some chunks also taken from the old x86-specific nmi watchdog code, thanks
 * to those contributors as well.
 */

#define pr_fmt(fmt) "NMI watchdog: " fmt

#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/notifier.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/smpboot.h>
#include <linux/sched/rt.h>

#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
#include <linux/perf_event.h>

int watchdog_enabled = 1;
int __read_mostly watchdog_thresh = 10;
static int __read_mostly watchdog_disabled;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, soft_lockup_hrtimer_cnt);
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
#endif

/* boot commands */
/*
 * Should we panic when a soft-lockup or hard-lockup occurs:
 */
#ifdef CONFIG_HARDLOCKUP_DETECTOR
static int hardlockup_panic =
                        CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;

static int __init hardlockup_panic_setup(char *str)
{
        if (!strncmp(str, "panic", 5))
                hardlockup_panic = 1;
        else if (!strncmp(str, "nopanic", 7))
                hardlockup_panic = 0;
        else if (!strncmp(str, "0", 1))
                watchdog_enabled = 0;
        return 1;
}
__setup("nmi_watchdog=", hardlockup_panic_setup);
#endif
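
/*
 * Example boot-parameter usage (a sketch based directly on the parser
 * above; how the string reaches the kernel command line depends on the
 * bootloader):
 *
 *   nmi_watchdog=panic    - panic the machine on a hard lockup
 *   nmi_watchdog=nopanic  - warn only (the default unless
 *                           CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is set)
 *   nmi_watchdog=0        - disable the watchdog entirely
 */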

unsigned int __read_mostly softlockup_panic =
                        CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE;

static int __init softlockup_panic_setup(char *str)
{
        softlockup_panic = simple_strtoul(str, NULL, 0);

        return 1;
}
__setup("softlockup_panic=", softlockup_panic_setup);

static int __init nowatchdog_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nowatchdog", nowatchdog_setup);

/* deprecated */
static int __init nosoftlockup_setup(char *str)
{
        watchdog_enabled = 0;
        return 1;
}
__setup("nosoftlockup", nosoftlockup_setup);
/*  */

/*
 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
 * lockups can have false positives under extreme conditions. So we generally
 * want a higher threshold for soft lockups than for hard lockups. So we
 * couple the thresholds with a factor: we make the soft threshold twice
 * the hard-lockup threshold.
 */
static int get_softlockup_thresh(void)
{
        return watchdog_thresh * 2;
}
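
/*
 * Worked example with the defaults above: watchdog_thresh = 10, so a hard
 * lockup is reported after roughly 10 seconds of a stuck CPU, while a soft
 * lockup needs watchdog_thresh * 2 = 20 seconds without the watchdog task
 * being scheduled.
 */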

/*
 * Returns seconds, approximately.  We don't need nanosecond
 * resolution, and we don't need to waste time with a big divide when
 * 2^30ns == 1.074s.
 */
static unsigned long get_timestamp(void)
{
        return local_clock() >> 30LL;  /* 2^30 ~= 10^9 */
}
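
/*
 * A quick sanity check of the shift trick: local_clock() returns
 * nanoseconds, and 2^30 = 1073741824 ~= 10^9, so ">> 30" divides by about
 * 1.07e9 instead of 1e9. The resulting "seconds" run roughly 7% slow, e.g.
 * 60 real seconds read as ~55 ticks - close enough for multi-second lockup
 * thresholds and much cheaper than a 64-bit divide.
 */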

static void set_sample_period(void)
{
        /*
         * convert watchdog_thresh from seconds to ns
         * the divide by 5 is to give hrtimer several chances (two
         * or three with the current relation between the soft
         * and hard thresholds) to increment before the
         * hardlockup detector generates a warning
         */
        sample_period = get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5);
}
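
/*
 * With the defaults this works out to: get_softlockup_thresh() = 20s, so
 * sample_period = 20 * (10^9 / 5) ns = 4 * 10^9 ns = 4 seconds. The hrtimer
 * then fires five times per 20-second soft-lockup window and two to three
 * times per 10-second hard-lockup window, which is what the comment above
 * refers to.
 */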

/* Commands for resetting the watchdog */
static void __touch_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, get_timestamp());
}

void touch_softlockup_watchdog(void)
{
        __this_cpu_write(watchdog_touch_ts, 0);
}
EXPORT_SYMBOL(touch_softlockup_watchdog);

void touch_all_softlockup_watchdogs(void)
{
        int cpu;

        /*
         * this is done lockless
         * do we care if a 0 races with a timestamp?
         * all it means is the softlock check starts one cycle later
         */
        for_each_online_cpu(cpu)
                per_cpu(watchdog_touch_ts, cpu) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
void touch_nmi_watchdog(void)
{
        if (watchdog_enabled) {
                unsigned cpu;

                for_each_present_cpu(cpu) {
                        if (per_cpu(watchdog_nmi_touch, cpu) != true)
                                per_cpu(watchdog_nmi_touch, cpu) = true;
                }
        }
        touch_softlockup_watchdog();
}
EXPORT_SYMBOL(touch_nmi_watchdog);

#endif
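
/*
 * Usage note (a sketch, not code from this file): long-running sections
 * that legitimately monopolize a CPU - e.g. paths dumping large amounts of
 * diagnostic output - call touch_nmi_watchdog() in their loop so neither
 * the NMI-based hard-lockup check nor the softlockup check fires:
 *
 *      while (more_work_to_dump()) {   // more_work_to_dump() is hypothetical
 *              dump_some_state();      // dump_some_state() is hypothetical
 *              touch_nmi_watchdog();   // resets both detectors, see above
 *      }
 */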

void touch_softlockup_watchdog_sync(void)
{
        __raw_get_cpu_var(softlockup_touch_sync) = true;
        __raw_get_cpu_var(watchdog_touch_ts) = 0;
}

#ifdef CONFIG_HARDLOCKUP_DETECTOR
/* watchdog detector functions */
static int is_hardlockup(void)
{
        unsigned long hrint = __this_cpu_read(hrtimer_interrupts);

        if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
                return 1;

        __this_cpu_write(hrtimer_interrupts_saved, hrint);
        return 0;
}
#endif
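
/*
 * To illustrate the check above: the hrtimer increments hrtimer_interrupts
 * several times between consecutive PMU NMIs (the NMI period is roughly
 * watchdog_thresh seconds, the timer period is sample_period). If two NMIs
 * in a row observe the same hrtimer_interrupts value, timer interrupts have
 * not run for a full NMI period - interrupts are almost certainly stuck off
 * and the CPU is declared hard-locked.
 */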

static int is_softlockup(unsigned long touch_ts)
{
        unsigned long now = get_timestamp();

        /* Warn about unreasonable delays: */
        if (time_after(now, touch_ts + get_softlockup_thresh()))
                return now - touch_ts;

        return 0;
}
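
/*
 * For example: if the watchdog task last ran at touch_ts = 100 (in the
 * ~1.07s units of get_timestamp()) and now = 125, then with the default
 * 20-second threshold time_after(125, 120) is true and is_softlockup()
 * returns a duration of 25, which watchdog_timer_fn() reports in its
 * "stuck for 25s!" warning.
 */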

#ifdef CONFIG_HARDLOCKUP_DETECTOR

static struct perf_event_attr wd_hw_attr = {
        .type           = PERF_TYPE_HARDWARE,
        .config         = PERF_COUNT_HW_CPU_CYCLES,
        .size           = sizeof(struct perf_event_attr),
        .pinned         = 1,
        .disabled       = 1,
};
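
/*
 * The attribute above counts raw CPU cycles; .sample_period is filled in
 * later by watchdog_nmi_enable() via hw_nmi_get_sample_period(), which (as
 * used here) converts watchdog_thresh into a cycle count so the counter
 * overflows - and the NMI callback below runs - about once per hard-lockup
 * window, even if the CPU never takes a regular interrupt.
 */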

/* Callback function for perf event subsystem */
static void watchdog_overflow_callback(struct perf_event *event,
                 struct perf_sample_data *data,
                 struct pt_regs *regs)
{
        /* Ensure the watchdog never gets throttled */
        event->hw.interrupts = 0;

        if (__this_cpu_read(watchdog_nmi_touch) == true) {
                __this_cpu_write(watchdog_nmi_touch, false);
                return;
        }

        /* check for a hardlockup
         * This is done by making sure our timer interrupt
         * is incrementing. The timer interrupt should have
         * fired multiple times before we overflowed. If it hasn't
         * then this is a good indication the cpu is stuck
         */
        if (is_hardlockup()) {
                int this_cpu = smp_processor_id();

                /* only print hardlockups once */
                if (__this_cpu_read(hard_watchdog_warn) == true)
                        return;

                if (hardlockup_panic)
                        panic("Watchdog detected hard LOCKUP on cpu %d", this_cpu);
                else
                        WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);

                __this_cpu_write(hard_watchdog_warn, true);
                return;
        }

        __this_cpu_write(hard_watchdog_warn, false);
        return;
}
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

static void watchdog_interrupt_count(void)
{
        __this_cpu_inc(hrtimer_interrupts);
}

static int watchdog_nmi_enable(unsigned int cpu);
static void watchdog_nmi_disable(unsigned int cpu);

/* watchdog kicker functions */
static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
{
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;

        /* kick the hardlockup detector */
        watchdog_interrupt_count();

        /* kick the softlockup detector */
        wake_up_process(__this_cpu_read(softlockup_watchdog));

        /* .. and repeat */
        hrtimer_forward_now(hrtimer, ns_to_ktime(sample_period));

        if (touch_ts == 0) {
                if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
                        /*
                         * If the time stamp was touched atomically
                         * make sure the scheduler tick is up to date.
                         */
                        __this_cpu_write(softlockup_touch_sync, false);
                        sched_clock_tick();
                }

                /* Clear the guest paused flag on watchdog reset */
                kvm_check_and_clear_guest_paused();
                __touch_watchdog();
                return HRTIMER_RESTART;
        }

        /* check for a softlockup
         * This is done by making sure a high priority task is
         * being scheduled.  The task touches the watchdog to
         * indicate it is getting cpu time.  If it hasn't then
         * this is a good indication some task is hogging the cpu
         */
        duration = is_softlockup(touch_ts);
        if (unlikely(duration)) {
                /*
                 * If a virtual machine is stopped by the host it can look to
                 * the watchdog like a soft lockup, check to see if the host
                 * stopped the vm before we issue the warning
                 */
                if (kvm_check_and_clear_guest_paused())
                        return HRTIMER_RESTART;

                /* only warn once */
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;

                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
                print_modules();
                print_irqtrace_events(current);
                if (regs)
                        show_regs(regs);
                else
                        dump_stack();

                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
        } else
                __this_cpu_write(soft_watchdog_warn, false);

        return HRTIMER_RESTART;
}
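
/*
 * Putting the pieces together, a healthy 4-second cycle on one CPU looks
 * roughly like this (default thresholds assumed):
 *
 *   hrtimer fires -> hrtimer_interrupts++ (feeds the hardlockup check)
 *                 -> wake_up_process(watchdog/N) -> watchdog() runs and
 *                    __touch_watchdog() refreshes watchdog_touch_ts
 *
 * A soft lockup means the wakeup happens but the SCHED_FIFO watchdog task
 * never gets the CPU, so touch_ts goes stale; a hard lockup means even the
 * hrtimer stops firing, which only the perf NMI path can still notice.
 */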

static void watchdog_set_prio(unsigned int policy, unsigned int prio)
{
        struct sched_param param = { .sched_priority = prio };

        sched_setscheduler(current, policy, &param);
}

static void watchdog_enable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        /* kick off the timer for the hardlockup detector */
        hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        hrtimer->function = watchdog_timer_fn;

        if (!watchdog_enabled) {
                kthread_park(current);
                return;
        }

        /* Enable the perf event */
        watchdog_nmi_enable(cpu);

        /* done here because hrtimer_start can only pin to smp_processor_id() */
        hrtimer_start(hrtimer, ns_to_ktime(sample_period),
                      HRTIMER_MODE_REL_PINNED);

        /* initialize timestamp */
        watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
        __touch_watchdog();
}

static void watchdog_disable(unsigned int cpu)
{
        struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);

        watchdog_set_prio(SCHED_NORMAL, 0);
        hrtimer_cancel(hrtimer);
        /* disable the perf event */
        watchdog_nmi_disable(cpu);
}

static int watchdog_should_run(unsigned int cpu)
{
        return __this_cpu_read(hrtimer_interrupts) !=
                __this_cpu_read(soft_lockup_hrtimer_cnt);
}

/*
 * The watchdog thread function - touches the timestamp.
 *
 * It only runs once every sample_period seconds (4 seconds by
 * default) to reset the softlockup timestamp.  If this gets delayed
 * for more than 2*watchdog_thresh seconds then the debug-printout
 * triggers in watchdog_timer_fn().
 */
static void watchdog(unsigned int cpu)
{
        __this_cpu_write(soft_lockup_hrtimer_cnt,
                         __this_cpu_read(hrtimer_interrupts));
        __touch_watchdog();
}
#ifdef CONFIG_HARDLOCKUP_DETECTOR
/*
 * People like the simple clean cpu node info on boot.
 * Reduce the watchdog noise by only printing messages
 * that are different from what cpu0 displayed.
 */
static unsigned long cpu0_err;

static int watchdog_nmi_enable(unsigned int cpu)
{
        struct perf_event_attr *wd_attr;
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        /* is it already setup and enabled? */
        if (event && event->state > PERF_EVENT_STATE_OFF)
                goto out;

        /* it is setup but not enabled */
        if (event != NULL)
                goto out_enable;

        wd_attr = &wd_hw_attr;
        wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

        /* Try to register using hardware perf events */
        event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

        /* save cpu0 error for future comparison */
        if (cpu == 0 && IS_ERR(event))
                cpu0_err = PTR_ERR(event);

        if (!IS_ERR(event)) {
                /* only print for cpu0 or different from cpu0 */
                if (cpu == 0 || cpu0_err)
                        pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
                goto out_save;
        }

        /* skip displaying the same error again */
        if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
                return PTR_ERR(event);

        /* vary the KERN level based on the returned errno */
        if (PTR_ERR(event) == -EOPNOTSUPP)
                pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
        else if (PTR_ERR(event) == -ENOENT)
                pr_warning("disabled (cpu%i): hardware events not enabled\n",
                           cpu);
        else
                pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
                       cpu, PTR_ERR(event));
        return PTR_ERR(event);

        /* success path */
out_save:
        per_cpu(watchdog_ev, cpu) = event;
out_enable:
        perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
        return 0;
}

static void watchdog_nmi_disable(unsigned int cpu)
{
        struct perf_event *event = per_cpu(watchdog_ev, cpu);

        if (event) {
                perf_event_disable(event);
                per_cpu(watchdog_ev, cpu) = NULL;

                /* should be in cleanup, but blocks oprofile */
                perf_event_release_kernel(event);
        }
        return;
}
#else
static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
static void watchdog_nmi_disable(unsigned int cpu) { return; }
#endif /* CONFIG_HARDLOCKUP_DETECTOR */

/* prepare/enable/disable routines */
/* sysctl functions */
#ifdef CONFIG_SYSCTL
static void watchdog_enable_all_cpus(void)
{
        unsigned int cpu;

        if (watchdog_disabled) {
                watchdog_disabled = 0;
                for_each_online_cpu(cpu)
                        kthread_unpark(per_cpu(softlockup_watchdog, cpu));
        }
}

static void watchdog_disable_all_cpus(void)
{
        unsigned int cpu;

        if (!watchdog_disabled) {
                watchdog_disabled = 1;
                for_each_online_cpu(cpu)
                        kthread_park(per_cpu(softlockup_watchdog, cpu));
        }
}

/*
 * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
 */

int proc_dowatchdog(struct ctl_table *table, int write,
                    void __user *buffer, size_t *lenp, loff_t *ppos)
{
        int ret;

        if (watchdog_disabled < 0)
                return -ENODEV;

        ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
        if (ret || !write)
                return ret;

        set_sample_period();
        /*
         * Watchdog threads shouldn't be enabled if they are
         * disabled. The 'watchdog_disabled' variable check in
         * watchdog_*_all_cpus() function takes care of this.
         */
        if (watchdog_enabled && watchdog_thresh)
                watchdog_enable_all_cpus();
        else
                watchdog_disable_all_cpus();

        return ret;
}
#endif /* CONFIG_SYSCTL */
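
/*
 * Example of exercising the handler above from userspace (a sketch; the
 * sysctl file names follow the proc comment further up):
 *
 *   # echo 0  > /proc/sys/kernel/nmi_watchdog     - park the watchdog threads
 *   # echo 1  > /proc/sys/kernel/nmi_watchdog     - unpark them again
 *   # echo 20 > /proc/sys/kernel/watchdog_thresh  - hard window 20s, soft 40s,
 *                                                   sample_period becomes 8s
 *
 * Each write lands in proc_dowatchdog(), which recomputes sample_period and
 * then parks or unparks the per-CPU threads as needed.
 */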

static struct smp_hotplug_thread watchdog_threads = {
        .store                  = &softlockup_watchdog,
        .thread_should_run      = watchdog_should_run,
        .thread_fn              = watchdog,
        .thread_comm            = "watchdog/%u",
        .setup                  = watchdog_enable,
        .park                   = watchdog_disable,
        .unpark                 = watchdog_enable,
};
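
/*
 * Design note: registering this descriptor with
 * smpboot_register_percpu_thread() lets the core smpboot code create one
 * "watchdog/N" kthread per CPU and handle hotplug for us - when a CPU goes
 * offline its thread is parked (.park = watchdog_disable stops the hrtimer
 * and the perf event), and when it comes back the thread is unparked
 * (.unpark = watchdog_enable), so this file needs no hotplug notifier of
 * its own.
 */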

void __init lockup_detector_init(void)
{
        set_sample_period();
        if (smpboot_register_percpu_thread(&watchdog_threads)) {
                pr_err("Failed to create watchdog threads, disabled\n");
                watchdog_disabled = -ENODEV;
        }
}