Diffstat (limited to 'kernel/watchdog.c')
 -rw-r--r--  kernel/watchdog.c | 185
 1 file changed, 99 insertions(+), 86 deletions(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7f9c3c52ecc1..3d0c56ad4792 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -27,8 +27,8 @@
 #include <asm/irq_regs.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled;
-int __read_mostly softlockup_thresh = 60;
+int watchdog_enabled = 1;
+int __read_mostly watchdog_thresh = 10;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
 static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog);
@@ -43,21 +43,22 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int __read_mostly did_panic;
-static int __initdata no_watchdog;
-
-
 /* boot commands */
 /*
  * Should we panic when a soft-lockup or hard-lockup occurs:
  */
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
-static int hardlockup_panic;
+static int hardlockup_panic =
+			CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE;
 
 static int __init hardlockup_panic_setup(char *str)
 {
 	if (!strncmp(str, "panic", 5))
 		hardlockup_panic = 1;
+	else if (!strncmp(str, "nopanic", 7))
+		hardlockup_panic = 0;
+	else if (!strncmp(str, "0", 1))
+		watchdog_enabled = 0;
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
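After this change the nmi_watchdog= boot parameter accepts three values instead of one. The strings below come straight from the strncmp() calls in the hunk; the surrounding command lines are only illustrative:

	nmi_watchdog=panic	# panic the machine when a hard lockup is detected
	nmi_watchdog=nopanic	# warn about hard lockups but keep running
	nmi_watchdog=0		# clear watchdog_enabled, disabling detection

Since "0" clears the shared watchdog_enabled flag, it now switches off the soft-lockup detector too, in line with the nowatchdog parameter handled just below.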
@@ -76,7 +77,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -84,12 +85,23 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-	no_watchdog = 1;
+	watchdog_enabled = 0;
 	return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
 /*  */
 
+/*
+ * Hard-lockup warnings should be triggered after just a few seconds. Soft-
+ * lockups can have false positives under extreme conditions. So we generally
+ * want a higher threshold for soft lockups than for hard lockups. So we couple
+ * the thresholds with a factor: we make the soft threshold twice the amount of
+ * time the hard threshold is.
+ */
+static int get_softlockup_thresh(void)
+{
+	return watchdog_thresh * 2;
+}
 
 /*
  * Returns seconds, approximately. We don't need nanosecond
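Concretely: with the new default watchdog_thresh = 10 from the first hunk, the hard-lockup detector works against a 10 second budget, while get_softlockup_thresh() gives the soft-lockup check 20 seconds. This replaces the old, independently tuned softlockup_thresh default of 60.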
@@ -104,12 +116,12 @@ static unsigned long get_timestamp(int this_cpu)
 static unsigned long get_sample_period(void)
 {
 	/*
-	 * convert softlockup_thresh from seconds to ns
+	 * convert watchdog_thresh from seconds to ns
 	 * the divide by 5 is to give hrtimer 5 chances to
 	 * increment before the hardlockup detector generates
 	 * a warning
 	 */
-	return softlockup_thresh / 5 * NSEC_PER_SEC;
+	return get_softlockup_thresh() * (NSEC_PER_SEC / 5);
 }
 
 /* Commands for resetting the watchdog */
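The reordering in the return statement is not just cosmetic. NSEC_PER_SEC divides evenly by 5 while the threshold may not, so dividing the constant first avoids the truncation the old operand order suffered for thresholds that are not multiples of 5. A minimal userspace sketch of the difference, using an illustrative threshold of 11 seconds (not a value from the patch):

	#include <stdio.h>

	#define NSEC_PER_SEC 1000000000LL	/* matches the kernel's definition */

	int main(void)
	{
		long long thresh = 11;	/* hypothetical threshold, not divisible by 5 */

		/* old operand order: 11 / 5 truncates to 2 before the multiply */
		long long period_old = thresh / 5 * NSEC_PER_SEC;	/* 2000000000 ns */

		/* new operand order: the constant divides exactly */
		long long period_new = thresh * (NSEC_PER_SEC / 5);	/* 2200000000 ns */

		printf("old: %lld ns, new: %lld ns\n", period_old, period_new);
		return 0;
	}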
@@ -117,12 +129,12 @@ static void __touch_watchdog(void)
 {
 	int this_cpu = smp_processor_id();
 
-	__get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu);
+	__this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu));
 }
 
 void touch_softlockup_watchdog(void)
 {
-	__raw_get_cpu_var(watchdog_touch_ts) = 0;
+	__this_cpu_write(watchdog_touch_ts, 0);
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
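This hunk and several that follow convert open-coded per-cpu accesses to the this_cpu accessor family. The mapping applied throughout the patch is mechanical:

	__get_cpu_var(x)		->	__this_cpu_read(x)
	__get_cpu_var(x) = v		->	__this_cpu_write(x, v)
	__get_cpu_var(x)++		->	__this_cpu_inc(x)

Beyond readability, the this_cpu operations can compile down to single segment-prefixed instructions on x86, so the per-cpu address never has to be computed as a separate step.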
@@ -166,12 +178,12 @@ void touch_softlockup_watchdog_sync(void)
 /* watchdog detector functions */
 static int is_hardlockup(void)
 {
-	unsigned long hrint = __get_cpu_var(hrtimer_interrupts);
+	unsigned long hrint = __this_cpu_read(hrtimer_interrupts);
 
-	if (__get_cpu_var(hrtimer_interrupts_saved) == hrint)
+	if (__this_cpu_read(hrtimer_interrupts_saved) == hrint)
 		return 1;
 
-	__get_cpu_var(hrtimer_interrupts_saved) = hrint;
+	__this_cpu_write(hrtimer_interrupts_saved, hrint);
 	return 0;
 }
 #endif
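This is the heart of the hard-lockup check: the hrtimer handler further down increments hrtimer_interrupts on every tick, and the perf NMI callback calls is_hardlockup() to see whether that count has moved since the previous NMI. If the count is frozen, interrupts have been off long enough that the timer could not fire between two NMIs, and the CPU is declared hard-locked.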
@@ -181,24 +193,12 @@ static int is_softlockup(unsigned long touch_ts)
 	unsigned long now = get_timestamp(smp_processor_id());
 
 	/* Warn about unreasonable delays: */
-	if (time_after(now, touch_ts + softlockup_thresh))
+	if (time_after(now, touch_ts + get_softlockup_thresh()))
 		return now - touch_ts;
 
 	return 0;
 }
 
-static int
-watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr)
-{
-	did_panic = 1;
-
-	return NOTIFY_DONE;
-}
-
-static struct notifier_block panic_block = {
-	.notifier_call = watchdog_panic,
-};
-
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 static struct perf_event_attr wd_hw_attr = {
 	.type		= PERF_TYPE_HARDWARE,
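The deleted panic notifier only ever set did_panic, and no remaining code reads that flag (the patch can drop the variable's declaration without breaking the build), so the hook and its registration, removed in the final hunk, were dead weight.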
@@ -209,15 +209,15 @@ static struct perf_event_attr wd_hw_attr = {
 };
 
 /* Callback function for perf event subsystem */
-void watchdog_overflow_callback(struct perf_event *event, int nmi,
+static void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		 struct perf_sample_data *data,
 		 struct pt_regs *regs)
 {
 	/* Ensure the watchdog never gets throttled */
 	event->hw.interrupts = 0;
 
-	if (__get_cpu_var(watchdog_nmi_touch) == true) {
-		__get_cpu_var(watchdog_nmi_touch) = false;
+	if (__this_cpu_read(watchdog_nmi_touch) == true) {
+		__this_cpu_write(watchdog_nmi_touch, false);
 		return;
 	}
 
@@ -231,7 +231,7 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		int this_cpu = smp_processor_id();
 
 		/* only print hardlockups once */
-		if (__get_cpu_var(hard_watchdog_warn) == true)
+		if (__this_cpu_read(hard_watchdog_warn) == true)
 			return;
 
 		if (hardlockup_panic)
@@ -239,16 +239,16 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
 		else
 			WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu);
 
-		__get_cpu_var(hard_watchdog_warn) = true;
+		__this_cpu_write(hard_watchdog_warn, true);
 		return;
 	}
 
-	__get_cpu_var(hard_watchdog_warn) = false;
+	__this_cpu_write(hard_watchdog_warn, false);
 	return;
 }
 static void watchdog_interrupt_count(void)
 {
-	__get_cpu_var(hrtimer_interrupts)++;
+	__this_cpu_inc(hrtimer_interrupts);
 }
 #else
 static inline void watchdog_interrupt_count(void) { return; }
@@ -257,7 +257,7 @@ static inline void watchdog_interrupt_count(void) { return; }
 /* watchdog kicker functions */
 static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 {
-	unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts);
+	unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
 	struct pt_regs *regs = get_irq_regs();
 	int duration;
 
@@ -265,18 +265,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	watchdog_interrupt_count();
 
 	/* kick the softlockup detector */
-	wake_up_process(__get_cpu_var(softlockup_watchdog));
+	wake_up_process(__this_cpu_read(softlockup_watchdog));
 
 	/* .. and repeat */
 	hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period()));
 
 	if (touch_ts == 0) {
-		if (unlikely(__get_cpu_var(softlockup_touch_sync))) {
+		if (unlikely(__this_cpu_read(softlockup_touch_sync))) {
 			/*
 			 * If the time stamp was touched atomically
 			 * make sure the scheduler tick is up to date.
 			 */
-			__get_cpu_var(softlockup_touch_sync) = false;
+			__this_cpu_write(softlockup_touch_sync, false);
 			sched_clock_tick();
 		}
 		__touch_watchdog();
@@ -292,7 +292,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 	duration = is_softlockup(touch_ts);
 	if (unlikely(duration)) {
 		/* only warn once */
-		if (__get_cpu_var(soft_watchdog_warn) == true)
+		if (__this_cpu_read(soft_watchdog_warn) == true)
 			return HRTIMER_RESTART;
 
 		printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
@@ -307,9 +307,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
 
 		if (softlockup_panic)
 			panic("softlockup: hung tasks");
-		__get_cpu_var(soft_watchdog_warn) = true;
+		__this_cpu_write(soft_watchdog_warn, true);
 	} else
-		__get_cpu_var(soft_watchdog_warn) = false;
+		__this_cpu_write(soft_watchdog_warn, false);
 
 	return HRTIMER_RESTART;
 }
@@ -320,7 +320,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
  */
 static int watchdog(void *unused)
 {
-	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
+	static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
 	struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer);
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
@@ -370,15 +370,22 @@ static int watchdog_nmi_enable(int cpu)
 
 	/* Try to register using hardware perf events */
 	wd_attr = &wd_hw_attr;
-	wd_attr->sample_period = hw_nmi_get_sample_period();
-	event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback);
+	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
+	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback);
 	if (!IS_ERR(event)) {
 		printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n");
 		goto out_save;
 	}
 
-	printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event);
-	return -1;
+
+	/* vary the KERN level based on the returned errno */
+	if (PTR_ERR(event) == -EOPNOTSUPP)
+		printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
+	else if (PTR_ERR(event) == -ENOENT)
+		printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu);
+	else
+		printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event));
+	return PTR_ERR(event);
 
 	/* success path */
 out_save:
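The chattier error path leans on the kernel's error-pointer convention: perf_event_create_kernel_counter() returns either a valid pointer or a small negative errno encoded in the pointer itself, which IS_ERR() detects and PTR_ERR() recovers. A self-contained userspace sketch of that idiom, with simplified stand-ins for the macros from include/linux/err.h and a hypothetical create_counter() helper:

	#include <stdio.h>
	#include <errno.h>

	#define MAX_ERRNO	4095
	#define ERR_PTR(err)	((void *)(long)(err))
	#define PTR_ERR(ptr)	((long)(ptr))
	#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

	/* hypothetical stand-in for perf_event_create_kernel_counter() */
	static void *create_counter(int want_failure)
	{
		static int counter;
		return want_failure ? ERR_PTR(-ENOENT) : (void *)&counter;
	}

	int main(void)
	{
		void *event = create_counter(1);

		if (IS_ERR(event))	/* the top 4095 addresses encode errnos */
			printf("disabled: errno %ld\n", PTR_ERR(event));	/* prints -2 */
		return 0;
	}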
@@ -408,31 +415,37 @@ static void watchdog_nmi_disable(int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
 /* prepare/enable/disable routines */
-static int watchdog_prepare_cpu(int cpu)
+static void watchdog_prepare_cpu(int cpu)
 {
 	struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu);
 
 	WARN_ON(per_cpu(softlockup_watchdog, cpu));
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
-
-	return 0;
 }
 
 static int watchdog_enable(int cpu)
 {
 	struct task_struct *p = per_cpu(softlockup_watchdog, cpu);
+	int err = 0;
 
 	/* enable the perf event */
-	if (watchdog_nmi_enable(cpu) != 0)
-		return -1;
+	err = watchdog_nmi_enable(cpu);
+
+	/* Regardless of err above, fall through and start softlockup */
 
 	/* create the watchdog thread */
 	if (!p) {
 		p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu);
 		if (IS_ERR(p)) {
 			printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu);
+			if (!err) {
+				/* if hardlockup hasn't already set this */
+				err = PTR_ERR(p);
+				/* and disable the perf event */
+				watchdog_nmi_disable(cpu);
+			}
+			goto out;
 		}
 		kthread_bind(p, cpu);
 		per_cpu(watchdog_touch_ts, cpu) = 0;
@@ -440,10 +453,8 @@ static int watchdog_enable(int cpu)
 		wake_up_process(p);
 	}
 
-	/* if any cpu succeeds, watchdog is considered enabled for the system */
-	watchdog_enabled = 1;
-
-	return 0;
+out:
+	return err;
 }
 
 static void watchdog_disable(int cpu)
@@ -470,12 +481,16 @@ static void watchdog_disable(int cpu)
 static void watchdog_enable_all_cpus(void)
 {
 	int cpu;
-	int result = 0;
+
+	watchdog_enabled = 0;
 
 	for_each_online_cpu(cpu)
-		result += watchdog_enable(cpu);
+		if (!watchdog_enable(cpu))
+			/* if any cpu succeeds, watchdog is considered
+			   enabled for the system */
+			watchdog_enabled = 1;
 
-	if (result)
+	if (!watchdog_enabled)
 		printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n");
 
 }
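watchdog_enabled is now recomputed on each enable pass: cleared up front, then set again as soon as any single CPU comes up successfully, so a partial failure still leaves the detector running where it can. One side effect worth noting: as written, the "failed to be enabled on some cpus" message only prints when every CPU failed.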
@@ -495,26 +510,25 @@ static void watchdog_disable_all_cpus(void)
 /* sysctl functions */
 #ifdef CONFIG_SYSCTL
 /*
- * proc handler for /proc/sys/kernel/nmi_watchdog
+ * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh
  */
 
-int proc_dowatchdog_enabled(struct ctl_table *table, int write,
-			void __user *buffer, size_t *length, loff_t *ppos)
+int proc_dowatchdog(struct ctl_table *table, int write,
+		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	proc_dointvec(table, write, buffer, length, ppos);
+	int ret;
+
+	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (ret || !write)
+		goto out;
 
-	if (watchdog_enabled)
+	if (watchdog_enabled && watchdog_thresh)
 		watchdog_enable_all_cpus();
 	else
 		watchdog_disable_all_cpus();
-	return 0;
-}
 
-int proc_dowatchdog_thresh(struct ctl_table *table, int write,
-			void __user *buffer,
-			size_t *lenp, loff_t *ppos)
-{
-	return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+out:
+	return ret;
 }
 #endif /* CONFIG_SYSCTL */
 
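Folding the two handlers into one proc_dowatchdog() means a successful write to either of the /proc/sys/kernel knobs named in the comment re-evaluates the whole state: the per-cpu watchdogs restart when watchdog_enabled and watchdog_thresh are both non-zero and are torn down otherwise, so writing a threshold of 0 becomes another way to disable the detector. Reads and failed writes take the early goto and leave the running state untouched.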
@@ -530,13 +544,12 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 	switch (action) {
 	case CPU_UP_PREPARE:
 	case CPU_UP_PREPARE_FROZEN:
-		if (watchdog_prepare_cpu(hotcpu))
-			return NOTIFY_BAD;
+		watchdog_prepare_cpu(hotcpu);
 		break;
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
-		if (watchdog_enable(hotcpu))
-			return NOTIFY_BAD;
+		if (watchdog_enabled)
+			watchdog_enable(hotcpu);
 		break;
 #ifdef CONFIG_HOTPLUG_CPU
 	case CPU_UP_CANCELED:
@@ -549,6 +562,12 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #endif /* CONFIG_HOTPLUG_CPU */
 	}
+
+	/*
+	 * hardlockup and softlockup are not important enough
+	 * to block cpu bring up.  Just always succeed and
+	 * rely on printk output to flag problems.
+	 */
 	return NOTIFY_OK;
 }
 
@@ -556,22 +575,16 @@ static struct notifier_block __cpuinitdata cpu_nfb = {
 	.notifier_call = cpu_callback
 };
 
-static int __init spawn_watchdog_task(void)
+void __init lockup_detector_init(void)
 {
 	void *cpu = (void *)(long)smp_processor_id();
 	int err;
 
-	if (no_watchdog)
-		return 0;
-
 	err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu);
-	WARN_ON(err == NOTIFY_BAD);
+	WARN_ON(notifier_to_errno(err));
 
 	cpu_callback(&cpu_nfb, CPU_ONLINE, cpu);
 	register_cpu_notifier(&cpu_nfb);
 
-	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
-
-	return 0;
+	return;
 }
-early_initcall(spawn_watchdog_task);
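With early_initcall(spawn_watchdog_task) gone, something must call the renamed lockup_detector_init() explicitly; in this series that call is expected to live in the generic boot code (init/main.c), which lets the architecture finish initializing perf before the watchdog claims its hardware counter.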