path: root/kernel/watchdog.c
author    Frederic Weisbecker <fweisbec@gmail.com>    2013-06-06 09:42:53 -0400
committer Frederic Weisbecker <fweisbec@gmail.com>    2013-06-19 19:16:09 -0400
commit    b8900bc0217fac8e68085997bee2f05e6db931a2 (patch)
tree      43c7ddba331b1cf3d7c984e7b0ba528cd295b44d /kernel/watchdog.c
parent    e12d0271774fea9fddf1e2a7952a0bffb2ee8e8b (diff)
watchdog: Register / unregister watchdog kthreads on sysctl control
The user activation/deactivation of the watchdog through boot parameters or sysctl is currently implemented with a dance involving kthread parking and unparking methods: the threads are unconditionally registered on boot and they park as soon as the user wants the watchdog to be disabled.

This method involves a few noisy details to handle though: the watchdog kthreads may be unparked anytime due to hotplug operations, after which the watchdog internals have to decide whether to park again if the watchdog is user-disabled. As a result the setup() and unpark() methods need to be able to request a reparking. This is not currently supported in the kthread infrastructure, so this piece of the watchdog code only works halfway.

Besides, unparking/reparking the watchdog kthreads consumes unnecessary cputime on hotplug operations when those could simply be ignored in the first place.

As suggested by Srivatsa, let's instead only register the watchdog threads when they are needed. This way we don't need to think about hotplug operations and we don't burden CPU onlining when the watchdog is simply disabled.

Suggested-by: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Anish Singh <anish198519851985@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Li Zhong <zhong@linux.vnet.ibm.com>
Cc: Don Zickus <dzickus@redhat.com>
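For readers unfamiliar with the smpboot API the patch builds on, here is a minimal sketch of the per-CPU kthread registration pattern it moves to. The demo_* names are illustrative only and not part of this patch; the real callbacks are the watchdog_* functions shown in the diff below.

#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/sched.h>

static DEFINE_PER_CPU(struct task_struct *, demo_task);

/* Tell the smpboot kthread whether demo_fn() has work to do right now. */
static int demo_should_run(unsigned int cpu)
{
	return 0;
}

/* Per-CPU work, executed in the kthread's own context. */
static void demo_fn(unsigned int cpu)
{
}

/* Called when the thread is created or its CPU comes back online. */
static void demo_setup(unsigned int cpu)
{
}

/* Called before the thread parks, e.g. when its CPU goes offline. */
static void demo_park(unsigned int cpu)
{
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_fn,
	.thread_comm		= "demo/%u",
	.setup			= demo_setup,
	.park			= demo_park,
};

/* Creates one "demo/N" kthread per online CPU and follows hotplug for us. */
static int demo_enable(void)
{
	return smpboot_register_percpu_thread(&demo_threads);
}

/* Stops and destroys all the per-CPU kthreads again. */
static void demo_disable(void)
{
	smpboot_unregister_percpu_thread(&demo_threads);
}

The patch applies exactly this pattern: watchdog_enable_all_cpus() registers watchdog_threads only when the user enables the watchdog, and watchdog_disable_all_cpus() unregisters them, so hotplug handling is left entirely to the smpboot infrastructure.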
Diffstat (limited to 'kernel/watchdog.c')
-rw-r--r--    kernel/watchdog.c    87
1 file changed, 47 insertions, 40 deletions
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 05039e348f07..52c9a9b91bdd 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -31,7 +31,7 @@
 
 int watchdog_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
-static int __read_mostly watchdog_disabled;
+static int __read_mostly watchdog_disabled = 1;
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -347,11 +347,6 @@ static void watchdog_enable(unsigned int cpu)
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
 
-	if (!watchdog_enabled) {
-		kthread_park(current);
-		return;
-	}
-
 	/* Enable the perf event */
 	watchdog_nmi_enable(cpu);
 
@@ -374,6 +369,11 @@ static void watchdog_disable(unsigned int cpu)
 	watchdog_nmi_disable(cpu);
 }
 
+static void watchdog_cleanup(unsigned int cpu, bool online)
+{
+	watchdog_disable(cpu);
+}
+
 static int watchdog_should_run(unsigned int cpu)
 {
 	return __this_cpu_read(hrtimer_interrupts) !=
@@ -475,28 +475,40 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
-/* prepare/enable/disable routines */
-/* sysctl functions */
-#ifdef CONFIG_SYSCTL
-static void watchdog_enable_all_cpus(void)
+static struct smp_hotplug_thread watchdog_threads = {
+	.store			= &softlockup_watchdog,
+	.thread_should_run	= watchdog_should_run,
+	.thread_fn		= watchdog,
+	.thread_comm		= "watchdog/%u",
+	.setup			= watchdog_enable,
+	.cleanup		= watchdog_cleanup,
+	.park			= watchdog_disable,
+	.unpark			= watchdog_enable,
+};
+
+static int watchdog_enable_all_cpus(void)
 {
-	unsigned int cpu;
+	int err = 0;
 
 	if (watchdog_disabled) {
-		watchdog_disabled = 0;
-		for_each_online_cpu(cpu)
-			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+		err = smpboot_register_percpu_thread(&watchdog_threads);
+		if (err)
+			pr_err("Failed to create watchdog threads, disabled\n");
+		else
+			watchdog_disabled = 0;
 	}
+
+	return err;
 }
 
+/* prepare/enable/disable routines */
+/* sysctl functions */
+#ifdef CONFIG_SYSCTL
 static void watchdog_disable_all_cpus(void)
 {
-	unsigned int cpu;
-
 	if (!watchdog_disabled) {
 		watchdog_disabled = 1;
-		for_each_online_cpu(cpu)
-			kthread_park(per_cpu(softlockup_watchdog, cpu));
+		smpboot_unregister_percpu_thread(&watchdog_threads);
 	}
 }
 
@@ -507,14 +519,14 @@ static void watchdog_disable_all_cpus(void)
 int proc_dowatchdog(struct ctl_table *table, int write,
 		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int ret;
+	int err, old_thresh, old_enabled;
 
-	if (watchdog_disabled < 0)
-		return -ENODEV;
+	old_thresh = ACCESS_ONCE(watchdog_thresh);
+	old_enabled = ACCESS_ONCE(watchdog_enabled);
 
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret || !write)
-		return ret;
+	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (err || !write)
+		return err;
 
 	set_sample_period();
 	/*
@@ -523,29 +535,24 @@ int proc_dowatchdog(struct ctl_table *table, int write,
 	 * watchdog_*_all_cpus() function takes care of this.
 	 */
 	if (watchdog_enabled && watchdog_thresh)
-		watchdog_enable_all_cpus();
+		err = watchdog_enable_all_cpus();
 	else
 		watchdog_disable_all_cpus();
 
-	return ret;
+	/* Restore old values on failure */
+	if (err) {
+		watchdog_thresh = old_thresh;
+		watchdog_enabled = old_enabled;
+	}
+
+	return err;
 }
 #endif /* CONFIG_SYSCTL */
 
-static struct smp_hotplug_thread watchdog_threads = {
-	.store			= &softlockup_watchdog,
-	.thread_should_run	= watchdog_should_run,
-	.thread_fn		= watchdog,
-	.thread_comm		= "watchdog/%u",
-	.setup			= watchdog_enable,
-	.park			= watchdog_disable,
-	.unpark			= watchdog_enable,
-};
-
 void __init lockup_detector_init(void)
 {
 	set_sample_period();
-	if (smpboot_register_percpu_thread(&watchdog_threads)) {
-		pr_err("Failed to create watchdog threads, disabled\n");
-		watchdog_disabled = -ENODEV;
-	}
+
+	if (watchdog_enabled)
+		watchdog_enable_all_cpus();
 }