-rw-r--r--  include/linux/nmi.h       |   2
-rw-r--r--  kernel/sysctl.c           |   4
-rw-r--r--  kernel/time/tick-sched.c  |  15
-rw-r--r--  kernel/watchdog.c         | 113
4 files changed, 72 insertions(+), 62 deletions(-)
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index db50840e6355..6a45fb583ff1 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -46,7 +46,7 @@ static inline bool trigger_all_cpu_backtrace(void)
 #ifdef CONFIG_LOCKUP_DETECTOR
 int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
-extern int watchdog_enabled;
+extern int watchdog_user_enabled;
 extern int watchdog_thresh;
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4ce13c3cedb9..5c9e33b5c0eb 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -800,7 +800,7 @@ static struct ctl_table kern_table[] = {
 #if defined(CONFIG_LOCKUP_DETECTOR)
 	{
 		.procname	= "watchdog",
-		.data		= &watchdog_enabled,
+		.data		= &watchdog_user_enabled,
 		.maxlen		= sizeof (int),
 		.mode		= 0644,
 		.proc_handler	= proc_dowatchdog,
@@ -827,7 +827,7 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.procname	= "nmi_watchdog",
-		.data		= &watchdog_enabled,
+		.data		= &watchdog_user_enabled,
 		.maxlen		= sizeof (int),
 		.mode		= 0644,
 		.proc_handler	= proc_dowatchdog,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0cf1c1453181..69601726a745 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -178,6 +178,11 @@ static bool can_stop_full_tick(void)
 	 */
 	if (!sched_clock_stable) {
 		trace_tick_stop(0, "unstable sched clock\n");
+		/*
+		 * Don't allow the user to think they can get
+		 * full NO_HZ with this machine.
+		 */
+		WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
 		return false;
 	}
 #endif
@@ -346,16 +351,6 @@ void __init tick_nohz_init(void)
 	}
 
 	cpu_notifier(tick_nohz_cpu_down_callback, 0);
-
-	/* Make sure full dynticks CPU are also RCU nocbs */
-	for_each_cpu(cpu, nohz_full_mask) {
-		if (!rcu_is_nocb_cpu(cpu)) {
-			pr_warning("NO_HZ: CPU %d is not RCU nocb: "
-				   "cleared from nohz_full range", cpu);
-			cpumask_clear_cpu(cpu, nohz_full_mask);
-		}
-	}
-
 	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
 	pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
 }
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 05039e348f07..1241d8c91d5e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -29,9 +29,9 @@
 #include <linux/kvm_para.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled = 1;
+int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
-static int __read_mostly watchdog_disabled;
+static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -63,7 +63,7 @@ static int __init hardlockup_panic_setup(char *str)
 	else if (!strncmp(str, "nopanic", 7))
 		hardlockup_panic = 0;
 	else if (!strncmp(str, "0", 1))
-		watchdog_enabled = 0;
+		watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -82,7 +82,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-	watchdog_enabled = 0;
+	watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -90,7 +90,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-	watchdog_enabled = 0;
+	watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -158,7 +158,7 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-	if (watchdog_enabled) {
+	if (watchdog_user_enabled) {
 		unsigned cpu;
 
 		for_each_present_cpu(cpu) {
@@ -347,11 +347,6 @@ static void watchdog_enable(unsigned int cpu)
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
 
-	if (!watchdog_enabled) {
-		kthread_park(current);
-		return;
-	}
-
 	/* Enable the perf event */
 	watchdog_nmi_enable(cpu);
 
@@ -374,6 +369,11 @@ static void watchdog_disable(unsigned int cpu)
 	watchdog_nmi_disable(cpu);
 }
 
+static void watchdog_cleanup(unsigned int cpu, bool online)
+{
+	watchdog_disable(cpu);
+}
+
 static int watchdog_should_run(unsigned int cpu)
 {
 	return __this_cpu_read(hrtimer_interrupts) !=
@@ -475,28 +475,40 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
-/* prepare/enable/disable routines */
-/* sysctl functions */
-#ifdef CONFIG_SYSCTL
-static void watchdog_enable_all_cpus(void)
+static struct smp_hotplug_thread watchdog_threads = {
+	.store			= &softlockup_watchdog,
+	.thread_should_run	= watchdog_should_run,
+	.thread_fn		= watchdog,
+	.thread_comm		= "watchdog/%u",
+	.setup			= watchdog_enable,
+	.cleanup		= watchdog_cleanup,
+	.park			= watchdog_disable,
+	.unpark			= watchdog_enable,
+};
+
+static int watchdog_enable_all_cpus(void)
 {
-	unsigned int cpu;
+	int err = 0;
 
-	if (watchdog_disabled) {
-		watchdog_disabled = 0;
-		for_each_online_cpu(cpu)
-			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+	if (!watchdog_running) {
+		err = smpboot_register_percpu_thread(&watchdog_threads);
+		if (err)
+			pr_err("Failed to create watchdog threads, disabled\n");
+		else
+			watchdog_running = 1;
 	}
+
+	return err;
 }
 
+/* prepare/enable/disable routines */
+/* sysctl functions */
+#ifdef CONFIG_SYSCTL
 static void watchdog_disable_all_cpus(void)
 {
-	unsigned int cpu;
-
-	if (!watchdog_disabled) {
-		watchdog_disabled = 1;
-		for_each_online_cpu(cpu)
-			kthread_park(per_cpu(softlockup_watchdog, cpu));
+	if (watchdog_running) {
+		watchdog_running = 0;
+		smpboot_unregister_percpu_thread(&watchdog_threads);
 	}
 }
 
@@ -507,45 +519,48 @@ static void watchdog_disable_all_cpus(void)
 int proc_dowatchdog(struct ctl_table *table, int write,
 		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int ret;
+	int err, old_thresh, old_enabled;
 
-	if (watchdog_disabled < 0)
-		return -ENODEV;
+	old_thresh = ACCESS_ONCE(watchdog_thresh);
+	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret || !write)
-		return ret;
+	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (err || !write)
+		return err;
 
 	set_sample_period();
 	/*
 	 * Watchdog threads shouldn't be enabled if they are
-	 * disabled. The 'watchdog_disabled' variable check in
+	 * disabled. The 'watchdog_running' variable check in
 	 * watchdog_*_all_cpus() function takes care of this.
 	 */
-	if (watchdog_enabled && watchdog_thresh)
-		watchdog_enable_all_cpus();
+	if (watchdog_user_enabled && watchdog_thresh)
+		err = watchdog_enable_all_cpus();
 	else
 		watchdog_disable_all_cpus();
 
-	return ret;
+	/* Restore old values on failure */
+	if (err) {
+		watchdog_thresh = old_thresh;
+		watchdog_user_enabled = old_enabled;
+	}
+
+	return err;
 }
 #endif /* CONFIG_SYSCTL */
 
-static struct smp_hotplug_thread watchdog_threads = {
-	.store			= &softlockup_watchdog,
-	.thread_should_run	= watchdog_should_run,
-	.thread_fn		= watchdog,
-	.thread_comm		= "watchdog/%u",
-	.setup			= watchdog_enable,
-	.park			= watchdog_disable,
-	.unpark			= watchdog_enable,
-};
-
 void __init lockup_detector_init(void)
 {
 	set_sample_period();
-	if (smpboot_register_percpu_thread(&watchdog_threads)) {
-		pr_err("Failed to create watchdog threads, disabled\n");
-		watchdog_disabled = -ENODEV;
+
+#ifdef CONFIG_NO_HZ_FULL
+	if (watchdog_user_enabled) {
+		watchdog_user_enabled = 0;
+		pr_warning("Disabled lockup detectors by default for full dynticks\n");
+		pr_warning("You can reactivate it with 'sysctl -w kernel.watchdog=1'\n");
 	}
+#endif
+
+	if (watchdog_user_enabled)
+		watchdog_enable_all_cpus();
 }