author		Linus Torvalds <torvalds@linux-foundation.org>	2013-07-13 18:36:09 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-07-13 18:36:09 -0400
commit		0da273668657a70155f3d4ae121dc19277a05778 (patch)
tree		18f8c2edcbcd17e80b5cb052c3c024ce1d0e1f2f /kernel
parent		560ae37178b12e3bd37626f7b1e0b29c503ea558 (diff)
parent		b0ec636c93ddd77235bf0f023a8a95d78cb6cafe (diff)
Merge branch 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull timer updates from Thomas Gleixner:

 - watchdog fixes for full dynticks
 - improved debug output for full dynticks
 - remove an obsolete full dynticks check
 - two ARM SoC clocksource drivers for sharing across SoCs
 - tick broadcast fix for CPU hotplug

* 'timers-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  tick: broadcast: Check broadcast mode on CPU hotplug
  clocksource: arm_global_timer: Add ARM global timer support
  clocksource: Add Marvell Orion SoC timer
  nohz: Remove obsolete check for full dynticks CPUs to be RCU nocbs
  watchdog: Boot-disable by default on full dynticks
  watchdog: Rename confusing state variable
  watchdog: Register / unregister watchdog kthreads on sysctl control
  nohz: Warn if the machine can not perform nohz_full
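The watchdog rework in this pull moves the softlockup threads onto the smpboot per-CPU kthread API, so the sysctl handler can create and tear down the threads outright instead of parking and unparking pre-created ones. Below is a minimal sketch of that registration pattern, using the hook layout from the kernel/watchdog.c hunks further down; the example_* identifiers are hypothetical placeholders for illustration, not part of this merge.

#include <linux/smpboot.h>
#include <linux/percpu.h>
#include <linux/sched.h>

/* One task pointer per CPU; smpboot stores the kthread it creates here. */
static DEFINE_PER_CPU(struct task_struct *, example_task);

static int example_should_run(unsigned int cpu)
{
	return 0;	/* return nonzero when per-CPU work is pending */
}

static void example_fn(unsigned int cpu)	{ /* per-CPU work */ }
static void example_enable(unsigned int cpu)	{ /* arm timers/events */ }
static void example_disable(unsigned int cpu)	{ /* disarm on park */ }

static void example_cleanup(unsigned int cpu, bool online)
{
	example_disable(cpu);	/* mirrors watchdog_cleanup() in the diff below */
}

static struct smp_hotplug_thread example_threads = {
	.store			= &example_task,
	.thread_should_run	= example_should_run,
	.thread_fn		= example_fn,
	.thread_comm		= "example/%u",
	.setup			= example_enable,
	.cleanup		= example_cleanup,
	.park			= example_disable,
	.unpark			= example_enable,
};

/* Registering spawns one kthread per online CPU and follows hotplug;
 * unregistering stops and frees them all, which is what lets the sysctl
 * handler switch the detector on and off without parked leftovers. */
static int example_start(void)
{
	return smpboot_register_percpu_thread(&example_threads);
}

static void example_stop(void)
{
	smpboot_unregister_percpu_thread(&example_threads);
}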
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/sysctl.c              |   4
-rw-r--r--	kernel/time/tick-broadcast.c |   5
-rw-r--r--	kernel/time/tick-sched.c     |  15
-rw-r--r--	kernel/watchdog.c            | 113
4 files changed, 75 insertions(+), 62 deletions(-)
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index e5b31aff67aa..ac09d98490aa 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -807,7 +807,7 @@ static struct ctl_table kern_table[] = {
 #if defined(CONFIG_LOCKUP_DETECTOR)
 	{
 		.procname	= "watchdog",
-		.data		= &watchdog_enabled,
+		.data		= &watchdog_user_enabled,
 		.maxlen		= sizeof (int),
 		.mode		= 0644,
 		.proc_handler	= proc_dowatchdog,
@@ -834,7 +834,7 @@ static struct ctl_table kern_table[] = {
 	},
 	{
 		.procname	= "nmi_watchdog",
-		.data		= &watchdog_enabled,
+		.data		= &watchdog_user_enabled,
 		.maxlen		= sizeof (int),
 		.mode		= 0644,
 		.proc_handler	= proc_dowatchdog,
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 6d3f91631de6..218bcb565fed 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -157,7 +157,10 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
 		dev->event_handler = tick_handle_periodic;
 		tick_device_setup_broadcast_func(dev);
 		cpumask_set_cpu(cpu, tick_broadcast_mask);
-		tick_broadcast_start_periodic(bc);
+		if (tick_broadcast_device.mode == TICKDEV_MODE_PERIODIC)
+			tick_broadcast_start_periodic(bc);
+		else
+			tick_broadcast_setup_oneshot(bc);
 		ret = 1;
 	} else {
 		/*
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 0cf1c1453181..69601726a745 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -178,6 +178,11 @@ static bool can_stop_full_tick(void)
 	 */
 	if (!sched_clock_stable) {
 		trace_tick_stop(0, "unstable sched clock\n");
+		/*
+		 * Don't allow the user to think they can get
+		 * full NO_HZ with this machine.
+		 */
+		WARN_ONCE(1, "NO_HZ FULL will not work with unstable sched clock");
 		return false;
 	}
 #endif
@@ -346,16 +351,6 @@ void __init tick_nohz_init(void)
 	}
 
 	cpu_notifier(tick_nohz_cpu_down_callback, 0);
-
-	/* Make sure full dynticks CPU are also RCU nocbs */
-	for_each_cpu(cpu, nohz_full_mask) {
-		if (!rcu_is_nocb_cpu(cpu)) {
-			pr_warning("NO_HZ: CPU %d is not RCU nocb: "
-				   "cleared from nohz_full range", cpu);
-			cpumask_clear_cpu(cpu, nohz_full_mask);
-		}
-	}
-
 	cpulist_scnprintf(nohz_full_buf, sizeof(nohz_full_buf), nohz_full_mask);
 	pr_info("NO_HZ: Full dynticks CPUs: %s.\n", nohz_full_buf);
 }
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 05039e348f07..1241d8c91d5e 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -29,9 +29,9 @@
 #include <linux/kvm_para.h>
 #include <linux/perf_event.h>
 
-int watchdog_enabled = 1;
+int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
-static int __read_mostly watchdog_disabled;
+static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
 
 static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
@@ -63,7 +63,7 @@ static int __init hardlockup_panic_setup(char *str)
 	else if (!strncmp(str, "nopanic", 7))
 		hardlockup_panic = 0;
 	else if (!strncmp(str, "0", 1))
-		watchdog_enabled = 0;
+		watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nmi_watchdog=", hardlockup_panic_setup);
@@ -82,7 +82,7 @@ __setup("softlockup_panic=", softlockup_panic_setup);
 
 static int __init nowatchdog_setup(char *str)
 {
-	watchdog_enabled = 0;
+	watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nowatchdog", nowatchdog_setup);
@@ -90,7 +90,7 @@ __setup("nowatchdog", nowatchdog_setup);
 /* deprecated */
 static int __init nosoftlockup_setup(char *str)
 {
-	watchdog_enabled = 0;
+	watchdog_user_enabled = 0;
 	return 1;
 }
 __setup("nosoftlockup", nosoftlockup_setup);
@@ -158,7 +158,7 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-	if (watchdog_enabled) {
+	if (watchdog_user_enabled) {
 		unsigned cpu;
 
 		for_each_present_cpu(cpu) {
@@ -347,11 +347,6 @@ static void watchdog_enable(unsigned int cpu)
 	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 	hrtimer->function = watchdog_timer_fn;
 
-	if (!watchdog_enabled) {
-		kthread_park(current);
-		return;
-	}
-
 	/* Enable the perf event */
 	watchdog_nmi_enable(cpu);
 
@@ -374,6 +369,11 @@ static void watchdog_disable(unsigned int cpu)
 	watchdog_nmi_disable(cpu);
 }
 
+static void watchdog_cleanup(unsigned int cpu, bool online)
+{
+	watchdog_disable(cpu);
+}
+
 static int watchdog_should_run(unsigned int cpu)
 {
 	return __this_cpu_read(hrtimer_interrupts) !=
@@ -475,28 +475,40 @@ static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
 static void watchdog_nmi_disable(unsigned int cpu) { return; }
 #endif /* CONFIG_HARDLOCKUP_DETECTOR */
 
-/* prepare/enable/disable routines */
-/* sysctl functions */
-#ifdef CONFIG_SYSCTL
-static void watchdog_enable_all_cpus(void)
+static struct smp_hotplug_thread watchdog_threads = {
+	.store			= &softlockup_watchdog,
+	.thread_should_run	= watchdog_should_run,
+	.thread_fn		= watchdog,
+	.thread_comm		= "watchdog/%u",
+	.setup			= watchdog_enable,
+	.cleanup		= watchdog_cleanup,
+	.park			= watchdog_disable,
+	.unpark			= watchdog_enable,
+};
+
+static int watchdog_enable_all_cpus(void)
 {
-	unsigned int cpu;
+	int err = 0;
 
-	if (watchdog_disabled) {
-		watchdog_disabled = 0;
-		for_each_online_cpu(cpu)
-			kthread_unpark(per_cpu(softlockup_watchdog, cpu));
+	if (!watchdog_running) {
+		err = smpboot_register_percpu_thread(&watchdog_threads);
+		if (err)
+			pr_err("Failed to create watchdog threads, disabled\n");
+		else
+			watchdog_running = 1;
 	}
+
+	return err;
 }
 
+/* prepare/enable/disable routines */
+/* sysctl functions */
+#ifdef CONFIG_SYSCTL
 static void watchdog_disable_all_cpus(void)
 {
-	unsigned int cpu;
-
-	if (!watchdog_disabled) {
-		watchdog_disabled = 1;
-		for_each_online_cpu(cpu)
-			kthread_park(per_cpu(softlockup_watchdog, cpu));
+	if (watchdog_running) {
+		watchdog_running = 0;
+		smpboot_unregister_percpu_thread(&watchdog_threads);
 	}
 }
 
@@ -507,45 +519,48 @@ static void watchdog_disable_all_cpus(void)
 int proc_dowatchdog(struct ctl_table *table, int write,
 		    void __user *buffer, size_t *lenp, loff_t *ppos)
 {
-	int ret;
+	int err, old_thresh, old_enabled;
 
-	if (watchdog_disabled < 0)
-		return -ENODEV;
+	old_thresh = ACCESS_ONCE(watchdog_thresh);
+	old_enabled = ACCESS_ONCE(watchdog_user_enabled);
 
-	ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-	if (ret || !write)
-		return ret;
+	err = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (err || !write)
+		return err;
 
 	set_sample_period();
 	/*
 	 * Watchdog threads shouldn't be enabled if they are
-	 * disabled. The 'watchdog_disabled' variable check in
+	 * disabled. The 'watchdog_running' variable check in
 	 * watchdog_*_all_cpus() function takes care of this.
 	 */
-	if (watchdog_enabled && watchdog_thresh)
-		watchdog_enable_all_cpus();
+	if (watchdog_user_enabled && watchdog_thresh)
+		err = watchdog_enable_all_cpus();
 	else
 		watchdog_disable_all_cpus();
 
-	return ret;
+	/* Restore old values on failure */
+	if (err) {
+		watchdog_thresh = old_thresh;
+		watchdog_user_enabled = old_enabled;
+	}
+
+	return err;
 }
 #endif /* CONFIG_SYSCTL */
 
-static struct smp_hotplug_thread watchdog_threads = {
-	.store			= &softlockup_watchdog,
-	.thread_should_run	= watchdog_should_run,
-	.thread_fn		= watchdog,
-	.thread_comm		= "watchdog/%u",
-	.setup			= watchdog_enable,
-	.park			= watchdog_disable,
-	.unpark			= watchdog_enable,
-};
-
 void __init lockup_detector_init(void)
 {
 	set_sample_period();
-	if (smpboot_register_percpu_thread(&watchdog_threads)) {
-		pr_err("Failed to create watchdog threads, disabled\n");
-		watchdog_disabled = -ENODEV;
+
+#ifdef CONFIG_NO_HZ_FULL
+	if (watchdog_user_enabled) {
+		watchdog_user_enabled = 0;
+		pr_warning("Disabled lockup detectors by default for full dynticks\n");
+		pr_warning("You can reactivate it with 'sysctl -w kernel.watchdog=1'\n");
 	}
+#endif
+
+	if (watchdog_user_enabled)
+		watchdog_enable_all_cpus();
 }