From 277b199800ac90811ac86d215063df1984f51619 Mon Sep 17 00:00:00 2001 From: Lin Ming Date: Fri, 20 Aug 2010 11:03:51 +0800 Subject: lockup_detector: Make callback function static watchdog_overflow_callback() is only used in kernel/watchdog.c. Signed-off-by: Lin Ming Cc: Peter Zijlstra Cc: Don Zickus LKML-Reference: <1282273431.16443.32.camel@minggr.sh.intel.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 613bc1f04610..b60e2a869bba 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -202,7 +202,7 @@ static struct perf_event_attr wd_hw_attr = { }; /* Callback function for perf event subsystem */ -void watchdog_overflow_callback(struct perf_event *event, int nmi, +static void watchdog_overflow_callback(struct perf_event *event, int nmi, struct perf_sample_data *data, struct pt_regs *regs) { -- cgit v1.2.2 From eac243355a99d6b9d41bbeba4fc83e7f735485f9 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 31 Aug 2010 23:00:08 -0400 Subject: lockup_detector: Convert cpu notifier to return encapsulate errno value By the commit e6bde73b07edeb703d4c89c1daabc09c303de11f ("cpu-hotplug: return better errno on cpu hotplug failure"), the cpu notifier can return encapsulate errno value, resulting in more meaningful error codes for CPU hotplug failures. This converts the cpu notifier to return encapsulate errno value for the lockup_detector as well. Signed-off-by: Akinobu Mita Cc: peterz@infradead.org Cc: gorcunov@gmail.com Cc: fweisbec@gmail.com LKML-Reference: <1283310009-22168-3-git-send-email-dzickus@redhat.com> Signed-off-by: Don Zickus Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 0d53c8e853b1..501cb6edca4b 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -371,7 +371,7 @@ static int watchdog_nmi_enable(int cpu) } printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event); - return -1; + return PTR_ERR(event); /* success path */ out_save: @@ -415,17 +415,19 @@ static int watchdog_prepare_cpu(int cpu) static int watchdog_enable(int cpu) { struct task_struct *p = per_cpu(softlockup_watchdog, cpu); + int err; /* enable the perf event */ - if (watchdog_nmi_enable(cpu) != 0) - return -1; + err = watchdog_nmi_enable(cpu); + if (err) + return err; /* create the watchdog thread */ if (!p) { p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); if (IS_ERR(p)) { printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); - return -1; + return PTR_ERR(p); } kthread_bind(p, cpu); per_cpu(watchdog_touch_ts, cpu) = 0; @@ -519,17 +521,16 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; + int err = 0; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - if (watchdog_prepare_cpu(hotcpu)) - return NOTIFY_BAD; + err = watchdog_prepare_cpu(hotcpu); break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: - if (watchdog_enable(hotcpu)) - return NOTIFY_BAD; + err = watchdog_enable(hotcpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: @@ -542,7 +543,7 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; #endif /* CONFIG_HOTPLUG_CPU */ } - return NOTIFY_OK; + return notifier_from_errno(err); } static struct 
notifier_block __cpuinitdata cpu_nfb = { @@ -558,7 +559,7 @@ static int __init spawn_watchdog_task(void) return 0; err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); - WARN_ON(err == NOTIFY_BAD); + WARN_ON(notifier_to_errno(err)); cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); register_cpu_notifier(&cpu_nfb); -- cgit v1.2.2 From 14416c35b6b9975c9593d7ecc8382d1ecaa0b598 Mon Sep 17 00:00:00 2001 From: Akinobu Mita Date: Tue, 31 Aug 2010 23:00:09 -0400 Subject: lockup_detector: Remove unused panic_notifier The panic notifer in lockup_detector just set did_panic to 1. But did_panic is not used anywhere so we can just remove it. Signed-off-by: Akinobu Mita Cc: peterz@infradead.org Cc: gorcunov@gmail.com Cc: fweisbec@gmail.com LKML-Reference: <1283310009-22168-4-git-send-email-dzickus@redhat.com> Signed-off-by: Don Zickus Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 15 --------------- 1 file changed, 15 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 501cb6edca4b..5b1ee4f4ca0d 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -43,7 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); #endif -static int __read_mostly did_panic; static int __initdata no_watchdog; @@ -180,18 +179,6 @@ static int is_softlockup(unsigned long touch_ts) return 0; } -static int -watchdog_panic(struct notifier_block *this, unsigned long event, void *ptr) -{ - did_panic = 1; - - return NOTIFY_DONE; -} - -static struct notifier_block panic_block = { - .notifier_call = watchdog_panic, -}; - #ifdef CONFIG_HARDLOCKUP_DETECTOR static struct perf_event_attr wd_hw_attr = { .type = PERF_TYPE_HARDWARE, @@ -564,8 +551,6 @@ static int __init spawn_watchdog_task(void) cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); register_cpu_notifier(&cpu_nfb); - atomic_notifier_chain_register(&panic_notifier_list, &panic_block); - return 0; } early_initcall(spawn_watchdog_task); -- cgit v1.2.2 From d9ca07a05ce1c42ac9717e54eaea4546a3a80978 Mon Sep 17 00:00:00 2001 From: Stephane Eranian Date: Tue, 14 Sep 2010 15:34:01 +0200 Subject: watchdog: Avoid kernel crash when disabling watchdog In case you boot with the watchdog disabled, i.e., nowatchdog, then, if you try to disable it via /proc/sys/kernel/watchdog, you get a kernel crash. The reason is that you are trying to cancel a hrtimer which has never been initialized. This patch fixes this by skipping execution of watchdog_disable_all_cpus() when the watchdog is marked disabled from boot. Signed-off-by: Stephane Eranian Signed-off-by: Peter Zijlstra LKML-Reference: <4c8f7a23.cae9d80a.2c11.0bb4@mx.google.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index fa71aebda4ff..89eadbb9cefe 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -473,6 +473,9 @@ static void watchdog_disable_all_cpus(void) { int cpu; + if (no_watchdog) + return; + for_each_online_cpu(cpu) watchdog_disable(cpu); -- cgit v1.2.2 From 38a81da2205f94e8a2a834b51a6b99c91fc7c2e8 Mon Sep 17 00:00:00 2001 From: Matt Helsley Date: Mon, 13 Sep 2010 13:01:20 -0700 Subject: perf events: Clean up pid passing The kernel perf event creation path shouldn't use find_task_by_vpid() because a vpid exists in a specific namespace. 
find_task_by_vpid() uses current's pid namespace which isn't always the correct namespace to use for the vpid in all the places perf_event_create_kernel_counter() (and thus find_get_context()) is called. The goal is to clean up pid namespace handling and prevent bugs like: https://bugzilla.kernel.org/show_bug.cgi?id=17281 Instead of using pids switch find_get_context() to use task struct pointers directly. The syscall is responsible for resolving the pid to a task struct. This moves the pid namespace resolution into the syscall much like every other syscall that takes pid parameters. Signed-off-by: Matt Helsley Signed-off-by: Peter Zijlstra Cc: Robin Green Cc: Prasad Cc: Arnaldo Carvalho de Melo Cc: Steven Rostedt Cc: Will Deacon Cc: Mahesh Salgaonkar LKML-Reference: Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 89eadbb9cefe..dc8e16824b51 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -358,7 +358,7 @@ static int watchdog_nmi_enable(int cpu) /* Try to register using hardware perf events */ wd_attr = &wd_hw_attr; wd_attr->sample_period = hw_nmi_get_sample_period(); - event = perf_event_create_kernel_counter(wd_attr, cpu, -1, watchdog_overflow_callback); + event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback); if (!IS_ERR(event)) { printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); goto out_save; -- cgit v1.2.2 From fe7de49f9d4e53f24ec9ef762a503f70b562341c Mon Sep 17 00:00:00 2001 From: KOSAKI Motohiro Date: Wed, 20 Oct 2010 16:01:12 -0700 Subject: sched: Make sched_param argument static in sched_setscheduler() callers Andrew Morton pointed out almost all sched_setscheduler() callers are using fixed parameters and can be converted to static. It reduces runtime memory use a little. Signed-off-by: KOSAKI Motohiro Reported-by: Andrew Morton Acked-by: James Morris Cc: Ingo Molnar Cc: Steven Rostedt Signed-off-by: Andrew Morton Signed-off-by: Thomas Gleixner Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index bafba687a6d8..94ca779aa9c2 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -307,7 +307,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) */ static int watchdog(void *unused) { - struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; + static struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 }; struct hrtimer *hrtimer = &__raw_get_cpu_var(watchdog_hrtimer); sched_setscheduler(current, SCHED_FIFO, ¶m); -- cgit v1.2.2 From 433039e97f672b81e6c8f6daef385dcf035c6e29 Mon Sep 17 00:00:00 2001 From: David Daney Date: Fri, 5 Nov 2010 16:17:39 -0700 Subject: watchdog: Fix section mismatch and potential undefined behavior. Commit d9ca07a05ce1 ("watchdog: Avoid kernel crash when disabling watchdog") introduces a section mismatch. Now that we reference no_watchdog from non-__init code it can no longer be __initdata. 
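A minimal sketch of the pattern behind that warning (illustrative only and simplified; the names below are made up and not part of the patch):

#include <linux/init.h>

static int __initdata boot_flag;		/* placed in .init.data, freed after boot */

static int __init parse_boot_flag(char *str)	/* .init.text may touch __initdata */
{
	boot_flag = 1;
	return 1;
}
__setup("boot_flag", parse_boot_flag);

static int runtime_check(void)			/* ordinary .text */
{
	/*
	 * This reference is what modpost reports as a section mismatch:
	 * once free_initmem() has run, boot_flag's memory is gone, so the
	 * read is undefined behavior. Dropping __initdata, as the patch
	 * does for no_watchdog, keeps the variable valid for the whole
	 * lifetime of the kernel.
	 */
	return boot_flag;
}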
Signed-off-by: David Daney Cc: Stephane Eranian Cc: Peter Zijlstra Cc: Ingo Molnar Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index bafba687a6d8..6e3c41a4024c 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -43,7 +43,7 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); #endif -static int __initdata no_watchdog; +static int no_watchdog; /* boot commands */ -- cgit v1.2.2 From 004417a6d468e24399e383645c068b498eed84ad Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 25 Nov 2010 18:38:29 +0100 Subject: perf, arch: Cleanup perf-pmu init vs lockup-detector The perf hardware pmu got initialized at various points in the boot, some before early_initcall() some after (notably arch_initcall). The problem is that the NMI lockup detector is ran from early_initcall() and expects the hardware pmu to be present. Sanitize this by moving all architecture hardware pmu implementations to initialize at early_initcall() and move the lockup detector to an explicit initcall right after that. Cc: paulus Cc: davem Cc: Michael Cree Cc: Deng-Cheng Zhu Acked-by: Paul Mundt Acked-by: Will Deacon Signed-off-by: Peter Zijlstra LKML-Reference: <1290707759.2145.119.camel@laptop> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 6e3c41a4024c..cad4e42060a9 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -547,13 +547,13 @@ static struct notifier_block __cpuinitdata cpu_nfb = { .notifier_call = cpu_callback }; -static int __init spawn_watchdog_task(void) +void __init lockup_detector_init(void) { void *cpu = (void *)(long)smp_processor_id(); int err; if (no_watchdog) - return 0; + return; err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); WARN_ON(notifier_to_errno(err)); @@ -561,6 +561,5 @@ static int __init spawn_watchdog_task(void) cpu_callback(&cpu_nfb, CPU_ONLINE, cpu); register_cpu_notifier(&cpu_nfb); - return 0; + return; } -early_initcall(spawn_watchdog_task); -- cgit v1.2.2 From 5dc3055879b8f659f62abb7c3d1eaa4d02e36d65 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Mon, 29 Nov 2010 17:07:17 -0500 Subject: x86, NMI: Add back unknown_nmi_panic and nmi_watchdog sysctls Originally adapted from Huang Ying's patch which moved the unknown_nmi_panic to the traps.c file. Because the old nmi watchdog was deleted before this change happened, the unknown_nmi_panic sysctl was lost. This re-adds it. Also, the nmi_watchdog sysctl was re-implemented and its documentation updated accordingly. 
Patch-inspired-by: Huang Ying Signed-off-by: Don Zickus Reviewed-by: Cyrill Gorcunov Acked-by: Yinghai Lu Cc: fweisbec@gmail.com LKML-Reference: <1291068437-5331-3-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 2 ++ 1 file changed, 2 insertions(+) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index cad4e42060a9..eb17e143b5da 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -57,6 +57,8 @@ static int __init hardlockup_panic_setup(char *str) { if (!strncmp(str, "panic", 5)) hardlockup_panic = 1; + else if (!strncmp(str, "0", 1)) + no_watchdog = 1; return 1; } __setup("nmi_watchdog=", hardlockup_panic_setup); -- cgit v1.2.2 From 909ea96468096b07fbb41aaf69be060d92bd9271 Mon Sep 17 00:00:00 2001 From: Christoph Lameter Date: Wed, 8 Dec 2010 16:22:55 +0100 Subject: core: Replace __get_cpu_var with __this_cpu_read if not used for an address. __get_cpu_var() can be replaced with this_cpu_read and will then use a single read instruction with implied address calculation to access the correct per cpu instance. However, the address of a per cpu variable passed to __this_cpu_read() cannot be determined (since it's an implied address conversion through segment prefixes). Therefore apply this only to uses of __get_cpu_var where the address of the variable is not used. Cc: Pekka Enberg Cc: Hugh Dickins Cc: Thomas Gleixner Acked-by: H. Peter Anvin Signed-off-by: Christoph Lameter Signed-off-by: Tejun Heo --- kernel/watchdog.c | 36 ++++++++++++++++++------------------ 1 file changed, 18 insertions(+), 18 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 6e3c41a4024c..8037a86106ed 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -116,12 +116,12 @@ static void __touch_watchdog(void) { int this_cpu = smp_processor_id(); - __get_cpu_var(watchdog_touch_ts) = get_timestamp(this_cpu); + __this_cpu_write(watchdog_touch_ts, get_timestamp(this_cpu)); } void touch_softlockup_watchdog(void) { - __raw_get_cpu_var(watchdog_touch_ts) = 0; + __this_cpu_write(watchdog_touch_ts, 0); } EXPORT_SYMBOL(touch_softlockup_watchdog); @@ -165,12 +165,12 @@ void touch_softlockup_watchdog_sync(void) /* watchdog detector functions */ static int is_hardlockup(void) { - unsigned long hrint = __get_cpu_var(hrtimer_interrupts); + unsigned long hrint = __this_cpu_read(hrtimer_interrupts); - if (__get_cpu_var(hrtimer_interrupts_saved) == hrint) + if (__this_cpu_read(hrtimer_interrupts_saved) == hrint) return 1; - __get_cpu_var(hrtimer_interrupts_saved) = hrint; + __this_cpu_write(hrtimer_interrupts_saved, hrint); return 0; } #endif @@ -203,8 +203,8 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi, /* Ensure the watchdog never gets throttled */ event->hw.interrupts = 0; - if (__get_cpu_var(watchdog_nmi_touch) == true) { - __get_cpu_var(watchdog_nmi_touch) = false; + if (__this_cpu_read(watchdog_nmi_touch) == true) { + __this_cpu_write(watchdog_nmi_touch, false); return; } @@ -218,7 +218,7 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi, int this_cpu = smp_processor_id(); /* only print hardlockups once */ - if (__get_cpu_var(hard_watchdog_warn) == true) + if (__this_cpu_read(hard_watchdog_warn) == true) return; if (hardlockup_panic) @@ -226,16 +226,16 @@ static void watchdog_overflow_callback(struct perf_event *event, int nmi, else WARN(1, "Watchdog detected hard LOCKUP on cpu %d", this_cpu); - __get_cpu_var(hard_watchdog_warn) = true; + 
__this_cpu_write(hard_watchdog_warn, true); return; } - __get_cpu_var(hard_watchdog_warn) = false; + __this_cpu_write(hard_watchdog_warn, false); return; } static void watchdog_interrupt_count(void) { - __get_cpu_var(hrtimer_interrupts)++; + __this_cpu_inc(hrtimer_interrupts); } #else static inline void watchdog_interrupt_count(void) { return; } @@ -244,7 +244,7 @@ static inline void watchdog_interrupt_count(void) { return; } /* watchdog kicker functions */ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) { - unsigned long touch_ts = __get_cpu_var(watchdog_touch_ts); + unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts); struct pt_regs *regs = get_irq_regs(); int duration; @@ -252,18 +252,18 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) watchdog_interrupt_count(); /* kick the softlockup detector */ - wake_up_process(__get_cpu_var(softlockup_watchdog)); + wake_up_process(__this_cpu_read(softlockup_watchdog)); /* .. and repeat */ hrtimer_forward_now(hrtimer, ns_to_ktime(get_sample_period())); if (touch_ts == 0) { - if (unlikely(__get_cpu_var(softlockup_touch_sync))) { + if (unlikely(__this_cpu_read(softlockup_touch_sync))) { /* * If the time stamp was touched atomically * make sure the scheduler tick is up to date. */ - __get_cpu_var(softlockup_touch_sync) = false; + __this_cpu_write(softlockup_touch_sync, false); sched_clock_tick(); } __touch_watchdog(); @@ -279,7 +279,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) duration = is_softlockup(touch_ts); if (unlikely(duration)) { /* only warn once */ - if (__get_cpu_var(soft_watchdog_warn) == true) + if (__this_cpu_read(soft_watchdog_warn) == true) return HRTIMER_RESTART; printk(KERN_ERR "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", @@ -294,9 +294,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer) if (softlockup_panic) panic("softlockup: hung tasks"); - __get_cpu_var(soft_watchdog_warn) = true; + __this_cpu_write(soft_watchdog_warn, true); } else - __get_cpu_var(soft_watchdog_warn) = false; + __this_cpu_write(soft_watchdog_warn, false); return HRTIMER_RESTART; } -- cgit v1.2.2 From 551423748a4eba55f2eb0fc250d757986471f187 Mon Sep 17 00:00:00 2001 From: Ben Hutchings Date: Sun, 2 Jan 2011 23:02:42 +0000 Subject: watchdog: Improve initialisation error message and documentation The error message 'NMI watchdog failed to create perf event...' does not make it clear that this is a fatal error for the watchdog. It also currently prints the error value as a pointer, rather than extracting the error code with PTR_ERR(). Fix that. Add a note to the description of the 'nowatchdog' kernel parameter to associate it with this message. 
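As background, a hedged sketch of the ERR_PTR convention the corrected message relies on (illustrative only, not taken from the patch):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>

/*
 * perf_event_create_kernel_counter() returns either a valid pointer or an
 * errno encoded via ERR_PTR(). Printing the raw value with %p, as the old
 * message did, hides the error code; PTR_ERR() recovers it as a long.
 */
static void report_wd_event_error(struct perf_event *event, int cpu)
{
	if (IS_ERR(event))
		printk(KERN_ERR "cpu%i: watchdog perf event failed: %ld\n",
		       cpu, PTR_ERR(event));
}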
Reported-by: Cesare Leonardi Signed-off-by: Ben Hutchings Cc: 599368@bugs.debian.org Cc: 608138@bugs.debian.org Cc: Don Zickus Cc: Frederic Weisbecker Cc: # .37.x and later LKML-Reference: <1294009362.3167.126.camel@localhost> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 6e3c41a4024c..5b082156cd21 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -364,7 +364,8 @@ static int watchdog_nmi_enable(int cpu) goto out_save; } - printk(KERN_ERR "NMI watchdog failed to create perf event on cpu%i: %p\n", cpu, event); + printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", + cpu, PTR_ERR(event)); return PTR_ERR(event); /* success path */ -- cgit v1.2.2 From 4135038a582c20ffdadfcf6564852e0b72a20968 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Fri, 28 Jan 2011 11:00:31 -0500 Subject: watchdog: Fix broken nowatchdog logic Passing nowatchdog to kernel disables 2 things: creation of watchdog threads AND initialization of percpu watchdog_hrtimer. As hrtimers are initialized only at boot it's not possible to enable watchdog later - for me all watchdog threads started to eat 100% of CPU time, but they could just crash. Additionally, even if these threads would start properly, watchdog_disable_all_cpus was guarded by no_watchdog check, so you couldn't disable watchdog. To fix this, remove no_watchdog variable and use already existing watchdog_enabled variable. Signed-off-by: Marcin Slusarz [ removed another no_watchdog instance ] Signed-off-by: Don Zickus Cc: Stephane Eranian Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: LKML-Reference: <1296230433-6261-1-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 20 ++++++-------------- 1 file changed, 6 insertions(+), 14 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index d7ebdf4cea98..d9961ea1c3f4 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -27,7 +27,7 @@ #include #include -int watchdog_enabled; +int watchdog_enabled = 1; int __read_mostly softlockup_thresh = 60; static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); @@ -43,9 +43,6 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); #endif -static int no_watchdog; - - /* boot commands */ /* * Should we panic when a soft-lockup or hard-lockup occurs: @@ -58,7 +55,7 @@ static int __init hardlockup_panic_setup(char *str) if (!strncmp(str, "panic", 5)) hardlockup_panic = 1; else if (!strncmp(str, "0", 1)) - no_watchdog = 1; + watchdog_enabled = 0; return 1; } __setup("nmi_watchdog=", hardlockup_panic_setup); @@ -77,7 +74,7 @@ __setup("softlockup_panic=", softlockup_panic_setup); static int __init nowatchdog_setup(char *str) { - no_watchdog = 1; + watchdog_enabled = 0; return 1; } __setup("nowatchdog", nowatchdog_setup); @@ -85,7 +82,7 @@ __setup("nowatchdog", nowatchdog_setup); /* deprecated */ static int __init nosoftlockup_setup(char *str) { - no_watchdog = 1; + watchdog_enabled = 0; return 1; } __setup("nosoftlockup", nosoftlockup_setup); @@ -476,9 +473,6 @@ static void watchdog_disable_all_cpus(void) { int cpu; - if (no_watchdog) - return; - for_each_online_cpu(cpu) watchdog_disable(cpu); @@ -530,7 +524,8 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: - err = 
watchdog_enable(hotcpu); + if (watchdog_enabled) + err = watchdog_enable(hotcpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: @@ -555,9 +550,6 @@ void __init lockup_detector_init(void) void *cpu = (void *)(long)smp_processor_id(); int err; - if (no_watchdog) - return; - err = cpu_callback(&cpu_nfb, CPU_UP_PREPARE, cpu); WARN_ON(notifier_to_errno(err)); -- cgit v1.2.2 From 397357666de6b5b6adb5fa99f9758ec8cf30ac34 Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Fri, 28 Jan 2011 11:00:32 -0500 Subject: watchdog: Fix sysctl consistency If it was not possible to enable watchdog for any cpu, switch watchdog_enabled back to 0, because it's visible via kernel.watchdog sysctl. Signed-off-by: Marcin Slusarz Signed-off-by: Don Zickus Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: LKML-Reference: <1296230433-6261-2-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index d9961ea1c3f4..c7e0049344bd 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -429,9 +429,6 @@ static int watchdog_enable(int cpu) wake_up_process(p); } - /* if any cpu succeeds, watchdog is considered enabled for the system */ - watchdog_enabled = 1; - return 0; } @@ -459,12 +456,16 @@ static void watchdog_disable(int cpu) static void watchdog_enable_all_cpus(void) { int cpu; - int result = 0; + + watchdog_enabled = 0; for_each_online_cpu(cpu) - result += watchdog_enable(cpu); + if (!watchdog_enable(cpu)) + /* if any cpu succeeds, watchdog is considered + enabled for the system */ + watchdog_enabled = 1; - if (result) + if (!watchdog_enabled) printk(KERN_ERR "watchdog: failed to be enabled on some cpus\n"); } -- cgit v1.2.2 From 9ffdc6c37df131f89d52001e0ef03091b158826f Mon Sep 17 00:00:00 2001 From: Marcin Slusarz Date: Fri, 28 Jan 2011 11:00:33 -0500 Subject: watchdog: Don't change watchdog state on read of sysctl Signed-off-by: Marcin Slusarz [ add {}'s to fix a warning ] Signed-off-by: Don Zickus Cc: Peter Zijlstra Cc: Frederic Weisbecker Cc: LKML-Reference: <1296230433-6261-3-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index c7e0049344bd..f37f974aa81b 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -493,10 +493,12 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write, { proc_dointvec(table, write, buffer, length, ppos); - if (watchdog_enabled) - watchdog_enable_all_cpus(); - else - watchdog_disable_all_cpus(); + if (write) { + if (watchdog_enabled) + watchdog_enable_all_cpus(); + else + watchdog_disable_all_cpus(); + } return 0; } -- cgit v1.2.2 From 5651f7f47dbb1cf2b95a60582546db4ff508e2b4 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Wed, 9 Feb 2011 14:02:33 -0500 Subject: watchdog, nmi: Lower the severity of error messages During boot if the hardlockup detector fails to initialize, it complains very loudly. Some failures should be expected under certain situations, ie no lapics, or resource in-use. Tone those error messages down a bit. Keep the rest at a high level. 
Reported-by: Paul Bolle Tested-by: Paul Bolle Signed-off-by: Don Zickus Cc: Peter Zijlstra LKML-Reference: <1297278153-21111-1-git-send-email-dzickus@redhat.com> Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index f37f974aa81b..18bb15776c57 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -363,8 +363,14 @@ static int watchdog_nmi_enable(int cpu) goto out_save; } - printk(KERN_ERR "NMI watchdog disabled for cpu%i: unable to create perf event: %ld\n", - cpu, PTR_ERR(event)); + + /* vary the KERN level based on the returned errno */ + if (PTR_ERR(event) == -EOPNOTSUPP) + printk(KERN_INFO "NMI watchdog disabled (cpu%i): not supported (no LAPIC?)\n", cpu); + else if (PTR_ERR(event) == -ENOENT) + printk(KERN_WARNING "NMI watchdog disabled (cpu%i): hardware events not enabled\n", cpu); + else + printk(KERN_ERR "NMI watchdog disabled (cpu%i): unable to create perf event: %ld\n", cpu, PTR_ERR(event)); return PTR_ERR(event); /* success path */ -- cgit v1.2.2 From fef2c9bc1b54c0261324a96e948c0b849796e896 Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Tue, 22 Mar 2011 16:34:16 -0700 Subject: kernel/watchdog.c: allow hardlockup to panic by default When a cpu is considered stuck, instead of limping along and just printing a warning, it is sometimes preferred to just panic, let kdump capture the vmcore and reboot. This gets the machine back into a stable state quickly while saving the info that got it into a stuck state to begin with. Add a Kconfig option to allow users to set the hardlockup to panic by default. Also add in a 'nmi_watchdog=nopanic' to override this. [akpm@linux-foundation.org: fix strncmp length] Signed-off-by: Don Zickus Acked-by: Peter Zijlstra Reviewed-by: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 18bb15776c57..054a67cca9da 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -48,12 +48,15 @@ static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); * Should we panic when a soft-lockup or hard-lockup occurs: */ #ifdef CONFIG_HARDLOCKUP_DETECTOR -static int hardlockup_panic; +static int hardlockup_panic = + CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE; static int __init hardlockup_panic_setup(char *str) { if (!strncmp(str, "panic", 5)) hardlockup_panic = 1; + else if (!strncmp(str, "nopanic", 7)) + hardlockup_panic = 0; else if (!strncmp(str, "0", 1)) watchdog_enabled = 0; return 1; -- cgit v1.2.2 From f99a99330f85a84c346ddeb4adc72dbfad9b9e3e Mon Sep 17 00:00:00 2001 From: Don Zickus Date: Tue, 22 Mar 2011 16:34:17 -0700 Subject: kernel/watchdog.c: always return NOTIFY_OK during cpu up/down events This patch addresses a couple of problems. One was the case when the hardlockup failed to start, it also failed to start the softlockup. There were valid cases when the hardlockup shouldn't start and that shouldn't block the softlockup (no lapic, bios controls perf counters). The second problem was when the hardlockup failed to start on boxes (from a no lapic or bios controlled perf counter case), it reported failure to the cpu notifier chain. This blocked the notifier from continuing to start other more critical pieces of cpu bring-up (in our case based on a 2.6.32 fork, it was the mce). 
As a result, during soft cpu online/offline testing, the system would panic when a cpu was offlined because the cpu notifier would succeed in processing a watchdog disable cpu event and would panic in the mce case as a result of un-initialized variables from a never executed cpu up event. I realized the hardlockup/softlockup cases are really just debugging aids and should never impede the progress of a cpu up/down event. Therefore I modified the code to always return NOTIFY_OK and instead rely on printks to inform the user of problems. Signed-off-by: Don Zickus Acked-by: Peter Zijlstra Reviewed-by: WANG Cong Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 054a67cca9da..140dce750450 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -418,19 +418,22 @@ static int watchdog_prepare_cpu(int cpu) static int watchdog_enable(int cpu) { struct task_struct *p = per_cpu(softlockup_watchdog, cpu); - int err; + int err = 0; /* enable the perf event */ err = watchdog_nmi_enable(cpu); - if (err) - return err; + + /* Regardless of err above, fall through and start softlockup */ /* create the watchdog thread */ if (!p) { p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); if (IS_ERR(p)) { printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); - return PTR_ERR(p); + if (!err) + /* if hardlockup hasn't already set this */ + err = PTR_ERR(p); + goto out; } kthread_bind(p, cpu); per_cpu(watchdog_touch_ts, cpu) = 0; @@ -438,7 +441,8 @@ static int watchdog_enable(int cpu) wake_up_process(p); } - return 0; +out: + return err; } static void watchdog_disable(int cpu) @@ -550,7 +554,13 @@ cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) break; #endif /* CONFIG_HOTPLUG_CPU */ } - return notifier_from_errno(err); + + /* + * hardlockup and softlockup are not important enough + * to block cpu bring up. Just always succeed and + * rely on printk output to flag problems. + */ + return NOTIFY_OK; } static struct notifier_block __cpuinitdata cpu_nfb = { -- cgit v1.2.2 From 1409f141ac719b994d2832911b1e9ec928943fc2 Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Wed, 27 Apr 2011 15:26:55 -0700 Subject: kernel/watchdog.c: disable nmi perf event in the error path of enabling watchdog In corner cases where softlockup watchdog is not setup successfully, the relevant nmi perf event for hardlockup watchdog could be disabled, then the status of the underlying hardware remains unchanged. Also, if the kthread doesn't start then the hrtimer won't run and the hardlockup detector will falsely fire. 
Signed-off-by: Hillf Danton Signed-off-by: Don Zickus Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- kernel/watchdog.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 140dce750450..14733d4d156b 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -430,9 +430,12 @@ static int watchdog_enable(int cpu) p = kthread_create(watchdog, (void *)(unsigned long)cpu, "watchdog/%d", cpu); if (IS_ERR(p)) { printk(KERN_ERR "softlockup watchdog for %i failed\n", cpu); - if (!err) + if (!err) { /* if hardlockup hasn't already set this */ err = PTR_ERR(p); + /* and disable the perf event */ + watchdog_nmi_disable(cpu); + } goto out; } kthread_bind(p, cpu); -- cgit v1.2.2 From 824c6b7f6294101f30e141117def224a56c203e6 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Sun, 22 May 2011 22:10:20 -0700 Subject: watchdog: Fix rounding bug in get_sample_period() In get_sample_period(), softlockup_thresh is integer-divided by 5 before the multiplication by NSEC_PER_SEC. This results in softlockup_thresh being rounded down to the nearest integer multiple of 5. For example, a softlockup_thresh of 4 rounds down to 0. Signed-off-by: Mandeep Singh Baines Cc: Marcin Slusarz Cc: Don Zickus Cc: Peter Zijlstra Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1306127423-3347-1-git-send-email-msb@chromium.org Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 14733d4d156b..a06972d71060 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -110,7 +110,7 @@ static unsigned long get_sample_period(void) * increment before the hardlockup detector generates * a warning */ - return softlockup_thresh / 5 * NSEC_PER_SEC; + return softlockup_thresh * (NSEC_PER_SEC / 5); } /* Commands for resetting the watchdog */ -- cgit v1.2.2 From e04ab2bc41b35c0cb6cdb07c8443f91aa738cf78 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Sun, 22 May 2011 22:10:21 -0700 Subject: watchdog: Only disable/enable watchdog if necessary Don't take any action on an unsuccessful write to /proc.
Signed-off-by: Mandeep Singh Baines Cc: Marcin Slusarz Cc: Don Zickus Cc: Peter Zijlstra Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1306127423-3347-2-git-send-email-msb@chromium.org Signed-off-by: Ingo Molnar --- kernel/watchdog.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index a06972d71060..cf0e09f452e7 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -507,15 +507,19 @@ static void watchdog_disable_all_cpus(void) int proc_dowatchdog_enabled(struct ctl_table *table, int write, void __user *buffer, size_t *length, loff_t *ppos) { - proc_dointvec(table, write, buffer, length, ppos); + int ret; - if (write) { - if (watchdog_enabled) - watchdog_enable_all_cpus(); - else - watchdog_disable_all_cpus(); - } - return 0; + ret = proc_dointvec(table, write, buffer, length, ppos); + if (ret || !write) + goto out; + + if (watchdog_enabled) + watchdog_enable_all_cpus(); + else + watchdog_disable_all_cpus(); + +out: + return ret; } int proc_dowatchdog_thresh(struct ctl_table *table, int write, -- cgit v1.2.2 From 586692a5a5fc5740c8a46abc0f2365495c2d7c5f Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Sun, 22 May 2011 22:10:22 -0700 Subject: watchdog: Disable watchdog when thresh is zero This restores the previous behavior of softlock_thresh. Currently, setting watchdog_thresh to zero causes the watchdog kthreads to consume a lot of CPU. In addition, the logic of proc_dowatchdog_thresh and proc_dowatchdog_enabled has been factored into proc_dowatchdog. Signed-off-by: Mandeep Singh Baines Cc: Marcin Slusarz Cc: Don Zickus Cc: Peter Zijlstra Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1306127423-3347-3-git-send-email-msb@chromium.org Signed-off-by: Ingo Molnar LKML-Reference: <20110517071018.GE22305@elte.hu> --- kernel/watchdog.c | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index cf0e09f452e7..60301916f62e 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -28,7 +28,7 @@ #include int watchdog_enabled = 1; -int __read_mostly softlockup_thresh = 60; +int __read_mostly watchdog_thresh = 60; static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); @@ -105,12 +105,12 @@ static unsigned long get_timestamp(int this_cpu) static unsigned long get_sample_period(void) { /* - * convert softlockup_thresh from seconds to ns + * convert watchdog_thresh from seconds to ns * the divide by 5 is to give hrtimer 5 chances to * increment before the hardlockup detector generates * a warning */ - return softlockup_thresh * (NSEC_PER_SEC / 5); + return watchdog_thresh * (NSEC_PER_SEC / 5); } /* Commands for resetting the watchdog */ @@ -182,7 +182,7 @@ static int is_softlockup(unsigned long touch_ts) unsigned long now = get_timestamp(smp_processor_id()); /* Warn about unreasonable delays: */ - if (time_after(now, touch_ts + softlockup_thresh)) + if (time_after(now, touch_ts + watchdog_thresh)) return now - touch_ts; return 0; @@ -501,19 +501,19 @@ static void watchdog_disable_all_cpus(void) /* sysctl functions */ #ifdef CONFIG_SYSCTL /* - * proc handler for /proc/sys/kernel/nmi_watchdog + * proc handler for /proc/sys/kernel/nmi_watchdog,watchdog_thresh */ -int proc_dowatchdog_enabled(struct ctl_table *table, int write, - void __user *buffer, size_t *length, loff_t *ppos) +int 
proc_dowatchdog(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) { int ret; - ret = proc_dointvec(table, write, buffer, length, ppos); + ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (ret || !write) goto out; - if (watchdog_enabled) + if (watchdog_enabled && watchdog_thresh) watchdog_enable_all_cpus(); else watchdog_disable_all_cpus(); @@ -521,13 +521,6 @@ int proc_dowatchdog_enabled(struct ctl_table *table, int write, out: return ret; } - -int proc_dowatchdog_thresh(struct ctl_table *table, int write, - void __user *buffer, - size_t *lenp, loff_t *ppos) -{ - return proc_dointvec_minmax(table, write, buffer, lenp, ppos); -} #endif /* CONFIG_SYSCTL */ -- cgit v1.2.2 From 4eec42f392043063d0f019640b4ccc2a45570002 Mon Sep 17 00:00:00 2001 From: Mandeep Singh Baines Date: Sun, 22 May 2011 22:10:23 -0700 Subject: watchdog: Change the default timeout and configure nmi watchdog period based on watchdog_thresh Before the conversion of the NMI watchdog to perf event, the watchdog timeout was 5 seconds. Now it is 60 seconds. For my particular application, netbooks, 5 seconds was a better timeout. With a short timeout, we catch faults earlier and are able to send back a panic. With a 60 second timeout, the user is unlikely to wait and will instead hit the power button, causing us to lose the panic info. This change configures the NMI period to watchdog_thresh and sets the softlockup_thresh to watchdog_thresh * 2. In addition, watchdog_thresh was reduced to 10 seconds as suggested by Ingo Molnar. Signed-off-by: Mandeep Singh Baines Cc: Marcin Slusarz Cc: Don Zickus Cc: Peter Zijlstra Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/1306127423-3347-4-git-send-email-msb@chromium.org Signed-off-by: Ingo Molnar LKML-Reference: <20110517071642.GF22305@elte.hu> --- kernel/watchdog.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 60301916f62e..6e63097fa73a 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -28,7 +28,7 @@ #include int watchdog_enabled = 1; -int __read_mostly watchdog_thresh = 60; +int __read_mostly watchdog_thresh = 10; static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts); static DEFINE_PER_CPU(struct task_struct *, softlockup_watchdog); @@ -91,6 +91,17 @@ static int __init nosoftlockup_setup(char *str) __setup("nosoftlockup", nosoftlockup_setup); /* */ +/* + * Hard-lockup warnings should be triggered after just a few seconds. Soft- + * lockups can have false positives under extreme conditions. So we generally + * want a higher threshold for soft lockups than for hard lockups. So we couple + * the thresholds with a factor: we make the soft threshold twice the amount of + * time the hard threshold is. + */ +static int get_softlockup_thresh() +{ + return watchdog_thresh * 2; +} /* * Returns seconds, approximately. 
We don't need nanosecond @@ -110,7 +121,7 @@ static unsigned long get_sample_period(void) * increment before the hardlockup detector generates * a warning */ - return watchdog_thresh * (NSEC_PER_SEC / 5); + return get_softlockup_thresh() * (NSEC_PER_SEC / 5); } /* Commands for resetting the watchdog */ @@ -182,7 +193,7 @@ static int is_softlockup(unsigned long touch_ts) unsigned long now = get_timestamp(smp_processor_id()); /* Warn about unreasonable delays: */ - if (time_after(now, touch_ts + watchdog_thresh)) + if (time_after(now, touch_ts + get_softlockup_thresh())) return now - touch_ts; return 0; @@ -359,7 +370,7 @@ static int watchdog_nmi_enable(int cpu) /* Try to register using hardware perf events */ wd_attr = &wd_hw_attr; - wd_attr->sample_period = hw_nmi_get_sample_period(); + wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh); event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback); if (!IS_ERR(event)) { printk(KERN_INFO "NMI watchdog enabled, takes one hw-pmu counter.\n"); -- cgit v1.2.2 From 6e9101aeec39961308176e0f59e73ac5d37d243a Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 24 May 2011 05:43:18 +0200 Subject: watchdog: Fix non-standard prototype of get_softlockup_thresh() This build warning slipped through: kernel/watchdog.c:102: warning: function declaration isn't a prototype As reported by Stephen Rothwell. Also address an unused variable warning that GCC 4.6.0 reports: we cannot do anything about failed watchdog ops during CPU hotplug (it's not serious enough to return an error from the notifier), so ignore them. Reported-by: Stephen Rothwell Cc: Mandeep Singh Baines Cc: Marcin Slusarz Cc: Don Zickus Cc: Peter Zijlstra Cc: Frederic Weisbecker Link: http://lkml.kernel.org/r/20110524134129.8da27016.sfr@canb.auug.org.au Signed-off-by: Ingo Molnar LKML-Reference: <20110517071642.GF22305@elte.hu> --- kernel/watchdog.c | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) (limited to 'kernel/watchdog.c') diff --git a/kernel/watchdog.c b/kernel/watchdog.c index 6e63097fa73a..3d0c56ad4792 100644 --- a/kernel/watchdog.c +++ b/kernel/watchdog.c @@ -98,7 +98,7 @@ __setup("nosoftlockup", nosoftlockup_setup); * the thresholds with a factor: we make the soft threshold twice the amount of * time the hard threshold is. */ -static int get_softlockup_thresh() +static int get_softlockup_thresh(void) { return watchdog_thresh * 2; } @@ -415,15 +415,13 @@ static void watchdog_nmi_disable(int cpu) { return; } #endif /* CONFIG_HARDLOCKUP_DETECTOR */ /* prepare/enable/disable routines */ -static int watchdog_prepare_cpu(int cpu) +static void watchdog_prepare_cpu(int cpu) { struct hrtimer *hrtimer = &per_cpu(watchdog_hrtimer, cpu); WARN_ON(per_cpu(softlockup_watchdog, cpu)); hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); hrtimer->function = watchdog_timer_fn; - - return 0; } static int watchdog_enable(int cpu) @@ -542,17 +540,16 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) { int hotcpu = (unsigned long)hcpu; - int err = 0; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: - err = watchdog_prepare_cpu(hotcpu); + watchdog_prepare_cpu(hotcpu); break; case CPU_ONLINE: case CPU_ONLINE_FROZEN: if (watchdog_enabled) - err = watchdog_enable(hotcpu); + watchdog_enable(hotcpu); break; #ifdef CONFIG_HOTPLUG_CPU case CPU_UP_CANCELED: -- cgit v1.2.2
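Taken together, these last changes tie both detectors to the single watchdog_thresh knob. A rough sketch of the resulting arithmetic (illustrative only; the helper below is not a kernel function, and the numbers assume the new default of 10 seconds):

#include <linux/time.h>		/* NSEC_PER_SEC */
#include <linux/types.h>

/* Not the kernel's helper -- just the relationship the patches establish. */
static u64 sample_period_for(unsigned int watchdog_thresh)
{
	unsigned int soft_thresh = watchdog_thresh * 2;	/* get_softlockup_thresh() */

	/* the hrtimer fires five times per soft-lockup window */
	return (u64)soft_thresh * (NSEC_PER_SEC / 5);
}

/*
 * With watchdog_thresh = 10: the NMI perf event period is derived from the
 * 10 s hard threshold via hw_nmi_get_sample_period(watchdog_thresh), the
 * soft-lockup threshold becomes 20 s, and sample_period_for(10) is
 * 4 * NSEC_PER_SEC, i.e. the hrtimer runs every 4 seconds. The earlier
 * rounding fix matters here: thresh / 5 * NSEC_PER_SEC truncates to zero
 * for any threshold below 5, whereas thresh * (NSEC_PER_SEC / 5) does not.
 */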