author      Jack F Vogel <jfv@bluesong.net>              2005-05-01 11:58:48 -0400
committer   Linus Torvalds <torvalds@ppc970.osdl.org>    2005-05-01 11:58:48 -0400
commit      67701ae9767534534d3710664037dfde2cc04935 (patch)
tree        6adb8d33585f8eee20794827c79e40991aeeaee5 /arch/i386/kernel/nmi.c
parent      fd51f666fa591294bd7462447512666e61c56ea0 (diff)
[PATCH] check nmi watchdog is broken
A bug report against an xSeries system showed up recently, noting that the
check_nmi_watchdog() test was failing.
I have been investigating it and discovered that, on both i386 and x86_64, the
recent change to the routine to use the cpu_callin_map has uncovered a
problem.  Prior to that change, on an SMP box, the test passed trivially
because all CPUs were found to not yet be online.  Now that the callin_map
does discover them, the test goes on to check each CPU's NMI counter; the
counters have not yet begun to increment, so it announces a CPU is stuck and
bails out.
On all the systems I have access to for testing, the announcement of failure
is also bogus: by the time you can log in and check /proc/interrupts, the
NMI count is happily incrementing on all CPUs.  It's just that the test is
being done too early.
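
To make the failure mode concrete, here is a small, self-contained C simulation
of the snapshot-and-compare check described above.  It is an illustration only,
not kernel code: the CPU count, the fake counters, the tick amount, and the
<= 5 "stuck" threshold are assumptions picked for the demo, not values taken
from nmi.c.

/*
 * Standalone illustration of the check_nmi_watchdog() idea: snapshot
 * per-CPU NMI counts, wait for the watchdog to tick, then flag any CPU
 * whose count has not advanced.  If the counters have not started
 * ticking yet (i.e. the check runs too early), every CPU looks stuck.
 */
#include <stdio.h>

#define NR_CPUS 4

/* Stand-in for the kernel's per-CPU irq_stat.__nmi_count. */
static unsigned int nmi_count[NR_CPUS];

/* Pretend the watchdog either ticks or stays idle during the wait. */
static void simulate_wait(int watchdog_running)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (watchdog_running)
			nmi_count[cpu] += 10;	/* roughly "10 ticks" */
}

static int check_nmi_watchdog_sim(int watchdog_running)
{
	unsigned int prev_nmi_count[NR_CPUS];

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		prev_nmi_count[cpu] = nmi_count[cpu];

	simulate_wait(watchdog_running);	/* stands in for mdelay((10*1000)/nmi_hz) */

	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (nmi_count[cpu] - prev_nmi_count[cpu] <= 5) {
			printf("CPU#%d: NMI appears to be stuck!\n", cpu);
			return -1;
		}
	}
	printf("OK.\n");
	return 0;
}

int main(void)
{
	printf("too early, counters idle:    ");
	check_nmi_watchdog_sim(0);

	printf("deferred, counters running:  ");
	check_nmi_watchdog_sim(1);
	return 0;
}

The same comparison flips from "stuck" on every CPU to a clean pass once the
counters are moving, which is exactly the difference between running the check
during early SMP bringup and deferring it, as the patch below does.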
I have tried moving the call to the test around a bit, and it was always
too early.  I finally hit on this proposed solution: delay the routine
via a late_initcall(), which seems like the right approach to me.
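
For reference, the conversion uses the standard late_initcall() pattern.  The
snippet below is a minimal, hypothetical sketch (the function name and message
are made up) assuming the kernel's <linux/init.h> initcall machinery; the
actual change is in the diff further down.

#include <linux/init.h>
#include <linux/kernel.h>

/* Hypothetical deferred self-test.  Registering it with late_initcall()
 * runs it at the last initcall level, after subsystem and device init,
 * when per-CPU state such as interrupt counters is already live. */
static int __init my_deferred_selftest(void)
{
	printk(KERN_INFO "running deferred self-test\n");
	return 0;	/* initcalls return 0 on success */
}
late_initcall(my_deferred_selftest);

The routine stays int-returning and becomes static, keeping the int (*)(void)
signature the initcall machinery expects; the late_initcall() registration
takes the place of whatever direct call ran earlier in boot (that caller is
outside the file shown in this diffstat).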
Signed-off-by: Adrian Bunk <bunk@stusta.de>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/i386/kernel/nmi.c')
-rw-r--r--	arch/i386/kernel/nmi.c	11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index 2f89d000f954..2c0ee9c2d020 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -102,20 +102,21 @@ int nmi_active;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT|	\
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
-int __init check_nmi_watchdog (void)
+static int __init check_nmi_watchdog(void)
 {
 	unsigned int prev_nmi_count[NR_CPUS];
 	int cpu;
 
-	printk(KERN_INFO "testing NMI watchdog ... ");
+	if (nmi_watchdog == NMI_NONE)
+		return 0;
+
+	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++)
 		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
 	local_irq_enable();
 	mdelay((10*1000)/nmi_hz); // wait 10 ticks
 
-	/* FIXME: Only boot CPU is online at this stage.  Check CPUs
-	   as they come up. */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 #ifdef CONFIG_SMP
 		/* Check cpu_callin_map here because that is set
@@ -139,6 +140,8 @@ int __init check_nmi_watchdog (void)
 
 	return 0;
 }
+/* This needs to happen later in boot so counters are working */
+late_initcall(check_nmi_watchdog);
 
 static int __init setup_nmi_watchdog(char *str)
 {