Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--	arch/x86/kernel/nmi_32.c	14
-rw-r--r--	arch/x86/kernel/nmi_64.c	95
2 files changed, 55 insertions, 54 deletions
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
index 4f4bfd3a88b6..edd413650b3b 100644
--- a/arch/x86/kernel/nmi_32.c
+++ b/arch/x86/kernel/nmi_32.c
@@ -51,13 +51,13 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu);
 
 static int endflag __initdata = 0;
 
+#ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
  * CPUs during the test make them busy.
  */
 static __init void nmi_cpu_busy(void *data)
 {
-#ifdef CONFIG_SMP
 	local_irq_enable_in_hardirq();
 	/* Intentionally don't use cpu_relax here. This is
 	   to make sure that the performance counter really ticks,
@@ -67,8 +67,8 @@ static __init void nmi_cpu_busy(void *data)
 	   care if they get somewhat less cycles. */
 	while (endflag == 0)
 		mb();
-#endif
 }
+#endif
 
 static int __init check_nmi_watchdog(void)
 {
@@ -87,11 +87,13 @@ static int __init check_nmi_watchdog(void)
 
 	printk(KERN_INFO "Testing NMI watchdog ... ");
 
+#ifdef CONFIG_SMP
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
+#endif
 
 	for_each_possible_cpu(cpu)
-		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
+		prev_nmi_count[cpu] = nmi_count(cpu);
 	local_irq_enable();
 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
@@ -237,10 +239,10 @@ void acpi_nmi_disable(void)
 		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
 }
 
-void setup_apic_nmi_watchdog (void *unused)
+void setup_apic_nmi_watchdog(void *unused)
 {
 	if (__get_cpu_var(wd_enabled))
 		return;
 
 	/* cheap hack to support suspend/resume */
 	/* if cpu0 is not active neither should the other cpus */
@@ -329,7 +331,7 @@ __kprobes int nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 	unsigned int sum;
 	int touched = 0;
 	int cpu = smp_processor_id();
-	int rc=0;
+	int rc = 0;
 
 	/* check for other users first */
 	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi_64.c
index 53faef632fc6..fb99484d21cf 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi_64.c
@@ -78,22 +78,22 @@ static __init void nmi_cpu_busy(void *data)
 }
 #endif
 
-int __init check_nmi_watchdog (void)
+int __init check_nmi_watchdog(void)
 {
-	int *counts;
+	int *prev_nmi_count;
 	int cpu;
 
 	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
 		return 0;
 
 	if (!atomic_read(&nmi_active))
 		return 0;
 
-	counts = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
-	if (!counts)
+	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
+	if (!prev_nmi_count)
 		return -1;
 
-	printk(KERN_INFO "testing NMI watchdog ... ");
+	printk(KERN_INFO "Testing NMI watchdog ... ");
 
 #ifdef CONFIG_SMP
 	if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -101,30 +101,29 @@ int __init check_nmi_watchdog (void)
 #endif
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++)
-		counts[cpu] = cpu_pda(cpu)->__nmi_count;
+		prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count;
 	local_irq_enable();
 	mdelay((20*1000)/nmi_hz); // wait 20 ticks
 
 	for_each_online_cpu(cpu) {
 		if (!per_cpu(wd_enabled, cpu))
 			continue;
-		if (cpu_pda(cpu)->__nmi_count - counts[cpu] <= 5) {
+		if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) {
 			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
 				"appears to be stuck (%d->%d)!\n",
 				cpu,
-				counts[cpu],
+				prev_nmi_count[cpu],
 				cpu_pda(cpu)->__nmi_count);
 			per_cpu(wd_enabled, cpu) = 0;
 			atomic_dec(&nmi_active);
 		}
 	}
+	endflag = 1;
 	if (!atomic_read(&nmi_active)) {
-		kfree(counts);
+		kfree(prev_nmi_count);
 		atomic_set(&nmi_active, -1);
-		endflag = 1;
 		return -1;
 	}
-	endflag = 1;
 	printk("OK.\n");
 
 	/* now that we know it works we can reduce NMI frequency to
@@ -132,7 +131,7 @@ int __init check_nmi_watchdog (void)
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		nmi_hz = lapic_adjust_nmi_hz(1);
 
-	kfree(counts);
+	kfree(prev_nmi_count);
 	return 0;
 }
 
@@ -159,34 +158,6 @@ static int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-
-static void __acpi_nmi_disable(void *__unused)
-{
-	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
-}
-
-/*
- * Disable timer based NMIs on all CPUs:
- */
-void acpi_nmi_disable(void)
-{
-	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
-}
-
-static void __acpi_nmi_enable(void *__unused)
-{
-	apic_write(APIC_LVT0, APIC_DM_NMI);
-}
-
-/*
- * Enable timer based NMIs on all CPUs:
- */
-void acpi_nmi_enable(void)
-{
-	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
-}
 #ifdef CONFIG_PM
 
 static int nmi_pm_active; /* nmi_active before suspend */
@@ -217,7 +188,7 @@ static struct sysdev_class nmi_sysclass = {
 };
 
 static struct sys_device device_lapic_nmi = {
-	.id		= 0,
+	.id	= 0,
 	.cls	= &nmi_sysclass,
 };
 
@@ -231,7 +202,7 @@ static int __init init_lapic_nmi_sysfs(void)
 	if (nmi_watchdog != NMI_LOCAL_APIC)
 		return 0;
 
-	if ( atomic_read(&nmi_active) < 0 )
+	if (atomic_read(&nmi_active) < 0)
 		return 0;
 
 	error = sysdev_class_register(&nmi_sysclass);
@@ -244,9 +215,37 @@ late_initcall(init_lapic_nmi_sysfs);
 
 #endif	/* CONFIG_PM */
 
+static void __acpi_nmi_enable(void *__unused)
+{
+	apic_write(APIC_LVT0, APIC_DM_NMI);
+}
+
+/*
+ * Enable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_enable(void)
+{
+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
+}
+
+static void __acpi_nmi_disable(void *__unused)
+{
+	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+}
+
+/*
+ * Disable timer based NMIs on all CPUs:
+ */
+void acpi_nmi_disable(void)
+{
+	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
+		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
+}
+
 void setup_apic_nmi_watchdog(void *unused)
 {
-	if (__get_cpu_var(wd_enabled) == 1)
+	if (__get_cpu_var(wd_enabled))
 		return;
 
 	/* cheap hack to support suspend/resume */
@@ -311,8 +310,9 @@ void touch_nmi_watchdog(void)
 		}
 	}
 
 	touch_softlockup_watchdog();
 }
+EXPORT_SYMBOL(touch_nmi_watchdog);
 
 int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 {
@@ -479,4 +479,3 @@ void __trigger_all_cpu_backtrace(void)
 
 EXPORT_SYMBOL(nmi_active);
 EXPORT_SYMBOL(nmi_watchdog);
-EXPORT_SYMBOL(touch_nmi_watchdog);