 arch/x86/kernel/Makefile                                      |   2
 arch/x86/kernel/cpu/perfctr-watchdog.c                        |   4
 arch/x86/kernel/nmi.c (renamed from arch/x86/kernel/nmi_64.c) | 207
 arch/x86/kernel/nmi_32.c                                      | 476
 arch/x86/kernel/traps_32.c                                    |   8
 arch/x86/kernel/traps_64.c                                    |  28
 include/asm-x86/nmi.h                                         |   6
 7 files changed, 166 insertions(+), 565 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index d1d4ee895270..8ef4dbf32cf9 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -52,7 +52,7 @@ obj-$(CONFIG_X86_32_SMP) += smpcommon.o
 obj-$(CONFIG_X86_64_SMP)	+= tsc_sync.o smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)	+= trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE)	+= mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_$(BITS).o nmi_$(BITS).o
+obj-$(CONFIG_X86_LOCAL_APIC)	+= apic_$(BITS).o nmi.o
 obj-$(CONFIG_X86_IO_APIC)	+= io_apic_$(BITS).o
 obj-$(CONFIG_X86_REBOOTFIXUPS)	+= reboot_fixups_32.o
 obj-$(CONFIG_KEXEC)		+= machine_kexec_$(BITS).o
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index f9ae93adffe5..ddda4b64f545 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -181,7 +181,9 @@ void disable_lapic_nmi_watchdog(void)
 		return;
 
 	on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
-	wd_ops->unreserve();
+
+	if (wd_ops)
+		wd_ops->unreserve();
 
 	BUG_ON(atomic_read(&nmi_active) != 0);
 }
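
The hunk above is the one behavioural fix riding along with the merge: wd_ops is only populated when a supported performance-counter watchdog was probed, so it can still be NULL by the time the watchdog is torn down, and the old unconditional wd_ops->unreserve() could oops. A minimal standalone sketch of the guard pattern follows (illustrative userspace C; the names mirror the kernel's but this is not the kernel code itself):

#include <stdio.h>

struct wd_ops {
	void (*unreserve)(void);
};

static struct wd_ops *wd_ops;	/* stays NULL if probing never ran */

static void disable_watchdog(void)
{
	/* dereferencing wd_ops unconditionally would crash here */
	if (wd_ops)
		wd_ops->unreserve();
}

int main(void)
{
	disable_watchdog();	/* safe even though wd_ops == NULL */
	printf("no NULL dereference\n");
	return 0;
}
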
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi.c
index d62f3b66b529..9ebf71323c9a 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi.c
@@ -6,10 +6,13 @@
  *  Fixes:
  *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
  *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
+ *  Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
  *  Pavel Machek and
  *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
  */
 
+#include <asm/apic.h>
+
 #include <linux/nmi.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
@@ -17,22 +20,26 @@
 #include <linux/module.h>
 #include <linux/sysdev.h>
 #include <linux/sysctl.h>
+#include <linux/percpu.h>
 #include <linux/kprobes.h>
 #include <linux/cpumask.h>
+#include <linux/kernel_stat.h>
 #include <linux/kdebug.h>
+#include <linux/smp.h>
 
 #include <asm/i8259.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
 #include <asm/nmi.h>
 #include <asm/proto.h>
+#include <asm/timer.h>
+
 #include <asm/mce.h>
 
 #include <mach_traps.h>
 
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
-int panic_on_unrecovered_nmi;
 
 static cpumask_t backtrace_mask = CPU_MASK_NONE;
 
@@ -43,12 +50,47 @@ static cpumask_t backtrace_mask = CPU_MASK_NONE;
  *  0: the lapic NMI watchdog is disabled, but can be enabled
  */
 atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
-static int panic_on_timeout;
+EXPORT_SYMBOL(nmi_active);
 
 unsigned int nmi_watchdog = NMI_DEFAULT;
-static unsigned int nmi_hz = HZ;
+EXPORT_SYMBOL(nmi_watchdog);
+
+static int panic_on_timeout;
 
+static unsigned int nmi_hz = HZ;
 static DEFINE_PER_CPU(short, wd_enabled);
+static int endflag __initdata;
+
+static inline unsigned int get_nmi_count(int cpu)
+{
+#ifdef CONFIG_X86_64
+	return cpu_pda(cpu)->__nmi_count;
+#else
+	return nmi_count(cpu);
+#endif
+}
+
+static inline int mce_in_progress(void)
+{
+#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
+	return atomic_read(&mce_entry) > 0;
+#endif
+	return 0;
+}
+
+/*
+ * Take the local apic timer and PIT/HPET into account. We don't
+ * know which one is active, when we have highres/dyntick on
+ */
+static inline unsigned int get_timer_irqs(int cpu)
+{
+#ifdef CONFIG_X86_64
+	return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
+#else
+	return per_cpu(irq_stat, cpu).apic_timer_irqs +
+		per_cpu(irq_stat, cpu).irq0_irqs;
+#endif
+}
 
 /* Run after command line and cpu_init init, but before all other checks */
 void nmi_watchdog_default(void)
@@ -58,22 +100,23 @@ void nmi_watchdog_default(void)
 		nmi_watchdog = NMI_NONE;
 }
 
-static int endflag __initdata = 0;
-
 #ifdef CONFIG_SMP
-/* The performance counters used by NMI_LOCAL_APIC don't trigger when
+/*
+ * The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
  * CPUs during the test make them busy.
  */
 static __init void nmi_cpu_busy(void *data)
 {
 	local_irq_enable_in_hardirq();
-	/* Intentionally don't use cpu_relax here. This is
-	   to make sure that the performance counter really ticks,
-	   even if there is a simulator or similar that catches the
-	   pause instruction. On a real HT machine this is fine because
-	   all other CPUs are busy with "useless" delay loops and don't
-	   care if they get somewhat less cycles. */
+	/*
+	 * Intentionally don't use cpu_relax here. This is
+	 * to make sure that the performance counter really ticks,
+	 * even if there is a simulator or similar that catches the
+	 * pause instruction. On a real HT machine this is fine because
+	 * all other CPUs are busy with "useless" delay loops and don't
+	 * care if they get somewhat less cycles.
+	 */
 	while (endflag == 0)
 		mb();
 }
@@ -81,10 +124,10 @@ static __init void nmi_cpu_busy(void *data)
 
 int __init check_nmi_watchdog(void)
 {
-	int *prev_nmi_count;
+	unsigned int *prev_nmi_count;
 	int cpu;
 
-	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
+	if (nmi_watchdog == NMI_NONE || nmi_watchdog == NMI_DISABLED)
 		return 0;
 
 	if (!atomic_read(&nmi_active))
@@ -101,20 +144,20 @@ int __init check_nmi_watchdog(void)
 		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 #endif
 
-	for (cpu = 0; cpu < nr_cpu_ids; cpu++)
-		prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count;
+	for_each_possible_cpu(cpu)
+		prev_nmi_count[cpu] = get_nmi_count(cpu);
 	local_irq_enable();
-	mdelay((20*1000)/nmi_hz); // wait 20 ticks
+	mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
 
 	for_each_online_cpu(cpu) {
 		if (!per_cpu(wd_enabled, cpu))
 			continue;
-		if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) {
+		if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
 			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
 				"appears to be stuck (%d->%d)!\n",
 				cpu,
 				prev_nmi_count[cpu],
-				cpu_pda(cpu)->__nmi_count);
+				get_nmi_count(cpu));
 			per_cpu(wd_enabled, cpu) = 0;
 			atomic_dec(&nmi_active);
 		}
@@ -127,8 +170,10 @@ int __init check_nmi_watchdog(void)
 	}
 	printk("OK.\n");
 
-	/* now that we know it works we can reduce NMI frequency to
-	   something more reasonable; makes a difference in some configs */
+	/*
+	 * now that we know it works we can reduce NMI frequency to
+	 * something more reasonable; makes a difference in some configs
+	 */
 	if (nmi_watchdog == NMI_LOCAL_APIC)
 		nmi_hz = lapic_adjust_nmi_hz(1);
 
@@ -137,7 +182,6 @@ int __init check_nmi_watchdog(void)
 error:
 	if (nmi_watchdog == NMI_IO_APIC && !timer_through_8259)
 		disable_8259A_irq(0);
-
 	return -1;
 }
 
@@ -145,7 +189,7 @@ static int __init setup_nmi_watchdog(char *str)
 {
 	int nmi;
 
-	if (!strncmp(str,"panic",5)) {
+	if (!strncmp(str, "panic", 5)) {
 		panic_on_timeout = 1;
 		str = strchr(str, ',');
 		if (!str)
@@ -155,15 +199,17 @@ static int __init setup_nmi_watchdog(char *str)
 
 	get_option(&str, &nmi);
 
-	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
+	if (nmi >= NMI_INVALID || nmi < NMI_NONE)
 		return 0;
 
 	nmi_watchdog = nmi;
 	return 1;
 }
-
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
+/*
+ * Suspend/resume support
+ */
 #ifdef CONFIG_PM
 
 static int nmi_pm_active; /* nmi_active before suspend */
@@ -202,7 +248,8 @@ static int __init init_lapic_nmi_sysfs(void)
 {
 	int error;
 
-	/* should really be a BUG_ON but b/c this is an
+	/*
+	 * should really be a BUG_ON but b/c this is an
 	 * init call, it just doesn't work.  -dcz
 	 */
 	if (nmi_watchdog != NMI_LOCAL_APIC)
@@ -216,6 +263,7 @@ static int __init init_lapic_nmi_sysfs(void)
 		error = sysdev_register(&device_lapic_nmi);
 	return error;
 }
+
 /* must come after the local APIC's device_initcall() */
 late_initcall(init_lapic_nmi_sysfs);
 
@@ -223,7 +271,7 @@ late_initcall(init_lapic_nmi_sysfs);
 
 static void __acpi_nmi_enable(void *__unused)
 {
-	apic_write(APIC_LVT0, APIC_DM_NMI);
+	apic_write_around(APIC_LVT0, APIC_DM_NMI);
 }
 
 /*
@@ -237,7 +285,7 @@ void acpi_nmi_enable(void)
 
 static void __acpi_nmi_disable(void *__unused)
 {
-	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+	apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
 }
 
 /*
@@ -256,11 +304,12 @@ void setup_apic_nmi_watchdog(void *unused)
 
 	/* cheap hack to support suspend/resume */
 	/* if cpu0 is not active neither should the other cpus */
-	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
+	if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
 		return;
 
 	switch (nmi_watchdog) {
 	case NMI_LOCAL_APIC:
+		/* enable it before to avoid race with handler */
 		__get_cpu_var(wd_enabled) = 1;
 		if (lapic_watchdog_init(nmi_hz) < 0) {
 			__get_cpu_var(wd_enabled) = 0;
@@ -276,9 +325,9 @@ void setup_apic_nmi_watchdog(void *unused)
 void stop_apic_nmi_watchdog(void *unused)
 {
 	/* only support LOCAL and IO APICs for now */
-	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
-	    (nmi_watchdog != NMI_IO_APIC))
+	if (nmi_watchdog != NMI_LOCAL_APIC &&
+	    nmi_watchdog != NMI_IO_APIC)
 		return;
 	if (__get_cpu_var(wd_enabled) == 0)
 		return;
 	if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -294,6 +343,11 @@ void stop_apic_nmi_watchdog(void *unused)
  *
  * as these watchdog NMI IRQs are generated on every CPU, we only
  * have to check the current processor.
+ *
+ * since NMIs don't listen to _any_ locks, we have to be extremely
+ * careful not to rely on unsafe variables. The printk might lock
+ * up though, so we have to break up any console locks first ...
+ * [when there will be more tty-related locks, break them up here too!]
  */
 
 static DEFINE_PER_CPU(unsigned, last_irq_sum);
@@ -302,11 +356,12 @@ static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
 {
-	if (nmi_watchdog > 0) {
+	if (nmi_watchdog == NMI_LOCAL_APIC ||
+		nmi_watchdog == NMI_IO_APIC) {
 		unsigned cpu;
 
 		/*
 		 * Tell other CPUs to reset their alert counters. We cannot
 		 * do it ourselves because the alert count increase is not
 		 * atomic.
 		 */
@@ -316,6 +371,9 @@ void touch_nmi_watchdog(void)
 		}
 	}
 
+	/*
+	 * Tickle the softlockup detector too:
+	 */
 	touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -323,7 +381,12 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
 notrace __kprobes int
 nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 {
-	int sum;
+	/*
+	 * Since current_thread_info()-> is always on the stack, and we
+	 * always switch the stack NMI-atomically, it's safe to use
+	 * smp_processor_id().
+	 */
+	unsigned int sum;
 	int touched = 0;
 	int cpu = smp_processor_id();
 	int rc = 0;
@@ -335,7 +398,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		touched = 1;
 	}
 
-	sum = read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
+	sum = get_timer_irqs(cpu);
+
 	if (__get_cpu_var(nmi_touch)) {
 		__get_cpu_var(nmi_touch) = 0;
 		touched = 1;
@@ -345,28 +409,29 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
 
 		spin_lock(&lock);
-		printk("NMI backtrace for cpu %d\n", cpu);
+		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
 		dump_stack();
 		spin_unlock(&lock);
 		cpu_clear(cpu, backtrace_mask);
 	}
 
-#ifdef CONFIG_X86_MCE
-	/* Could check oops_in_progress here too, but it's safer
-	   not too */
-	if (atomic_read(&mce_entry) > 0)
+	/* Could check oops_in_progress here too, but it's safer not to */
+	if (mce_in_progress())
 		touched = 1;
-#endif
-	/* if the apic timer isn't firing, this cpu isn't doing much */
+
+	/* if the none of the timers isn't firing, this cpu isn't doing much */
 	if (!touched && __get_cpu_var(last_irq_sum) == sum) {
 		/*
 		 * Ayiee, looks like this CPU is stuck ...
 		 * wait a few IRQs (5 seconds) before doing the oops ...
 		 */
 		local_inc(&__get_cpu_var(alert_counter));
-		if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
-			die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
-				panic_on_timeout);
+		if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+			/*
+			 * die_nmi will return ONLY if NOTIFY_STOP happens..
+			 */
+			die_nmi("BUG: NMI Watchdog detected LOCKUP",
+				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
 		local_set(&__get_cpu_var(alert_counter), 0);
@@ -380,7 +445,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		rc |= lapic_wd_event(nmi_hz);
 		break;
 	case NMI_IO_APIC:
-		/* don't know how to accurately check for this.
+		/*
+		 * don't know how to accurately check for this.
 		 * just assume it was a watchdog timer interrupt
 		 * This matches the old behaviour.
 		 */
@@ -390,30 +456,6 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 	return rc;
 }
 
-static unsigned ignore_nmis;
-
-asmlinkage notrace __kprobes void
-do_nmi(struct pt_regs *regs, long error_code)
-{
-	nmi_enter();
-	add_pda(__nmi_count,1);
-	if (!ignore_nmis)
-		default_do_nmi(regs);
-	nmi_exit();
-}
-
-void stop_nmi(void)
-{
-	acpi_nmi_disable();
-	ignore_nmis++;
-}
-
-void restart_nmi(void)
-{
-	ignore_nmis--;
-	acpi_nmi_enable();
-}
-
 #ifdef CONFIG_SYSCTL
 
 static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
@@ -422,7 +464,7 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
 	char buf[64];
 
 	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
-	die_nmi(buf, regs, 1); /* Always panic here */
+	die_nmi(buf, regs, 1);	/* Always panic here */
 	return 0;
 }
 
@@ -441,27 +483,37 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
 		return 0;
 
 	if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
-		printk( KERN_WARNING "NMI watchdog is permanently disabled\n");
+		printk(KERN_WARNING
+			"NMI watchdog is permanently disabled\n");
 		return -EIO;
 	}
 
 	/* if nmi_watchdog is not set yet, then set it */
 	nmi_watchdog_default();
 
+#ifdef CONFIG_X86_32
+	if (nmi_watchdog == NMI_NONE) {
+		if (lapic_watchdog_ok())
+			nmi_watchdog = NMI_LOCAL_APIC;
+		else
+			nmi_watchdog = NMI_IO_APIC;
+	}
+#endif
+
 	if (nmi_watchdog == NMI_LOCAL_APIC) {
 		if (nmi_watchdog_enabled)
 			enable_lapic_nmi_watchdog();
 		else
 			disable_lapic_nmi_watchdog();
 	} else {
-		printk( KERN_WARNING
+		printk(KERN_WARNING
 			"NMI watchdog doesn't know what hardware to touch\n");
 		return -EIO;
 	}
 	return 0;
 }
 
-#endif
+#endif /* CONFIG_SYSCTL */
 
 int do_nmi_callback(struct pt_regs *regs, int cpu)
 {
@@ -484,6 +536,3 @@ void __trigger_all_cpu_backtrace(void)
 		mdelay(1);
 	}
 }
-
-EXPORT_SYMBOL(nmi_active);
-EXPORT_SYMBOL(nmi_watchdog);
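
The merge above folds the 32-bit and 64-bit watchdogs into one file by hiding each per-arch difference behind a small static inline helper (get_nmi_count(), get_timer_irqs(), mce_in_progress()) selected with #ifdef, so the shared logic is written once against the helper. A minimal standalone sketch of that technique follows (illustrative userspace C, not the kernel code; compile with or without -DCONFIG_X86_64 to pick a side):

#include <stdio.h>

#ifdef CONFIG_X86_64
static inline unsigned int get_nmi_count(int cpu)
{
	(void)cpu;
	return 64;	/* stands in for cpu_pda(cpu)->__nmi_count */
}
#else
static inline unsigned int get_nmi_count(int cpu)
{
	(void)cpu;
	return 32;	/* stands in for nmi_count(cpu) */
}
#endif

int main(void)
{
	/* the shared caller is identical on both configurations */
	printf("cpu0 nmi count: %u\n", get_nmi_count(0));
	return 0;
}
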
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
deleted file mode 100644
index 6580dae46277..000000000000
--- a/arch/x86/kernel/nmi_32.c
+++ /dev/null
@@ -1,476 +0,0 @@
-/*
- *  NMI watchdog support on APIC systems
- *
- *  Started by Ingo Molnar <mingo@redhat.com>
- *
- *  Fixes:
- *  Mikael Pettersson	: AMD K7 support for local APIC NMI watchdog.
- *  Mikael Pettersson	: Power Management for local APIC NMI watchdog.
- *  Mikael Pettersson	: Pentium 4 support for local APIC NMI watchdog.
- *  Pavel Machek and
- *  Mikael Pettersson	: PM converted to driver model. Disable/enable API.
- */
-
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/nmi.h>
-#include <linux/sysdev.h>
-#include <linux/sysctl.h>
-#include <linux/percpu.h>
-#include <linux/kprobes.h>
-#include <linux/cpumask.h>
-#include <linux/kernel_stat.h>
-#include <linux/kdebug.h>
-#include <linux/slab.h>
-
-#include <asm/i8259.h>
-#include <asm/io_apic.h>
-#include <asm/smp.h>
-#include <asm/nmi.h>
-#include <asm/timer.h>
-
-#include "mach_traps.h"
-
-int unknown_nmi_panic;
-int nmi_watchdog_enabled;
-
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
-
-/* nmi_active:
- * >0: the lapic NMI watchdog is active, but can be disabled
- * <0: the lapic NMI watchdog has not been set up, and cannot
- *     be enabled
- *  0: the lapic NMI watchdog is disabled, but can be enabled
- */
-atomic_t nmi_active = ATOMIC_INIT(0);	/* oprofile uses this */
-
-unsigned int nmi_watchdog = NMI_DEFAULT;
-static unsigned int nmi_hz = HZ;
-
-static DEFINE_PER_CPU(short, wd_enabled);
-
-static int endflag __initdata = 0;
-
-#ifdef CONFIG_SMP
-/* The performance counters used by NMI_LOCAL_APIC don't trigger when
- * the CPU is idle. To make sure the NMI watchdog really ticks on all
- * CPUs during the test make them busy.
- */
-static __init void nmi_cpu_busy(void *data)
-{
-	local_irq_enable_in_hardirq();
-	/* Intentionally don't use cpu_relax here. This is
-	   to make sure that the performance counter really ticks,
-	   even if there is a simulator or similar that catches the
-	   pause instruction. On a real HT machine this is fine because
-	   all other CPUs are busy with "useless" delay loops and don't
-	   care if they get somewhat less cycles. */
-	while (endflag == 0)
-		mb();
-}
-#endif
-
-int __init check_nmi_watchdog(void)
-{
-	unsigned int *prev_nmi_count;
-	int cpu;
-
-	if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
-		return 0;
-
-	if (!atomic_read(&nmi_active))
-		return 0;
-
-	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
-	if (!prev_nmi_count)
-		goto error;
-
-	printk(KERN_INFO "Testing NMI watchdog ... ");
-
-#ifdef CONFIG_SMP
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
-#endif
-
-	for_each_possible_cpu(cpu)
-		prev_nmi_count[cpu] = nmi_count(cpu);
-	local_irq_enable();
-	mdelay((20*1000)/nmi_hz); // wait 20 ticks
-
-	for_each_possible_cpu(cpu) {
-#ifdef CONFIG_SMP
-		/* Check cpu_callin_map here because that is set
-		   after the timer is started. */
-		if (!cpu_isset(cpu, cpu_callin_map))
-			continue;
-#endif
-		if (!per_cpu(wd_enabled, cpu))
-			continue;
-		if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
-			printk(KERN_WARNING "WARNING: CPU#%d: NMI "
-				"appears to be stuck (%d->%d)!\n",
-				cpu,
-				prev_nmi_count[cpu],
-				nmi_count(cpu));
-			per_cpu(wd_enabled, cpu) = 0;
-			atomic_dec(&nmi_active);
-		}
-	}
-	endflag = 1;
-	if (!atomic_read(&nmi_active)) {
-		kfree(prev_nmi_count);
-		atomic_set(&nmi_active, -1);
-		goto error;
-	}
-	printk("OK.\n");
-
-	/* now that we know it works we can reduce NMI frequency to
-	   something more reasonable; makes a difference in some configs */
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		nmi_hz = lapic_adjust_nmi_hz(1);
-
-	kfree(prev_nmi_count);
-	return 0;
-error:
-	if (nmi_watchdog == NMI_IO_APIC && !timer_through_8259)
-		disable_8259A_irq(0);
-	timer_ack = 0;
-
-	return -1;
-}
-
-static int __init setup_nmi_watchdog(char *str)
-{
-	int nmi;
-
-	get_option(&str, &nmi);
-
-	if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
-		return 0;
-
-	nmi_watchdog = nmi;
-	return 1;
-}
-
-__setup("nmi_watchdog=", setup_nmi_watchdog);
-
-
-/* Suspend/resume support */
-
-#ifdef CONFIG_PM
-
-static int nmi_pm_active; /* nmi_active before suspend */
-
-static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
-{
-	/* only CPU0 goes here, other CPUs should be offline */
-	nmi_pm_active = atomic_read(&nmi_active);
-	stop_apic_nmi_watchdog(NULL);
-	BUG_ON(atomic_read(&nmi_active) != 0);
-	return 0;
-}
-
-static int lapic_nmi_resume(struct sys_device *dev)
-{
-	/* only CPU0 goes here, other CPUs should be offline */
-	if (nmi_pm_active > 0) {
-		setup_apic_nmi_watchdog(NULL);
-		touch_nmi_watchdog();
-	}
-	return 0;
-}
-
-
-static struct sysdev_class nmi_sysclass = {
-	.name		= "lapic_nmi",
-	.resume		= lapic_nmi_resume,
-	.suspend	= lapic_nmi_suspend,
-};
-
-static struct sys_device device_lapic_nmi = {
-	.id	= 0,
-	.cls	= &nmi_sysclass,
-};
-
-static int __init init_lapic_nmi_sysfs(void)
-{
-	int error;
-
-	/* should really be a BUG_ON but b/c this is an
-	 * init call, it just doesn't work.  -dcz
-	 */
-	if (nmi_watchdog != NMI_LOCAL_APIC)
-		return 0;
-
-	if (atomic_read(&nmi_active) < 0)
-		return 0;
-
-	error = sysdev_class_register(&nmi_sysclass);
-	if (!error)
-		error = sysdev_register(&device_lapic_nmi);
-	return error;
-}
-/* must come after the local APIC's device_initcall() */
-late_initcall(init_lapic_nmi_sysfs);
-
-#endif	/* CONFIG_PM */
-
-static void __acpi_nmi_enable(void *__unused)
-{
-	apic_write_around(APIC_LVT0, APIC_DM_NMI);
-}
-
-/*
- * Enable timer based NMIs on all CPUs:
- */
-void acpi_nmi_enable(void)
-{
-	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
-}
-
-static void __acpi_nmi_disable(void *__unused)
-{
-	apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
-}
-
-/*
- * Disable timer based NMIs on all CPUs:
- */
-void acpi_nmi_disable(void)
-{
-	if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-		on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
-}
-
-void setup_apic_nmi_watchdog(void *unused)
-{
-	if (__get_cpu_var(wd_enabled))
-		return;
-
-	/* cheap hack to support suspend/resume */
-	/* if cpu0 is not active neither should the other cpus */
-	if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
-		return;
-
-	switch (nmi_watchdog) {
-	case NMI_LOCAL_APIC:
-		__get_cpu_var(wd_enabled) = 1; /* enable it before to avoid race with handler */
-		if (lapic_watchdog_init(nmi_hz) < 0) {
-			__get_cpu_var(wd_enabled) = 0;
-			return;
-		}
-		/* FALL THROUGH */
-	case NMI_IO_APIC:
-		__get_cpu_var(wd_enabled) = 1;
-		atomic_inc(&nmi_active);
-	}
-}
-
-void stop_apic_nmi_watchdog(void *unused)
-{
-	/* only support LOCAL and IO APICs for now */
-	if ((nmi_watchdog != NMI_LOCAL_APIC) &&
-	    (nmi_watchdog != NMI_IO_APIC))
-		return;
-	if (__get_cpu_var(wd_enabled) == 0)
-		return;
-	if (nmi_watchdog == NMI_LOCAL_APIC)
-		lapic_watchdog_stop();
-	__get_cpu_var(wd_enabled) = 0;
-	atomic_dec(&nmi_active);
-}
-
-/*
- * the best way to detect whether a CPU has a 'hard lockup' problem
- * is to check it's local APIC timer IRQ counts. If they are not
- * changing then that CPU has some problem.
- *
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
- * since NMIs don't listen to _any_ locks, we have to be extremely
- * careful not to rely on unsafe variables. The printk might lock
- * up though, so we have to break up any console locks first ...
- * [when there will be more tty-related locks, break them up
- *  here too!]
- */
-
-static unsigned int
-	last_irq_sums [NR_CPUS],
-	alert_counter [NR_CPUS];
-
-void touch_nmi_watchdog(void)
-{
-	if (nmi_watchdog > 0) {
-		unsigned cpu;
-
-		/*
-		 * Just reset the alert counters, (other CPUs might be
-		 * spinning on locks we hold):
-		 */
-		for_each_present_cpu(cpu) {
-			if (alert_counter[cpu])
-				alert_counter[cpu] = 0;
-		}
-	}
-
-	/*
-	 * Tickle the softlockup detector too:
-	 */
-	touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-extern void die_nmi(struct pt_regs *, const char *msg);
-
-notrace __kprobes int
-nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
-{
-
-	/*
-	 * Since current_thread_info()-> is always on the stack, and we
-	 * always switch the stack NMI-atomically, it's safe to use
-	 * smp_processor_id().
-	 */
-	unsigned int sum;
-	int touched = 0;
-	int cpu = smp_processor_id();
-	int rc = 0;
-
-	/* check for other users first */
-	if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-			== NOTIFY_STOP) {
-		rc = 1;
-		touched = 1;
-	}
-
-	if (cpu_isset(cpu, backtrace_mask)) {
-		static DEFINE_SPINLOCK(lock);	/* Serialise the printks */
-
-		spin_lock(&lock);
-		printk("NMI backtrace for cpu %d\n", cpu);
-		dump_stack();
-		spin_unlock(&lock);
-		cpu_clear(cpu, backtrace_mask);
-	}
-
-	/*
-	 * Take the local apic timer and PIT/HPET into account. We don't
-	 * know which one is active, when we have highres/dyntick on
-	 */
-	sum = per_cpu(irq_stat, cpu).apic_timer_irqs +
-		per_cpu(irq_stat, cpu).irq0_irqs;
-
-	/* if the none of the timers isn't firing, this cpu isn't doing much */
-	if (!touched && last_irq_sums[cpu] == sum) {
-		/*
-		 * Ayiee, looks like this CPU is stuck ...
-		 * wait a few IRQs (5 seconds) before doing the oops ...
-		 */
-		alert_counter[cpu]++;
-		if (alert_counter[cpu] == 5*nmi_hz)
-			/*
-			 * die_nmi will return ONLY if NOTIFY_STOP happens..
-			 */
-			die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
-	} else {
-		last_irq_sums[cpu] = sum;
-		alert_counter[cpu] = 0;
-	}
-	/* see if the nmi watchdog went off */
-	if (!__get_cpu_var(wd_enabled))
-		return rc;
-	switch (nmi_watchdog) {
-	case NMI_LOCAL_APIC:
-		rc |= lapic_wd_event(nmi_hz);
-		break;
-	case NMI_IO_APIC:
-		/* don't know how to accurately check for this.
-		 * just assume it was a watchdog timer interrupt
-		 * This matches the old behaviour.
-		 */
-		rc = 1;
-		break;
-	}
-	return rc;
-}
-
-#ifdef CONFIG_SYSCTL
-
-static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
-{
-	unsigned char reason = get_nmi_reason();
-	char buf[64];
-
-	sprintf(buf, "NMI received for unknown reason %02x\n", reason);
-	die_nmi(regs, buf);
-	return 0;
-}
-
-/*
- * proc handler for /proc/sys/kernel/nmi
- */
-int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
-			void __user *buffer, size_t *length, loff_t *ppos)
-{
-	int old_state;
-
-	nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
-	old_state = nmi_watchdog_enabled;
-	proc_dointvec(table, write, file, buffer, length, ppos);
-	if (!!old_state == !!nmi_watchdog_enabled)
-		return 0;
-
-	if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
-		printk( KERN_WARNING "NMI watchdog is permanently disabled\n");
-		return -EIO;
-	}
-
-	if (nmi_watchdog == NMI_DEFAULT) {
-		if (lapic_watchdog_ok())
-			nmi_watchdog = NMI_LOCAL_APIC;
-		else
-			nmi_watchdog = NMI_IO_APIC;
-	}
-
-	if (nmi_watchdog == NMI_LOCAL_APIC) {
-		if (nmi_watchdog_enabled)
-			enable_lapic_nmi_watchdog();
-		else
-			disable_lapic_nmi_watchdog();
-	} else {
-		printk( KERN_WARNING
-			"NMI watchdog doesn't know what hardware to touch\n");
-		return -EIO;
-	}
-	return 0;
-}
-
-#endif
-
-int do_nmi_callback(struct pt_regs *regs, int cpu)
-{
-#ifdef CONFIG_SYSCTL
-	if (unknown_nmi_panic)
-		return unknown_nmi_panic_callback(regs, cpu);
-#endif
-	return 0;
-}
-
-void __trigger_all_cpu_backtrace(void)
-{
-	int i;
-
-	backtrace_mask = cpu_online_map;
-	/* Wait for up to 10 seconds for all CPUs to do the backtrace */
-	for (i = 0; i < 10 * 1000; i++) {
-		if (cpus_empty(backtrace_mask))
-			break;
-		mdelay(1);
-	}
-}
-
-EXPORT_SYMBOL(nmi_active);
-EXPORT_SYMBOL(nmi_watchdog);
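
The file deleted above and the merged nmi.c implement the same lockup heuristic: on every watchdog NMI, compare the CPU's timer-interrupt count against the value seen on the previous tick, and declare a lockup once it has stayed frozen for 5*nmi_hz consecutive ticks. A standalone model of that heuristic, single-CPU for brevity (illustrative userspace C, not the kernel code):

#include <stdio.h>

#define NMI_HZ 1000

static unsigned int last_irq_sum;
static unsigned int alert_counter;

/* returns 1 when the lockup threshold is reached */
static int watchdog_tick(unsigned int timer_irqs)
{
	if (last_irq_sum == timer_irqs) {
		/* timer IRQs stalled; allow 5 seconds of NMI ticks */
		if (++alert_counter == 5 * NMI_HZ)
			return 1;
	} else {
		last_irq_sum = timer_irqs;
		alert_counter = 0;
	}
	return 0;
}

int main(void)
{
	unsigned int i;

	/* timer count frozen at 42: the watchdog fires after 5*NMI_HZ ticks */
	for (i = 1; ; i++)
		if (watchdog_tick(42)) {
			printf("lockup detected after %u ticks\n", i);
			break;
		}
	return 0;
}
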
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 08d752de4eee..cf37d20b1ba7 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -756,9 +756,9 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 
 static DEFINE_SPINLOCK(nmi_print_lock);
 
-void notrace __kprobes die_nmi(struct pt_regs *regs, const char *msg)
+void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
 {
-	if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == NOTIFY_STOP)
+	if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
 		return;
 
 	spin_lock(&nmi_print_lock);
@@ -767,10 +767,12 @@ void notrace __kprobes die_nmi(struct pt_regs *regs, const char *msg)
 	 * to get a message out:
 	 */
 	bust_spinlocks(1);
-	printk(KERN_EMERG "%s", msg);
+	printk(KERN_EMERG "%s", str);
 	printk(" on CPU%d, ip %08lx, registers:\n",
 		smp_processor_id(), regs->ip);
 	show_registers(regs);
+	if (do_panic)
+		panic("Non maskable interrupt");
 	console_silent();
 	spin_unlock(&nmi_print_lock);
 	bust_spinlocks(0);
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index ec6d3b2130c4..80ba6d37bfe0 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -75,7 +75,9 @@ asmlinkage void alignment_check(void);
 asmlinkage void machine_check(void);
 asmlinkage void spurious_interrupt_bug(void);
 
+int panic_on_unrecovered_nmi;
 static unsigned int code_bytes = 64;
+static unsigned ignore_nmis;
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
@@ -613,7 +615,9 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
 	 * We are in trouble anyway, lets at least try
 	 * to get a message out.
 	 */
-	printk(str, smp_processor_id());
+	printk(KERN_EMERG "%s", str);
+	printk(" on CPU%d, ip %08lx, registers:\n",
+		smp_processor_id(), regs->ip);
 	show_registers(regs);
 	if (kexec_should_crash(current))
 		crash_kexec(regs);
@@ -862,6 +866,28 @@ asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
 		io_check_error(reason, regs);
 }
 
+asmlinkage notrace __kprobes void
+do_nmi(struct pt_regs *regs, long error_code)
+{
+	nmi_enter();
+	add_pda(__nmi_count, 1);
+	if (!ignore_nmis)
+		default_do_nmi(regs);
+	nmi_exit();
+}
+
+void stop_nmi(void)
+{
+	acpi_nmi_disable();
+	ignore_nmis++;
+}
+
+void restart_nmi(void)
+{
+	ignore_nmis--;
+	acpi_nmi_enable();
+}
+
 /* runs on IST stack. */
 asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
 {
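
stop_nmi()/restart_nmi(), moved into traps_64.c above, keep a counter rather than a boolean so that nested disable/enable pairs compose: NMIs are only honoured again once every stop has been matched by a restart. A standalone sketch of that pairing (illustrative userspace C):

#include <stdio.h>

static unsigned ignore_nmis;

static void do_nmi(void)
{
	if (!ignore_nmis)
		printf("NMI handled\n");
	else
		printf("NMI ignored (depth %u)\n", ignore_nmis);
}

static void stop_nmi(void)    { ignore_nmis++; }
static void restart_nmi(void) { ignore_nmis--; }

int main(void)
{
	stop_nmi();
	stop_nmi();	/* nested critical section */
	do_nmi();	/* ignored: depth 2 */
	restart_nmi();
	do_nmi();	/* still ignored: depth 1 */
	restart_nmi();
	do_nmi();	/* handled again */
	return 0;
}
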
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 05449ef830a7..f0e435dd38fb 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -17,12 +17,10 @@ int do_nmi_callback(struct pt_regs *regs, int cpu);
 
 #ifdef CONFIG_X86_64
 extern void default_do_nmi(struct pt_regs *);
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
-extern void nmi_watchdog_default(void);
-#else
-#define nmi_watchdog_default() do {} while (0)
 #endif
 
+extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
+extern void nmi_watchdog_default(void);
 extern int check_nmi_watchdog(void);
 extern int nmi_watchdog_enabled;
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);