-rw-r--r--  arch/x86/kernel/Makefile                                      |   2
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c                        |   4
-rw-r--r--  arch/x86/kernel/nmi.c (renamed from arch/x86/kernel/nmi_64.c) | 218
-rw-r--r--  arch/x86/kernel/nmi_32.c                                      | 467
-rw-r--r--  arch/x86/kernel/traps_32.c                                    |   8
-rw-r--r--  arch/x86/kernel/traps_64.c                                    |  28
-rw-r--r--  include/asm-x86/nmi.h                                         |   7
7 files changed, 174 insertions(+), 560 deletions(-)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 77807d4769c9..c82b4e1738a8 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -53,7 +53,7 @@ obj-$(CONFIG_X86_32_SMP) += smpcommon.o
 obj-$(CONFIG_X86_64_SMP)        += tsc_sync.o smpcommon.o
 obj-$(CONFIG_X86_TRAMPOLINE)    += trampoline_$(BITS).o
 obj-$(CONFIG_X86_MPPARSE)       += mpparse.o
-obj-$(CONFIG_X86_LOCAL_APIC)    += apic_$(BITS).o nmi_$(BITS).o
+obj-$(CONFIG_X86_LOCAL_APIC)    += apic_$(BITS).o nmi.o
 obj-$(CONFIG_X86_IO_APIC)       += io_apic_$(BITS).o
 obj-$(CONFIG_X86_REBOOTFIXUPS)  += reboot_fixups_32.o
 obj-$(CONFIG_KEXEC)             += machine_kexec_$(BITS).o
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index f9ae93adffe5..ddda4b64f545 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -181,7 +181,9 @@ void disable_lapic_nmi_watchdog(void)
                 return;
 
         on_each_cpu(stop_apic_nmi_watchdog, NULL, 0, 1);
-        wd_ops->unreserve();
+
+        if (wd_ops)
+                wd_ops->unreserve();
 
         BUG_ON(atomic_read(&nmi_active) != 0);
 }
diff --git a/arch/x86/kernel/nmi_64.c b/arch/x86/kernel/nmi.c
index 5a29ded994fa..fd680c73ba77 100644
--- a/arch/x86/kernel/nmi_64.c
+++ b/arch/x86/kernel/nmi.c
@@ -6,10 +6,13 @@
  * Fixes:
  * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
  * Mikael Pettersson : Power Management for local APIC NMI watchdog.
+ * Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog.
  * Pavel Machek and
  * Mikael Pettersson : PM converted to driver model. Disable/enable API.
  */
 
+#include <asm/apic.h>
+
 #include <linux/nmi.h>
 #include <linux/mm.h>
 #include <linux/delay.h>
@@ -17,20 +20,22 @@
 #include <linux/module.h>
 #include <linux/sysdev.h>
 #include <linux/sysctl.h>
+#include <linux/percpu.h>
 #include <linux/kprobes.h>
 #include <linux/cpumask.h>
+#include <linux/kernel_stat.h>
 #include <linux/kdebug.h>
+#include <linux/smp.h>
 
-#include <asm/smp.h>
-#include <asm/nmi.h>
 #include <asm/proto.h>
+#include <asm/timer.h>
+
 #include <asm/mce.h>
 
 #include <mach_traps.h>
 
 int unknown_nmi_panic;
 int nmi_watchdog_enabled;
-int panic_on_unrecovered_nmi;
 
 static cpumask_t backtrace_mask = CPU_MASK_NONE;
 
@@ -41,12 +46,47 @@ static cpumask_t backtrace_mask = CPU_MASK_NONE;
  * 0: the lapic NMI watchdog is disabled, but can be enabled
  */
 atomic_t nmi_active = ATOMIC_INIT(0);   /* oprofile uses this */
-static int panic_on_timeout;
+EXPORT_SYMBOL(nmi_active);
 
 unsigned int nmi_watchdog = NMI_DEFAULT;
-static unsigned int nmi_hz = HZ;
+EXPORT_SYMBOL(nmi_watchdog);
+
+static int panic_on_timeout;
 
+static unsigned int nmi_hz = HZ;
 static DEFINE_PER_CPU(short, wd_enabled);
+static int endflag __initdata;
+
+static inline unsigned int get_nmi_count(int cpu)
+{
+#ifdef CONFIG_X86_64
+        return cpu_pda(cpu)->__nmi_count;
+#else
+        return nmi_count(cpu);
+#endif
+}
+
+static inline int mce_in_progress(void)
+{
+#if defined(CONFIG_X86_64) && defined(CONFIG_X86_MCE)
+        return atomic_read(&mce_entry) > 0;
+#endif
+        return 0;
+}
+
+/*
+ * Take the local apic timer and PIT/HPET into account. We don't
+ * know which one is active, when we have highres/dyntick on
+ */
+static inline unsigned int get_timer_irqs(int cpu)
+{
+#ifdef CONFIG_X86_64
+        return read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
+#else
+        return per_cpu(irq_stat, cpu).apic_timer_irqs +
+                per_cpu(irq_stat, cpu).irq0_irqs;
+#endif
+}
 
 /* Run after command line and cpu_init init, but before all other checks */
 void nmi_watchdog_default(void)
@@ -56,22 +96,23 @@ void nmi_watchdog_default(void)
                 nmi_watchdog = NMI_NONE;
 }
 
-static int endflag __initdata = 0;
-
 #ifdef CONFIG_SMP
-/* The performance counters used by NMI_LOCAL_APIC don't trigger when
+/*
+ * The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
  * CPUs during the test make them busy.
  */
 static __init void nmi_cpu_busy(void *data)
 {
         local_irq_enable_in_hardirq();
-        /* Intentionally don't use cpu_relax here. This is
-           to make sure that the performance counter really ticks,
-           even if there is a simulator or similar that catches the
-           pause instruction. On a real HT machine this is fine because
-           all other CPUs are busy with "useless" delay loops and don't
-           care if they get somewhat less cycles. */
+        /*
+         * Intentionally don't use cpu_relax here. This is
+         * to make sure that the performance counter really ticks,
+         * even if there is a simulator or similar that catches the
+         * pause instruction. On a real HT machine this is fine because
+         * all other CPUs are busy with "useless" delay loops and don't
+         * care if they get somewhat less cycles.
+         */
         while (endflag == 0)
                 mb();
 }
@@ -79,10 +120,10 @@ static __init void nmi_cpu_busy(void *data)
 
 int __init check_nmi_watchdog(void)
 {
-        int *prev_nmi_count;
+        unsigned int *prev_nmi_count;
         int cpu;
 
-        if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
+        if (nmi_watchdog == NMI_NONE || nmi_watchdog == NMI_DISABLED)
                 return 0;
 
         if (!atomic_read(&nmi_active))
@@ -90,7 +131,7 @@ int __init check_nmi_watchdog(void)
 
         prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
         if (!prev_nmi_count)
-                return -1;
+                goto error;
 
         printk(KERN_INFO "Testing NMI watchdog ... ");
 
@@ -99,20 +140,20 @@ int __init check_nmi_watchdog(void)
                 smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
 #endif
 
-        for (cpu = 0; cpu < NR_CPUS; cpu++)
-                prev_nmi_count[cpu] = cpu_pda(cpu)->__nmi_count;
+        for_each_possible_cpu(cpu)
+                prev_nmi_count[cpu] = get_nmi_count(cpu);
         local_irq_enable();
-        mdelay((20*1000)/nmi_hz); // wait 20 ticks
+        mdelay((20 * 1000) / nmi_hz); /* wait 20 ticks */
 
         for_each_online_cpu(cpu) {
                 if (!per_cpu(wd_enabled, cpu))
                         continue;
-                if (cpu_pda(cpu)->__nmi_count - prev_nmi_count[cpu] <= 5) {
+                if (get_nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
                         printk(KERN_WARNING "WARNING: CPU#%d: NMI "
                                 "appears to be stuck (%d->%d)!\n",
                                 cpu,
                                 prev_nmi_count[cpu],
-                                cpu_pda(cpu)->__nmi_count);
+                                get_nmi_count(cpu));
                         per_cpu(wd_enabled, cpu) = 0;
                         atomic_dec(&nmi_active);
                 }
@@ -121,24 +162,32 @@ int __init check_nmi_watchdog(void)
         if (!atomic_read(&nmi_active)) {
                 kfree(prev_nmi_count);
                 atomic_set(&nmi_active, -1);
-                return -1;
+                goto error;
         }
         printk("OK.\n");
 
-        /* now that we know it works we can reduce NMI frequency to
-           something more reasonable; makes a difference in some configs */
+        /*
+         * now that we know it works we can reduce NMI frequency to
+         * something more reasonable; makes a difference in some configs
+         */
         if (nmi_watchdog == NMI_LOCAL_APIC)
                 nmi_hz = lapic_adjust_nmi_hz(1);
 
         kfree(prev_nmi_count);
         return 0;
+
+error:
+#ifdef CONFIG_X86_32
+        timer_ack = !cpu_has_tsc;
+#endif
+        return -1;
 }
 
 static int __init setup_nmi_watchdog(char *str)
 {
         int nmi;
 
-        if (!strncmp(str,"panic",5)) {
+        if (!strncmp(str, "panic", 5)) {
                 panic_on_timeout = 1;
                 str = strchr(str, ',');
                 if (!str)
@@ -148,15 +197,17 @@ static int __init setup_nmi_watchdog(char *str)
 
         get_option(&str, &nmi);
 
-        if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
+        if (nmi >= NMI_INVALID || nmi < NMI_NONE)
                 return 0;
 
         nmi_watchdog = nmi;
         return 1;
 }
-
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
+/*
+ * Suspend/resume support
+ */
 #ifdef CONFIG_PM
 
 static int nmi_pm_active; /* nmi_active before suspend */
@@ -195,7 +246,8 @@ static int __init init_lapic_nmi_sysfs(void)
 {
         int error;
 
-        /* should really be a BUG_ON but b/c this is an
+        /*
+         * should really be a BUG_ON but b/c this is an
          * init call, it just doesn't work. -dcz
          */
         if (nmi_watchdog != NMI_LOCAL_APIC)
@@ -209,6 +261,7 @@ static int __init init_lapic_nmi_sysfs(void)
                 error = sysdev_register(&device_lapic_nmi);
         return error;
 }
+
 /* must come after the local APIC's device_initcall() */
 late_initcall(init_lapic_nmi_sysfs);
 
@@ -216,7 +269,7 @@ late_initcall(init_lapic_nmi_sysfs);
 
 static void __acpi_nmi_enable(void *__unused)
 {
-        apic_write(APIC_LVT0, APIC_DM_NMI);
+        apic_write_around(APIC_LVT0, APIC_DM_NMI);
 }
 
 /*
@@ -230,7 +283,7 @@ void acpi_nmi_enable(void)
 
 static void __acpi_nmi_disable(void *__unused)
 {
-        apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
+        apic_write_around(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
 }
 
 /*
@@ -249,11 +302,12 @@ void setup_apic_nmi_watchdog(void *unused)
 
         /* cheap hack to support suspend/resume */
         /* if cpu0 is not active neither should the other cpus */
-        if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
+        if (smp_processor_id() != 0 && atomic_read(&nmi_active) <= 0)
                 return;
 
         switch (nmi_watchdog) {
         case NMI_LOCAL_APIC:
+                /* enable it before to avoid race with handler */
                 __get_cpu_var(wd_enabled) = 1;
                 if (lapic_watchdog_init(nmi_hz) < 0) {
                         __get_cpu_var(wd_enabled) = 0;
@@ -269,9 +323,9 @@ void setup_apic_nmi_watchdog(void *unused)
 void stop_apic_nmi_watchdog(void *unused)
 {
         /* only support LOCAL and IO APICs for now */
-        if ((nmi_watchdog != NMI_LOCAL_APIC) &&
-            (nmi_watchdog != NMI_IO_APIC))
+        if (nmi_watchdog != NMI_LOCAL_APIC &&
+            nmi_watchdog != NMI_IO_APIC)
                 return;
         if (__get_cpu_var(wd_enabled) == 0)
                 return;
         if (nmi_watchdog == NMI_LOCAL_APIC)
@@ -287,6 +341,11 @@ void stop_apic_nmi_watchdog(void *unused)
  *
  * as these watchdog NMI IRQs are generated on every CPU, we only
  * have to check the current processor.
+ *
+ * since NMIs don't listen to _any_ locks, we have to be extremely
+ * careful not to rely on unsafe variables. The printk might lock
+ * up though, so we have to break up any console locks first ...
+ * [when there will be more tty-related locks, break them up here too!]
  */
 
 static DEFINE_PER_CPU(unsigned, last_irq_sum);
@@ -295,11 +354,12 @@ static DEFINE_PER_CPU(int, nmi_touch);
 
 void touch_nmi_watchdog(void)
 {
-        if (nmi_watchdog > 0) {
+        if (nmi_watchdog == NMI_LOCAL_APIC ||
+            nmi_watchdog == NMI_IO_APIC) {
                 unsigned cpu;
 
                 /*
                  * Tell other CPUs to reset their alert counters. We cannot
                  * do it ourselves because the alert count increase is not
                  * atomic.
                  */
@@ -309,6 +369,9 @@ void touch_nmi_watchdog(void)
                 }
         }
 
+        /*
+         * Tickle the softlockup detector too:
+         */
         touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -316,7 +379,12 @@ EXPORT_SYMBOL(touch_nmi_watchdog);
 notrace __kprobes int
 nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 {
-        int sum;
+        /*
+         * Since current_thread_info()-> is always on the stack, and we
+         * always switch the stack NMI-atomically, it's safe to use
+         * smp_processor_id().
+         */
+        unsigned int sum;
         int touched = 0;
         int cpu = smp_processor_id();
         int rc = 0;
@@ -328,7 +396,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 touched = 1;
         }
 
-        sum = read_pda(apic_timer_irqs) + read_pda(irq0_irqs);
+        sum = get_timer_irqs(cpu);
+
         if (__get_cpu_var(nmi_touch)) {
                 __get_cpu_var(nmi_touch) = 0;
                 touched = 1;
@@ -338,28 +407,29 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
 
                 spin_lock(&lock);
-                printk("NMI backtrace for cpu %d\n", cpu);
+                printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
                 dump_stack();
                 spin_unlock(&lock);
                 cpu_clear(cpu, backtrace_mask);
         }
 
-#ifdef CONFIG_X86_MCE
-        /* Could check oops_in_progress here too, but it's safer
-           not too */
-        if (atomic_read(&mce_entry) > 0)
+        /* Could check oops_in_progress here too, but it's safer not to */
+        if (mce_in_progress())
                 touched = 1;
-#endif
-        /* if the apic timer isn't firing, this cpu isn't doing much */
+
+        /* if the none of the timers isn't firing, this cpu isn't doing much */
         if (!touched && __get_cpu_var(last_irq_sum) == sum) {
                 /*
                  * Ayiee, looks like this CPU is stuck ...
                  * wait a few IRQs (5 seconds) before doing the oops ...
                  */
                 local_inc(&__get_cpu_var(alert_counter));
-                if (local_read(&__get_cpu_var(alert_counter)) == 5*nmi_hz)
-                        die_nmi("NMI Watchdog detected LOCKUP on CPU %d\n", regs,
-                                panic_on_timeout);
+                if (local_read(&__get_cpu_var(alert_counter)) == 5 * nmi_hz)
+                        /*
+                         * die_nmi will return ONLY if NOTIFY_STOP happens..
+                         */
+                        die_nmi("BUG: NMI Watchdog detected LOCKUP",
+                                regs, panic_on_timeout);
         } else {
                 __get_cpu_var(last_irq_sum) = sum;
                 local_set(&__get_cpu_var(alert_counter), 0);
@@ -373,7 +443,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
                 rc |= lapic_wd_event(nmi_hz);
                 break;
         case NMI_IO_APIC:
-                /* don't know how to accurately check for this.
+                /*
+                 * don't know how to accurately check for this.
                  * just assume it was a watchdog timer interrupt
                  * This matches the old behaviour.
                  */
@@ -383,30 +454,6 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
         return rc;
 }
 
-static unsigned ignore_nmis;
-
-asmlinkage notrace __kprobes void
-do_nmi(struct pt_regs *regs, long error_code)
-{
-        nmi_enter();
-        add_pda(__nmi_count,1);
-        if (!ignore_nmis)
-                default_do_nmi(regs);
-        nmi_exit();
-}
-
-void stop_nmi(void)
-{
-        acpi_nmi_disable();
-        ignore_nmis++;
-}
-
-void restart_nmi(void)
-{
-        ignore_nmis--;
-        acpi_nmi_enable();
-}
-
 #ifdef CONFIG_SYSCTL
 
 static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
@@ -415,7 +462,7 @@ static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
         char buf[64];
 
         sprintf(buf, "NMI received for unknown reason %02x\n", reason);
-        die_nmi(buf, regs, 1);  /* Always panic here */
+        die_nmi(buf, regs, 1); /* Always panic here */
         return 0;
 }
 
@@ -434,27 +481,37 @@ int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
                 return 0;
 
         if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
-                printk( KERN_WARNING "NMI watchdog is permanently disabled\n");
+                printk(KERN_WARNING
+                        "NMI watchdog is permanently disabled\n");
                 return -EIO;
         }
 
         /* if nmi_watchdog is not set yet, then set it */
         nmi_watchdog_default();
 
+#ifdef CONFIG_X86_32
+        if (nmi_watchdog == NMI_NONE) {
+                if (lapic_watchdog_ok())
+                        nmi_watchdog = NMI_LOCAL_APIC;
+                else
+                        nmi_watchdog = NMI_IO_APIC;
+        }
+#endif
+
         if (nmi_watchdog == NMI_LOCAL_APIC) {
                 if (nmi_watchdog_enabled)
                         enable_lapic_nmi_watchdog();
                 else
                         disable_lapic_nmi_watchdog();
         } else {
-                printk( KERN_WARNING
+                printk(KERN_WARNING
                         "NMI watchdog doesn't know what hardware to touch\n");
                 return -EIO;
         }
         return 0;
 }
 
-#endif
+#endif /* CONFIG_SYSCTL */
 
 int do_nmi_callback(struct pt_regs *regs, int cpu)
 {
@@ -477,6 +534,3 @@ void __trigger_all_cpu_backtrace(void)
                 mdelay(1);
         }
 }
-
-EXPORT_SYMBOL(nmi_active);
-EXPORT_SYMBOL(nmi_watchdog);
diff --git a/arch/x86/kernel/nmi_32.c b/arch/x86/kernel/nmi_32.c
deleted file mode 100644
index 84160f74eeb0..000000000000
--- a/arch/x86/kernel/nmi_32.c
+++ /dev/null
@@ -1,467 +0,0 @@
-/*
- * NMI watchdog support on APIC systems
- *
- * Started by Ingo Molnar <mingo@redhat.com>
- *
- * Fixes:
- * Mikael Pettersson : AMD K7 support for local APIC NMI watchdog.
- * Mikael Pettersson : Power Management for local APIC NMI watchdog.
- * Mikael Pettersson : Pentium 4 support for local APIC NMI watchdog.
- * Pavel Machek and
- * Mikael Pettersson : PM converted to driver model. Disable/enable API.
- */
-
-#include <linux/delay.h>
-#include <linux/interrupt.h>
-#include <linux/module.h>
-#include <linux/nmi.h>
-#include <linux/sysdev.h>
-#include <linux/sysctl.h>
-#include <linux/percpu.h>
-#include <linux/kprobes.h>
-#include <linux/cpumask.h>
-#include <linux/kernel_stat.h>
-#include <linux/kdebug.h>
-#include <linux/slab.h>
-
-#include <asm/smp.h>
-#include <asm/nmi.h>
-
-#include "mach_traps.h"
-
-int unknown_nmi_panic;
-int nmi_watchdog_enabled;
-
-static cpumask_t backtrace_mask = CPU_MASK_NONE;
-
-/* nmi_active:
- * >0: the lapic NMI watchdog is active, but can be disabled
- * <0: the lapic NMI watchdog has not been set up, and cannot
- *     be enabled
- *  0: the lapic NMI watchdog is disabled, but can be enabled
- */
-atomic_t nmi_active = ATOMIC_INIT(0);   /* oprofile uses this */
-
-unsigned int nmi_watchdog = NMI_DEFAULT;
-static unsigned int nmi_hz = HZ;
-
-static DEFINE_PER_CPU(short, wd_enabled);
-
-static int endflag __initdata = 0;
-
-#ifdef CONFIG_SMP
-/* The performance counters used by NMI_LOCAL_APIC don't trigger when
- * the CPU is idle. To make sure the NMI watchdog really ticks on all
- * CPUs during the test make them busy.
- */
-static __init void nmi_cpu_busy(void *data)
-{
-        local_irq_enable_in_hardirq();
-        /* Intentionally don't use cpu_relax here. This is
-           to make sure that the performance counter really ticks,
-           even if there is a simulator or similar that catches the
-           pause instruction. On a real HT machine this is fine because
-           all other CPUs are busy with "useless" delay loops and don't
-           care if they get somewhat less cycles. */
-        while (endflag == 0)
-                mb();
-}
-#endif
-
-int __init check_nmi_watchdog(void)
-{
-        unsigned int *prev_nmi_count;
-        int cpu;
-
-        if ((nmi_watchdog == NMI_NONE) || (nmi_watchdog == NMI_DISABLED))
-                return 0;
-
-        if (!atomic_read(&nmi_active))
-                return 0;
-
-        prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
-        if (!prev_nmi_count)
-                return -1;
-
-        printk(KERN_INFO "Testing NMI watchdog ... ");
-
-#ifdef CONFIG_SMP
-        if (nmi_watchdog == NMI_LOCAL_APIC)
-                smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);
-#endif
-
-        for_each_possible_cpu(cpu)
-                prev_nmi_count[cpu] = nmi_count(cpu);
-        local_irq_enable();
-        mdelay((20*1000)/nmi_hz); // wait 20 ticks
-
-        for_each_possible_cpu(cpu) {
-#ifdef CONFIG_SMP
-                /* Check cpu_callin_map here because that is set
-                   after the timer is started. */
-                if (!cpu_isset(cpu, cpu_callin_map))
-                        continue;
-#endif
-                if (!per_cpu(wd_enabled, cpu))
-                        continue;
-                if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
-                        printk(KERN_WARNING "WARNING: CPU#%d: NMI "
-                                "appears to be stuck (%d->%d)!\n",
-                                cpu,
-                                prev_nmi_count[cpu],
-                                nmi_count(cpu));
-                        per_cpu(wd_enabled, cpu) = 0;
-                        atomic_dec(&nmi_active);
-                }
-        }
-        endflag = 1;
-        if (!atomic_read(&nmi_active)) {
-                kfree(prev_nmi_count);
-                atomic_set(&nmi_active, -1);
-                return -1;
-        }
-        printk("OK.\n");
-
-        /* now that we know it works we can reduce NMI frequency to
-           something more reasonable; makes a difference in some configs */
-        if (nmi_watchdog == NMI_LOCAL_APIC)
-                nmi_hz = lapic_adjust_nmi_hz(1);
-
-        kfree(prev_nmi_count);
-        return 0;
-}
-
-static int __init setup_nmi_watchdog(char *str)
-{
-        int nmi;
-
-        get_option(&str, &nmi);
-
-        if ((nmi >= NMI_INVALID) || (nmi < NMI_NONE))
-                return 0;
-
-        nmi_watchdog = nmi;
-        return 1;
-}
-
-__setup("nmi_watchdog=", setup_nmi_watchdog);
-
-
-/* Suspend/resume support */
-
-#ifdef CONFIG_PM
-
-static int nmi_pm_active; /* nmi_active before suspend */
-
-static int lapic_nmi_suspend(struct sys_device *dev, pm_message_t state)
-{
-        /* only CPU0 goes here, other CPUs should be offline */
-        nmi_pm_active = atomic_read(&nmi_active);
-        stop_apic_nmi_watchdog(NULL);
-        BUG_ON(atomic_read(&nmi_active) != 0);
-        return 0;
-}
-
-static int lapic_nmi_resume(struct sys_device *dev)
-{
-        /* only CPU0 goes here, other CPUs should be offline */
-        if (nmi_pm_active > 0) {
-                setup_apic_nmi_watchdog(NULL);
-                touch_nmi_watchdog();
-        }
-        return 0;
-}
-
-
-static struct sysdev_class nmi_sysclass = {
-        .name           = "lapic_nmi",
-        .resume         = lapic_nmi_resume,
-        .suspend        = lapic_nmi_suspend,
-};
-
-static struct sys_device device_lapic_nmi = {
-        .id     = 0,
-        .cls    = &nmi_sysclass,
-};
-
-static int __init init_lapic_nmi_sysfs(void)
-{
-        int error;
-
-        /* should really be a BUG_ON but b/c this is an
-         * init call, it just doesn't work. -dcz
-         */
-        if (nmi_watchdog != NMI_LOCAL_APIC)
-                return 0;
-
-        if (atomic_read(&nmi_active) < 0)
-                return 0;
-
-        error = sysdev_class_register(&nmi_sysclass);
-        if (!error)
-                error = sysdev_register(&device_lapic_nmi);
-        return error;
-}
-/* must come after the local APIC's device_initcall() */
-late_initcall(init_lapic_nmi_sysfs);
-
-#endif /* CONFIG_PM */
-
-static void __acpi_nmi_enable(void *__unused)
-{
-        apic_write_around(APIC_LVT0, APIC_DM_NMI);
-}
-
-/*
- * Enable timer based NMIs on all CPUs:
- */
-void acpi_nmi_enable(void)
-{
-        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-                on_each_cpu(__acpi_nmi_enable, NULL, 0, 1);
-}
-
-static void __acpi_nmi_disable(void *__unused)
-{
-        apic_write(APIC_LVT0, APIC_DM_NMI | APIC_LVT_MASKED);
-}
-
-/*
- * Disable timer based NMIs on all CPUs:
- */
-void acpi_nmi_disable(void)
-{
-        if (atomic_read(&nmi_active) && nmi_watchdog == NMI_IO_APIC)
-                on_each_cpu(__acpi_nmi_disable, NULL, 0, 1);
-}
-
-void setup_apic_nmi_watchdog(void *unused)
-{
-        if (__get_cpu_var(wd_enabled))
-                return;
-
-        /* cheap hack to support suspend/resume */
-        /* if cpu0 is not active neither should the other cpus */
-        if ((smp_processor_id() != 0) && (atomic_read(&nmi_active) <= 0))
-                return;
-
-        switch (nmi_watchdog) {
-        case NMI_LOCAL_APIC:
-                __get_cpu_var(wd_enabled) = 1; /* enable it before to avoid race with handler */
-                if (lapic_watchdog_init(nmi_hz) < 0) {
-                        __get_cpu_var(wd_enabled) = 0;
-                        return;
-                }
-                /* FALL THROUGH */
-        case NMI_IO_APIC:
-                __get_cpu_var(wd_enabled) = 1;
-                atomic_inc(&nmi_active);
-        }
-}
-
-void stop_apic_nmi_watchdog(void *unused)
-{
-        /* only support LOCAL and IO APICs for now */
-        if ((nmi_watchdog != NMI_LOCAL_APIC) &&
-            (nmi_watchdog != NMI_IO_APIC))
-                return;
-        if (__get_cpu_var(wd_enabled) == 0)
-                return;
-        if (nmi_watchdog == NMI_LOCAL_APIC)
-                lapic_watchdog_stop();
-        __get_cpu_var(wd_enabled) = 0;
-        atomic_dec(&nmi_active);
-}
-
-/*
- * the best way to detect whether a CPU has a 'hard lockup' problem
- * is to check it's local APIC timer IRQ counts. If they are not
- * changing then that CPU has some problem.
- *
- * as these watchdog NMI IRQs are generated on every CPU, we only
- * have to check the current processor.
- *
- * since NMIs don't listen to _any_ locks, we have to be extremely
- * careful not to rely on unsafe variables. The printk might lock
- * up though, so we have to break up any console locks first ...
- * [when there will be more tty-related locks, break them up
- * here too!]
- */
-
-static unsigned int
-        last_irq_sums [NR_CPUS],
-        alert_counter [NR_CPUS];
-
-void touch_nmi_watchdog(void)
-{
-        if (nmi_watchdog > 0) {
-                unsigned cpu;
-
-                /*
-                 * Just reset the alert counters, (other CPUs might be
-                 * spinning on locks we hold):
-                 */
-                for_each_present_cpu(cpu) {
-                        if (alert_counter[cpu])
-                                alert_counter[cpu] = 0;
-                }
-        }
-
-        /*
-         * Tickle the softlockup detector too:
-         */
-        touch_softlockup_watchdog();
-}
-EXPORT_SYMBOL(touch_nmi_watchdog);
-
-extern void die_nmi(struct pt_regs *, const char *msg);
-
-notrace __kprobes int
-nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
-{
-
-        /*
-         * Since current_thread_info()-> is always on the stack, and we
-         * always switch the stack NMI-atomically, it's safe to use
-         * smp_processor_id().
-         */
-        unsigned int sum;
-        int touched = 0;
-        int cpu = smp_processor_id();
-        int rc = 0;
-
-        /* check for other users first */
-        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT)
-                        == NOTIFY_STOP) {
-                rc = 1;
-                touched = 1;
-        }
-
-        if (cpu_isset(cpu, backtrace_mask)) {
-                static DEFINE_SPINLOCK(lock);   /* Serialise the printks */
-
-                spin_lock(&lock);
-                printk("NMI backtrace for cpu %d\n", cpu);
-                dump_stack();
-                spin_unlock(&lock);
-                cpu_clear(cpu, backtrace_mask);
-        }
-
-        /*
-         * Take the local apic timer and PIT/HPET into account. We don't
-         * know which one is active, when we have highres/dyntick on
-         */
-        sum = per_cpu(irq_stat, cpu).apic_timer_irqs +
-                per_cpu(irq_stat, cpu).irq0_irqs;
-
-        /* if the none of the timers isn't firing, this cpu isn't doing much */
-        if (!touched && last_irq_sums[cpu] == sum) {
-                /*
-                 * Ayiee, looks like this CPU is stuck ...
-                 * wait a few IRQs (5 seconds) before doing the oops ...
-                 */
-                alert_counter[cpu]++;
-                if (alert_counter[cpu] == 5*nmi_hz)
-                        /*
-                         * die_nmi will return ONLY if NOTIFY_STOP happens..
-                         */
-                        die_nmi(regs, "BUG: NMI Watchdog detected LOCKUP");
-        } else {
-                last_irq_sums[cpu] = sum;
-                alert_counter[cpu] = 0;
-        }
-        /* see if the nmi watchdog went off */
-        if (!__get_cpu_var(wd_enabled))
-                return rc;
-        switch (nmi_watchdog) {
-        case NMI_LOCAL_APIC:
-                rc |= lapic_wd_event(nmi_hz);
-                break;
-        case NMI_IO_APIC:
-                /* don't know how to accurately check for this.
-                 * just assume it was a watchdog timer interrupt
-                 * This matches the old behaviour.
-                 */
-                rc = 1;
-                break;
-        }
-        return rc;
-}
-
-#ifdef CONFIG_SYSCTL
-
-static int unknown_nmi_panic_callback(struct pt_regs *regs, int cpu)
-{
-        unsigned char reason = get_nmi_reason();
-        char buf[64];
-
-        sprintf(buf, "NMI received for unknown reason %02x\n", reason);
-        die_nmi(regs, buf);
-        return 0;
-}
-
-/*
- * proc handler for /proc/sys/kernel/nmi
- */
-int proc_nmi_enabled(struct ctl_table *table, int write, struct file *file,
-                        void __user *buffer, size_t *length, loff_t *ppos)
-{
-        int old_state;
-
-        nmi_watchdog_enabled = (atomic_read(&nmi_active) > 0) ? 1 : 0;
-        old_state = nmi_watchdog_enabled;
-        proc_dointvec(table, write, file, buffer, length, ppos);
-        if (!!old_state == !!nmi_watchdog_enabled)
-                return 0;
-
-        if (atomic_read(&nmi_active) < 0 || nmi_watchdog == NMI_DISABLED) {
-                printk( KERN_WARNING "NMI watchdog is permanently disabled\n");
-                return -EIO;
-        }
-
-        if (nmi_watchdog == NMI_DEFAULT) {
-                if (lapic_watchdog_ok())
-                        nmi_watchdog = NMI_LOCAL_APIC;
-                else
-                        nmi_watchdog = NMI_IO_APIC;
-        }
-
-        if (nmi_watchdog == NMI_LOCAL_APIC) {
-                if (nmi_watchdog_enabled)
-                        enable_lapic_nmi_watchdog();
-                else
-                        disable_lapic_nmi_watchdog();
-        } else {
-                printk( KERN_WARNING
-                        "NMI watchdog doesn't know what hardware to touch\n");
-                return -EIO;
-        }
-        return 0;
-}
-
-#endif
-
-int do_nmi_callback(struct pt_regs *regs, int cpu)
-{
-#ifdef CONFIG_SYSCTL
-        if (unknown_nmi_panic)
-                return unknown_nmi_panic_callback(regs, cpu);
-#endif
-        return 0;
-}
-
-void __trigger_all_cpu_backtrace(void)
-{
-        int i;
-
-        backtrace_mask = cpu_online_map;
-        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
-        for (i = 0; i < 10 * 1000; i++) {
-                if (cpus_empty(backtrace_mask))
-                        break;
-                mdelay(1);
-        }
-}
-
-EXPORT_SYMBOL(nmi_active);
-EXPORT_SYMBOL(nmi_watchdog);
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index 08d752de4eee..cf37d20b1ba7 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -756,9 +756,9 @@ unknown_nmi_error(unsigned char reason, struct pt_regs *regs)
 
 static DEFINE_SPINLOCK(nmi_print_lock);
 
-void notrace __kprobes die_nmi(struct pt_regs *regs, const char *msg)
+void notrace __kprobes die_nmi(char *str, struct pt_regs *regs, int do_panic)
 {
-        if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) == NOTIFY_STOP)
+        if (notify_die(DIE_NMIWATCHDOG, str, regs, 0, 2, SIGINT) == NOTIFY_STOP)
                 return;
 
         spin_lock(&nmi_print_lock);
@@ -767,10 +767,12 @@ void notrace __kprobes die_nmi(struct pt_regs *regs, const char *msg)
          * to get a message out:
          */
         bust_spinlocks(1);
-        printk(KERN_EMERG "%s", msg);
+        printk(KERN_EMERG "%s", str);
         printk(" on CPU%d, ip %08lx, registers:\n",
                 smp_processor_id(), regs->ip);
         show_registers(regs);
+        if (do_panic)
+                panic("Non maskable interrupt");
         console_silent();
         spin_unlock(&nmi_print_lock);
         bust_spinlocks(0);
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index adff76ea97c4..e4a380797dc1 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -76,7 +76,9 @@ asmlinkage void alignment_check(void);
 asmlinkage void machine_check(void);
 asmlinkage void spurious_interrupt_bug(void);
 
+int panic_on_unrecovered_nmi;
 static unsigned int code_bytes = 64;
+static unsigned ignore_nmis;
 
 static inline void conditional_sti(struct pt_regs *regs)
 {
@@ -614,7 +616,9 @@ die_nmi(char *str, struct pt_regs *regs, int do_panic)
          * We are in trouble anyway, lets at least try
          * to get a message out.
          */
-        printk(str, smp_processor_id());
+        printk(KERN_EMERG "%s", str);
+        printk(" on CPU%d, ip %08lx, registers:\n",
+                smp_processor_id(), regs->ip);
         show_registers(regs);
         if (kexec_should_crash(current))
                 crash_kexec(regs);
@@ -865,6 +869,28 @@ asmlinkage notrace __kprobes void default_do_nmi(struct pt_regs *regs)
                 io_check_error(reason, regs);
 }
 
+asmlinkage notrace __kprobes void
+do_nmi(struct pt_regs *regs, long error_code)
+{
+        nmi_enter();
+        add_pda(__nmi_count, 1);
+        if (!ignore_nmis)
+                default_do_nmi(regs);
+        nmi_exit();
+}
+
+void stop_nmi(void)
+{
+        acpi_nmi_disable();
+        ignore_nmis++;
+}
+
+void restart_nmi(void)
+{
+        ignore_nmis--;
+        acpi_nmi_enable();
+}
+
 /* runs on IST stack. */
 asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
 {
diff --git a/include/asm-x86/nmi.h b/include/asm-x86/nmi.h
index 1e363021e72f..470bb4aacb75 100644
--- a/include/asm-x86/nmi.h
+++ b/include/asm-x86/nmi.h
@@ -38,15 +38,12 @@ static inline void unset_nmi_pm_callback(struct pm_dev *dev)
 
 #ifdef CONFIG_X86_64
 extern void default_do_nmi(struct pt_regs *);
-extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
-extern void nmi_watchdog_default(void);
-#else
-#define nmi_watchdog_default() do {} while (0)
 #endif
 
+extern void die_nmi(char *str, struct pt_regs *regs, int do_panic);
+extern void nmi_watchdog_default(void);
 extern int check_nmi_watchdog(void);
 extern int nmi_watchdog_enabled;
-extern int unknown_nmi_panic;
 extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
 extern int avail_to_resrv_perfctr_nmi(unsigned int);
 extern int reserve_perfctr_nmi(unsigned int);