Diffstat (limited to 'arch/x86/kernel/apic/nmi.c')
 -rw-r--r--  arch/x86/kernel/apic/nmi.c | 15 ++++++++-------
 1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kernel/apic/nmi.c b/arch/x86/kernel/apic/nmi.c
index 0159a69396cb..1edaf15c0b8e 100644
--- a/arch/x86/kernel/apic/nmi.c
+++ b/arch/x86/kernel/apic/nmi.c
@@ -18,6 +18,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/slab.h>
 #include <linux/sysdev.h>
 #include <linux/sysctl.h>
 #include <linux/percpu.h>
@@ -177,7 +178,7 @@ int __init check_nmi_watchdog(void)
 error:
 	if (nmi_watchdog == NMI_IO_APIC) {
 		if (!timer_through_8259)
-			disable_8259A_irq(0);
+			legacy_pic->chip->mask(0);
 		on_each_cpu(__acpi_nmi_disable, NULL, 1);
 	}
 
@@ -416,13 +417,13 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 
 	/* We can be called before check_nmi_watchdog, hence NULL check. */
 	if (cpumask_test_cpu(cpu, to_cpumask(backtrace_mask))) {
-		static DEFINE_SPINLOCK(lock); /* Serialise the printks */
+		static DEFINE_RAW_SPINLOCK(lock); /* Serialise the printks */
 
-		spin_lock(&lock);
+		raw_spin_lock(&lock);
 		printk(KERN_WARNING "NMI backtrace for cpu %d\n", cpu);
 		show_regs(regs);
 		dump_stack();
-		spin_unlock(&lock);
+		raw_spin_unlock(&lock);
 		cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
 		rc = 1;
@@ -438,8 +439,8 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
 		 * Ayiee, looks like this CPU is stuck ...
 		 * wait a few IRQs (5 seconds) before doing the oops ...
 		 */
-		__this_cpu_inc(per_cpu_var(alert_counter));
-		if (__this_cpu_read(per_cpu_var(alert_counter)) == 5 * nmi_hz)
+		__this_cpu_inc(alert_counter);
+		if (__this_cpu_read(alert_counter) == 5 * nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
@@ -447,7 +448,7 @@ nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
				regs, panic_on_timeout);
 	} else {
 		__get_cpu_var(last_irq_sum) = sum;
-		__this_cpu_write(per_cpu_var(alert_counter), 0);
+		__this_cpu_write(alert_counter, 0);
 	}
 
 	/* see if the nmi watchdog went off */
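
For context, a minimal sketch (assumed, not taken from this commit) of the per-CPU accessor pattern the hunks above migrate to: the counter is declared with DEFINE_PER_CPU and driven directly through __this_cpu_inc(), __this_cpu_read() and __this_cpu_write(), with no per_cpu_var() name wrapper. The watchdog_poll() helper and the STUCK_THRESHOLD value below are hypothetical.

#include <linux/percpu.h>
#include <linux/printk.h>
#include <linux/smp.h>

/* Hypothetical threshold: polls with no progress before warning. */
#define STUCK_THRESHOLD 5

static DEFINE_PER_CPU(unsigned int, alert_counter);

static void watchdog_poll(bool cpu_made_progress)
{
	if (cpu_made_progress) {
		/* Progress seen: reset the current CPU's counter. */
		__this_cpu_write(alert_counter, 0);
		return;
	}

	/* No progress: bump the counter private to this CPU. */
	__this_cpu_inc(alert_counter);

	if (__this_cpu_read(alert_counter) == STUCK_THRESHOLD)
		pr_warn("CPU %d appears stuck\n", smp_processor_id());
}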
