Diffstat (limited to 'arch/i386/kernel/nmi.c')

 arch/i386/kernel/nmi.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++-
 1 file changed, 64 insertions(+), 1 deletion(-)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index bd3875419630..a76e93146585 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -24,6 +24,7 @@
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
+#include <asm/intel_arch_perfmon.h>
 
 #include "mach_traps.h"
 
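The new include pulls in asm/intel_arch_perfmon.h, which supplies the MSR_ARCH_PERFMON_* register numbers and the ARCH_PERFMON_* event and bit definitions used throughout the rest of this patch.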
@@ -95,6 +96,9 @@ int nmi_active;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
+#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
+#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
+
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
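These two macros bind the watchdog to the architectural Unhalted Core Cycles event, so the counter overflows (and raises the NMI) in proportion to unhalted CPU cycles, mirroring what the vendor-specific K7, P6 and P4 paths already count.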
@@ -207,6 +211,8 @@ static int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
+static void disable_intel_arch_watchdog(void);
+
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -216,6 +222,10 @@ static void disable_lapic_nmi_watchdog(void)
 		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
 		break;
 	case X86_VENDOR_INTEL:
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			disable_intel_arch_watchdog();
+			break;
+		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
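Note the ordering: the X86_FEATURE_ARCH_PERFMON check runs before the family switch, so any Intel CPU advertising architectural perfmon via CPUID takes the new generic path and never reaches the hard-coded family 6/15 cases. setup_apic_nmi_watchdog() below applies the same precedence.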
@@ -444,6 +454,53 @@ static int setup_p4_watchdog(void)
 	return 1;
 }
 
+static void disable_intel_arch_watchdog(void)
+{
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
+}
+
+static int setup_intel_arch_watchdog(void)
+{
+	unsigned int evntsel;
+	unsigned ebx;
+
+	/*
+	 * Check whether the Architectural PerfMon supports
+	 * Unhalted Core Cycles Event or not.
+	 * NOTE: Corresponding bit = 0 in ebx indicates event present.
+	 */
+	ebx = cpuid_ebx(10);
+	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
+		return 0;
+
+	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
+
+	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
+	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
+
+	evntsel = ARCH_PERFMON_EVENTSEL_INT
+		| ARCH_PERFMON_EVENTSEL_OS
+		| ARCH_PERFMON_EVENTSEL_USR
+		| ARCH_PERFMON_NMI_EVENT_SEL
+		| ARCH_PERFMON_NMI_EVENT_UMASK;
+
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	write_watchdog_counter("INTEL_ARCH_PERFCTR0");
+	apic_write(APIC_LVTPC, APIC_DM_NMI);
+	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
+	return 1;
+}
+
 void setup_apic_nmi_watchdog (void)
 {
 	switch (boot_cpu_data.x86_vendor) {
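The inverted-looking tests in both functions follow from the CPUID interface: leaf 0xA (decimal 10) reports in EBX a mask in which a set bit means the corresponding architectural event is NOT available. A minimal user-space sketch of the same probe, assuming GCC/Clang's cpuid.h wrapper (the kernel uses its own cpuid_ebx() helper, and the PRESENT macro here is mirrored from asm/intel_arch_perfmon.h):

	#include <stdio.h>
	#include <cpuid.h>

	/* Bit 0 of CPUID.0AH:EBX; a set bit means the event is NOT available. */
	#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT	(1 << 0)

	int main(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* Leaf 0xA is the architectural performance monitoring leaf. */
		if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
			return 1;	/* leaf not supported on this CPU */

		if (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)
			printf("Unhalted Core Cycles event: not available\n");
		else
			printf("Unhalted Core Cycles event: available\n");
		return 0;
	}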
@@ -453,6 +510,11 @@ void setup_apic_nmi_watchdog (void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
+		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
+			if (!setup_intel_arch_watchdog())
+				return;
+			break;
+		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -556,7 +618,8 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 		wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	}
-	else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
+	else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
+		 nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
 		/* Only P6 based Pentium M need to re-unmask
 		 * the apic vector but it doesn't hurt
 		 * other P6 variant */
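The final hunk, which accounts for the patch's single deletion, reuses the existing P6 re-arm path for the new counter: the LVTPC re-unmask it performs is only strictly required on P6-era Pentium M parts, but it is harmless elsewhere, and like the P6 counter the architectural counter counts up toward overflow and is reloaded after every watchdog tick so that it fires again on the next interval.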