author     Andi Kleen <ak@suse.de>               2006-09-26 04:52:26 -0400
committer  Andi Kleen <andi@basil.nowhere.org>   2006-09-26 04:52:26 -0400
commit     b07f8915cda3fcd73b8b68075ba1e6cd0673365d (patch)
tree       73bd68878518350322098ddf69572c3da6f1e360 /arch/x86_64/kernel/nmi.c
parent     874c4fe389d1358f82c96dc9b5092fc5c7690604 (diff)
[PATCH] x86: Temporarily revert parts of the Core 2 nmi watchdog support
This makes merging easier. The reverted parts are re-added a few patches later.
Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/x86_64/kernel/nmi.c')
 -rw-r--r--  arch/x86_64/kernel/nmi.c  81
 1 file changed, 5 insertions(+), 76 deletions(-)
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5baa0c726e97..42c05d6907b9 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -26,7 +26,6 @@
 #include <asm/proto.h>
 #include <asm/kdebug.h>
 #include <asm/mce.h>
-#include <asm/intel_arch_perfmon.h>
 
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -66,9 +65,6 @@ static unsigned int nmi_p4_cccr_val;
 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING 0x76
 #define K7_NMI_EVENT K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
 
-#define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
 #define MSR_P4_MISC_ENABLE 0x1A0
 #define MSR_P4_MISC_ENABLE_PERF_AVAIL (1<<7)
 #define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL (1<<12)
@@ -100,10 +96,7 @@ static __cpuinit inline int nmi_known_cpu(void)
         case X86_VENDOR_AMD:
                 return boot_cpu_data.x86 == 15;
         case X86_VENDOR_INTEL:
-                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-                        return 1;
-                else
-                        return (boot_cpu_data.x86 == 15);
+                return boot_cpu_data.x86 == 15;
         }
         return 0;
 }
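For context on the test being reverted here: X86_FEATURE_ARCH_PERFMON is the kernel's flag for Intel architectural performance monitoring, which a CPU advertises through CPUID leaf 0xA (EAX[7:0] holds the version ID, non-zero when the facility exists). A minimal user-space sketch of that detection, using GCC's cpuid.h helper; this illustrates the CPUID interface only and is not the kernel's own code path:

#include <stdio.h>
#include <cpuid.h>      /* GCC's __get_cpuid() helper */

int main(void)
{
        unsigned int eax, ebx, ecx, edx;

        /* Leaf 0xA: architectural performance monitoring. */
        if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
                return 1;               /* leaf not supported */

        /* A non-zero version ID means arch perfmon is present. */
        printf("arch perfmon version: %u\n", eax & 0xff);
        return 0;
}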
@@ -209,8 +202,6 @@ int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-static void disable_intel_arch_watchdog(void);
-
 static void disable_lapic_nmi_watchdog(void)
 {
         if (nmi_active <= 0)
@@ -223,8 +214,6 @@ static void disable_lapic_nmi_watchdog(void)
                 if (boot_cpu_data.x86 == 15) {
                         wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
                         wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
-                } else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-                        disable_intel_arch_watchdog();
                 }
                 break;
         }
@@ -377,53 +366,6 @@ static void setup_k7_watchdog(void)
         wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
 }
 
-static void disable_intel_arch_watchdog(void)
-{
-        unsigned ebx;
-
-        /*
-         * Check whether the Architectural PerfMon supports
-         * Unhalted Core Cycles Event or not.
-         * NOTE: Corresponding bit = 0 in ebp indicates event present.
-         */
-        ebx = cpuid_ebx(10);
-        if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-                wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
-}
-
-static int setup_intel_arch_watchdog(void)
-{
-        unsigned int evntsel;
-        unsigned ebx;
-
-        /*
-         * Check whether the Architectural PerfMon supports
-         * Unhalted Core Cycles Event or not.
-         * NOTE: Corresponding bit = 0 in ebp indicates event present.
-         */
-        ebx = cpuid_ebx(10);
-        if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-                return 0;
-
-        nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
-
-        clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
-        clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
-
-        evntsel = ARCH_PERFMON_EVENTSEL_INT
-                | ARCH_PERFMON_EVENTSEL_OS
-                | ARCH_PERFMON_EVENTSEL_USR
-                | ARCH_PERFMON_NMI_EVENT_SEL
-                | ARCH_PERFMON_NMI_EVENT_UMASK;
-
-        wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-        wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
-        apic_write(APIC_LVTPC, APIC_DM_NMI);
-        evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-        wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-        return 1;
-}
-
 
 static int setup_p4_watchdog(void)
 {
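The two functions removed above share one availability probe: CPUID leaf 0xA returns in EBX a mask in which a set bit means the corresponding architectural event is not available. Bit 0 covers the Unhalted Core Cycles event, hence the inverted test and the NOTE that bit = 0 indicates the event is present (the NOTE says ebp, but the code reads ebx). A self-contained sketch of the probe, assuming user space and GCC's cpuid.h; the macro mirrors the kernel define of the same name:

#include <stdbool.h>
#include <stdio.h>
#include <cpuid.h>

/* Bit 0 of CPUID.0xA:EBX, set when the event is NOT available. */
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)

static bool unhalted_core_cycles_available(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
                return false;           /* no arch perfmon at all */

        /* Inverted sense: a clear bit means the event is present. */
        return !(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT);
}

int main(void)
{
        printf("unhalted core cycles event: %s\n",
               unhalted_core_cycles_available() ? "present" : "absent");
        return 0;
}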
@@ -477,16 +419,10 @@ void setup_apic_nmi_watchdog(void)
                 setup_k7_watchdog();
                 break;
         case X86_VENDOR_INTEL:
-                if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-                        if (!setup_intel_arch_watchdog())
-                                return;
-                } else if (boot_cpu_data.x86 == 15) {
-                        if (!setup_p4_watchdog())
-                                return;
-                } else {
-                        return;
-                }
-
+                if (boot_cpu_data.x86 != 15)
+                        return;
+                if (!setup_p4_watchdog())
+                        return;
                 break;
 
         default:
@@ -571,14 +507,7 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
                          */
                         wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
                         apic_write(APIC_LVTPC, APIC_DM_NMI);
-                } else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
-                        /*
-                         * For Intel based architectural perfmon
-                         * - LVTPC is masked on interrupt and must be
-                         *   unmasked by the LVTPC handler.
-                         */
-                        apic_write(APIC_LVTPC, APIC_DM_NMI);
-                }
+                }
                 wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
         }
 }
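One recurring detail, visible in the last hunk and in the removed setup_intel_arch_watchdog(): each watchdog variant reloads its counter with -((u64)cpu_khz * 1000 / nmi_hz). The performance counter counts up and raises an NMI when it overflows, so preloading the two's-complement negative of the period makes it wrap after cpu_khz * 1000 / nmi_hz unhalted cycles, i.e. roughly nmi_hz times per second. A short sketch of the arithmetic; the clock rate below is made up for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint64_t cpu_khz = 2000000;     /* assume a 2 GHz core */
        unsigned int nmi_hz = 1;        /* one watchdog NMI per second */

        /* cpu_khz * 1000 = cycles per second, so this is the
         * number of cycles between counter overflows. */
        uint64_t period = cpu_khz * 1000 / nmi_hz;

        /* Unsigned negation gives the two's-complement reload value;
         * the counter climbs from here and overflows after 'period'. */
        uint64_t reload = -period;

        printf("period = %llu cycles, reload = %#llx\n",
               (unsigned long long)period, (unsigned long long)reload);
        return 0;
}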