author		Andi Kleen <ak@suse.de>			2006-09-26 04:52:26 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2006-09-26 04:52:26 -0400
commit		b07f8915cda3fcd73b8b68075ba1e6cd0673365d (patch)
tree		73bd68878518350322098ddf69572c3da6f1e360
parent		874c4fe389d1358f82c96dc9b5092fc5c7690604 (diff)
[PATCH] x86: Temporarily revert parts of the Core 2 nmi watchdog support
This makes merging easier. They are re-added a few patches later.
Signed-off-by: Andi Kleen <ak@suse.de>
-rw-r--r--  arch/i386/kernel/nmi.c                  |  65
-rw-r--r--  arch/x86_64/kernel/nmi.c                |  81
-rw-r--r--  include/asm-i386/intel_arch_perfmon.h   |  19
-rw-r--r--  include/asm-x86_64/intel_arch_perfmon.h |  19
4 files changed, 6 insertions(+), 178 deletions(-)
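The reverted code keys entirely off the architectural perfmon advertised in
CPUID leaf 0xA (note the deleted comments say "ebp" where the code actually
tests EBX): EBX bit 0 is set when the Unhalted Core Cycles event is NOT
available, which is why the deleted setup_intel_arch_watchdog() bails out
when ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT is set. A minimal user-space
sketch of the same detection, illustrative only (assumes GCC/Clang's
<cpuid.h> on x86; this is not the kernel code):

    /* Sketch: detect the architectural-perfmon "unhalted core cycles"
     * event the same way the reverted kernel code does.
     * CPUID leaf 0xA, EBX bit 0 set => event NOT available. */
    #include <stdio.h>
    #include <cpuid.h>  /* GCC/Clang helper, x86 only */

    int main(void)
    {
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid(0xa, &eax, &ebx, &ecx, &edx))
            return 1;  /* CPU predates the arch perfmon leaf */

        printf("arch perfmon version: %u\n", eax & 0xff);
        printf("unhalted core cycles: %savailable\n",
               (ebx & 1) ? "not " : "");
        return 0;
    }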
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index acb351478e42..1282d70ff971 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -24,7 +24,6 @@
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
-#include <asm/intel_arch_perfmon.h>
 
 #include "mach_traps.h"
 
@@ -96,9 +95,6 @@ int nmi_active;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
-#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -211,8 +207,6 @@ static int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-static void disable_intel_arch_watchdog(void);
-
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -222,10 +216,6 @@ static void disable_lapic_nmi_watchdog(void)
 		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
 		break;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			disable_intel_arch_watchdog();
-			break;
-		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -454,53 +444,6 @@ static int setup_p4_watchdog(void)
 	return 1;
 }
 
-static void disable_intel_arch_watchdog(void)
-{
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
-}
-
-static int setup_intel_arch_watchdog(void)
-{
-	unsigned int evntsel;
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		return 0;
-
-	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
-
-	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
-	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
-
-	evntsel = ARCH_PERFMON_EVENTSEL_INT
-		| ARCH_PERFMON_EVENTSEL_OS
-		| ARCH_PERFMON_EVENTSEL_USR
-		| ARCH_PERFMON_NMI_EVENT_SEL
-		| ARCH_PERFMON_NMI_EVENT_UMASK;
-
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	write_watchdog_counter("INTEL_ARCH_PERFCTR0");
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	return 1;
-}
-
 void setup_apic_nmi_watchdog (void)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -510,11 +453,6 @@ void setup_apic_nmi_watchdog (void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			if (!setup_intel_arch_watchdog())
-				return;
-			break;
-		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -619,8 +557,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 		wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
 	}
-	else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
-		 nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+	else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
 		/* Only P6 based Pentium M need to re-unmask
 		 * the apic vector but it doesn't hurt
 		 * other P6 variant */
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index 5baa0c726e97..42c05d6907b9 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -26,7 +26,6 @@
 #include <asm/proto.h>
 #include <asm/kdebug.h>
 #include <asm/mce.h>
-#include <asm/intel_arch_perfmon.h>
 
 /*
  * lapic_nmi_owner tracks the ownership of the lapic NMI hardware:
@@ -66,9 +65,6 @@ static unsigned int nmi_p4_cccr_val;
 #define K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING	0x76
 #define K7_NMI_EVENT	K7_EVENT_CYCLES_PROCESSOR_IS_RUNNING
 
-#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
 #define MSR_P4_MISC_ENABLE	0x1A0
 #define MSR_P4_MISC_ENABLE_PERF_AVAIL	(1<<7)
 #define MSR_P4_MISC_ENABLE_PEBS_UNAVAIL	(1<<12)
@@ -100,10 +96,7 @@ static __cpuinit inline int nmi_known_cpu(void)
 	case X86_VENDOR_AMD:
 		return boot_cpu_data.x86 == 15;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-			return 1;
-		else
-			return (boot_cpu_data.x86 == 15);
+		return boot_cpu_data.x86 == 15;
 	}
 	return 0;
 }
@@ -209,8 +202,6 @@ int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-static void disable_intel_arch_watchdog(void);
-
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -223,8 +214,6 @@ static void disable_lapic_nmi_watchdog(void)
 		if (boot_cpu_data.x86 == 15) {
 			wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
 			wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
-		} else if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			disable_intel_arch_watchdog();
 		}
 		break;
 	}
@@ -377,53 +366,6 @@ static void setup_k7_watchdog(void)
 	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
 }
 
-static void disable_intel_arch_watchdog(void)
-{
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
-}
-
-static int setup_intel_arch_watchdog(void)
-{
-	unsigned int evntsel;
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		return 0;
-
-	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
-
-	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
-	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
-
-	evntsel = ARCH_PERFMON_EVENTSEL_INT
-		| ARCH_PERFMON_EVENTSEL_OS
-		| ARCH_PERFMON_EVENTSEL_USR
-		| ARCH_PERFMON_NMI_EVENT_SEL
-		| ARCH_PERFMON_NMI_EVENT_UMASK;
-
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	wrmsrl(MSR_ARCH_PERFMON_PERFCTR0, -((u64)cpu_khz * 1000 / nmi_hz));
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	return 1;
-}
-
 
 static int setup_p4_watchdog(void)
 {
@@ -477,16 +419,10 @@ void setup_apic_nmi_watchdog(void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			if (!setup_intel_arch_watchdog())
-				return;
-		} else if (boot_cpu_data.x86 == 15) {
-			if (!setup_p4_watchdog())
-				return;
-		} else {
+		if (boot_cpu_data.x86 != 15)
+			return;
+		if (!setup_p4_watchdog())
 			return;
-		}
-
 		break;
 
 	default:
@@ -571,14 +507,7 @@ void __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 		 */
 		wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 		apic_write(APIC_LVTPC, APIC_DM_NMI);
-	} else if (nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
-		/*
-		 * For Intel based architectural perfmon
-		 * - LVTPC is masked on interrupt and must be
-		 *   unmasked by the LVTPC handler.
-		 */
-		apic_write(APIC_LVTPC, APIC_DM_NMI);
-	}
+	}
 		wrmsrl(nmi_perfctr_msr, -((u64)cpu_khz * 1000 / nmi_hz));
 	}
 }
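Both the deleted x86-64 setup path and the surviving tick handler arm the
performance counter with -((u64)cpu_khz * 1000 / nmi_hz): the counter counts
up, so starting it that far below overflow makes it wrap, and raise the NMI,
roughly nmi_hz times per second. A worked sketch of that arithmetic with
assumed example values (a 2 GHz core and nmi_hz = 1; neither figure comes
from the patch itself):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* assumed example values, not read from real hardware */
        uint64_t cpu_khz = 2000000;  /* 2 GHz core clock */
        unsigned int nmi_hz = 1;     /* one watchdog tick per second */

        /* the kernel writes this negative value into the perfctr... */
        int64_t reload = -(int64_t)(cpu_khz * 1000 / nmi_hz);

        /* ...so it overflows after |reload| = 2,000,000,000 unhalted
         * cycles, i.e. about once per second at 2 GHz */
        printf("perfctr reload = %lld\n", (long long)reload);
        return 0;
    }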
diff --git a/include/asm-i386/intel_arch_perfmon.h b/include/asm-i386/intel_arch_perfmon.h
deleted file mode 100644
index 134ea9cc5283..000000000000
--- a/include/asm-i386/intel_arch_perfmon.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef X86_INTEL_ARCH_PERFMON_H
-#define X86_INTEL_ARCH_PERFMON_H 1
-
-#define MSR_ARCH_PERFMON_PERFCTR0	0xc1
-#define MSR_ARCH_PERFMON_PERFCTR1	0xc2
-
-#define MSR_ARCH_PERFMON_EVENTSEL0	0x186
-#define MSR_ARCH_PERFMON_EVENTSEL1	0x187
-
-#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1 << 22)
-#define ARCH_PERFMON_EVENTSEL_INT	(1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS	(1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR	(1 << 16)
-
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	(0x3c)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT	(1 << 0)
-
-#endif	/* X86_INTEL_ARCH_PERFMON_H */
diff --git a/include/asm-x86_64/intel_arch_perfmon.h b/include/asm-x86_64/intel_arch_perfmon.h
deleted file mode 100644
index 59c396431569..000000000000
--- a/include/asm-x86_64/intel_arch_perfmon.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#ifndef X86_64_INTEL_ARCH_PERFMON_H
-#define X86_64_INTEL_ARCH_PERFMON_H 1
-
-#define MSR_ARCH_PERFMON_PERFCTR0	0xc1
-#define MSR_ARCH_PERFMON_PERFCTR1	0xc2
-
-#define MSR_ARCH_PERFMON_EVENTSEL0	0x186
-#define MSR_ARCH_PERFMON_EVENTSEL1	0x187
-
-#define ARCH_PERFMON_EVENTSEL0_ENABLE	(1 << 22)
-#define ARCH_PERFMON_EVENTSEL_INT	(1 << 20)
-#define ARCH_PERFMON_EVENTSEL_OS	(1 << 17)
-#define ARCH_PERFMON_EVENTSEL_USR	(1 << 16)
-
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL	(0x3c)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK	(0x00 << 8)
-#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT	(1 << 0)
-
-#endif	/* X86_64_INTEL_ARCH_PERFMON_H */
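For reference, the event-select word that the deleted setup code assembled
from these constants is easy to reproduce; a small sketch reusing the
header's own defines (the hex results in the comments are plain arithmetic,
not values read from hardware):

    #include <stdio.h>

    /* constants copied from the deleted header */
    #define ARCH_PERFMON_EVENTSEL0_ENABLE           (1 << 22)
    #define ARCH_PERFMON_EVENTSEL_INT               (1 << 20)
    #define ARCH_PERFMON_EVENTSEL_OS                (1 << 17)
    #define ARCH_PERFMON_EVENTSEL_USR               (1 << 16)
    #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL   (0x3c)
    #define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK (0x00 << 8)

    int main(void)
    {
        /* same composition as the deleted setup_intel_arch_watchdog() */
        unsigned int evntsel = ARCH_PERFMON_EVENTSEL_INT
                             | ARCH_PERFMON_EVENTSEL_OS
                             | ARCH_PERFMON_EVENTSEL_USR
                             | ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
                             | ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK;

        printf("EVENTSEL0 before enable: %#x\n", evntsel);  /* 0x13003c */
        printf("EVENTSEL0 after enable:  %#x\n",
               evntsel | ARCH_PERFMON_EVENTSEL0_ENABLE);    /* 0x53003c */
        return 0;
    }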