author    Andi Kleen <ak@suse.de>              2006-09-26 04:52:26 -0400
committer Andi Kleen <andi@basil.nowhere.org>  2006-09-26 04:52:26 -0400
commit    b07f8915cda3fcd73b8b68075ba1e6cd0673365d (patch)
tree      73bd68878518350322098ddf69572c3da6f1e360 /arch/i386/kernel
parent    874c4fe389d1358f82c96dc9b5092fc5c7690604 (diff)
[PATCH] x86: Temporarily revert parts of the Core 2 nmi watchdog support

This makes merging easier. They are re-added a few patches later.

Signed-off-by: Andi Kleen <ak@suse.de>
Diffstat (limited to 'arch/i386/kernel')
-rw-r--r--  arch/i386/kernel/nmi.c  65
1 file changed, 1 insertion(+), 64 deletions(-)
diff --git a/arch/i386/kernel/nmi.c b/arch/i386/kernel/nmi.c
index acb351478e42..1282d70ff971 100644
--- a/arch/i386/kernel/nmi.c
+++ b/arch/i386/kernel/nmi.c
@@ -24,7 +24,6 @@
 
 #include <asm/smp.h>
 #include <asm/nmi.h>
-#include <asm/intel_arch_perfmon.h>
 
 #include "mach_traps.h"
 
@@ -96,9 +95,6 @@ int nmi_active;
 	(P4_CCCR_OVF_PMI0|P4_CCCR_THRESHOLD(15)|P4_CCCR_COMPLEMENT| \
 	 P4_CCCR_COMPARE|P4_CCCR_REQUIRED|P4_CCCR_ESCR_SELECT(4)|P4_CCCR_ENABLE)
 
-#define ARCH_PERFMON_NMI_EVENT_SEL	ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
-#define ARCH_PERFMON_NMI_EVENT_UMASK	ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
-
 #ifdef CONFIG_SMP
 /* The performance counters used by NMI_LOCAL_APIC don't trigger when
  * the CPU is idle. To make sure the NMI watchdog really ticks on all
@@ -211,8 +207,6 @@ static int __init setup_nmi_watchdog(char *str)
 
 __setup("nmi_watchdog=", setup_nmi_watchdog);
 
-static void disable_intel_arch_watchdog(void);
-
 static void disable_lapic_nmi_watchdog(void)
 {
 	if (nmi_active <= 0)
@@ -222,10 +216,6 @@ static void disable_lapic_nmi_watchdog(void)
 		wrmsr(MSR_K7_EVNTSEL0, 0, 0);
 		break;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			disable_intel_arch_watchdog();
-			break;
-		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -454,53 +444,6 @@ static int setup_p4_watchdog(void)
 	return 1;
 }
 
-static void disable_intel_arch_watchdog(void)
-{
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if (!(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, 0, 0);
-}
-
-static int setup_intel_arch_watchdog(void)
-{
-	unsigned int evntsel;
-	unsigned ebx;
-
-	/*
-	 * Check whether the Architectural PerfMon supports
-	 * Unhalted Core Cycles Event or not.
-	 * NOTE: Corresponding bit = 0 in ebp indicates event present.
-	 */
-	ebx = cpuid_ebx(10);
-	if ((ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
-		return 0;
-
-	nmi_perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
-
-	clear_msr_range(MSR_ARCH_PERFMON_EVENTSEL0, 2);
-	clear_msr_range(MSR_ARCH_PERFMON_PERFCTR0, 2);
-
-	evntsel = ARCH_PERFMON_EVENTSEL_INT
-		| ARCH_PERFMON_EVENTSEL_OS
-		| ARCH_PERFMON_EVENTSEL_USR
-		| ARCH_PERFMON_NMI_EVENT_SEL
-		| ARCH_PERFMON_NMI_EVENT_UMASK;
-
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	write_watchdog_counter("INTEL_ARCH_PERFCTR0");
-	apic_write(APIC_LVTPC, APIC_DM_NMI);
-	evntsel |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-	wrmsr(MSR_ARCH_PERFMON_EVENTSEL0, evntsel, 0);
-	return 1;
-}
-
 void setup_apic_nmi_watchdog (void)
 {
 	switch (boot_cpu_data.x86_vendor) {
@@ -510,11 +453,6 @@ void setup_apic_nmi_watchdog (void)
 		setup_k7_watchdog();
 		break;
 	case X86_VENDOR_INTEL:
-		if (cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-			if (!setup_intel_arch_watchdog())
-				return;
-			break;
-		}
 		switch (boot_cpu_data.x86) {
 		case 6:
 			if (boot_cpu_data.x86_model > 0xd)
@@ -619,8 +557,7 @@ void nmi_watchdog_tick (struct pt_regs * regs)
 			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
 			apic_write(APIC_LVTPC, APIC_DM_NMI);
 		}
-		else if (nmi_perfctr_msr == MSR_P6_PERFCTR0 ||
-			 nmi_perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+		else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
 			/* Only P6 based Pentium M need to re-unmask
 			 * the apic vector but it doesn't hurt
 			 * other P6 variant */
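
For context on the code being reverted: setup_intel_arch_watchdog() and disable_intel_arch_watchdog() probe CPUID leaf 0xA (cpuid_ebx(10)) and treat a set bit 0 of EBX as "Unhalted Core Cycles event not available". Below is a minimal, hypothetical user-space sketch of that same presence check, assuming GCC's <cpuid.h>; it is illustrative only and not part of this patch or the kernel sources.

/* Illustrative sketch (assumption: GCC-style <cpuid.h> is available).
 * Mirrors the availability check the removed watchdog code performed:
 * CPUID leaf 0xA, EBX bit 0 set => Unhalted Core Cycles event absent. */
#include <stdio.h>
#include <cpuid.h>

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT (1 << 0)

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0xA is the architectural performance monitoring leaf. */
	if (!__get_cpuid(10, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0xA not supported on this CPU");
		return 1;
	}

	if (ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT)
		puts("Unhalted Core Cycles event NOT available");
	else
		puts("Unhalted Core Cycles event available (usable by an arch-perfmon NMI watchdog)");

	return 0;
}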