author	Stephane Eranian <eranian@hpl.hp.com>	2007-05-02 13:27:05 -0400
committer	Andi Kleen <andi@basil.nowhere.org>	2007-05-02 13:27:05 -0400
commit	405e494d91bac85cc992f55ad434b0f325e399a5 (patch)
tree	2af37f5ad441730e7ebc0846d09a9402d21c3fc9
parent	bf8696ed6dfa561198b4736deaf11ab68dcc4845 (diff)
[PATCH] x86-64: x86_64 make NMI use PERFCTR1 for architectural perfmon (take 2)
Hello,

This patch against 2.6.20-git14 makes the NMI watchdog use PERFSEL1/PERFCTR1
instead of PERFSEL0/PERFCTR0 on processors supporting Intel architectural
perfmon, such as Intel Core 2. Although all PMU events can work on both
counters, Precise Event-Based Sampling (PEBS) requires that the event be in
PERFCTR0 to work correctly (see section 18.14.4.1 in the IA32 SDM Vol 3b).

This version has 3 chunks compared to the previous one, where we had missed
one check.

Changelog:
- make the x86-64 NMI watchdog use PERFSEL1/PERFCTR1 instead of
  PERFSEL0/PERFCTR0 on processors supporting the Intel architectural
  perfmon (e.g. Core 2 Duo). This allows PEBS to work when the NMI
  watchdog is active.

signed-off-by: stephane eranian <eranian@hpl.hp.com>
Signed-off-by: Andi Kleen <ak@suse.de>
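The watchdog can move off counter 0 because it only needs a periodic overflow
NMI: the counter is armed with a negative reload value so it wraps after
roughly cpu_khz * 1000 / nmi_hz unhalted core cycles, and any general-purpose
architectural counter can do that, whereas PEBS is tied to PERFCTR0. Below is
a minimal user-space sketch of that reload arithmetic; the MSR addresses are
the architectural ones from the Intel SDM, but watchdog_reload() and the
sample cpu_khz/nmi_hz values are illustrative assumptions, not kernel code.

#include <stdint.h>
#include <stdio.h>

/* Architectural perfmon MSR addresses (Intel SDM Vol 3b) */
#define MSR_ARCH_PERFMON_PERFCTR0   0xc1
#define MSR_ARCH_PERFMON_PERFCTR1   0xc2   /* now used by the NMI watchdog */
#define MSR_ARCH_PERFMON_EVENTSEL0  0x186
#define MSR_ARCH_PERFMON_EVENTSEL1  0x187

/*
 * Reload value the watchdog writes into its counter: a negative count,
 * so the counter overflows (raising an NMI) after about
 * cpu_khz * 1000 / nmi_hz unhalted core cycles, i.e. roughly nmi_hz
 * times per second on a busy CPU.
 */
static uint64_t watchdog_reload(uint64_t cpu_khz, unsigned nmi_hz)
{
	return (uint64_t)(-(int64_t)(cpu_khz * 1000 / nmi_hz));
}

int main(void)
{
	uint64_t cpu_khz = 2400000;	/* hypothetical 2.4 GHz CPU */
	unsigned nmi_hz = 1;		/* watchdog ticks once per second */

	printf("counter %#x reload value: %#llx\n",
	       MSR_ARCH_PERFMON_PERFCTR1,
	       (unsigned long long)watchdog_reload(cpu_khz, nmi_hz));
	return 0;
}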
-rw-r--r--	arch/x86_64/kernel/nmi.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86_64/kernel/nmi.c b/arch/x86_64/kernel/nmi.c
index dfab9f167366..010d3d9bd56a 100644
--- a/arch/x86_64/kernel/nmi.c
+++ b/arch/x86_64/kernel/nmi.c
@@ -348,7 +348,7 @@ int __init check_nmi_watchdog (void)
 		struct nmi_watchdog_ctlblk *wd = &__get_cpu_var(nmi_watchdog_ctlblk);
 
 		nmi_hz = 1;
-		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0)
+		if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1)
 			nmi_hz = adjust_for_32bit_ctr(nmi_hz);
 	}
 
@@ -713,8 +713,8 @@ static int setup_intel_arch_watchdog(void)
 		(ebx & ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT))
 		goto fail;
 
-	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR0;
-	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL0;
+	perfctr_msr = MSR_ARCH_PERFMON_PERFCTR1;
+	evntsel_msr = MSR_ARCH_PERFMON_EVENTSEL1;
 
 	if (!__reserve_perfctr_nmi(-1, perfctr_msr))
 		goto fail;
@@ -958,7 +958,7 @@ int __kprobes nmi_watchdog_tick(struct pt_regs * regs, unsigned reason)
 		/* start the cycle over again */
 		wrmsrl(wd->perfctr_msr,
 			-((u64)cpu_khz * 1000 / nmi_hz));
-	} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR0) {
+	} else if (wd->perfctr_msr == MSR_ARCH_PERFMON_PERFCTR1) {
 		/*
 		 * ArchPerfom/Core Duo needs to re-unmask
 		 * the apic vector