author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-07-10 03:59:56 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-07-10 04:28:29 -0400
commit		984b838ce69c063a91b87550598ab7f3439dd94a (patch)
tree		70654d87c9964a16642ef6d5d9d3c420f788bfae /arch/x86
parent		9c74fb50867e8fb5f3be3be06716492c0f79309e (diff)
perf_counter: Clean up global vs counter enable
Ingo noticed that both AMD and P6 call
x86_pmu_disable_counter() from *_pmu_enable_counter(). This is
because we rely on the side effect of that call to program
the event config without touching the EN bit.
We change that for AMD by having enable_all() simply write
the full config in, and for P6 by explicitly coding it.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
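
For context: the two helpers named above both write the per-counter
event-select MSR from hwc->config and differ only in the EN bit. A
minimal sketch, paraphrased from this file (bodies approximate, not
verbatim):

    static inline void
    x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
    {
            /* program the event config and set EN: start counting */
            (void)checking_wrmsrl(hwc->config_base + idx,
                                  hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
    }

    static inline void
    x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
    {
            /* program the event config with EN clear -- the side
             * effect the AMD/P6 enable paths were relying on */
            (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
    }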
Diffstat (limited to 'arch/x86')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c7cc6eac14ec..bed1c4c2f251 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -874,13 +874,13 @@ static void amd_pmu_enable_all(void)
 	barrier();
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_counter *counter = cpuc->counters[idx];
 		u64 val;
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
-			continue;
+
+		val = counter->hw.config;
 		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
@@ -1044,11 +1044,13 @@ intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
 static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 {
 	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
+	u64 val;
 
+	val = hwc->config;
 	if (cpuc->enabled)
-		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+
+	(void)checking_wrmsrl(hwc->config_base + idx, val);
 }
 
 
@@ -1068,8 +1070,6 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
-	else
-		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
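
Net effect, reconstructed from the hunks above (surrounding code
assumed unchanged): the P6 enable path now composes the MSR value
itself instead of bouncing through the disable helper:

    static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
    {
            struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
            u64 val;

            val = hwc->config;
            if (cpuc->enabled)
                    val |= ARCH_PERFMON_EVENTSEL0_ENABLE; /* EN only if PMU globally on */

            (void)checking_wrmsrl(hwc->config_base + idx, val);
    }

On the AMD side, enable_all() now writes counter->hw.config with EN
set directly, dropping the rdmsrl() read-modify-write from the loop,
and amd_pmu_enable_counter() no longer touches the MSR at all while
the PMU is globally disabled.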