author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-07-08 04:21:41 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-07-10 04:28:27 -0400
commit     9c74fb50867e8fb5f3be3be06716492c0f79309e
tree       52f5ca52ae0ac6c8430b79ba77f37f7e40a4ec96 /arch/x86
parent     11d1578f9454159c43499d1d8fe8a7d728c176a3
perf_counter: Fix up P6 PMU details
The P6 doesn't seem to support cache ref/hit/miss counts, so
we extend the generic hardware event codes to let 0 and -1
mean the same thing as they do for the generic cache events:
0 is "not supported" and -1 is "invalid".

Furthermore, it turns out that programming a 0 event selector
does not stop the counter (it's reported that on PPro it
actually counts something), therefore use an event
configuration that is specified not to count anything to
disable the counters.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
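
For illustration, here is a minimal user-space sketch of the 0/-1 convention
this patch adopts (demo_event_map and demo_init_counter are made-up names,
not kernel code; only the return-value handling mirrors the change to
__hw_perf_counter_init below): a map entry of 0 means the generic event is
not supported on this PMU (-ENOENT), an entry of -1 marks it invalid
(-EINVAL), and anything else is merged into the counter configuration.

/* Sketch only: the 0 / -1 event-map convention, outside the kernel. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* A made-up event map: index = generic hardware event, value = raw selector. */
static const uint64_t demo_event_map[] = {
        0x0079,         /* index 0: a supported event                */
        0x00c0,         /* index 1: another supported event          */
        0,              /* index 2: not supported here -> -ENOENT    */
        (uint64_t)-1,   /* index 3: explicitly invalid  -> -EINVAL   */
};

static int demo_init_counter(unsigned int generic_event, uint64_t *config)
{
        uint64_t val;

        if (generic_event >= sizeof(demo_event_map) / sizeof(demo_event_map[0]))
                return -EINVAL;

        val = demo_event_map[generic_event];

        if (val == 0)                   /* event not counted on this PMU */
                return -ENOENT;

        if (val == (uint64_t)-1)        /* event explicitly marked invalid */
                return -EINVAL;

        *config |= val;                 /* otherwise merge it into the config */
        return 0;
}

int main(void)
{
        uint64_t cfg;
        unsigned int i;

        for (i = 0; i < 4; i++) {
                cfg = 0;
                printf("event %u -> %d (config=%#llx)\n",
                       i, demo_init_counter(i, &cfg),
                       (unsigned long long)cfg);
        }
        return 0;
}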
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c | 28 +++++++++++++++++++++++-----
1 file changed, 23 insertions(+), 5 deletions(-)
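
A note on the P6_NOP_COUNTER value introduced in the patch below: in the
P6/architectural PERFEVTSEL layout, bits 0-7 are the event select and bits
8-15 the unit mask. Event 0x2E is L2_RQSTS, whose unit mask picks the MESI
states to count; with a unit mask of 0 no request ever qualifies, so the
counter never advances. A small user-space sketch decoding those fields
(DEMO_NOP_COUNTER is just a local stand-in for the same constant):

#include <stdint.h>
#include <stdio.h>

#define DEMO_NOP_COUNTER        0x0000002EULL   /* L2_RQSTS, empty MESI unit mask */

int main(void)
{
        uint64_t evtsel = DEMO_NOP_COUNTER;
        unsigned int event  = evtsel & 0xff;            /* 0x2e = L2_RQSTS            */
        unsigned int umask  = (evtsel >> 8) & 0xff;     /* 0x00: no MESI state chosen */
        unsigned int enable = (evtsel >> 22) & 1;       /* set separately when the    */
                                                        /* PMU is globally enabled    */

        printf("event select 0x%02x, unit mask 0x%02x, enable %u\n",
               event, umask, enable);
        return 0;
}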
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 1910f39ff19a..c7cc6eac14ec 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -84,6 +84,14 @@ static u64 p6_pmu_event_map(int event)
         return p6_perfmon_event_map[event];
 }
 
+/*
+ * Counter setting that is specified not to count anything.
+ * We use this to effectively disable a counter.
+ *
+ * L2_RQSTS with 0 MESI unit mask.
+ */
+#define P6_NOP_COUNTER                 0x0000002EULL
+
 static u64 p6_pmu_raw_event(u64 event)
 {
 #define P6_EVNTSEL_EVENT_MASK          0x000000FFULL
@@ -704,6 +712,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 {
         struct perf_counter_attr *attr = &counter->attr;
         struct hw_perf_counter *hwc = &counter->hw;
+        u64 config;
         int err;
 
         if (!x86_pmu_initialized())
@@ -756,10 +765,19 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
         if (attr->config >= x86_pmu.max_events)
                 return -EINVAL;
+
         /*
          * The generic map:
          */
-        hwc->config |= x86_pmu.event_map(attr->config);
+        config = x86_pmu.event_map(attr->config);
+
+        if (config == 0)
+                return -ENOENT;
+
+        if (config == -1LL)
+                return -EINVAL;
+
+        hwc->config |= config;
 
         return 0;
 }
@@ -767,7 +785,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 static void p6_pmu_disable_all(void)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-        unsigned long val;
+        u64 val;
 
         if (!cpuc->enabled)
                 return;
@@ -917,10 +935,10 @@ static inline void
 p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
         struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
-        unsigned long val = ARCH_PERFMON_EVENTSEL0_ENABLE;
+        u64 val = P6_NOP_COUNTER;
 
-        if (!cpuc->enabled)
-                val = 0;
+        if (cpuc->enabled)
+                val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
 
         (void)checking_wrmsrl(hwc->config_base + idx, val);
 }
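
The net effect of the last hunk: a "disabled" counter is no longer programmed
with event 0 (which reportedly still counts on PPro) but with the NOP event,
and the enable bit is OR'd in only while the PMU as a whole is enabled. A
user-space sketch of the value that ends up being written (the DEMO_* names
are local stand-ins; the enable bit, bit 22, matches
ARCH_PERFMON_EVENTSEL0_ENABLE, and the MSR write itself is not reproduced):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_NOP_COUNTER        0x0000002EULL   /* event that never counts        */
#define DEMO_EVENTSEL_ENABLE    (1ULL << 22)    /* same bit position as the       */
                                                /* architectural enable flag      */

static uint64_t demo_disable_value(bool pmu_enabled)
{
        uint64_t val = DEMO_NOP_COUNTER;        /* point the counter at "nothing" */

        if (pmu_enabled)                        /* keep it armed but idle         */
                val |= DEMO_EVENTSEL_ENABLE;

        return val;
}

int main(void)
{
        printf("PMU disabled: %#llx\n", (unsigned long long)demo_disable_value(false));
        printf("PMU enabled : %#llx\n", (unsigned long long)demo_disable_value(true));
        return 0;
}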