author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-06-11 08:06:28 -0400 |
---|---|---|
committer | Ingo Molnar <mingo@elte.hu> | 2009-06-11 11:54:15 -0400 |
commit | f4dbfa8f3131a84257223393905f7efad0ca5996 (patch) | |
tree | 67bb2666868c4449c2fa9ba6dc931721f60deb6c /arch/x86/kernel/cpu/perf_counter.c | |
parent | 1c432d899d32d36371ee4ee310fa3609cf0e5742 (diff) | |
perf_counter: Standardize event names
Pure renames only, to PERF_COUNT_HW_* and PERF_COUNT_SW_*.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r-- | arch/x86/kernel/cpu/perf_counter.c | 32 |
1 files changed, 16 insertions, 16 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 57ae1bec81be..572fb434a666 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -69,13 +69,13 @@ static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
  */
 static const u64 intel_perfmon_event_map[] =
 {
-  [PERF_COUNT_CPU_CYCLES]		= 0x003c,
-  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
-  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
-  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
-  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
-  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
-  [PERF_COUNT_BUS_CYCLES]		= 0x013c,
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
 };
 
 static u64 intel_pmu_event_map(int event)
@@ -485,12 +485,12 @@ static const u64 amd_0f_hw_cache_event_ids
  */
 static const u64 amd_perfmon_event_map[] =
 {
-  [PERF_COUNT_CPU_CYCLES]		= 0x0076,
-  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
-  [PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
-  [PERF_COUNT_CACHE_MISSES]		= 0x0081,
-  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
-  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
+  [PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
+  [PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
+  [PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
+  [PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
+  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
+  [PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
 };
 
 static u64 amd_pmu_event_map(int event)
@@ -970,11 +970,11 @@ fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
 
 	event = hwc->config & ARCH_PERFMON_EVENT_MASK;
 
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
+	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
 		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
+	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
 		return X86_PMC_IDX_FIXED_CPU_CYCLES;
-	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
+	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
 		return X86_PMC_IDX_FIXED_BUS_CYCLES;
 
 	return -1;
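
For context, a minimal standalone sketch of the pattern this rename standardizes: a generic PERF_COUNT_HW_* index selects a raw, vendor-specific event code from a per-vendor table, exactly as the Intel and AMD maps above do. This is illustrative only, not kernel code; the enum ordering, the example_* names, and the main() scaffolding are assumptions made to keep the example compilable, while the hex values are the Intel codes taken from the diff.

```c
/*
 * Standalone illustration only -- not part of this patch.  The generic
 * PERF_COUNT_HW_* identifiers index a table of raw hardware event codes.
 * Values are the Intel codes from the diff above; the enum ordering and
 * main() scaffolding are assumptions for a self-contained example.
 */
#include <inttypes.h>
#include <stdio.h>

enum perf_hw_id {
	PERF_COUNT_HW_CPU_CYCLES,
	PERF_COUNT_HW_INSTRUCTIONS,
	PERF_COUNT_HW_BUS_CYCLES,
	PERF_COUNT_HW_MAX,
};

static const uint64_t example_intel_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]	= 0x00c0,
	[PERF_COUNT_HW_BUS_CYCLES]	= 0x013c,
};

/* Plays the role that x86_pmu.event_map() plays in the patched file. */
static uint64_t example_event_map(int event)
{
	return example_intel_event_map[event];
}

int main(void)
{
	printf("PERF_COUNT_HW_CPU_CYCLES -> raw 0x%04" PRIx64 "\n",
	       example_event_map(PERF_COUNT_HW_CPU_CYCLES));
	return 0;
}
```

In the kernel itself the lookup goes through the vendor callback x86_pmu.event_map(), as seen in fixed_mode_idx() in the last hunk above.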