author	Jacob Shin <jacob.shin@amd.com>	2013-02-06 12:26:27 -0500
committer	Ingo Molnar <mingo@kernel.org>	2013-02-06 13:45:24 -0500
commit	4c1fd17a1cb32bc4f429c7a5ff9a91a3bffdb8fa (patch)
tree	d6de83c171dba118665d840c0d8ead5f482c3ea7 /arch
parent	9f19010af8c651879ac2c36f1a808a3a4419cd40 (diff)
perf/x86: Move MSR address offset calculation to architecture specific files
Move the counter-index to MSR-address offset calculation into architecture specific files. This prepares the way for perf_event_amd to enable counter addresses that are not contiguous -- for example, AMD Family 15h processors have 6 core performance counters starting at 0xc0010200 and 4 northbridge performance counters starting at 0xc0010240.

Signed-off-by: Jacob Shin <jacob.shin@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Cc: Stephane Eranian <eranian@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1360171589-6381-5-git-send-email-jacob.shin@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
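As a concrete illustration of the non-contiguous layout described above, here is a minimal user-space sketch (not kernel code). The MSR bases are the ones quoted in the commit message; applying the same stride-2 CTL/CTR interleave to the northbridge counters is an assumption made for illustration only:

#include <stdio.h>

/*
 * Illustration only, not kernel code. Bases are taken from the commit
 * message: on AMD Family 15h the 6 core counters start at 0xc0010200
 * and the 4 northbridge counters at 0xc0010240, so a single contiguous
 * "base + index" scheme can no longer reach every counter.
 *
 * The stride-2 step reflects the interleaved event-select/counter MSR
 * pairs on perfctr-core parts (see the "offset by 2" comment in the
 * AMD diff below); treating the NB counters the same way is an
 * assumption here.
 */
#define F15H_CORE_CTL_BASE 0xc0010200u
#define F15H_NB_CTL_BASE   0xc0010240u

int main(void)
{
	for (int i = 0; i < 6; i++)
		printf("core eventsel %d -> MSR 0x%x\n",
		       i, F15H_CORE_CTL_BASE + (i << 1));
	for (int i = 0; i < 4; i++)
		printf("nb   eventsel %d -> MSR 0x%x\n",
		       i, F15H_NB_CTL_BASE + (i << 1));
	return 0;
}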
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.h	21
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd.c	42
2 files changed, 47 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 115c1ea97746..a7f06a90d2e7 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -325,6 +325,7 @@ struct x86_pmu {
 	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
 	unsigned	eventsel;
 	unsigned	perfctr;
+	int		(*addr_offset)(int index, bool eventsel);
 	u64		(*event_map)(int);
 	int		max_events;
 	int		num_counters;
@@ -446,28 +447,16 @@ extern u64 __read_mostly hw_cache_extra_regs
 
 u64 x86_perf_event_update(struct perf_event *event);
 
-static inline int x86_pmu_addr_offset(int index)
-{
-	int offset;
-
-	/* offset = X86_FEATURE_PERFCTR_CORE ? index << 1 : index */
-	alternative_io(ASM_NOP2,
-		       "shll $1, %%eax",
-		       X86_FEATURE_PERFCTR_CORE,
-		       "=a" (offset),
-		       "a"  (index));
-
-	return offset;
-}
-
 static inline unsigned int x86_pmu_config_addr(int index)
 {
-	return x86_pmu.eventsel + x86_pmu_addr_offset(index);
+	return x86_pmu.eventsel + (x86_pmu.addr_offset ?
+				   x86_pmu.addr_offset(index, true) : index);
 }
 
 static inline unsigned int x86_pmu_event_addr(int index)
 {
-	return x86_pmu.perfctr + x86_pmu_addr_offset(index);
+	return x86_pmu.perfctr + (x86_pmu.addr_offset ?
+				   x86_pmu.addr_offset(index, false) : index);
 }
 
 int x86_setup_perfctr(struct perf_event *event);
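The fallback behaviour of the two helpers above can be modelled in plain user-space C. This is a reduced sketch, not the kernel's code: struct x86_pmu is cut down to the three fields involved, and the K7 MSR bases come from the AMD diff below. With no addr_offset callback installed, the offset degenerates to the raw index, preserving the old contiguous-MSR behaviour for PMUs that do not override it:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

/* Reduced model of struct x86_pmu -- illustration only. */
struct x86_pmu_model {
	unsigned int eventsel;		/* base MSR for event selects */
	unsigned int perfctr;		/* base MSR for counters */
	int (*addr_offset)(int index, bool eventsel);
};

static unsigned int config_addr(struct x86_pmu_model *pmu, int index)
{
	/* Same shape as x86_pmu_config_addr() in the hunk above. */
	return pmu->eventsel + (pmu->addr_offset ?
				pmu->addr_offset(index, true) : index);
}

int main(void)
{
	/* No callback: offset == index, i.e. the legacy contiguous layout. */
	struct x86_pmu_model legacy = { 0xc0010000u, 0xc0010004u, NULL };

	printf("eventsel 2 -> MSR 0x%x\n", config_addr(&legacy, 2));
	return 0;
}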
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index aea8c2021f78..b60f31caeda0 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,6 +132,47 @@ static u64 amd_pmu_event_map(int hw_event)
 	return amd_perfmon_event_map[hw_event];
 }
 
+/*
+ * Previously calculated offsets
+ */
+static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
+static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
+
+/*
+ * Legacy CPUs:
+ *   4 counters starting at 0xc0010000 each offset by 1
+ *
+ * CPUs with core performance counter extensions:
+ *   6 counters starting at 0xc0010200 each offset by 2
+ */
+static inline int amd_pmu_addr_offset(int index, bool eventsel)
+{
+	int offset;
+
+	if (!index)
+		return index;
+
+	if (eventsel)
+		offset = event_offsets[index];
+	else
+		offset = count_offsets[index];
+
+	if (offset)
+		return offset;
+
+	if (!cpu_has_perfctr_core)
+		offset = index;
+	else
+		offset = index << 1;
+
+	if (eventsel)
+		event_offsets[index] = offset;
+	else
+		count_offsets[index] = offset;
+
+	return offset;
+}
+
 static int amd_pmu_hw_config(struct perf_event *event)
 {
 	int ret;
@@ -578,6 +619,7 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.schedule_events	= x86_schedule_events,
 	.eventsel		= MSR_K7_EVNTSEL0,
 	.perfctr		= MSR_K7_PERFCTR0,
+	.addr_offset		= amd_pmu_addr_offset,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
 	.num_counters		= AMD64_NUM_COUNTERS,
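Finally, a self-contained sketch of the new amd_pmu_addr_offset() logic as added above. cpu_has_perfctr_core is modelled here as a plain bool (in the kernel it is a CPU feature test), the array size stands in for X86_PMC_IDX_MAX, and the 0xc0010200 base comes from the comment block in the diff; the base MSRs for the perfctr-core case are set up elsewhere in the driver. The two offset arrays memoize the result so the branchy calculation runs at most once per counter index:

#include <stdio.h>
#include <stdbool.h>

#define X86_PMC_IDX_MAX 64

static bool cpu_has_perfctr_core;	/* stand-in for the CPU feature test */
static unsigned int event_offsets[X86_PMC_IDX_MAX];
static unsigned int count_offsets[X86_PMC_IDX_MAX];

static int amd_pmu_addr_offset(int index, bool eventsel)
{
	int offset;

	if (!index)
		return index;		/* counter 0 sits at the base MSR */

	/* Return a previously calculated (memoized) offset, if any. */
	offset = eventsel ? event_offsets[index] : count_offsets[index];
	if (offset)
		return offset;

	/* Legacy: stride 1. Core extensions: CTL/CTR interleave, stride 2. */
	offset = cpu_has_perfctr_core ? index << 1 : index;

	if (eventsel)
		event_offsets[index] = offset;
	else
		count_offsets[index] = offset;

	return offset;
}

int main(void)
{
	cpu_has_perfctr_core = true;	/* pretend Family 15h core extensions */

	/* eventsel MSR for counter 3: 0xc0010200 + (3 << 1) = 0xc0010206 */
	printf("offset for eventsel, index 3: %d\n",
	       amd_pmu_addr_offset(3, true));
	printf("offset for counter,  index 3: %d\n",
	       amd_pmu_addr_offset(3, false));
	return 0;
}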