author		Jacob Shin <jacob.shin@amd.com>		2013-04-15 13:21:22 -0400
committer	Ingo Molnar <mingo@kernel.org>		2013-04-21 11:21:59 -0400
commit		0cf5f4323b1b51ecca3e952f95110e03ea611882 (patch)
tree		cb23a43164a6a553f92afacc0cb60aa81a76a522
parent		a5ebe0ba3dff658c5286e8d5f20e4328f719d5a3 (diff)
perf/x86/amd: Remove old-style NB counter support from perf_event_amd.c
Support for NB counters, MSRs 0xc0010240 ~ 0xc0010247, got moved to perf_event_amd_uncore.c in the following commit:

  c43ca5091a37 perf/x86/amd: Add support for AMD NB and L2I "uncore" counters

AMD Family 10h NB events (events 0xe0 ~ 0xff, on MSRs 0xc0010000 ~ 0xc0010007) will continue to be handled by perf_event_amd.c.

Signed-off-by: Jacob Shin <jacob.shin@amd.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Stephane Eranian <eranian@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Jacob Shin <jacob.shin@amd.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
Link: http://lkml.kernel.org/r/1366046483-1765-2-git-send-email-jacob.shin@amd.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/kernel/cpu/perf_event_amd.c	138
1 file changed, 5 insertions(+), 133 deletions(-)
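As background for the hunks below: after this patch, amd_pmu_addr_offset() only has to distinguish legacy counters (4 counters starting at MSR 0xc0010000, each offset by 1) from the core performance counter extension (6 counters starting at MSR 0xc0010200, each offset by 2). The following stand-alone sketch is not kernel code; it is a minimal user-space model of that address math, assuming the usual bases MSR_K7_EVNTSEL0 (0xc0010000) and MSR_F15H_PERF_CTL (0xc0010200), which the driver selects elsewhere (setup_perfctr_core()).

/*
 * Minimal user-space sketch (not kernel code) of the address scheme that
 * remains in amd_pmu_addr_offset() after this patch.  Assumes the usual
 * event-select bases: MSR_K7_EVNTSEL0 (0xc0010000) for legacy CPUs and
 * MSR_F15H_PERF_CTL (0xc0010200) for CPUs with the core performance
 * counter extension.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int eventsel_msr(int index, bool has_perfctr_core)
{
	/* base MSR chosen by the driver depending on CPU capabilities */
	unsigned int base   = has_perfctr_core ? 0xc0010200 : 0xc0010000;
	/* offset rule kept by this patch: index, or index << 1 */
	unsigned int offset = has_perfctr_core ? (index << 1) : index;

	return base + offset;
}

int main(void)
{
	int i;

	for (i = 0; i < 4; i++)
		printf("legacy       counter %d -> eventsel MSR 0x%x\n",
		       i, eventsel_msr(i, false));
	for (i = 0; i < 6; i++)
		printf("perfctr_core counter %d -> eventsel MSR 0x%x\n",
		       i, eventsel_msr(i, true));
	return 0;
}

This prints 0xc0010000..0xc0010003 for the legacy layout and 0xc0010200, 0xc0010202, ... for the extended layout, matching the comment block retained in the diff.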
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index dfdab42aed27..7e28d9467bb4 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -132,14 +132,11 @@ static u64 amd_pmu_event_map(int hw_event)
 	return amd_perfmon_event_map[hw_event];
 }
 
-static struct event_constraint *amd_nb_event_constraint;
-
 /*
  * Previously calculated offsets
  */
 static unsigned int event_offsets[X86_PMC_IDX_MAX] __read_mostly;
 static unsigned int count_offsets[X86_PMC_IDX_MAX] __read_mostly;
-static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
 
 /*
  * Legacy CPUs:
@@ -147,14 +144,10 @@ static unsigned int rdpmc_indexes[X86_PMC_IDX_MAX] __read_mostly;
  *
  * CPUs with core performance counter extensions:
  *   6 counters starting at 0xc0010200 each offset by 2
- *
- * CPUs with north bridge performance counter extensions:
- *   4 additional counters starting at 0xc0010240 each offset by 2
- *   (indexed right above either one of the above core counters)
  */
 static inline int amd_pmu_addr_offset(int index, bool eventsel)
 {
-	int offset, first, base;
+	int offset;
 
 	if (!index)
 		return index;
@@ -167,23 +160,7 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
 	if (offset)
 		return offset;
 
-	if (amd_nb_event_constraint &&
-	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
-		/*
-		 * calculate the offset of NB counters with respect to
-		 * base eventsel or perfctr
-		 */
-
-		first = find_first_bit(amd_nb_event_constraint->idxmsk,
-				       X86_PMC_IDX_MAX);
-
-		if (eventsel)
-			base = MSR_F15H_NB_PERF_CTL - x86_pmu.eventsel;
-		else
-			base = MSR_F15H_NB_PERF_CTR - x86_pmu.perfctr;
-
-		offset = base + ((index - first) << 1);
-	} else if (!cpu_has_perfctr_core)
+	if (!cpu_has_perfctr_core)
 		offset = index;
 	else
 		offset = index << 1;
@@ -196,36 +173,6 @@ static inline int amd_pmu_addr_offset(int index, bool eventsel)
 	return offset;
 }
 
-static inline int amd_pmu_rdpmc_index(int index)
-{
-	int ret, first;
-
-	if (!index)
-		return index;
-
-	ret = rdpmc_indexes[index];
-
-	if (ret)
-		return ret;
-
-	if (amd_nb_event_constraint &&
-	    test_bit(index, amd_nb_event_constraint->idxmsk)) {
-		/*
-		 * according to the mnual, ECX value of the NB counters is
-		 * the index of the NB counter (0, 1, 2 or 3) plus 6
-		 */
-
-		first = find_first_bit(amd_nb_event_constraint->idxmsk,
-				       X86_PMC_IDX_MAX);
-		ret = index - first + 6;
-	} else
-		ret = index;
-
-	rdpmc_indexes[index] = ret;
-
-	return ret;
-}
-
 static int amd_core_hw_config(struct perf_event *event)
 {
 	if (event->attr.exclude_host && event->attr.exclude_guest)
@@ -245,34 +192,6 @@ static int amd_core_hw_config(struct perf_event *event)
 }
 
 /*
- * NB counters do not support the following event select bits:
- *   Host/Guest only
- *   Counter mask
- *   Invert counter mask
- *   Edge detect
- *   OS/User mode
- */
-static int amd_nb_hw_config(struct perf_event *event)
-{
-	/* for NB, we only allow system wide counting mode */
-	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
-		return -EINVAL;
-
-	if (event->attr.exclude_user || event->attr.exclude_kernel ||
-	    event->attr.exclude_host || event->attr.exclude_guest)
-		return -EINVAL;
-
-	event->hw.config &= ~(ARCH_PERFMON_EVENTSEL_USR |
-			      ARCH_PERFMON_EVENTSEL_OS);
-
-	if (event->hw.config & ~(AMD64_RAW_EVENT_MASK_NB |
-				 ARCH_PERFMON_EVENTSEL_INT))
-		return -EINVAL;
-
-	return 0;
-}
-
-/*
  * AMD64 events are detected based on their event codes.
  */
 static inline unsigned int amd_get_event_code(struct hw_perf_event *hwc)
@@ -285,11 +204,6 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
 	return (hwc->config & 0xe0) == 0xe0;
 }
 
-static inline int amd_is_perfctr_nb_event(struct hw_perf_event *hwc)
-{
-	return amd_nb_event_constraint && amd_is_nb_event(hwc);
-}
-
 static inline int amd_has_nb(struct cpu_hw_events *cpuc)
 {
 	struct amd_nb *nb = cpuc->amd_nb;
@@ -315,9 +229,6 @@ static int amd_pmu_hw_config(struct perf_event *event)
 	if (event->attr.type == PERF_TYPE_RAW)
 		event->hw.config |= event->attr.config & AMD64_RAW_EVENT_MASK;
 
-	if (amd_is_perfctr_nb_event(&event->hw))
-		return amd_nb_hw_config(event);
-
 	return amd_core_hw_config(event);
 }
 
@@ -341,19 +252,6 @@ static void __amd_put_nb_event_constraints(struct cpu_hw_events *cpuc,
 	}
 }
 
-static void amd_nb_interrupt_hw_config(struct hw_perf_event *hwc)
-{
-	int core_id = cpu_data(smp_processor_id()).cpu_core_id;
-
-	/* deliver interrupts only to this core */
-	if (hwc->config & ARCH_PERFMON_EVENTSEL_INT) {
-		hwc->config |= AMD64_EVENTSEL_INT_CORE_ENABLE;
-		hwc->config &= ~AMD64_EVENTSEL_INT_CORE_SEL_MASK;
-		hwc->config |= (u64)(core_id) <<
-			AMD64_EVENTSEL_INT_CORE_SEL_SHIFT;
-	}
-}
-
 /*
  * AMD64 NorthBridge events need special treatment because
  * counter access needs to be synchronized across all cores
@@ -441,9 +339,6 @@ __amd_get_nb_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *ev
 	if (new == -1)
 		return &emptyconstraint;
 
-	if (amd_is_perfctr_nb_event(hwc))
-		amd_nb_interrupt_hw_config(hwc);
-
 	return &nb->event_constraints[new];
 }
 
@@ -543,8 +438,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 	if (!(amd_has_nb(cpuc) && amd_is_nb_event(&event->hw)))
 		return &unconstrained;
 
-	return __amd_get_nb_event_constraints(cpuc, event,
-					      amd_nb_event_constraint);
+	return __amd_get_nb_event_constraints(cpuc, event, NULL);
 }
 
 static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
@@ -643,9 +537,6 @@ static struct event_constraint amd_f15_PMC30 = EVENT_CONSTRAINT_OVERLAP(0, 0x09,
 static struct event_constraint amd_f15_PMC50 = EVENT_CONSTRAINT(0, 0x3F, 0);
 static struct event_constraint amd_f15_PMC53 = EVENT_CONSTRAINT(0, 0x38, 0);
 
-static struct event_constraint amd_NBPMC96 = EVENT_CONSTRAINT(0, 0x3C0, 0);
-static struct event_constraint amd_NBPMC74 = EVENT_CONSTRAINT(0, 0xF0, 0);
-
 static struct event_constraint *
 amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
@@ -711,8 +602,8 @@ amd_get_event_constraints_f15h(struct cpu_hw_events *cpuc, struct perf_event *ev
 			return &amd_f15_PMC20;
 		}
 	case AMD_EVENT_NB:
-		return __amd_get_nb_event_constraints(cpuc, event,
-						      amd_nb_event_constraint);
+		/* moved to perf_event_amd_uncore.c */
+		return &emptyconstraint;
 	default:
 		return &emptyconstraint;
 	}
@@ -738,7 +629,6 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.eventsel		= MSR_K7_EVNTSEL0,
 	.perfctr		= MSR_K7_PERFCTR0,
 	.addr_offset		= amd_pmu_addr_offset,
-	.rdpmc_index		= amd_pmu_rdpmc_index,
 	.event_map		= amd_pmu_event_map,
 	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
 	.num_counters		= AMD64_NUM_COUNTERS,
@@ -790,23 +680,6 @@ static int setup_perfctr_core(void)
 	return 0;
 }
 
-static int setup_perfctr_nb(void)
-{
-	if (!cpu_has_perfctr_nb)
-		return -ENODEV;
-
-	x86_pmu.num_counters += AMD64_NUM_COUNTERS_NB;
-
-	if (cpu_has_perfctr_core)
-		amd_nb_event_constraint = &amd_NBPMC96;
-	else
-		amd_nb_event_constraint = &amd_NBPMC74;
-
-	printk(KERN_INFO "perf: AMD northbridge performance counters detected\n");
-
-	return 0;
-}
-
 __init int amd_pmu_init(void)
 {
 	/* Performance-monitoring supported from K7 and later: */
@@ -817,7 +690,6 @@ __init int amd_pmu_init(void)
 
 	setup_event_constraints();
 	setup_perfctr_core();
-	setup_perfctr_nb();
 
 	/* Events are common for all AMDs */
 	memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
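
The split described in the commit message, Family 15h NB counter MSRs going to the uncore driver while Family 10h-style NB events (event codes 0xe0 ~ 0xff) stay in this file, hinges on the unchanged amd_is_nb_event() test shown above. The stand-alone sketch below is not kernel code; it only mirrors that classification, and the example event codes are illustrative rather than taken from this patch.

/*
 * Stand-alone sketch (not kernel code) of the amd_is_nb_event() test kept
 * by this patch: NB events occupy event codes 0xe0-0xff, so the low
 * event-code byte has its top three bits set.
 */
#include <stdbool.h>
#include <stdio.h>

static bool is_nb_event(unsigned long long config)
{
	return (config & 0xe0) == 0xe0;	/* same test as amd_is_nb_event() */
}

int main(void)
{
	/* example event codes, for illustration only */
	printf("event 0xe0 -> %s\n", is_nb_event(0xe0) ? "NB" : "core");
	printf("event 0x76 -> %s\n", is_nb_event(0x76) ? "NB" : "core");
	return 0;
}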