author:    Robert Richter <robert.richter@amd.com>  2011-02-02 11:40:57 -0500
committer: Ingo Molnar <mingo@elte.hu>              2011-02-16 07:30:50 -0500
commit:    41bf498949a263fa0b2d32524b89d696ac330e94
tree:      dfcd9497b87942185ef1db406c8b236a89bc1b4b /arch/x86/kernel/cpu/perf_event.c
parent:    d45dd923fcc620c948bd1eda16cc61426ac31646
perf, x86: Calculate perfctr msr addresses in helper functions
This patch adds helper functions to calculate perfctr MSR addresses.
We need this to later add support for AMD family 15h CPUs, which
require a different algorithm for generating the perfctr MSR
addresses.

Signed-off-by: Robert Richter <robert.richter@amd.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1296664860-10886-3-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
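For context, the helpers introduced below just add the counter index to the
per-vendor base MSR held in struct x86_pmu. The standalone sketch that follows
is illustrative only: the struct and function names (x86_pmu_stub, config_addr,
event_addr) are made up for the example, while the base MSR values are the
usual architectural Intel (MSR_ARCH_PERFMON_EVENTSEL0 = 0x186,
MSR_ARCH_PERFMON_PERFCTR0 = 0xc1) and AMD K7-style (MSR_K7_EVNTSEL0 =
0xc0010000, MSR_K7_PERFCTR0 = 0xc0010004) bases.

/* Illustrative sketch, not part of the patch: how a counter index is
 * resolved to an MSR address given the bases that x86_pmu.eventsel and
 * x86_pmu.perfctr typically hold.
 */
#include <stdio.h>

struct x86_pmu_stub {
	unsigned int eventsel;	/* base of the event-select MSRs */
	unsigned int perfctr;	/* base of the counter MSRs */
};

static unsigned int config_addr(const struct x86_pmu_stub *pmu, int index)
{
	return pmu->eventsel + index;	/* same arithmetic as x86_pmu_config_addr() */
}

static unsigned int event_addr(const struct x86_pmu_stub *pmu, int index)
{
	return pmu->perfctr + index;	/* same arithmetic as x86_pmu_event_addr() */
}

int main(void)
{
	/* K7-style AMD bases; Intel would use 0x186 / 0xc1 instead */
	struct x86_pmu_stub amd = { .eventsel = 0xc0010000, .perfctr = 0xc0010004 };
	int i;

	for (i = 0; i < 4; i++)
		printf("counter %d: evntsel %#x, perfctr %#x\n",
		       i, config_addr(&amd, i), event_addr(&amd, i));
	return 0;
}

With every call site funneled through the two helpers, a later change can swap
in a different address calculation without touching the callers again.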
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
 arch/x86/kernel/cpu/perf_event.c | 36 +++++++++++++++++++++++-------------
 1 file changed, 23 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 70d6d8fc2411..ee40c1ad0ebc 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -321,6 +321,16 @@ again:
 	return new_raw_count;
 }
 
+static inline unsigned int x86_pmu_config_addr(int index)
+{
+	return x86_pmu.eventsel + index;
+}
+
+static inline unsigned int x86_pmu_event_addr(int index)
+{
+	return x86_pmu.perfctr + index;
+}
+
 static atomic_t active_events;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
@@ -331,12 +341,12 @@ static bool reserve_pmc_hardware(void)
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
+		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
 			goto perfctr_fail;
 	}
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
+		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
 			goto eventsel_fail;
 	}
 
@@ -344,13 +354,13 @@ static bool reserve_pmc_hardware(void)
 
 eventsel_fail:
 	for (i--; i >= 0; i--)
-		release_evntsel_nmi(x86_pmu.eventsel + i);
+		release_evntsel_nmi(x86_pmu_config_addr(i));
 
 	i = x86_pmu.num_counters;
 
 perfctr_fail:
 	for (i--; i >= 0; i--)
-		release_perfctr_nmi(x86_pmu.perfctr + i);
+		release_perfctr_nmi(x86_pmu_event_addr(i));
 
 	return false;
 }
@@ -360,8 +370,8 @@ static void release_pmc_hardware(void)
 	int i;
 
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		release_perfctr_nmi(x86_pmu.perfctr + i);
-		release_evntsel_nmi(x86_pmu.eventsel + i);
+		release_perfctr_nmi(x86_pmu_event_addr(i));
+		release_evntsel_nmi(x86_pmu_config_addr(i));
 	}
 }
 
@@ -382,7 +392,7 @@ static bool check_hw_exists(void)
 	 * complain and bail.
 	 */
 	for (i = 0; i < x86_pmu.num_counters; i++) {
-		reg = x86_pmu.eventsel + i;
+		reg = x86_pmu_config_addr(i);
 		ret = rdmsrl_safe(reg, &val);
 		if (ret)
 			goto msr_fail;
@@ -407,8 +417,8 @@ static bool check_hw_exists(void)
 	 * that don't trap on the MSR access and always return 0s.
 	 */
 	val = 0xabcdUL;
-	ret = checking_wrmsrl(x86_pmu.perfctr, val);
-	ret |= rdmsrl_safe(x86_pmu.perfctr, &val_new);
+	ret = checking_wrmsrl(x86_pmu_event_addr(0), val);
+	ret |= rdmsrl_safe(x86_pmu_event_addr(0), &val_new);
 	if (ret || val != val_new)
 		goto msr_fail;
 
@@ -617,11 +627,11 @@ static void x86_pmu_disable_all(void)
 
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(x86_pmu.eventsel + idx, val);
+		rdmsrl(x86_pmu_config_addr(idx), val);
 		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
 			continue;
 		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-		wrmsrl(x86_pmu.eventsel + idx, val);
+		wrmsrl(x86_pmu_config_addr(idx), val);
 	}
 }
 
@@ -1110,8 +1120,8 @@ void perf_event_print_debug(void)
 	pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
-		rdmsrl(x86_pmu.perfctr + idx, pmc_count);
+		rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
+		rdmsrl(x86_pmu_event_addr(idx), pmc_count);
 
 		prev_left = per_cpu(pmc_prev_left[idx], cpu);
 