author		Robert Richter <robert.richter@amd.com>	2009-04-29 06:47:19 -0400
committer	Ingo Molnar <mingo@elte.hu>			2009-04-29 08:51:11 -0400
commit		d43698918bd46c71d494555fb92195fbea1fcb6c (patch)
tree		5331574db6fa4bdefa3da6e06a266631ff358d17 /arch
parent		7c90cc45f89af4dd4617f97d452740ad95b800d5 (diff)
perf_counter, x86: rework counter disable functions
As with the enable function, this patch reworks the disable functions and
introduces x86_pmu_disable_counter(). The internal function interface in
struct x86_pmu changed too.

[ Impact: refactor and generalize code ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-23-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
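To make the interface change concrete, the following is a small, self-contained user-space sketch (an illustration, not the kernel code): the ->disable callback now takes (struct hw_perf_counter *, int) instead of (int, u64), and both vendor hooks can fall through to the shared x86_pmu_disable_counter(), which writes hwc->config back to the event-select MSR at hwc->config_base + idx. The types are trimmed down, the real MSR write is replaced by a printf stub, and the example config/config_base values are arbitrary placeholders.

/*
 * Simplified user-space model of the reworked disable path.
 * In the kernel, x86_pmu_disable_counter() writes hwc->config to the
 * MSR at hwc->config_base + idx via checking_wrmsrl(); here the MSR
 * write is stubbed out so the sketch can be compiled and run as-is.
 */
#include <stdio.h>
#include <stdint.h>

struct hw_perf_counter {
	uint64_t	config;		/* event config, without the enable bit */
	unsigned	config_base;	/* first event-select MSR (vendor specific) */
};

struct x86_pmu {
	/* new interface: callbacks take the counter itself plus its index */
	void	(*enable)(struct hw_perf_counter *, int);
	void	(*disable)(struct hw_perf_counter *, int);
};

/* stand-in for the kernel's checking_wrmsrl() */
static int checking_wrmsrl(unsigned msr, uint64_t val)
{
	printf("wrmsr 0x%x <- 0x%llx\n", msr, (unsigned long long)val);
	return 0;
}

/* generic helper introduced by the patch, shared by both vendors */
static void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	checking_wrmsrl(hwc->config_base + idx, hwc->config);
}

static void intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	/* fixed-counter special case elided in this sketch */
	x86_pmu_disable_counter(hwc, idx);
}

static void amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

int main(void)
{
	/* wire the vendor hooks into the new-style callback slots */
	struct x86_pmu intel = { .disable = intel_pmu_disable_counter };
	struct x86_pmu amd   = { .disable = amd_pmu_disable_counter };

	/* arbitrary example values, not taken from real event tables */
	struct hw_perf_counter hwc = { .config = 0x76, .config_base = 0xc0010000 };

	/* callers now pass (hwc, idx) instead of (idx, config) */
	intel.disable(&hwc, 0);
	amd.disable(&hwc, 1);
	return 0;
}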
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	48
1 file changed, 23 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index ae55933ce79c..df9012bbd211 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -45,7 +45,7 @@ struct x86_pmu {
 	u64		(*save_disable_all)(void);
 	void		(*restore_all)(u64);
 	void		(*enable)(struct hw_perf_counter *, int);
-	void		(*disable)(int, u64);
+	void		(*disable)(struct hw_perf_counter *, int);
 	unsigned	eventsel;
 	unsigned	perfctr;
 	u64		(*event_map)(int);
@@ -425,28 +425,19 @@ static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 		      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
 }
 
-static void intel_pmu_disable_counter(int idx, u64 config)
+static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0 + idx, config);
-}
-
-static void amd_pmu_disable_counter(int idx, u64 config)
-{
-	wrmsrl(MSR_K7_EVNTSEL0 + idx, config);
-
-}
-
-static void hw_perf_disable(int idx, u64 config)
-{
+	int err;
+
 	if (unlikely(!perf_counters_initialized))
 		return;
 
-	x86_pmu.disable(idx, config);
+	err = checking_wrmsrl(hwc->config_base + idx,
+			      hwc->config);
 }
 
 static inline void
-__pmc_fixed_disable(struct perf_counter *counter,
-		    struct hw_perf_counter *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
 {
 	int idx = __idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
@@ -460,13 +451,20 @@ __pmc_fixed_disable(struct perf_counter *counter,
 }
 
 static inline void
-__x86_pmu_disable(struct perf_counter *counter,
-		  struct hw_perf_counter *hwc, int idx)
+intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
 {
-	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL))
-		__pmc_fixed_disable(counter, hwc, idx);
-	else
-		hw_perf_disable(idx, hwc->config);
+	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
+		intel_pmu_disable_fixed(hwc, idx);
+		return;
+	}
+
+	x86_pmu_disable_counter(hwc, idx);
+}
+
+static inline void
+amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
+{
+	x86_pmu_disable_counter(hwc, idx);
 }
 
 static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);
@@ -551,7 +549,7 @@ static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
 	if (cpuc->enabled)
 		x86_pmu_enable_counter(hwc, idx);
 	else
-		amd_pmu_disable_counter(idx, hwc->config);
+		x86_pmu_disable_counter(hwc, idx);
 }
 
 static int
@@ -622,7 +620,7 @@ try_generic:
 
 	perf_counters_lapic_init(hwc->nmi);
 
-	__x86_pmu_disable(counter, hwc, idx);
+	x86_pmu.disable(hwc, idx);
 
 	cpuc->counters[idx] = counter;
 	set_bit(idx, cpuc->active);
@@ -694,7 +692,7 @@ static void x86_pmu_disable(struct perf_counter *counter)
 	 * could reenable again:
 	 */
 	clear_bit(idx, cpuc->active);
-	__x86_pmu_disable(counter, hwc, idx);
+	x86_pmu.disable(hwc, idx);
 
 	/*
 	 * Make sure the cleared pointer becomes visible before we
@@ -762,7 +760,7 @@ again:
 
 		intel_pmu_save_and_restart(counter);
 		if (perf_counter_overflow(counter, nmi, regs, 0))
-			__x86_pmu_disable(counter, &counter->hw, bit);
+			intel_pmu_disable_counter(&counter->hw, bit);
 	}
 
 	intel_pmu_ack_status(ack);