author    Robert Richter <robert.richter@amd.com>    2009-04-29 06:47:21 -0400
committer Ingo Molnar <mingo@elte.hu>    2009-04-29 08:51:12 -0400
commit    a29aa8a7ff93e4196d558036928597e68337dd8d (patch)
tree      5bf6bb57dba4440c90d0218438940603c286690d /arch/x86/kernel/cpu/perf_counter.c
parent    85cf9dba92152bb4edec118b2f4f0be1ae7fdcab (diff)
perf_counter, x86: implement the interrupt handler for AMD cpus
This patch implements the interrupt handler for AMD performance counters. Unlike the Intel PMU, there is no single status register and there are no fixed counters. This makes the handler very different, so it is useful to make the handler vendor specific.

To check whether a counter has overflowed, the upper bit of the counter is checked. Only counters whose active bit is set are checked.

With this patch, throttling is enabled for AMD performance counters. This patch also re-enables Linux performance counters on AMD CPUs.

[ Impact: re-enable perfcounters on AMD CPUs ]

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-25-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
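The upper-bit overflow test described above can be illustrated with a minimal user-space sketch (not part of the patch). It assumes a 48-bit counter width, which is what x86_pmu.counter_bits appears to be set to for AMD in this file; COUNTER_BITS, COUNTER_MASK and counter_overflowed() are invented names for the example. A counter armed with -period starts with its top bit set; once it has counted 'period' events it wraps past zero, the top bit clears, and the handler treats it as overflowed and reprograms the period.

/*
 * Stand-alone sketch (not kernel code) of the "upper bit" overflow test.
 * COUNTER_BITS/COUNTER_MASK/counter_overflowed() are hypothetical names;
 * the 48-bit width is an assumption matching the AMD counter width.
 */
#include <stdint.h>
#include <stdio.h>

#define COUNTER_BITS	48
#define COUNTER_MASK	((1ULL << COUNTER_BITS) - 1)

/* Top bit set: still counting up toward the wrap. Top bit clear: wrapped. */
static int counter_overflowed(uint64_t val)
{
	return !(val & (1ULL << (COUNTER_BITS - 1)));
}

int main(void)
{
	uint64_t period = 100000;
	/* arm the counter with -period, truncated to the counter width */
	uint64_t val = (0 - period) & COUNTER_MASK;

	printf("armed:      overflowed=%d\n", counter_overflowed(val));	/* 0 */

	/* after 'period' events the counter wraps past zero */
	val = (val + period) & COUNTER_MASK;
	printf("after wrap: overflowed=%d\n", counter_overflowed(val));	/* 1 */
	return 0;
}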
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  45
1 file changed, 37 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 2d3681bbb522..f4d59d4cf3f1 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -240,10 +240,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 	int err;
 
-	/* disable temporarily */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		return -ENOSYS;
-
 	if (!x86_pmu_initialized())
 		return -ENODEV;
 
@@ -773,7 +769,43 @@ out:
 	return ret;
 }
 
-static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; }
+static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
+{
+	int cpu = smp_processor_id();
+	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
+	u64 val;
+	int handled = 0;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx;
+
+	++cpuc->interrupts;
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active))
+			continue;
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+		x86_perf_counter_update(counter, hwc, idx);
+		val = atomic64_read(&hwc->prev_count);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+		/* counter overflow */
+		x86_perf_counter_set_period(counter, hwc, idx);
+		handled = 1;
+		inc_irq_stat(apic_perf_irqs);
+		if (perf_counter_overflow(counter, nmi, regs, 0))
+			amd_pmu_disable_counter(hwc, idx);
+		else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
+			/*
+			 * do not reenable when throttled, but reload
+			 * the register
+			 */
+			amd_pmu_disable_counter(hwc, idx);
+		else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+			amd_pmu_enable_counter(hwc, idx);
+	}
+	return handled;
+}
 
 void perf_counter_unthrottle(void)
 {
@@ -782,9 +814,6 @@ void perf_counter_unthrottle(void)
 	if (!x86_pmu_initialized())
 		return;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-		return;
-
 	cpuc = &__get_cpu_var(cpu_hw_counters);
 	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
 		if (printk_ratelimit())