Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  45
1 file changed, 37 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 2d3681bbb522..f4d59d4cf3f1 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -240,10 +240,6 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 	struct hw_perf_counter *hwc = &counter->hw;
 	int err;
 
-	/* disable temporarily */
-	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
-		return -ENOSYS;
-
 	if (!x86_pmu_initialized())
 		return -ENODEV;
 
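Note: the hunk above removes the temporary bailout from __hw_perf_counter_init(), so counter initialization no longer refuses AMD CPUs; the interrupt handler added below is what makes that safe. A standalone user-space sketch of the guard being dropped (boot_cpu_data here is a local stand-in for the kernel's; the vendor constant matches the kernel's value):

/*
 * Standalone sketch (not kernel code) of the temporary guard removed
 * above. X86_VENDOR_AMD matches the kernel's constant; boot_cpu_data
 * is a local stand-in.
 */
#include <errno.h>
#include <stdio.h>

#define X86_VENDOR_AMD 2

struct cpuinfo { int x86_vendor; } boot_cpu_data = { X86_VENDOR_AMD };

static int hw_counter_init(void)
{
	/* Pre-patch behaviour: refuse AMD CPUs outright. */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return -ENOSYS;
	return 0;
}

int main(void)
{
	printf("init on AMD: %d\n", hw_counter_init()); /* -ENOSYS, -38 on Linux */
	return 0;
}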
@@ -773,7 +769,43 @@ out:
 	return ret;
 }
 
-static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi) { return 0; }
+static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
+{
+	int cpu = smp_processor_id();
+	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
+	u64 val;
+	int handled = 0;
+	struct perf_counter *counter;
+	struct hw_perf_counter *hwc;
+	int idx;
+
+	++cpuc->interrupts;
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->active))
+			continue;
+		counter = cpuc->counters[idx];
+		hwc = &counter->hw;
+		x86_perf_counter_update(counter, hwc, idx);
+		val = atomic64_read(&hwc->prev_count);
+		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
+			continue;
+		/* counter overflow */
+		x86_perf_counter_set_period(counter, hwc, idx);
+		handled = 1;
+		inc_irq_stat(apic_perf_irqs);
+		if (perf_counter_overflow(counter, nmi, regs, 0))
+			amd_pmu_disable_counter(hwc, idx);
+		else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
+			/*
+			 * do not reenable when throttled, but reload
+			 * the register
+			 */
+			amd_pmu_disable_counter(hwc, idx);
+		else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
+			amd_pmu_enable_counter(hwc, idx);
+	}
+	return handled;
+}
 
 void perf_counter_unthrottle(void)
 {
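Note: the new amd_pmu_handle_irq() detects overflow via the counter's sign bit: counters are armed with a negative offset, so the top bit of the raw value stays set until the programmed period has elapsed, and a cleared bit means the counter wrapped. A standalone user-space sketch of that test, assuming 48-bit counters (COUNTER_BITS stands in for x86_pmu.counter_bits):

/*
 * Standalone sketch (not kernel code) of the sign-bit overflow test in
 * amd_pmu_handle_irq(), assuming 48-bit counters.
 */
#include <stdint.h>
#include <stdio.h>

#define COUNTER_BITS 48
#define COUNTER_MASK ((1ULL << COUNTER_BITS) - 1)

/* A counter armed with -period overflows when its sign bit clears. */
static int overflowed(uint64_t val)
{
	return !(val & (1ULL << (COUNTER_BITS - 1)));
}

int main(void)
{
	uint64_t period = 100000;
	uint64_t val = (0 - period) & COUNTER_MASK;	/* armed value */

	printf("armed:        overflow=%d\n", overflowed(val));	/* 0 */
	val = (val + period) & COUNTER_MASK;	/* period events later */
	printf("after period: overflow=%d\n", overflowed(val));	/* 1 */
	return 0;
}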
@@ -782,9 +814,6 @@ void perf_counter_unthrottle(void)
 	if (!x86_pmu_initialized())
 		return;
 
-	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
-		return;
-
 	cpuc = &__get_cpu_var(cpu_hw_counters);
 	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
 		if (printk_ratelimit())
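Note: cpuc->interrupts, bumped once per NMI in the handler above, drives the throttle: past PERFMON_MAX_INTERRUPTS the handler reloads counters but leaves them disabled, and perf_counter_unthrottle(), now reached on AMD too since the X86_FEATURE_ARCH_PERFMON check is gone, resets the count and re-enables them. A standalone sketch of that handshake (the bound and the helper names are illustrative stand-ins):

/*
 * Standalone sketch (not kernel code) of the throttle handshake between
 * amd_pmu_handle_irq() and perf_counter_unthrottle(). The bound is an
 * illustrative stand-in (the kernel derives PERFMON_MAX_INTERRUPTS from
 * HZ); restoring the hardware is simplified to a message.
 */
#include <stdio.h>

#define PERFMON_MAX_INTERRUPTS 100	/* stand-in bound */

static unsigned int interrupts;

/* Per-NMI accounting: returns 0 once throttled. */
static int nmi_seen(void)
{
	++interrupts;
	return interrupts < PERFMON_MAX_INTERRUPTS;	/* 0 = keep disabled */
}

/* Periodic tick work, as perf_counter_unthrottle() does. */
static void unthrottle(void)
{
	if (interrupts >= PERFMON_MAX_INTERRUPTS)
		printf("perfcounters: throttled, re-enabling\n");
	interrupts = 0;	/* hardware counters would be restored here */
}

int main(void)
{
	for (int i = 0; i < 150; i++)
		nmi_seen();
	unthrottle();	/* prints once and resets the count */
	return 0;
}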