 arch/x86/kernel/cpu/perf_counter.c | 38 ++++++++++++++++++++++++++------------
 1 file changed, 26 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index c19e927b6979..7601c014f8f6 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -334,6 +334,8 @@ static u64 amd_pmu_save_disable_all(void)
 	 * right thing.
 	 */
 	barrier();
+	if (!enabled)
+		goto out;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
 		u64 val;
@@ -347,6 +349,7 @@ static u64 amd_pmu_save_disable_all(void)
 		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 
+out:
 	return enabled;
 }
 
@@ -787,32 +790,43 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 	int handled = 0;
 	struct perf_counter *counter;
 	struct hw_perf_counter *hwc;
-	int idx;
+	int idx, throttle = 0;
+
+	cpuc->throttle_ctrl = cpuc->enabled;
+	cpuc->enabled = 0;
+	barrier();
+
+	if (cpuc->throttle_ctrl) {
+		if (++cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
+			throttle = 1;
+	}
 
-	++cpuc->interrupts;
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		int disable = 0;
+
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
+
 		counter = cpuc->counters[idx];
 		hwc = &counter->hw;
 		val = x86_perf_counter_update(counter, hwc, idx);
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
-			continue;
+			goto next;
+
 		/* counter overflow */
 		x86_perf_counter_set_period(counter, hwc, idx);
 		handled = 1;
 		inc_irq_stat(apic_perf_irqs);
-		if (perf_counter_overflow(counter, nmi, regs, 0))
-			amd_pmu_disable_counter(hwc, idx);
-		else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
-			/*
-			 * do not reenable when throttled, but reload
-			 * the register
-			 */
+		disable = perf_counter_overflow(counter, nmi, regs, 0);
+
+next:
+		if (disable || throttle)
 			amd_pmu_disable_counter(hwc, idx);
-		else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
-			amd_pmu_enable_counter(hwc, idx);
 	}
 
+	if (cpuc->throttle_ctrl && !throttle)
+		cpuc->enabled = 1;
+
 	return handled;
 }
 
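In outline, the change moves the AMD NMI handler from per-counter throttling decisions to a single per-CPU decision: on entry it saves and clears cpuc->enabled, bumps a per-CPU interrupt count, and only re-enables the PMU on exit if the count has stayed below PERFMON_MAX_INTERRUPTS. The fragment below is a minimal, self-contained sketch of that pattern, not the kernel code; the struct layout, the PERFMON_MAX_INTERRUPTS value, and the unthrottle helper are simplified assumptions based only on the fields visible in this diff.

/* Sketch only: simplified stand-ins for the per-CPU state used above. */
#define PERFMON_MAX_INTERRUPTS 512	/* assumed value, not taken from this diff */

struct cpu_hw_counters_sketch {
	int enabled;		/* PMU globally enabled on this CPU */
	int throttle_ctrl;	/* enabled state saved on NMI entry */
	int interrupts;		/* NMIs seen since the last unthrottle */
};

/* NMI path: save and clear the enable state, count the interrupt,
 * and only re-enable when the CPU is not throttled. */
static int handle_irq_sketch(struct cpu_hw_counters_sketch *cpuc)
{
	int throttle = 0;

	cpuc->throttle_ctrl = cpuc->enabled;
	cpuc->enabled = 0;

	if (cpuc->throttle_ctrl && ++cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
		throttle = 1;

	/* ... per-counter overflow handling would go here ... */

	if (cpuc->throttle_ctrl && !throttle)
		cpuc->enabled = 1;	/* re-enable only when not throttled */

	return throttle;
}

/* Assumed counterpart run periodically: reset the count and re-enable. */
static void unthrottle_sketch(struct cpu_hw_counters_sketch *cpuc)
{
	cpuc->interrupts = 0;
	cpuc->enabled = 1;
}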