 arch/powerpc/kernel/perf_counter.c | 48 ++++++++++++++++++++++++++++++++-----
 1 file changed, 43 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index fe21b2440f28..f96d55f55bd6 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -740,10 +740,37 @@ static void power_pmu_disable(struct perf_counter *counter)
 	local_irq_restore(flags);
 }
 
+/*
+ * Re-enable interrupts on a counter after they were throttled
+ * because they were coming too fast.
+ */
+static void power_pmu_unthrottle(struct perf_counter *counter)
+{
+	s64 val, left;
+	unsigned long flags;
+
+	if (!counter->hw.idx || !counter->hw.irq_period)
+		return;
+	local_irq_save(flags);
+	perf_disable();
+	power_pmu_read(counter);
+	left = counter->hw.irq_period;
+	val = 0;
+	if (left < 0x80000000L)
+		val = 0x80000000L - left;
+	write_pmc(counter->hw.idx, val);
+	atomic64_set(&counter->hw.prev_count, val);
+	atomic64_set(&counter->hw.period_left, left);
+	perf_counter_update_userpage(counter);
+	perf_enable();
+	local_irq_restore(flags);
+}
+
 struct pmu power_pmu = {
 	.enable		= power_pmu_enable,
 	.disable	= power_pmu_disable,
 	.read		= power_pmu_read,
+	.unthrottle	= power_pmu_unthrottle,
 };
 
 /*
@@ -957,10 +984,6 @@ static void record_and_restart(struct perf_counter *counter, long val,
 		if (left < 0x80000000L)
 			val = 0x80000000L - left;
 	}
-	write_pmc(counter->hw.idx, val);
-	atomic64_set(&counter->hw.prev_count, val);
-	atomic64_set(&counter->hw.period_left, left);
-	perf_counter_update_userpage(counter);
 
 	/*
 	 * Finally record data if requested.
@@ -983,8 +1006,23 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
 				addr = mfspr(SPRN_SDAR);
 		}
-		perf_counter_overflow(counter, nmi, regs, addr);
+		if (perf_counter_overflow(counter, nmi, regs, addr)) {
+			/*
+			 * Interrupts are coming too fast - throttle them
+			 * by setting the counter to 0, so it will be
+			 * at least 2^30 cycles until the next interrupt
+			 * (assuming each counter counts at most 2 counts
+			 * per cycle).
+			 */
+			val = 0;
+			left = ~0ULL >> 1;
+		}
 	}
+
+	write_pmc(counter->hw.idx, val);
+	atomic64_set(&counter->hw.prev_count, val);
+	atomic64_set(&counter->hw.period_left, left);
+	perf_counter_update_userpage(counter);
 }
 
 /*