author	Paul Mackerras <paulus@samba.org>	2009-05-26 02:27:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-26 03:43:59 -0400
commit	8a7b8cb91f26a671f22cedc7fd54508667f2d9b9 (patch)
tree	6d3401bcea7b636a24e904310504f85c7a51ad6a /arch/powerpc/kernel/perf_counter.c
parent	0127c3ea082ee9f1034789b978dfc7fd83254617 (diff)
perf_counter: powerpc: Implement interrupt throttling
This implements interrupt throttling on powerpc.  Since we don't have
individual count enable/disable or interrupt enable/disable controls
per counter, this simply sets the hardware counter to 0, meaning that
it will not interrupt again until it has counted 2^31 counts, which
will take at least 2^30 cycles assuming a maximum of 2 counts per
cycle.  Also, we set counter->hw.period_left to the maximum possible
value (2^63 - 1), so we won't report overflows for this counter for
the foreseeable future.

The unthrottle operation restores counter->hw.period_left and the
hardware counter so that we will once again report a counter overflow
after counter->hw.irq_period counts.

[ Impact: new perfcounters robustness feature on PowerPC ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <18971.35823.643362.446774@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
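To make the arithmetic concrete: these PMCs are 32-bit and interrupt when the count reaches 0x80000000, so seeding a counter with 0x80000000 - left makes it interrupt after another left counts, while seeding it with 0 forces a full 2^31 counts first. A minimal standalone sketch of the values involved (the irq_period below is a made-up example; this is plain userspace C, not the kernel code itself):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Normal sampling: seed the 32-bit PMC so that it hits the
	 * 0x80000000 interrupt threshold after irq_period counts. */
	int64_t irq_period = 100000;	/* made-up example period */
	int64_t val = 0;
	if (irq_period < 0x80000000L)
		val = 0x80000000L - irq_period;
	printf("seed = 0x%llx, counts until interrupt = %lld\n",
	       (unsigned long long)val, 0x80000000LL - (long long)val);

	/* Throttled: seed with 0, so the PMC must count a full 2^31
	 * before interrupting; at no more than 2 counts per cycle that
	 * is at least 2^30 cycles.  period_left is parked at 2^63 - 1
	 * so no overflow is reported in the meantime. */
	int64_t period_left = ~0ULL >> 1;	/* 2^63 - 1 */
	printf("throttled seed = 0, period_left = %lld\n",
	       (long long)period_left);
	return 0;
}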
Diffstat (limited to 'arch/powerpc/kernel/perf_counter.c')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	48
1 file changed, 43 insertions(+), 5 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index fe21b2440f28..f96d55f55bd6 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -740,10 +740,37 @@ static void power_pmu_disable(struct perf_counter *counter)
 	local_irq_restore(flags);
 }
 
+/*
+ * Re-enable interrupts on a counter after they were throttled
+ * because they were coming too fast.
+ */
+static void power_pmu_unthrottle(struct perf_counter *counter)
+{
+	s64 val, left;
+	unsigned long flags;
+
+	if (!counter->hw.idx || !counter->hw.irq_period)
+		return;
+	local_irq_save(flags);
+	perf_disable();
+	power_pmu_read(counter);
+	left = counter->hw.irq_period;
+	val = 0;
+	if (left < 0x80000000L)
+		val = 0x80000000L - left;
+	write_pmc(counter->hw.idx, val);
+	atomic64_set(&counter->hw.prev_count, val);
+	atomic64_set(&counter->hw.period_left, left);
+	perf_counter_update_userpage(counter);
+	perf_enable();
+	local_irq_restore(flags);
+}
+
 struct pmu power_pmu = {
 	.enable		= power_pmu_enable,
 	.disable	= power_pmu_disable,
 	.read		= power_pmu_read,
+	.unthrottle	= power_pmu_unthrottle,
 };
 
 /*
@@ -957,10 +984,6 @@ static void record_and_restart(struct perf_counter *counter, long val,
 		if (left < 0x80000000L)
 			val = 0x80000000L - left;
 	}
-	write_pmc(counter->hw.idx, val);
-	atomic64_set(&counter->hw.prev_count, val);
-	atomic64_set(&counter->hw.period_left, left);
-	perf_counter_update_userpage(counter);
 
 	/*
 	 * Finally record data if requested.
@@ -983,8 +1006,23 @@ static void record_and_restart(struct perf_counter *counter, long val,
 			if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
 				addr = mfspr(SPRN_SDAR);
 		}
-		perf_counter_overflow(counter, nmi, regs, addr);
+		if (perf_counter_overflow(counter, nmi, regs, addr)) {
+			/*
+			 * Interrupts are coming too fast - throttle them
+			 * by setting the counter to 0, so it will be
+			 * at least 2^30 cycles until the next interrupt
+			 * (assuming each counter counts at most 2 counts
+			 * per cycle).
+			 */
+			val = 0;
+			left = ~0ULL >> 1;
+		}
 	}
+
+	write_pmc(counter->hw.idx, val);
+	atomic64_set(&counter->hw.prev_count, val);
+	atomic64_set(&counter->hw.period_left, left);
+	perf_counter_update_userpage(counter);
 }
 
 /*
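Note the restructuring in record_and_restart(): the write_pmc()/atomic64_set()/perf_counter_update_userpage() sequence now runs unconditionally at the end of the function, so the throttle branch only has to override val and left before the hardware is reprogrammed. power_pmu_unthrottle() then performs the mirror-image operation, recomputing val from counter->hw.irq_period and writing it back under local_irq_save()/perf_disable(), to resume normal sampling.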