about | summary | refs | log | tree | commit | diff | stats
path: root/arch/powerpc
diff options
context:
space:
mode:
author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-05-15 09:19:28 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-05-15 09:26:56 -0400
commit    60db5e09c13109b13830cc9dcae688003fd39e79 (patch)
tree      ac923b89c28d735d2460216202d960e9c6237be0 /arch/powerpc
parent    789f90fcf6b0b54e655740e9396c954378542c79 (diff)
perf_counter: frequency based adaptive irq_period
Instead of specifying the irq_period for a counter, provide a target
interrupt frequency and dynamically adapt the irq_period to match this
frequency.

[ Impact: new perf-counter attribute/feature ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <20090515132018.646195868@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/perf_counter.c | 13 +++++++------
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bb1b463c1361..db8d5cafc159 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -534,7 +534,7 @@ void hw_perf_enable(void)
 			continue;
 		}
 		val = 0;
-		if (counter->hw_event.irq_period) {
+		if (counter->hw.irq_period) {
 			left = atomic64_read(&counter->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
@@ -829,8 +829,6 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	if (!ppmu)
 		return ERR_PTR(-ENXIO);
-	if ((s64)counter->hw_event.irq_period < 0)
-		return ERR_PTR(-EINVAL);
 	if (!perf_event_raw(&counter->hw_event)) {
 		ev = perf_event_id(&counter->hw_event);
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
@@ -901,7 +899,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
+	atomic64_set(&counter->hw.period_left, counter->hw.irq_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -934,6 +932,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 static void record_and_restart(struct perf_counter *counter, long val,
 			       struct pt_regs *regs, int nmi)
 {
+	u64 period = counter->hw.irq_period;
 	s64 prev, delta, left;
 	int record = 0;
 
@@ -948,11 +947,11 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 */
 	val = 0;
 	left = atomic64_read(&counter->hw.period_left) - delta;
-	if (counter->hw_event.irq_period) {
+	if (period) {
 		if (left <= 0) {
-			left += counter->hw_event.irq_period;
+			left += period;
 			if (left <= 0)
-				left = counter->hw_event.irq_period;
+				left = period;
 			record = 1;
 		}
 		if (left < 0x80000000L)