author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-15 09:19:28 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-15 09:26:56 -0400
commit	60db5e09c13109b13830cc9dcae688003fd39e79 (patch)
tree	ac923b89c28d735d2460216202d960e9c6237be0 /arch
parent	789f90fcf6b0b54e655740e9396c954378542c79 (diff)
perf_counter: frequency based adaptive irq_period
Instead of specifying the irq_period for a counter, provide a target interrupt
frequency and dynamically adapt the irq_period to match this frequency.

[ Impact: new perf-counter attribute/feature ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <20090515132018.646195868@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
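
The diff below covers only the architecture-specific side of the change: the powerpc and x86 code stops reading hw_event.irq_period directly and instead uses the per-counter hw.irq_period, presumably so the generic perf_counter code can rewrite it on the fly; the frequency-adaptation logic itself lands outside this 'arch'-limited diffstat. As a rough illustration of what "adapt the irq_period to match a target frequency" means, here is a minimal, self-contained C sketch — not the kernel implementation; the names adjust_period and EVENT_RATE and the once-per-second granularity are assumptions for illustration only:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative sketch only: estimate the event rate from the interrupts
 * observed over the last interval, compute the period that would have
 * produced the requested interrupt frequency, and move halfway toward it
 * to damp oscillation.
 */
static uint64_t adjust_period(uint64_t cur_period, uint64_t interrupts,
			      uint64_t target_freq)
{
	uint64_t events = interrupts * cur_period;	/* events seen this interval */
	uint64_t ideal  = events / target_freq;		/* period matching target_freq */
	int64_t  delta  = ((int64_t)(1 + ideal - cur_period)) >> 1;
	uint64_t next   = cur_period + delta;

	return next ? next : 1;				/* never let the period reach 0 */
}

#define EVENT_RATE 250000000ULL		/* pretend workload: ~250M events/sec */

int main(void)
{
	uint64_t period = 100000;	/* initial guess for irq_period */
	uint64_t target = 1000;		/* want ~1000 interrupts/sec */

	for (int sec = 0; sec < 6; sec++) {
		uint64_t interrupts = EVENT_RATE / period;

		period = adjust_period(period, interrupts, target);
		printf("second %d: %llu interrupts, irq_period -> %llu\n",
		       sec, (unsigned long long)interrupts,
		       (unsigned long long)period);
	}
	return 0;
}

Run over a few simulated seconds, the period climbs from the initial guess toward EVENT_RATE / target (250000 here), i.e. the value that yields roughly the requested interrupt rate.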
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	13
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	 9
2 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index bb1b463c1361..db8d5cafc159 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -534,7 +534,7 @@ void hw_perf_enable(void)
 			continue;
 		}
 		val = 0;
-		if (counter->hw_event.irq_period) {
+		if (counter->hw.irq_period) {
 			left = atomic64_read(&counter->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
@@ -829,8 +829,6 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	if (!ppmu)
 		return ERR_PTR(-ENXIO);
-	if ((s64)counter->hw_event.irq_period < 0)
-		return ERR_PTR(-EINVAL);
 	if (!perf_event_raw(&counter->hw_event)) {
 		ev = perf_event_id(&counter->hw_event);
 		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
@@ -901,7 +899,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw_event.irq_period);
+	atomic64_set(&counter->hw.period_left, counter->hw.irq_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -934,6 +932,7 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 static void record_and_restart(struct perf_counter *counter, long val,
 			       struct pt_regs *regs, int nmi)
 {
+	u64 period = counter->hw.irq_period;
 	s64 prev, delta, left;
 	int record = 0;
 
@@ -948,11 +947,11 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 */
 	val = 0;
 	left = atomic64_read(&counter->hw.period_left) - delta;
-	if (counter->hw_event.irq_period) {
+	if (period) {
 		if (left <= 0) {
-			left += counter->hw_event.irq_period;
+			left += period;
 			if (left <= 0)
-				left = counter->hw_event.irq_period;
+				left = period;
 			record = 1;
 		}
 		if (left < 0x80000000L)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 5a7f718eb1e1..886dcf334bc3 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -286,11 +286,8 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 		hwc->nmi = 1;
 	}
 
-	hwc->irq_period = hw_event->irq_period;
-	if ((s64)hwc->irq_period <= 0 || hwc->irq_period > x86_pmu.max_period)
-		hwc->irq_period = x86_pmu.max_period;
-
-	atomic64_set(&hwc->period_left, hwc->irq_period);
+	atomic64_set(&hwc->period_left,
+		     min(x86_pmu.max_period, hwc->irq_period));
 
 	/*
 	 * Raw event type provide the config in the event structure
@@ -458,7 +455,7 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 			      struct hw_perf_counter *hwc, int idx)
 {
 	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = hwc->irq_period;
+	s64 period = min(x86_pmu.max_period, hwc->irq_period);
 	int err;
 
 	/*