author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-10 15:34:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-10 20:39:02 -0400
commit	9e350de37ac9607012fcf9c5314a28fbddf8f43c (patch)
tree	d0f311bcf49d887e6d488ea72b2913cb00eaf910 /arch
parent	df1a132bf3d3508f863336c80a27806a2ac947e0 (diff)
perf_counter: Accurate period data
We currently log hw.sample_period for PERF_SAMPLE_PERIOD, however this is
incorrect. When we adjust the period, it only takes effect on the next
cycle, yet we report it for the current cycle. So when we adjust the
period on every cycle, we're always wrong.

Solve this by keeping track of the last_period.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
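Illustration (not part of the patch): a minimal, self-contained userspace
sketch of the last_period bookkeeping the patch introduces. The struct and
function names below are hypothetical stand-ins, not the kernel's; the point
is that a period adjustment only affects the next cycle, so the sample must
report the period that was actually in effect when the counter overflowed.

/*
 * Illustration only -- hypothetical names, not the kernel's structures.
 * sample_period is the period programmed for the *next* cycle, so the
 * value reported with the current sample must be the period the
 * overflowing cycle actually ran with (last_period).
 */
#include <stdio.h>

struct hw_counter {
	long long sample_period;	/* period to program for the next cycle */
	long long last_period;		/* period the overflowing cycle ran with */
};

/* A period adjustment only affects future cycles; the cycle currently
 * in flight keeps the period it was started with. */
static void adjust_period(struct hw_counter *hwc, long long new_period)
{
	hwc->sample_period = new_period;
}

/* On overflow, report last_period (what this sample actually used),
 * then latch sample_period as the period of the cycle being started. */
static void handle_overflow(struct hw_counter *hwc)
{
	printf("PERF_SAMPLE_PERIOD = %lld\n", hwc->last_period);
	hwc->last_period = hwc->sample_period;
}

int main(void)
{
	struct hw_counter hwc = { .sample_period = 1000, .last_period = 1000 };

	adjust_period(&hwc, 500);	/* takes effect next cycle */
	handle_overflow(&hwc);		/* reports 1000, the period just used */
	handle_overflow(&hwc);		/* reports 500 from here on */
	return 0;
}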
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/perf_counter.c	9
-rw-r--r--	arch/x86/kernel/cpu/perf_counter.c	15
2 files changed, 18 insertions(+), 6 deletions(-)
diff --git a/arch/powerpc/kernel/perf_counter.c b/arch/powerpc/kernel/perf_counter.c
index 5e0bf399c433..4990ce2e5f08 100644
--- a/arch/powerpc/kernel/perf_counter.c
+++ b/arch/powerpc/kernel/perf_counter.c
@@ -767,6 +767,7 @@ static void power_pmu_unthrottle(struct perf_counter *counter)
 	perf_disable();
 	power_pmu_read(counter);
 	left = counter->hw.sample_period;
+	counter->hw.last_period = left;
 	val = 0;
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
@@ -937,7 +938,8 @@ const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
 
 	counter->hw.config = events[n];
 	counter->hw.counter_base = cflags[n];
-	atomic64_set(&counter->hw.period_left, counter->hw.sample_period);
+	counter->hw.last_period = counter->hw.sample_period;
+	atomic64_set(&counter->hw.period_left, counter->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -1002,8 +1004,9 @@ static void record_and_restart(struct perf_counter *counter, long val,
 	 */
 	if (record) {
 		struct perf_sample_data data = {
 			.regs = regs,
 			.addr = 0,
+			.period = counter->hw.last_period,
 		};
 
 		if (counter->attr.sample_type & PERF_SAMPLE_ADDR) {
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 82a23d487f92..57ae1bec81be 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (!hwc->sample_period) {
 		hwc->sample_period = x86_pmu.max_period;
+		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
@@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 	/*
@@ -1257,9 +1260,12 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
 			continue;
 
-		/* counter overflow */
-		handled = 1;
-		inc_irq_stat(apic_perf_irqs);
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
@@ -1267,6 +1273,9 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 			amd_pmu_disable_counter(hwc, idx);
 	}
 
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
 	return handled;
 }
 