path: root/arch/x86/kernel/cpu/perf_counter.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-06-10 15:34:59 -0400
committer Ingo Molnar <mingo@elte.hu>                2009-06-10 20:39:02 -0400
commit    9e350de37ac9607012fcf9c5314a28fbddf8f43c (patch)
tree      d0f311bcf49d887e6d488ea72b2913cb00eaf910 /arch/x86/kernel/cpu/perf_counter.c
parent    df1a132bf3d3508f863336c80a27806a2ac947e0 (diff)
perf_counter: Accurate period data
We currently log hw.sample_period for PERF_SAMPLE_PERIOD, however this is
incorrect. When we adjust the period, it will only take effect the next
cycle but we report it for the current cycle. So when we adjust the period
for every cycle, we're always wrong.

Solve this by keeping track of the last_period.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
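To illustrate the problem the commit message describes, here is a minimal sketch, not kernel code: "struct hw_sim" and the values below are made-up stand-ins for the two hw_perf_counter fields involved. A period written to the hardware only applies from the next interval onward, so the sample being taken now was produced under the previously programmed period.

/*
 * Toy demonstration of why logging sample_period at overflow time is wrong
 * once the period is being adjusted every cycle. Hypothetical types/values.
 */
#include <stdio.h>

struct hw_sim {
	unsigned long long sample_period;	/* period programmed for the next interval */
	unsigned long long last_period;		/* period that produced the current sample */
};

int main(void)
{
	struct hw_sim hwc = { .sample_period = 10000, .last_period = 10000 };

	/* Frequency-driven adjustment resizes the period for the *next* cycle. */
	hwc.sample_period = 20000;

	/* Overflow fires: the interval that just elapsed was still 10000 events. */
	printf("old behaviour: PERF_SAMPLE_PERIOD = %llu\n", hwc.sample_period); /* 20000, wrong  */
	printf("new behaviour: PERF_SAMPLE_PERIOD = %llu\n", hwc.last_period);   /* 10000, correct */

	return 0;
}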
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
-rw-r--r--  arch/x86/kernel/cpu/perf_counter.c  15
1 file changed, 12 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index 82a23d487f92..57ae1bec81be 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -698,6 +698,7 @@ static int __hw_perf_counter_init(struct perf_counter *counter)
 
 	if (!hwc->sample_period) {
 		hwc->sample_period = x86_pmu.max_period;
+		hwc->last_period = hwc->sample_period;
 		atomic64_set(&hwc->period_left, hwc->sample_period);
 	}
 
@@ -880,12 +881,14 @@ x86_perf_counter_set_period(struct perf_counter *counter,
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 		ret = 1;
 	}
 	/*
@@ -1257,9 +1260,12 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
 			continue;
 
-		/* counter overflow */
-		handled = 1;
-		inc_irq_stat(apic_perf_irqs);
+		/*
+		 * counter overflow
+		 */
+		handled = 1;
+		data.period = counter->hw.last_period;
+
 		if (!x86_perf_counter_set_period(counter, hwc, idx))
 			continue;
 
@@ -1267,6 +1273,9 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
 		amd_pmu_disable_counter(hwc, idx);
 	}
 
+	if (handled)
+		inc_irq_stat(apic_perf_irqs);
+
 	return handled;
 }
 
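For readability, here is a condensed, userspace-compilable sketch of the control flow the patch establishes in the overflow handler. "struct hw_sim", "set_period()" and "handle_irq()" are simplified stand-ins, not the real kernel types or helpers; only the ordering mirrors the diff above.

/*
 * Sketch of the patched flow: report last_period before reprogramming,
 * let reprogramming record the period now taking effect, and bump the
 * interrupt statistic once per NMI rather than once per counter.
 */
#include <stdio.h>

struct hw_sim {
	unsigned long long sample_period;	/* period for the next interval */
	unsigned long long last_period;		/* period the sample was taken over */
};

/* Stand-in for x86_perf_counter_set_period(): rearm and remember the period. */
static int set_period(struct hw_sim *hwc)
{
	hwc->last_period = hwc->sample_period;
	return 1;	/* counter (re)armed */
}

/* Stand-in for the per-counter body of amd_pmu_handle_irq(). */
static int handle_irq(struct hw_sim *hwc)
{
	int handled = 0;

	/* counter overflow */
	handled = 1;

	/* Record the period that produced this sample *before* reprogramming,
	 * so adjustments to sample_period do not leak into the current record. */
	printf("data.period = %llu\n", hwc->last_period);

	if (!set_period(hwc))
		return handled;

	/* Moved out of the per-counter loop by the patch: count the interrupt
	 * once, only if some counter actually overflowed. */
	if (handled)
		printf("inc_irq_stat(apic_perf_irqs)\n");

	return handled;
}

int main(void)
{
	struct hw_sim hwc = { .sample_period = 10000, .last_period = 10000 };

	hwc.sample_period = 20000;	/* adjusted for the next interval */
	handle_irq(&hwc);		/* still reports 10000 for this sample */
	return 0;
}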