author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-06-10 15:34:59 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-06-10 20:39:02 -0400
commit		9e350de37ac9607012fcf9c5314a28fbddf8f43c (patch)
tree		d0f311bcf49d887e6d488ea72b2913cb00eaf910 /kernel/perf_counter.c
parent		df1a132bf3d3508f863336c80a27806a2ac947e0 (diff)
perf_counter: Accurate period data
We currently log hw.sample_period for PERF_SAMPLE_PERIOD, however this is
incorrect: an adjusted period only takes effect on the next cycle, yet we
report it for the current cycle. So when we adjust the period on every
cycle, the logged value is always wrong.
Solve this by keeping track of the last_period.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
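
To illustrate the ordering the fix depends on, here is a minimal user-space
C sketch (all names are hypothetical stand-ins, not kernel APIs): the period
recorded when a cycle is armed is the one that cycle actually runs with, so
it is the value the sample must carry, even if sample_period has since been
adjusted.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the hw counter state; illustration only. */
struct hw_state {
	uint64_t sample_period;	/* period future cycles will be armed with */
	uint64_t last_period;	/* period the currently armed cycle uses */
};

/* Arm a cycle: remember the period it actually runs with. */
static void arm_counter(struct hw_state *hw)
{
	hw->last_period = hw->sample_period;
}

/* Frequency-driven adjustment: affects only cycles armed after this. */
static void adjust_period(struct hw_state *hw, uint64_t new_period)
{
	hw->sample_period = new_period;
}

/* On overflow, report the period of the cycle that just expired
 * (last_period), not the freshly adjusted sample_period. */
static void on_overflow(struct hw_state *hw)
{
	printf("sample period = %llu\n",
	       (unsigned long long)hw->last_period);
	arm_counter(hw);	/* next cycle picks up any adjustment */
}

int main(void)
{
	struct hw_state hw = { .sample_period = 1000 };

	arm_counter(&hw);		/* cycle runs with period 1000 */
	adjust_period(&hw, 2000);	/* takes effect next cycle only */
	on_overflow(&hw);		/* prints 1000; logging sample_period
					   would wrongly print 2000 */
	on_overflow(&hw);		/* this cycle ran with 2000 */
	return 0;
}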
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	9
1 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4fe85e804f43..8b89b40bd0f0 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -2495,7 +2495,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	perf_output_put(&handle, cpu_entry);
 
 	if (sample_type & PERF_SAMPLE_PERIOD)
-		perf_output_put(&handle, counter->hw.sample_period);
+		perf_output_put(&handle, data->period);
 
 	/*
 	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
@@ -3040,11 +3040,13 @@ static void perf_swcounter_set_period(struct perf_counter *counter)
 	if (unlikely(left <= -period)) {
 		left = period;
 		atomic64_set(&hwc->period_left, left);
+		hwc->last_period = period;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
 		atomic64_add(period, &hwc->period_left);
+		hwc->last_period = period;
 	}
 
 	atomic64_set(&hwc->prev_count, -left);
@@ -3086,8 +3088,9 @@ static void perf_swcounter_overflow(struct perf_counter *counter,
 				     int nmi, struct pt_regs *regs, u64 addr)
 {
 	struct perf_sample_data data = {
 		.regs	= regs,
 		.addr	= addr,
+		.period	= counter->hw.last_period,
 	};
 
 	perf_swcounter_update(counter);