aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-05-13 03:45:19 -0400
committerIngo Molnar <mingo@elte.hu>2009-05-15 03:46:54 -0400
commitec3232bdf8518bea8410f0027f870b24d3aa8753 (patch)
tree5aa20585dfec4053f92f5da5ae7488b13a8bebb4 /arch
parent1a853e36871b533ccc3f3c5bdd5cd0d867043a00 (diff)
perf_counter: x86: More accurate counter update
Take the counter width into account instead of assuming 32 bits. In
particular Nehalem has 44 bit wide counters, and all arithmetics should
happen on a 44-bit signed integer basis.

[ Impact: fix rare event imprecision, warning message on Nehalem ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--arch/x86/kernel/cpu/perf_counter.c9
1 files changed, 6 insertions, 3 deletions
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index f7772ff7936e..3a92a2b2a80f 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -138,7 +138,9 @@ static u64
 x86_perf_counter_update(struct perf_counter *counter,
 			struct hw_perf_counter *hwc, int idx)
 {
-	u64 prev_raw_count, new_raw_count, delta;
+	int shift = 64 - x86_pmu.counter_bits;
+	u64 prev_raw_count, new_raw_count;
+	s64 delta;
 
 	/*
 	 * Careful: an NMI might modify the previous counter value.
@@ -161,9 +163,10 @@ again:
 	 * (counter-)time and add that to the generic counter.
 	 *
 	 * Careful, not all hw sign-extends above the physical width
-	 * of the count, so we do that by clipping the delta to 32 bits:
+	 * of the count.
 	 */
-	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);
+	delta = (new_raw_count << shift) - (prev_raw_count << shift);
+	delta >>= shift;
 
 	atomic64_add(delta, &counter->count);
 	atomic64_sub(delta, &hwc->period_left);