author     Robert Richter <robert.richter@amd.com>    2009-04-29 06:47:22 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-04-29 08:51:13 -0400
commit     4b7bfd0d276da3a006d37e85d3cf900d7a14ae2a (patch)
tree       ed4fe623afebdeb17e0a9ccc833f2154cd815991 /arch/x86/kernel/cpu/perf_counter.c
parent     a29aa8a7ff93e4196d558036928597e68337dd8d (diff)
perf_counter, x86: return raw count with x86_perf_counter_update()
To check whether a counter has overflowed on AMD CPUs, the upper bit of
the raw counter value must be tested. This value is already available
internally in x86_perf_counter_update(). The value is now returned, so
that callers can use it directly to check for overflows.
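As an illustration (not part of the patch text), a minimal sketch of how the
returned raw count is used for the overflow test inside the per-counter loop
of amd_pmu_handle_irq(); it assumes the x86_pmu.counter_bits field and the
call signature used in this file:

	u64 val;

	/*
	 * Counters are programmed with a negative offset, so the top
	 * bit (bit counter_bits - 1) stays set until the counter wraps.
	 */
	val = x86_perf_counter_update(counter, hwc, idx);
	if (val & (1ULL << (x86_pmu.counter_bits - 1)))
		continue;	/* top bit still set: no overflow yet */
	/* top bit cleared: the counter overflowed, handle the sample */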
[ Impact: micro-optimization ]
Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-26-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_counter.c')
 arch/x86/kernel/cpu/perf_counter.c | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_counter.c b/arch/x86/kernel/cpu/perf_counter.c
index f4d59d4cf3f1..a8a53abd706d 100644
--- a/arch/x86/kernel/cpu/perf_counter.c
+++ b/arch/x86/kernel/cpu/perf_counter.c
@@ -132,7 +132,7 @@ static u64 amd_pmu_raw_event(u64 event)
  * Can only be executed on the CPU where the counter is active.
  * Returns the delta events processed.
  */
-static void
+static u64
 x86_perf_counter_update(struct perf_counter *counter,
 			struct hw_perf_counter *hwc, int idx)
 {
@@ -165,6 +165,8 @@ again:
 
 	atomic64_add(delta, &counter->count);
 	atomic64_sub(delta, &hwc->period_left);
+
+	return new_raw_count;
 }
 
 static atomic_t num_counters;
@@ -785,8 +787,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
 			continue;
 		counter = cpuc->counters[idx];
 		hwc = &counter->hw;
-		x86_perf_counter_update(counter, hwc, idx);
-		val = atomic64_read(&hwc->prev_count);
+		val = x86_perf_counter_update(counter, hwc, idx);
 		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
 			continue;
 		/* counter overflow */
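For context, a condensed sketch of what x86_perf_counter_update() looks like
after this change, reconstructed from the hunk context above; the exact
variable names, comments, and field names (e.g. hwc->counter_base) are an
assumption and may differ from the tree:

	static u64
	x86_perf_counter_update(struct perf_counter *counter,
				struct hw_perf_counter *hwc, int idx)
	{
		int shift = 64 - x86_pmu.counter_bits;
		u64 prev_raw_count, new_raw_count;
		s64 delta;

	again:
		/* re-read until prev_count updates atomically (an NMI may race) */
		prev_raw_count = atomic64_read(&hwc->prev_count);
		rdmsrl(hwc->counter_base + idx, new_raw_count);

		if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
				     new_raw_count) != prev_raw_count)
			goto again;

		/* sign-extend the delta to the counter width and fold it in */
		delta = (new_raw_count << shift) - (prev_raw_count << shift);
		delta >>= shift;

		atomic64_add(delta, &counter->count);
		atomic64_sub(delta, &hwc->period_left);

		/* the raw value is already at hand here, so hand it back */
		return new_raw_count;
	}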