author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-05-21 08:43:08 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-06-09 05:12:37 -0400
commit		e78505958cf123048fb48cb56b79cebb8edd15fb (patch)
tree		3688d124cdc906cbe9f6587c8671ba0a14c95262 /arch/x86/kernel/cpu/perf_event.c
parent		a6e6dea68c18f705957573ee5596097c7e82d0e5 (diff)
perf: Convert perf_event to local_t
Since all modifications to event->count (and ->prev_count and ->period_left) are now local to a cpu, change them to local64_t so we avoid the LOCK'ed ops.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event.c	18
1 file changed, 9 insertions(+), 9 deletions(-)
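For context, here is a minimal sketch (not the kernel code itself) of the update pattern the patch converts to. It assumes kernel context (<linux/local64.h>); read_pmu_counter() is a hypothetical stand-in for the rdmsrl() read of the hardware counter. Because these fields are only ever touched by the owning CPU (the only concurrent writer is that CPU's own PMI handler), the local64_* operations can be implemented without the LOCK prefix on x86, which is the point of the conversion.

/*
 * Illustrative sketch only, assuming kernel context; it mirrors the
 * x86_perf_event_update() loop after the conversion to local64_t.
 * read_pmu_counter() is a hypothetical helper, not a kernel API.
 */
#include <linux/local64.h>

struct hw_counter {
	local64_t	prev_count;	/* last raw value seen on the PMU */
	local64_t	period_left;	/* events left in the sample period */
};

u64 read_pmu_counter(struct hw_counter *hwc);	/* hypothetical, stands in for rdmsrl() */

static u64 hw_counter_update(struct hw_counter *hwc, local64_t *count, int shift)
{
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count  = read_pmu_counter(hwc);

	/*
	 * Claim the [prev, new] window; if the PMI handler on this CPU
	 * updated prev_count in between, retry with the fresh value.
	 * No LOCK prefix is needed: only this CPU touches the counter.
	 */
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/* Sign-extend to the hardware counter width, then accumulate. */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}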
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 79e199843db6..2d0d29069275 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -296,10 +296,10 @@ x86_perf_event_update(struct perf_event *event)
 	 * count to the generic event atomically:
 	 */
 again:
-	prev_raw_count = atomic64_read(&hwc->prev_count);
+	prev_raw_count = local64_read(&hwc->prev_count);
 	rdmsrl(hwc->event_base + idx, new_raw_count);
 
-	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
+	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
 					new_raw_count) != prev_raw_count)
 		goto again;
 
@@ -314,8 +314,8 @@ again:
 	delta = (new_raw_count << shift) - (prev_raw_count << shift);
 	delta >>= shift;
 
-	atomic64_add(delta, &event->count);
-	atomic64_sub(delta, &hwc->period_left);
+	local64_add(delta, &event->count);
+	local64_sub(delta, &hwc->period_left);
 
 	return new_raw_count;
 }
@@ -439,7 +439,7 @@ static int x86_setup_perfctr(struct perf_event *event)
 	if (!hwc->sample_period) {
 		hwc->sample_period = x86_pmu.max_period;
 		hwc->last_period = hwc->sample_period;
-		atomic64_set(&hwc->period_left, hwc->sample_period);
+		local64_set(&hwc->period_left, hwc->sample_period);
 	} else {
 		/*
 		 * If we have a PMU initialized but no APIC
@@ -886,7 +886,7 @@ static int
 x86_perf_event_set_period(struct perf_event *event)
 {
 	struct hw_perf_event *hwc = &event->hw;
-	s64 left = atomic64_read(&hwc->period_left);
+	s64 left = local64_read(&hwc->period_left);
 	s64 period = hwc->sample_period;
 	int ret = 0, idx = hwc->idx;
 
@@ -898,14 +898,14 @@ x86_perf_event_set_period(struct perf_event *event)
 	 */
 	if (unlikely(left <= -period)) {
 		left = period;
-		atomic64_set(&hwc->period_left, left);
+		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		ret = 1;
 	}
 
 	if (unlikely(left <= 0)) {
 		left += period;
-		atomic64_set(&hwc->period_left, left);
+		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		ret = 1;
 	}
@@ -924,7 +924,7 @@ x86_perf_event_set_period(struct perf_event *event)
 	 * The hw event starts counting from this event offset,
 	 * mark it to be able to extra future deltas:
 	 */
-	atomic64_set(&hwc->prev_count, (u64)-left);
+	local64_set(&hwc->prev_count, (u64)-left);
 
 	wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);
 
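An aside on the last hunk, as a worked example rather than part of the patch: the PMU counter counts upward and raises the interrupt on overflow, so writing (u64)-left programs it to overflow after exactly `left` events; setting prev_count to the same value lets the next x86_perf_event_update() recover the delta via the shift/sign-extension step. The snippet below walks through the arithmetic, assuming a 48-bit counter width purely for illustration (the real width comes from x86_pmu.cntval_mask).

/* Worked example: why writing (u64)-left gives an overflow after
 * 'left' events; 48-bit counter width assumed for illustration. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const int      shift = 64 - 48;			/* 64 - counter width */
	const uint64_t mask  = (1ULL << 48) - 1;	/* cntval_mask equivalent */
	const int64_t  left  = 100000;			/* events until next sample */

	uint64_t start = (uint64_t)(-left) & mask;	/* value programmed via wrmsrl() */
	uint64_t end   = (start + left) & mask;		/* counter wraps to 0, PMI fires */

	/* Same delta computation as x86_perf_event_update(). */
	int64_t delta = ((int64_t)(end << shift) - (int64_t)(start << shift)) >> shift;

	printf("start=%#llx end=%#llx delta=%lld\n",
	       (unsigned long long)start, (unsigned long long)end,
	       (long long)delta);			/* prints delta=100000 */
	return 0;
}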