author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-05-21 08:43:08 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-06-09 05:12:37 -0400
commit		e78505958cf123048fb48cb56b79cebb8edd15fb (patch)
tree		3688d124cdc906cbe9f6587c8671ba0a14c95262 /arch/powerpc
parent		a6e6dea68c18f705957573ee5596097c7e82d0e5 (diff)
perf: Convert perf_event to local_t
Since all modifications to event->count (and ->prev_count and ->period_left) are now local to a CPU, change them to local64_t so we avoid the LOCK'ed ops.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
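For readers unfamiliar with the API: local64_t mirrors the atomic64_t interface (read/set/add/cmpxchg) but is only safe for data that a single CPU modifies, which lets an architecture implement the ops without LOCK-prefixed, bus-locking instructions. Below is a minimal sketch of the pattern power_pmu_read() uses after this conversion; the struct and function names are hypothetical stand-ins, only the local64_* calls are the real kernel API:

#include <linux/types.h>	/* u64, s64 */
#include <linux/compiler.h>	/* barrier() */
#include <asm/local64.h>	/* local64_t and the local64_* ops */

/* Hypothetical per-CPU counter, standing in for struct hw_perf_event. */
struct sample_counter {
	local64_t prev_count;	/* last hardware value accounted for */
	local64_t count;	/* accumulated event count */
};

static void sample_counter_update(struct sample_counter *sc,
				  u64 (*read_hw_counter)(void))
{
	s64 prev, val, delta;

	/*
	 * Re-read until no concurrent update of prev_count raced with us;
	 * because only the owning CPU writes this data, the cmpxchg needs
	 * no bus lock.
	 */
	do {
		prev = local64_read(&sc->prev_count);
		barrier();
		val = read_hw_counter();
	} while (local64_cmpxchg(&sc->prev_count, prev, val) != prev);

	/* The hardware counter is only 32 bits wide; mask so a wrapped
	 * counter still yields the correct delta. */
	delta = (val - prev) & 0xfffffffful;
	local64_add(delta, &sc->count);
}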
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/kernel/perf_event.c	34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index ac2a8c2554d9..af1d9a7c65d1 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -410,15 +410,15 @@ static void power_pmu_read(struct perf_event *event)
 	 * Therefore we treat them like NMIs.
 	 */
 	do {
-		prev = atomic64_read(&event->hw.prev_count);
+		prev = local64_read(&event->hw.prev_count);
 		barrier();
 		val = read_pmc(event->hw.idx);
-	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
 	/* The counters are only 32 bits wide */
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &event->count);
-	atomic64_sub(delta, &event->hw.period_left);
+	local64_add(delta, &event->count);
+	local64_sub(delta, &event->hw.period_left);
 }
 
 /*
@@ -444,10 +444,10 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
 		if (!event->hw.idx)
 			continue;
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
-		prev = atomic64_read(&event->hw.prev_count);
+		prev = local64_read(&event->hw.prev_count);
 		event->hw.idx = 0;
 		delta = (val - prev) & 0xfffffffful;
-		atomic64_add(delta, &event->count);
+		local64_add(delta, &event->count);
 	}
 }
 
@@ -462,7 +462,7 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
 		event = cpuhw->limited_counter[i];
 		event->hw.idx = cpuhw->limited_hwidx[i];
 		val = (event->hw.idx == 5) ? pmc5 : pmc6;
-		atomic64_set(&event->hw.prev_count, val);
+		local64_set(&event->hw.prev_count, val);
 		perf_event_update_userpage(event);
 	}
 }
@@ -666,11 +666,11 @@ void hw_perf_enable(void)
 		}
 		val = 0;
 		if (event->hw.sample_period) {
-			left = atomic64_read(&event->hw.period_left);
+			left = local64_read(&event->hw.period_left);
 			if (left < 0x80000000L)
 				val = 0x80000000L - left;
 		}
-		atomic64_set(&event->hw.prev_count, val);
+		local64_set(&event->hw.prev_count, val);
 		event->hw.idx = idx;
 		write_pmc(idx, val);
 		perf_event_update_userpage(event);
@@ -842,8 +842,8 @@ static void power_pmu_unthrottle(struct perf_event *event)
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
 	write_pmc(event->hw.idx, val);
-	atomic64_set(&event->hw.prev_count, val);
-	atomic64_set(&event->hw.period_left, left);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
 	perf_enable();
 	local_irq_restore(flags);
@@ -1109,7 +1109,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 	event->hw.config = events[n];
 	event->hw.event_base = cflags[n];
 	event->hw.last_period = event->hw.sample_period;
-	atomic64_set(&event->hw.period_left, event->hw.last_period);
+	local64_set(&event->hw.period_left, event->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -1147,16 +1147,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	int record = 0;
 
 	/* we don't have to worry about interrupts here */
-	prev = atomic64_read(&event->hw.prev_count);
+	prev = local64_read(&event->hw.prev_count);
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &event->count);
+	local64_add(delta, &event->count);
 
 	/*
 	 * See if the total period for this event has expired,
 	 * and update for the next period.
 	 */
 	val = 0;
-	left = atomic64_read(&event->hw.period_left) - delta;
+	left = local64_read(&event->hw.period_left) - delta;
 	if (period) {
 		if (left <= 0) {
 			left += period;
@@ -1194,8 +1194,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	}
 
 	write_pmc(event->hw.idx, val);
-	atomic64_set(&event->hw.prev_count, val);
-	atomic64_set(&event->hw.period_left, left);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
 }
 