author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-07-09 04:21:22 -0400
committer	Paul Mackerras <paulus@samba.org>	2010-08-02 20:24:03 -0400
commit		09f86cd093b76b699656eaa82c37ca6d9a02b892 (patch)
tree		bff88b7d3a1573ffdf7da4c1fce06b892de05087 /arch/powerpc/kernel
parent		3772b734720e1a3f2dc1d95cfdfaa5332f4ccf01 (diff)
perf, powerpc: Convert the FSL driver to use local64_t
For some reason the FSL driver got left out when we converted perf to use
local64_t instead of atomic64_t.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
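The pattern this patch touches is the driver's lockless counter update: re-read the hardware counter until the saved previous value can be swapped in, then accumulate the masked 32-bit delta. Below is a minimal user-space sketch of that pattern, using C11 atomics as a stand-in for the kernel's local64_read/local64_cmpxchg/local64_add accessors; the struct hw_counter and read_pmc_stub names are made up for illustration and are not part of the kernel API.

/*
 * User-space analogue (not kernel code) of the lockless update loop
 * in fsl_emb_pmu_read(): retry until prev_count can be replaced with
 * the freshly read counter value, then add the 32-bit-masked delta.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct hw_counter {
	_Atomic int64_t prev_count;	/* last value read from the PMC */
	_Atomic int64_t count;		/* accumulated event count */
};

/* Stand-in for reading the 32-bit hardware performance counter. */
static uint32_t read_pmc_stub(void)
{
	static uint32_t fake = 100;
	return fake += 7;
}

static void counter_read(struct hw_counter *hw)
{
	int64_t prev, val;
	uint64_t delta;

	do {
		prev = atomic_load(&hw->prev_count);
		val = read_pmc_stub();
	} while (!atomic_compare_exchange_weak(&hw->prev_count, &prev, val));

	/* The hardware counters are only 32 bits wide, so mask the delta. */
	delta = (uint64_t)(val - prev) & 0xfffffffful;
	atomic_fetch_add(&hw->count, (int64_t)delta);
}

int main(void)
{
	struct hw_counter hw = { .prev_count = 0, .count = 0 };

	counter_read(&hw);
	counter_read(&hw);
	printf("accumulated count: %lld\n", (long long)atomic_load(&hw.count));
	return 0;
}

local64_t is intended for counters that are only ever written from their owning CPU (including its interrupt context), which is what lets these fields move away from the heavier atomic64_t operations that the rest of perf had already dropped.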
Diffstat (limited to 'arch/powerpc/kernel')
-rw-r--r--	arch/powerpc/kernel/perf_event_fsl_emb.c	28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c
index babcceecd2ea..fc6a89b7f08e 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/kernel/perf_event_fsl_emb.c
@@ -162,15 +162,15 @@ static void fsl_emb_pmu_read(struct perf_event *event)
 	 * Therefore we treat them like NMIs.
 	 */
 	do {
-		prev = atomic64_read(&event->hw.prev_count);
+		prev = local64_read(&event->hw.prev_count);
 		barrier();
 		val = read_pmc(event->hw.idx);
-	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
+	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
 	/* The counters are only 32 bits wide */
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &event->count);
-	atomic64_sub(delta, &event->hw.period_left);
+	local64_add(delta, &event->count);
+	local64_sub(delta, &event->hw.period_left);
 }
 
 /*
@@ -296,11 +296,11 @@ static int fsl_emb_pmu_enable(struct perf_event *event)
 
 	val = 0;
 	if (event->hw.sample_period) {
-		s64 left = atomic64_read(&event->hw.period_left);
+		s64 left = local64_read(&event->hw.period_left);
 		if (left < 0x80000000L)
 			val = 0x80000000L - left;
 	}
-	atomic64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.prev_count, val);
 	write_pmc(i, val);
 	perf_event_update_userpage(event);
 
@@ -371,8 +371,8 @@ static void fsl_emb_pmu_unthrottle(struct perf_event *event)
 	if (left < 0x80000000L)
 		val = 0x80000000L - left;
 	write_pmc(event->hw.idx, val);
-	atomic64_set(&event->hw.prev_count, val);
-	atomic64_set(&event->hw.period_left, left);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
 	perf_enable();
 	local_irq_restore(flags);
@@ -500,7 +500,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event)
 		return ERR_PTR(-ENOTSUPP);
 
 	event->hw.last_period = event->hw.sample_period;
-	atomic64_set(&event->hw.period_left, event->hw.last_period);
+	local64_set(&event->hw.period_left, event->hw.last_period);
 
 	/*
 	 * See if we need to reserve the PMU.
@@ -541,16 +541,16 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	int record = 0;
 
 	/* we don't have to worry about interrupts here */
-	prev = atomic64_read(&event->hw.prev_count);
+	prev = local64_read(&event->hw.prev_count);
 	delta = (val - prev) & 0xfffffffful;
-	atomic64_add(delta, &event->count);
+	local64_add(delta, &event->count);
 
 	/*
 	 * See if the total period for this event has expired,
 	 * and update for the next period.
 	 */
 	val = 0;
-	left = atomic64_read(&event->hw.period_left) - delta;
+	left = local64_read(&event->hw.period_left) - delta;
 	if (period) {
 		if (left <= 0) {
 			left += period;
@@ -584,8 +584,8 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
 	}
 
 	write_pmc(event->hw.idx, val);
-	atomic64_set(&event->hw.prev_count, val);
-	atomic64_set(&event->hw.period_left, left);
+	local64_set(&event->hw.prev_count, val);
+	local64_set(&event->hw.period_left, left);
 	perf_event_update_userpage(event);
 }
 