author     James Hogan <james.hogan@imgtec.com>  2013-02-27 11:16:38 -0500
committer  James Hogan <james.hogan@imgtec.com>  2013-03-15 09:20:03 -0400
commit     c6ac1e6edacc7e1fb0405d61f95a797c6a712411 (patch)
tree       8794cd7bbb6873aaf5cafdaa75af0daf35409dfa /arch/metag
parent     db59932f62386cdfd8510c27a83118c5e915e9ea (diff)
metag: perf: add missing prev_count updates
The prev_count needs setting when changing the counter value, otherwise
the calculated delta will be wrong, which for frequency sampling
(dynamic period sampling) results in sampling at too high a frequency.
For non-interrupting performance counters it should also be cleared
when enabling the counter, since the write to the PERF_COUNT register
will clear the perf counter.

This also includes a minor change to remove the u64 cast from the
metag_pmu->write() call, as metag_pmu->write() takes a u32 anyway, and
in any case GCC is smart enough to optimise away the cast.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
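For context, here is a minimal sketch of the read-side delta calculation
that depends on prev_count staying in sync with the hardware counter. It
is modelled on the usual arch perf_event_update() pattern rather than
quoted from arch/metag/kernel/perf/perf_event.c; the function name, the
metag_pmu->read() hook and the use of MAX_PERIOD as the counter mask are
illustrative assumptions:

/*
 * Illustrative sketch only. It assumes the file-local metag_pmu and
 * MAX_PERIOD definitions and a metag_pmu->read() counter-read hook.
 * It shows why hwc->prev_count must track what was actually written to
 * the hardware counter: if prev_count is stale, the computed delta is
 * too large and frequency sampling over-estimates the event rate.
 */
static void example_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc, int idx)
{
	u64 prev_raw, new_raw;
	s64 delta;

	/* Re-read until prev_count is atomically advanced to the new value. */
	do {
		prev_raw = local64_read(&hwc->prev_count);
		new_raw = metag_pmu->read(idx);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw, new_raw) != prev_raw);

	/* Events counted since the last update, masked to the counter width. */
	delta = (new_raw - prev_raw) & MAX_PERIOD;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

With this patch, set_period records -(s32)left in prev_count alongside the
hardware write, and enable_counter zeroes it when the write resets the
counter, so the next delta is computed against the value the counter
actually holds.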
Diffstat (limited to 'arch/metag')
-rw-r--r--  arch/metag/kernel/perf/perf_event.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index a00f527eade5..5bf984feaaa1 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -240,8 +240,10 @@ int metag_pmu_event_set_period(struct perf_event *event,
 	if (left > (s64)metag_pmu->max_period)
 		left = metag_pmu->max_period;
 
-	if (metag_pmu->write)
-		metag_pmu->write(idx, (u64)(-left) & MAX_PERIOD);
+	if (metag_pmu->write) {
+		local64_set(&hwc->prev_count, -(s32)left);
+		metag_pmu->write(idx, -left & MAX_PERIOD);
+	}
 
 	perf_event_update_userpage(event);
 
@@ -651,6 +653,12 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
 		 * set to a specific value that needs preserving.
 		 */
 		tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
+	else
+		/*
+		 * Older cores reset the counter on write, so prev_count needs
+		 * resetting too so we can calculate a correct delta.
+		 */
+		local64_set(&event->prev_count, 0);
 
 	metag_out32(tmp, PERF_COUNT(idx));
 unlock: