author    James Hogan <james.hogan@imgtec.com>  2013-02-27 11:16:38 -0500
committer James Hogan <james.hogan@imgtec.com>  2013-03-15 09:20:00 -0400
commit    db59932f62386cdfd8510c27a83118c5e915e9ea (patch)
tree      f644aaf9845d16b7da415beb3b1a41b8744eed1c
parent    c43ca04b5e7854b3996f84a495e4553941e76266 (diff)
metag: perf: fixes for interrupting perf counters
The overflow handler needs to read-modify-write when re-enabling the
counter so as not to change the counter value, as it may have been
changed to ready the next interrupt on overflow. Similarly, for
interrupting counters metag_pmu_enable_counter needs to leave the
counter value unchanged rather than resetting it to zero.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
-rw-r--r--  arch/metag/kernel/perf/perf_event.c  |  22
1 file changed, 15 insertions(+), 7 deletions(-)
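
Aside: as an illustration of the enable-path half of the fix, here is a minimal, self-contained sketch in plain C (not the kernel code itself) of the read-modify-write idea applied in metag_pmu_enable_counter. The register layout is inferred from the masks visible in the diff below: bits 31:28 carry the event config, bits 27:24 the per-hardware-thread enable, and bits 23:0 the running count. mock_reg, hwthread and has_overflow_irq are hypothetical stand-ins for metag_in32()/metag_out32(), cpu_2_hwthread_id[] and metag_pmu->max_period.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the PERF_COUNT(idx) register. */
static uint32_t mock_reg = 0x00abcdef;	/* preloaded so the next overflow fires early */

static uint32_t enable_counter(uint32_t config, unsigned int hwthread,
			       int has_overflow_irq)
{
	/* Config in bits 31:28, per-hardware-thread enable in bits 27:24. */
	uint32_t tmp = ((config & 0xf) << 28) | ((1u << 24) << hwthread);

	/* Interrupting counters: keep the preloaded 24-bit count instead of zeroing it. */
	if (has_overflow_irq)
		tmp |= mock_reg & 0x00ffffff;

	mock_reg = tmp;	/* stands in for metag_out32(tmp, PERF_COUNT(idx)) */
	return tmp;
}

int main(void)
{
	printf("0x%08" PRIx32 "\n", enable_counter(0x5, 0, 1));	/* prints 0x51abcdef */
	return 0;
}
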
diff --git a/arch/metag/kernel/perf/perf_event.c b/arch/metag/kernel/perf/perf_event.c
index 8096db2a550b..a00f527eade5 100644
--- a/arch/metag/kernel/perf/perf_event.c
+++ b/arch/metag/kernel/perf/perf_event.c
@@ -643,13 +643,15 @@ static void metag_pmu_enable_counter(struct hw_perf_event *event, int idx)
 		config = tmp >> 4;
 	}
 
-	/*
-	 * Enabled counters start from 0. Early cores clear the count on
-	 * write but newer cores don't, so we make sure that the count is
-	 * set to 0.
-	 */
 	tmp = ((config & 0xf) << 28) |
 			((1 << 24) << cpu_2_hwthread_id[get_cpu()]);
+	if (metag_pmu->max_period)
+		/*
+		 * Cores supporting overflow interrupts may have had the counter
+		 * set to a specific value that needs preserving.
+		 */
+		tmp |= metag_in32(PERF_COUNT(idx)) & 0x00ffffff;
+
 	metag_out32(tmp, PERF_COUNT(idx));
 unlock:
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
@@ -764,10 +766,16 @@ static irqreturn_t metag_pmu_counter_overflow(int irq, void *dev)
 
 	/*
 	 * Enable the counter again once core overflow processing has
-	 * completed.
+	 * completed. Note the counter value may have been modified while it was
+	 * inactive to set it up ready for the next interrupt.
 	 */
-	if (!perf_event_overflow(event, &sampledata, regs))
+	if (!perf_event_overflow(event, &sampledata, regs)) {
+		__global_lock2(flags);
+		counter = (counter & 0xff000000) |
+				(metag_in32(PERF_COUNT(idx)) & 0x00ffffff);
 		metag_out32(counter, PERF_COUNT(idx));
+		__global_unlock2(flags);
+	}
 
 	return IRQ_HANDLED;
 }
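
A companion sketch for the overflow-handler half of the fix, under the same assumptions as above: when re-enabling after an overflow, the handler keeps the config/enable bits (31:24) from the value it sampled when it stopped the counter, but re-reads the low 24 bits from the hardware so any new start value programmed while the counter was inactive is preserved. In the kernel the read-modify-write is done between __global_lock2() and __global_unlock2(); mock_reg is again a hypothetical stand-in for the PERF_COUNT register.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the PERF_COUNT(idx) register. */
static uint32_t mock_reg;

static void reenable_after_overflow(uint32_t counter_at_irq)
{
	/*
	 * In the kernel this read-modify-write sits between
	 * __global_lock2(flags) and __global_unlock2(flags).
	 */
	mock_reg = (counter_at_irq & 0xff000000) |
		   (mock_reg & 0x00ffffff);
}

int main(void)
{
	mock_reg = 0x00ff0000;			/* new period written while the counter was stopped */
	reenable_after_overflow(0x51000000);	/* config/enable bits sampled at interrupt time */
	printf("0x%08" PRIx32 "\n", mock_reg);	/* prints 0x51ff0000 */
	return 0;
}
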