author     Will Deacon <will.deacon@arm.com>            2012-03-06 11:35:55 -0500
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2012-03-07 04:40:49 -0500
commit     3f31ae121348afd9ed39700ea2a63c17cd7eeed1
tree       ac7b0386088adf865df514a4bef062bd7d072426 /arch/arm
parent     f6f5a30c834135c9f2fa10400c59ebbdd9188567
ARM: 7357/1: perf: fix overflow handling for xscale2 PMUs
xscale2 PMUs indicate overflow not via the PMU control register, but
via a separate overflow FLAG register.

This patch fixes the xscale2 PMU code to use this register to detect
overflow and ensures that we clear any pending overflow when
disabling a counter.
Cc: <stable@vger.kernel.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
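
The per-counter test at the heart of the fix amounts to masking the value read from the overflow FLAG register against one status bit per counter; the old code passed the PMU control register (pmnc) value instead, which does not carry those bits. Below is a minimal, self-contained sketch of that check, not the kernel code itself: the names and bit positions are illustrative stand-ins for the XSCALE2_*_OVERFLOW definitions and xscale2_pmnc_counter_has_overflowed() in perf_event_xscale.c.

#include <stdbool.h>

/* Illustrative stand-ins for the per-counter overflow bits in the
 * overflow FLAG register (bit positions assumed for the example). */
#define CCOUNT_OVERFLOW		(1u << 0)	/* cycle counter */
#define COUNT0_OVERFLOW		(1u << 1)	/* event counter 0 */
#define COUNT1_OVERFLOW		(1u << 2)
#define COUNT2_OVERFLOW		(1u << 3)
#define COUNT3_OVERFLOW		(1u << 4)

enum counter_idx { CYCLE_COUNTER, COUNTER0, COUNTER1, COUNTER2, COUNTER3 };

/* Overflow must be judged from the FLAG register value (of_flags),
 * never from the PMU control register value. */
bool counter_has_overflowed(unsigned long of_flags, enum counter_idx idx)
{
	switch (idx) {
	case CYCLE_COUNTER:	return of_flags & CCOUNT_OVERFLOW;
	case COUNTER0:		return of_flags & COUNT0_OVERFLOW;
	case COUNTER1:		return of_flags & COUNT1_OVERFLOW;
	case COUNTER2:		return of_flags & COUNT2_OVERFLOW;
	case COUNTER3:		return of_flags & COUNT3_OVERFLOW;
	}
	return false;
}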
Diffstat (limited to 'arch/arm')
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c | 10
1 file changed, 8 insertions, 2 deletions
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index a5bbd360cc4b..71a21e6712f5 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -598,7 +598,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev)
 		if (!event)
 			continue;
 
-		if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx))
+		if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx))
 			continue;
 
 		hwc = &event->hw;
@@ -669,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx)
 static void
 xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
-	unsigned long flags, ien, evtsel;
+	unsigned long flags, ien, evtsel, of_flags;
 	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	ien = xscale2pmu_read_int_enable();
@@ -678,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	switch (idx) {
 	case XSCALE_CYCLE_COUNTER:
 		ien &= ~XSCALE2_CCOUNT_INT_EN;
+		of_flags = XSCALE2_CCOUNT_OVERFLOW;
 		break;
 	case XSCALE_COUNTER0:
 		ien &= ~XSCALE2_COUNT0_INT_EN;
 		evtsel &= ~XSCALE2_COUNT0_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT;
+		of_flags = XSCALE2_COUNT0_OVERFLOW;
 		break;
 	case XSCALE_COUNTER1:
 		ien &= ~XSCALE2_COUNT1_INT_EN;
 		evtsel &= ~XSCALE2_COUNT1_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT;
+		of_flags = XSCALE2_COUNT1_OVERFLOW;
 		break;
 	case XSCALE_COUNTER2:
 		ien &= ~XSCALE2_COUNT2_INT_EN;
 		evtsel &= ~XSCALE2_COUNT2_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT;
+		of_flags = XSCALE2_COUNT2_OVERFLOW;
 		break;
 	case XSCALE_COUNTER3:
 		ien &= ~XSCALE2_COUNT3_INT_EN;
 		evtsel &= ~XSCALE2_COUNT3_EVT_MASK;
 		evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT;
+		of_flags = XSCALE2_COUNT3_OVERFLOW;
 		break;
 	default:
 		WARN_ONCE(1, "invalid counter number (%d)\n", idx);
@@ -707,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	xscale2pmu_write_event_select(evtsel);
 	xscale2pmu_write_int_enable(ien);
+	xscale2pmu_write_overflow_flags(of_flags);
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
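
The second half of the fix, writing of_flags back in xscale2pmu_disable_event(), relies on the FLAG register's bits clearing when they are written back (the same pattern the interrupt handler uses to acknowledge overflows), so a counter being disabled cannot leave a stale overflow pending. The following sketch models that disable sequence only; the accessors and bit positions are hypothetical stand-ins for the real co-processor register helpers such as xscale2pmu_read_int_enable().

/* Fake registers modelling the interrupt-enable and overflow FLAG
 * registers; the real code accesses them through mrc/mcr helpers. */
static unsigned long fake_int_enable_reg;
static unsigned long fake_overflow_flag_reg;

static unsigned long read_int_enable(void) { return fake_int_enable_reg; }
static void write_int_enable(unsigned long v) { fake_int_enable_reg = v; }

/* FLAG bits are assumed write-one-to-clear, as the kernel's
 * "clear the overflow bits" write-back implies. */
static void write_overflow_flags(unsigned long v) { fake_overflow_flag_reg &= ~v; }

#define CCOUNT_INT_EN	(1u << 0)	/* assumed bit position */
#define CCOUNT_OVERFLOW	(1u << 0)	/* as in the sketch above */

static void disable_cycle_counter(void)
{
	unsigned long ien = read_int_enable();

	ien &= ~CCOUNT_INT_EN;			/* mask the counter's interrupt */
	write_int_enable(ien);
	write_overflow_flags(CCOUNT_OVERFLOW);	/* drop any pending overflow */
}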