diff options
Diffstat (limited to 'arch/arm/kernel/perf_event_xscale.c')
| -rw-r--r-- | arch/arm/kernel/perf_event_xscale.c | 20 |
1 file changed, 16 insertions, 4 deletions
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 3b99d8269829..71a21e6712f5 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c | |||
| @@ -255,11 +255,14 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
| 255 | struct perf_event *event = cpuc->events[idx]; | 255 | struct perf_event *event = cpuc->events[idx]; |
| 256 | struct hw_perf_event *hwc; | 256 | struct hw_perf_event *hwc; |
| 257 | 257 | ||
| 258 | if (!event) | ||
| 259 | continue; | ||
| 260 | |||
| 258 | if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) | 261 | if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) |
| 259 | continue; | 262 | continue; |
| 260 | 263 | ||
| 261 | hwc = &event->hw; | 264 | hwc = &event->hw; |
| 262 | armpmu_event_update(event, hwc, idx, 1); | 265 | armpmu_event_update(event, hwc, idx); |
| 263 | data.period = event->hw.last_period; | 266 | data.period = event->hw.last_period; |
| 264 | if (!armpmu_event_set_period(event, hwc, idx)) | 267 | if (!armpmu_event_set_period(event, hwc, idx)) |
| 265 | continue; | 268 | continue; |
| @@ -592,11 +595,14 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
| 592 | struct perf_event *event = cpuc->events[idx]; | 595 | struct perf_event *event = cpuc->events[idx]; |
| 593 | struct hw_perf_event *hwc; | 596 | struct hw_perf_event *hwc; |
| 594 | 597 | ||
| 595 | if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx)) | 598 | if (!event) |
| 599 | continue; | ||
| 600 | |||
| 601 | if (!xscale2_pmnc_counter_has_overflowed(of_flags, idx)) | ||
| 596 | continue; | 602 | continue; |
| 597 | 603 | ||
| 598 | hwc = &event->hw; | 604 | hwc = &event->hw; |
| 599 | armpmu_event_update(event, hwc, idx, 1); | 605 | armpmu_event_update(event, hwc, idx); |
| 600 | data.period = event->hw.last_period; | 606 | data.period = event->hw.last_period; |
| 601 | if (!armpmu_event_set_period(event, hwc, idx)) | 607 | if (!armpmu_event_set_period(event, hwc, idx)) |
| 602 | continue; | 608 | continue; |
| @@ -663,7 +669,7 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
| 663 | static void | 669 | static void |
| 664 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | 670 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) |
| 665 | { | 671 | { |
| 666 | unsigned long flags, ien, evtsel; | 672 | unsigned long flags, ien, evtsel, of_flags; |
| 667 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | 673 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); |
| 668 | 674 | ||
| 669 | ien = xscale2pmu_read_int_enable(); | 675 | ien = xscale2pmu_read_int_enable(); |
| @@ -672,26 +678,31 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
| 672 | switch (idx) { | 678 | switch (idx) { |
| 673 | case XSCALE_CYCLE_COUNTER: | 679 | case XSCALE_CYCLE_COUNTER: |
| 674 | ien &= ~XSCALE2_CCOUNT_INT_EN; | 680 | ien &= ~XSCALE2_CCOUNT_INT_EN; |
| 681 | of_flags = XSCALE2_CCOUNT_OVERFLOW; | ||
| 675 | break; | 682 | break; |
| 676 | case XSCALE_COUNTER0: | 683 | case XSCALE_COUNTER0: |
| 677 | ien &= ~XSCALE2_COUNT0_INT_EN; | 684 | ien &= ~XSCALE2_COUNT0_INT_EN; |
| 678 | evtsel &= ~XSCALE2_COUNT0_EVT_MASK; | 685 | evtsel &= ~XSCALE2_COUNT0_EVT_MASK; |
| 679 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT; | 686 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT0_EVT_SHFT; |
| 687 | of_flags = XSCALE2_COUNT0_OVERFLOW; | ||
| 680 | break; | 688 | break; |
| 681 | case XSCALE_COUNTER1: | 689 | case XSCALE_COUNTER1: |
| 682 | ien &= ~XSCALE2_COUNT1_INT_EN; | 690 | ien &= ~XSCALE2_COUNT1_INT_EN; |
| 683 | evtsel &= ~XSCALE2_COUNT1_EVT_MASK; | 691 | evtsel &= ~XSCALE2_COUNT1_EVT_MASK; |
| 684 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT; | 692 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT1_EVT_SHFT; |
| 693 | of_flags = XSCALE2_COUNT1_OVERFLOW; | ||
| 685 | break; | 694 | break; |
| 686 | case XSCALE_COUNTER2: | 695 | case XSCALE_COUNTER2: |
| 687 | ien &= ~XSCALE2_COUNT2_INT_EN; | 696 | ien &= ~XSCALE2_COUNT2_INT_EN; |
| 688 | evtsel &= ~XSCALE2_COUNT2_EVT_MASK; | 697 | evtsel &= ~XSCALE2_COUNT2_EVT_MASK; |
| 689 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT; | 698 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT2_EVT_SHFT; |
| 699 | of_flags = XSCALE2_COUNT2_OVERFLOW; | ||
| 690 | break; | 700 | break; |
| 691 | case XSCALE_COUNTER3: | 701 | case XSCALE_COUNTER3: |
| 692 | ien &= ~XSCALE2_COUNT3_INT_EN; | 702 | ien &= ~XSCALE2_COUNT3_INT_EN; |
| 693 | evtsel &= ~XSCALE2_COUNT3_EVT_MASK; | 703 | evtsel &= ~XSCALE2_COUNT3_EVT_MASK; |
| 694 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT; | 704 | evtsel |= XSCALE_PERFCTR_UNUSED << XSCALE2_COUNT3_EVT_SHFT; |
| 705 | of_flags = XSCALE2_COUNT3_OVERFLOW; | ||
| 695 | break; | 706 | break; |
| 696 | default: | 707 | default: |
| 697 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | 708 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); |
| @@ -701,6 +712,7 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
| 701 | raw_spin_lock_irqsave(&events->pmu_lock, flags); | 712 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
| 702 | xscale2pmu_write_event_select(evtsel); | 713 | xscale2pmu_write_event_select(evtsel); |
| 703 | xscale2pmu_write_int_enable(ien); | 714 | xscale2pmu_write_int_enable(ien); |
| 715 | xscale2pmu_write_overflow_flags(of_flags); | ||
| 704 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); | 716 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
| 705 | } | 717 | } |
| 706 | 718 | ||
