Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_intel.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel.c	57
1 files changed, 28 insertions, 29 deletions
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 44b60c852107..84bfde64a337 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -548,9 +548,9 @@ static inline void intel_pmu_ack_status(u64 ack)
 }
 
 static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_disable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = __idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, mask;
 
 	mask = 0xfULL << (idx * 4);
@@ -621,26 +621,28 @@ static void intel_pmu_drain_bts_buffer(void)
 }
 
 static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
+intel_pmu_disable_event(struct perf_event *event)
 {
-	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		intel_pmu_disable_bts();
 		intel_pmu_drain_bts_buffer();
 		return;
 	}
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_disable_fixed(hwc, idx);
+		intel_pmu_disable_fixed(hwc);
 		return;
 	}
 
-	x86_pmu_disable_event(hwc, idx);
+	x86_pmu_disable_event(event);
 }
 
 static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
+intel_pmu_enable_fixed(struct hw_perf_event *hwc)
 {
-	int idx = __idx - X86_PMC_IDX_FIXED;
+	int idx = hwc->idx - X86_PMC_IDX_FIXED;
 	u64 ctrl_val, bits, mask;
 	int err;
 
@@ -670,9 +672,11 @@ intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
 	err = checking_wrmsrl(hwc->config_base, ctrl_val);
 }
 
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void intel_pmu_enable_event(struct perf_event *event)
 {
-	if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
+	struct hw_perf_event *hwc = &event->hw;
+
+	if (unlikely(hwc->idx == X86_PMC_IDX_FIXED_BTS)) {
 		if (!__get_cpu_var(cpu_hw_events).enabled)
 			return;
 
@@ -681,11 +685,11 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	}
 
 	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-		intel_pmu_enable_fixed(hwc, idx);
+		intel_pmu_enable_fixed(hwc);
 		return;
 	}
 
-	__x86_pmu_enable_event(hwc, idx);
+	__x86_pmu_enable_event(hwc);
 }
 
 /*
@@ -694,14 +698,8 @@ static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
  */
 static int intel_pmu_save_and_restart(struct perf_event *event)
 {
-	struct hw_perf_event *hwc = &event->hw;
-	int idx = hwc->idx;
-	int ret;
-
-	x86_perf_event_update(event, hwc, idx);
-	ret = x86_perf_event_set_period(event, hwc, idx);
-
-	return ret;
+	x86_perf_event_update(event);
+	return x86_perf_event_set_period(event);
 }
 
 static void intel_pmu_reset(void)
@@ -745,11 +743,11 @@ static int intel_pmu_handle_irq(struct pt_regs *regs)
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
 
-	perf_disable();
+	intel_pmu_disable_all();
 	intel_pmu_drain_bts_buffer();
 	status = intel_pmu_get_status();
 	if (!status) {
-		perf_enable();
+		intel_pmu_enable_all();
 		return 0;
 	}
 
@@ -759,8 +757,7 @@ again:
 		WARN_ONCE(1, "perfevents: irq loop stuck!\n");
 		perf_event_print_debug();
 		intel_pmu_reset();
-		perf_enable();
-		return 1;
+		goto done;
 	}
 
 	inc_irq_stat(apic_perf_irqs);
@@ -768,7 +765,6 @@ again:
 	for_each_set_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
 		struct perf_event *event = cpuc->events[bit];
 
-		clear_bit(bit, (unsigned long *) &status);
 		if (!test_bit(bit, cpuc->active_mask))
 			continue;
 
@@ -778,7 +774,7 @@ again:
 		data.period = event->hw.last_period;
 
 		if (perf_event_overflow(event, 1, &data, regs))
-			intel_pmu_disable_event(&event->hw, bit);
+			x86_pmu_stop(event);
 	}
 
 	intel_pmu_ack_status(ack);
@@ -790,8 +786,8 @@ again:
 	if (status)
 		goto again;
 
-	perf_enable();
-
+done:
+	intel_pmu_enable_all();
 	return 1;
 }
 
@@ -870,7 +866,10 @@ static __initconst struct x86_pmu intel_pmu = {
 	.max_period		= (1ULL << 31) - 1,
 	.enable_bts		= intel_pmu_enable_bts,
 	.disable_bts		= intel_pmu_disable_bts,
-	.get_event_constraints	= intel_get_event_constraints
+	.get_event_constraints	= intel_get_event_constraints,
+
+	.cpu_starting		= init_debug_store_on_cpu,
+	.cpu_dying		= fini_debug_store_on_cpu,
 };
 
 static __init int intel_pmu_init(void)