author		Linus Torvalds <torvalds@linux-foundation.org>	2011-10-28 15:02:27 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-28 15:02:27 -0400
commit		1fdb24e969110fafea36d3b393bea438f702c87f (patch)
tree		47a1dfef8a259e7922285315f8a02d31b4efe2f1 /arch/arm/kernel/perf_event_v7.c
parent		f362f98e7c445643d27c610bb7a86b79727b592e (diff)
parent		531a6a941745e1e045dd2a6bd09e1dc01247a5f3 (diff)
Merge branch 'devel-stable' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm
* 'devel-stable' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm: (178 commits)
ARM: 7139/1: fix compilation with CONFIG_ARM_ATAG_DTB_COMPAT and large TEXT_OFFSET
ARM: gic, local timers: use the request_percpu_irq() interface
ARM: gic: consolidate PPI handling
ARM: switch from NO_MACH_MEMORY_H to NEED_MACH_MEMORY_H
ARM: mach-s5p64x0: remove mach/memory.h
ARM: mach-s3c64xx: remove mach/memory.h
ARM: plat-mxc: remove mach/memory.h
ARM: mach-prima2: remove mach/memory.h
ARM: mach-zynq: remove mach/memory.h
ARM: mach-bcmring: remove mach/memory.h
ARM: mach-davinci: remove mach/memory.h
ARM: mach-pxa: remove mach/memory.h
ARM: mach-ixp4xx: remove mach/memory.h
ARM: mach-h720x: remove mach/memory.h
ARM: mach-vt8500: remove mach/memory.h
ARM: mach-s5pc100: remove mach/memory.h
ARM: mach-tegra: remove mach/memory.h
ARM: plat-tcc: remove mach/memory.h
ARM: mach-mmp: remove mach/memory.h
ARM: mach-cns3xxx: remove mach/memory.h
...
Fix up mostly pretty trivial conflicts in:
- arch/arm/Kconfig
- arch/arm/include/asm/localtimer.h
- arch/arm/kernel/Makefile
- arch/arm/mach-shmobile/board-ap4evb.c
- arch/arm/mach-u300/core.c
- arch/arm/mm/dma-mapping.c
- arch/arm/mm/proc-v7.S
- arch/arm/plat-omap/Kconfig
largely due to some CONFIG option renaming (i.e. CONFIG_PM_SLEEP ->
CONFIG_ARM_CPU_SUSPEND for the ARM-specific suspend code, etc.) and the
addition of NEED_MACH_MEMORY_H next to HAVE_IDE.
Diffstat (limited to 'arch/arm/kernel/perf_event_v7.c')
-rw-r--r--	arch/arm/kernel/perf_event_v7.c	395
1 file changed, 196 insertions(+), 199 deletions(-)
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 6be3e2e4d838..1ef6d0034b85 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,6 +17,9 @@
  */
 
 #ifdef CONFIG_CPU_V7
+
+static struct arm_pmu armv7pmu;
+
 /*
  * Common ARMv7 event types
  *
@@ -676,23 +679,24 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
- * Perf Events counters
+ * Perf Events' indices
  */
-enum armv7_counters {
-	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
-	ARMV7_COUNTER0		= 2,	/* First event counter */
-};
+#define	ARMV7_IDX_CYCLE_COUNTER	0
+#define	ARMV7_IDX_COUNTER0	1
+#define	ARMV7_IDX_COUNTER_LAST	(ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
+
+#define	ARMV7_MAX_COUNTERS	32
+#define	ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)
 
 /*
- * The cycle counter is ARMV7_CYCLE_COUNTER.
- * The first event counter is ARMV7_COUNTER0.
- * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
+ * ARMv7 low level PMNC access
  */
-#define	ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)
 
 /*
- * ARMv7 low level PMNC access
+ * Perf Event to low level counters mapping
  */
+#define	ARMV7_IDX_TO_COUNTER(x)	\
+	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
 
 /*
  * Per-CPU PMNC: config reg
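(Illustrative aside, not part of the commit: the hunk above replaces the old 1-based enum with 0-based index macros, so perf index 0 is the dedicated cycle counter and event indices 1..N map onto hardware counters 0..N-1. A minimal standalone sketch of the arithmetic, with the macros copied from the patch; the kernel's cpu_pmu-dependent ARMV7_IDX_COUNTER_LAST is left out.)

/* Standalone sketch: how the new macros map perf indices to counters. */
#include <assert.h>

#define ARMV7_IDX_CYCLE_COUNTER	0
#define ARMV7_IDX_COUNTER0	1
#define ARMV7_MAX_COUNTERS	32
#define ARMV7_COUNTER_MASK	(ARMV7_MAX_COUNTERS - 1)

#define ARMV7_IDX_TO_COUNTER(x)	\
	(((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)

int main(void)
{
	/* Perf index 1 (ARMV7_IDX_COUNTER0) selects hardware counter 0. */
	assert(ARMV7_IDX_TO_COUNTER(ARMV7_IDX_COUNTER0) == 0);
	/* Perf index 6 selects hardware counter 5, and so on. */
	assert(ARMV7_IDX_TO_COUNTER(6) == 5);
	return 0;
}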
@@ -708,103 +712,76 @@ enum armv7_counters {
 #define	ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */
 
 /*
- * Available counters
- */
-#define ARMV7_CNT0		0	/* First event counter */
-#define ARMV7_CCNT		31	/* Cycle counter */
-
-/* Perf Event to low level counters mapping */
-#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)
-
-/*
- * CNTENS: counters enable reg
- */
-#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * CNTENC: counters disable reg
- */
-#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENS: counters overflow interrupt enable reg
- */
-#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)
-
-/*
- * INTENC: counters overflow interrupt disable reg
- */
-#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)
-
-/*
- * EVTSEL: Event selection reg
+ * FLAG: counters overflow flag status reg
  */
-#define	ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */
+#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
+#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
 
 /*
- * SELECT: Counter selection reg
+ * PMXEVTYPER: Event selection reg
  */
-#define	ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */
+#define	ARMV7_EVTYPE_MASK	0xc00000ff	/* Mask for writable bits */
+#define	ARMV7_EVTYPE_EVENT	0xff		/* Mask for EVENT bits */
 
 /*
- * FLAG: counters overflow flag status reg
+ * Event filters for PMUv2
  */
-#define	ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
-#define	ARMV7_FLAG_C		(1 << ARMV7_CCNT)
-#define	ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
-#define	ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK
+#define	ARMV7_EXCLUDE_PL1	(1 << 31)
+#define	ARMV7_EXCLUDE_USER	(1 << 30)
+#define	ARMV7_INCLUDE_HYP	(1 << 27)
 
-static inline unsigned long armv7_pmnc_read(void)
+static inline u32 armv7_pmnc_read(void)
 {
 	u32 val;
 	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
 	return val;
 }
 
-static inline void armv7_pmnc_write(unsigned long val)
+static inline void armv7_pmnc_write(u32 val)
 {
 	val &= ARMV7_PMNC_MASK;
 	isb();
 	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
 }
 
-static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
+static inline int armv7_pmnc_has_overflowed(u32 pmnc)
 {
 	return pmnc & ARMV7_OVERFLOWED_MASK;
 }
 
-static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
-					enum armv7_counters counter)
+static inline int armv7_pmnc_counter_valid(int idx)
+{
+	return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST;
+}
+
+static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
 {
 	int ret = 0;
+	u32 counter;
 
-	if (counter == ARMV7_CYCLE_COUNTER)
-		ret = pmnc & ARMV7_FLAG_C;
-	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
-		ret = pmnc & ARMV7_FLAG_P(counter);
-	else
+	if (!armv7_pmnc_counter_valid(idx)) {
 		pr_err("CPU%u checking wrong counter %d overflow status\n",
-			smp_processor_id(), counter);
+			smp_processor_id(), idx);
+	} else {
+		counter = ARMV7_IDX_TO_COUNTER(idx);
+		ret = pmnc & BIT(counter);
+	}
 
 	return ret;
 }
 
-static inline int armv7_pmnc_select_counter(unsigned int idx)
+static inline int armv7_pmnc_select_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
-		pr_err("CPU%u selecting wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u selecting wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
-	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
 	isb();
 
 	return idx;
@@ -812,124 +789,95 @@ static inline int armv7_pmnc_select_counter(unsigned int idx)
 
 static inline u32 armv7pmu_read_counter(int idx)
 {
-	unsigned long value = 0;
+	u32 value = 0;
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mrc p15, 0, %0, c9, c13, 2"
-				     : "=r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u reading wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
 
 	return value;
 }
 
 static inline void armv7pmu_write_counter(int idx, u32 value)
 {
-	if (idx == ARMV7_CYCLE_COUNTER)
-		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
-	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
-		if (armv7_pmnc_select_counter(idx) == idx)
-			asm volatile("mcr p15, 0, %0, c9, c13, 2"
-				     : : "r" (value));
-	} else
+	if (!armv7_pmnc_counter_valid(idx))
 		pr_err("CPU%u writing wrong counter %d\n",
 			smp_processor_id(), idx);
+	else if (idx == ARMV7_IDX_CYCLE_COUNTER)
+		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
+	else if (armv7_pmnc_select_counter(idx) == idx)
+		asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
 }
 
-static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
+static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
 {
 	if (armv7_pmnc_select_counter(idx) == idx) {
-		val &= ARMV7_EVTSEL_MASK;
+		val &= ARMV7_EVTYPE_MASK;
 		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
 	}
 }
 
-static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
+static inline int armv7_pmnc_enable_counter(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENS_C;
-	else
-		val = ARMV7_CNTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
+static inline int armv7_pmnc_disable_counter(int idx)
 {
-	u32 val;
-
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_CNTENC_C;
-	else
-		val = ARMV7_CNTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
+static inline int armv7_pmnc_enable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u enabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENS_C;
-	else
-		val = ARMV7_INTENS_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
 	return idx;
 }
 
-static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
+static inline int armv7_pmnc_disable_intens(int idx)
 {
-	u32 val;
+	u32 counter;
 
-	if ((idx != ARMV7_CYCLE_COUNTER) &&
-	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
-		pr_err("CPU%u disabling wrong PMNC counter"
-			" interrupt enable %d\n", smp_processor_id(), idx);
-		return -1;
+	if (!armv7_pmnc_counter_valid(idx)) {
+		pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
+			smp_processor_id(), idx);
+		return -EINVAL;
 	}
 
-	if (idx == ARMV7_CYCLE_COUNTER)
-		val = ARMV7_INTENC_C;
-	else
-		val = ARMV7_INTENC_P(idx);
-
-	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));
-
+	counter = ARMV7_IDX_TO_COUNTER(idx);
+	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
 	return idx;
 }
 
@@ -973,14 +921,14 @@ static void armv7_pmnc_dump_regs(void)
 	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
 	printk(KERN_INFO "CCNT  =0x%08x\n", val);
 
-	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
+	for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) {
 		armv7_pmnc_select_counter(cnt);
 		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
 		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
-			cnt-ARMV7_EVENT_CNT_TO_CNTx, val);
+			ARMV7_IDX_TO_COUNTER(cnt), val);
 	}
 }
 #endif
@@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void)
 static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Enable counter and interrupt, and set the counter to count
 	 * the event that we're interested in.
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 
 	/*
 	 * Set event (if destined for PMNx counters)
-	 * We don't need to set the event if it's a cycle count
+	 * We only need to set the event for the cycle counter if we
+	 * have the ability to perform event filtering.
 	 */
-	if (idx != ARMV7_CYCLE_COUNTER)
+	if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
 		armv7_pmnc_write_evtsel(idx, hwc->config_base);
 
 	/*
@@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_enable_counter(idx);
 
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
 	/*
 	 * Disable counter and interrupt
 	 */
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 
 	/*
 	 * Disable counter
@@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
 	 */
 	armv7_pmnc_disable_intens(idx);
 
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 {
-	unsigned long pmnc;
+	u32 pmnc;
 	struct perf_sample_data data;
-	struct cpu_hw_events *cpuc;
+	struct pmu_hw_events *cpuc;
 	struct pt_regs *regs;
 	int idx;
 
@@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 	perf_sample_data_init(&data, 0);
 
 	cpuc = &__get_cpu_var(cpu_hw_events);
-	for (idx = 0; idx <= armpmu->num_events; ++idx) {
+	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
 		struct perf_event *event = cpuc->events[idx];
 		struct hw_perf_event *hwc;
 
-		if (!test_bit(idx, cpuc->active_mask))
-			continue;
-
 		/*
 		 * We have a single interrupt for all counters. Check that
 		 * each counter has overflowed before we process it.
@@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 			continue;
 
 		if (perf_event_overflow(event, &data, regs))
-			armpmu->disable(hwc, idx);
+			cpu_pmu->disable(hwc, idx);
 	}
 
 	/*
@@ -1108,61 +1056,114 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
 static void armv7pmu_start(void)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Enable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
 static void armv7pmu_stop(void)
 {
 	unsigned long flags;
+	struct pmu_hw_events *events = cpu_pmu->get_hw_events();
 
-	raw_spin_lock_irqsave(&pmu_lock, flags);
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
 	/* Disable all counters */
 	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
-	raw_spin_unlock_irqrestore(&pmu_lock, flags);
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
-static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
+static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
 				  struct hw_perf_event *event)
 {
 	int idx;
+	unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT;
 
 	/* Always place a cycle counter into the cycle counter. */
-	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
-		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
+	if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
+		if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
 			return -EAGAIN;
 
-		return ARMV7_CYCLE_COUNTER;
-	} else {
-		/*
-		 * For anything other than a cycle counter, try and use
-		 * the events counters
-		 */
-		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
-			if (!test_and_set_bit(idx, cpuc->used_mask))
-				return idx;
-		}
+		return ARMV7_IDX_CYCLE_COUNTER;
+	}
 
-		/* The counters are all in use. */
-		return -EAGAIN;
+	/*
+	 * For anything other than a cycle counter, try and use
+	 * the events counters
+	 */
+	for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
+		if (!test_and_set_bit(idx, cpuc->used_mask))
+			return idx;
 	}
+
+	/* The counters are all in use. */
+	return -EAGAIN;
+}
+
+/*
+ * Add an event filter to a given event. This will only work for PMUv2 PMUs.
+ */
+static int armv7pmu_set_event_filter(struct hw_perf_event *event,
+				     struct perf_event_attr *attr)
+{
+	unsigned long config_base = 0;
+
+	if (attr->exclude_idle)
+		return -EPERM;
+	if (attr->exclude_user)
+		config_base |= ARMV7_EXCLUDE_USER;
+	if (attr->exclude_kernel)
+		config_base |= ARMV7_EXCLUDE_PL1;
+	if (!attr->exclude_hv)
+		config_base |= ARMV7_INCLUDE_HYP;
+
+	/*
+	 * Install the filter into config_base as this is used to
+	 * construct the event type.
+	 */
+	event->config_base = config_base;
+
+	return 0;
 }
 
 static void armv7pmu_reset(void *info)
 {
-	u32 idx, nb_cnt = armpmu->num_events;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	/* The counter and interrupt enable registers are unknown at reset. */
-	for (idx = 1; idx < nb_cnt; ++idx)
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx)
 		armv7pmu_disable_event(NULL, idx);
 
 	/* Initialize & Reset PMNC: C and P bits */
 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
 }
 
+static int armv7_a8_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a8_perf_map,
+				&armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a9_perf_map,
+				&armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a5_perf_map,
+				&armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a15_perf_map,
+				&armv7_a15_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 	.enable			= armv7pmu_enable_event,
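(Illustrative aside, not part of the commit: armv7pmu_set_event_filter() above composes the new PMUv2 filter by ORing exclude bits into config_base, while the event number itself occupies the low byte per ARMV7_EVTYPE_EVENT. A minimal standalone sketch of the bit arithmetic, using the macros from the patch; the attr flags are plain ints here rather than a struct perf_event_attr.)

/* Standalone sketch: composing the PMUv2 exclude filter bits. */
#include <stdio.h>

#define ARMV7_EXCLUDE_PL1	(1U << 31)
#define ARMV7_EXCLUDE_USER	(1U << 30)
#define ARMV7_INCLUDE_HYP	(1U << 27)

int main(void)
{
	/* A user-space-only event: kernel (PL1) and hyp both excluded. */
	unsigned int config_base = 0;
	int exclude_user = 0, exclude_kernel = 1, exclude_hv = 1;

	if (exclude_user)
		config_base |= ARMV7_EXCLUDE_USER;
	if (exclude_kernel)
		config_base |= ARMV7_EXCLUDE_PL1;
	if (!exclude_hv)
		config_base |= ARMV7_INCLUDE_HYP;

	printf("config_base = 0x%08x\n", config_base);	/* 0x80000000 */
	return 0;
}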
@@ -1173,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
 	.reset			= armv7pmu_reset,
-	.raw_event_mask		= 0xFF,
 	.max_period		= (1LLU << 32) - 1,
 };
 
@@ -1188,62 +1188,59 @@ static u32 __init armv7_read_num_pmnc_events(void)
 	return nb_cnt + 1;
 }
 
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name		= "ARMv7 Cortex-A8";
-	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a8_perf_map;
+	armv7pmu.map_event	= armv7_a8_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name		= "ARMv7 Cortex-A9";
-	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a9_perf_map;
+	armv7pmu.map_event	= armv7_a9_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name		= "ARMv7 Cortex-A5";
-	armv7pmu.cache_map	= &armv7_a5_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a5_perf_map;
+	armv7pmu.map_event	= armv7_a5_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
 
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name		= "ARMv7 Cortex-A15";
-	armv7pmu.cache_map	= &armv7_a15_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a15_perf_map;
+	armv7pmu.map_event	= armv7_a15_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
+	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
 	return &armv7pmu;
 }
 #else
-static const struct arm_pmu *__init armv7_a8_pmu_init(void)
+static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a9_pmu_init(void)
+static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	return NULL;
 }
 
-static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	return NULL;
 }
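(Illustrative aside, not part of the commit: armv7pmu_get_event_idx() in the diff above implements a simple allocation policy — a cycle-count event always claims the dedicated cycle counter at index 0, and everything else takes the first free event counter from index 1 up. A standalone sketch of that policy, with a plain bitmask standing in for the kernel's atomic used_mask bitops and 0xff as a placeholder event number.)

/* Standalone sketch of the allocation policy in armv7pmu_get_event_idx(). */
#include <stdio.h>

#define IDX_CYCLE_COUNTER	0
#define IDX_COUNTER0		1
#define EVT_CPU_CYCLES		0xff	/* placeholder event number */

static int get_event_idx(unsigned int *used_mask, int num_events,
			 unsigned long evtype)
{
	int idx;

	/* A cycle-count event always goes to the dedicated cycle counter. */
	if (evtype == EVT_CPU_CYCLES) {
		if (*used_mask & (1U << IDX_CYCLE_COUNTER))
			return -1;	/* stands in for -EAGAIN */
		*used_mask |= 1U << IDX_CYCLE_COUNTER;
		return IDX_CYCLE_COUNTER;
	}

	/* Anything else takes the first free event counter. */
	for (idx = IDX_COUNTER0; idx < num_events; ++idx) {
		if (!(*used_mask & (1U << idx))) {
			*used_mask |= 1U << idx;
			return idx;
		}
	}

	return -1;	/* all counters busy */
}

int main(void)
{
	unsigned int used = 0;

	printf("%d\n", get_event_idx(&used, 5, EVT_CPU_CYCLES));	/* 0 */
	printf("%d\n", get_event_idx(&used, 5, 0x08));			/* 1 */
	printf("%d\n", get_event_idx(&used, 5, 0x08));			/* 2 */
	return 0;
}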