aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>2012-07-20 10:18:07 -0400
committerWill Deacon <will.deacon@arm.com>2012-11-09 06:37:25 -0500
commit7279adbd9bb8ef8ff669da50f0e84c65a14022b5 (patch)
tree53a45f7dcb7ccf6e0b7cbb6003fba02c75622aaf
parented6f2a522398c26559f4da23a80aa6195e6284c7 (diff)
ARM: perf: check ARMv7 counter validity on a per-pmu basis
Multi-cluster ARMv7 systems may have CPU PMUs with different number of counters. This patch updates armv7_pmnc_counter_valid so that it takes a pmu argument and checks the counter validity against that. We also remove a number of redundant counter checks where the current PMU is not easily retrievable. Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm/kernel/perf_event_v7.c94
1 file changed, 30 insertions, 64 deletions
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 1183c81087b2..7d0cce85d17e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -736,7 +736,8 @@ static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
736 */ 736 */
737#define ARMV7_IDX_CYCLE_COUNTER 0 737#define ARMV7_IDX_CYCLE_COUNTER 0
738#define ARMV7_IDX_COUNTER0 1 738#define ARMV7_IDX_COUNTER0 1
739#define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) 739#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
740 (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
740 741
741#define ARMV7_MAX_COUNTERS 32 742#define ARMV7_MAX_COUNTERS 32
742#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) 743#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
@@ -802,38 +803,20 @@ static inline int armv7_pmnc_has_overflowed(u32 pmnc)
802 return pmnc & ARMV7_OVERFLOWED_MASK; 803 return pmnc & ARMV7_OVERFLOWED_MASK;
803} 804}
804 805
805static inline int armv7_pmnc_counter_valid(int idx) 806static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
806{ 807{
807 return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST; 808 return idx >= ARMV7_IDX_CYCLE_COUNTER &&
809 idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
808} 810}
809 811
810static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) 812static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
811{ 813{
812 int ret = 0; 814 return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
813 u32 counter;
814
815 if (!armv7_pmnc_counter_valid(idx)) {
816 pr_err("CPU%u checking wrong counter %d overflow status\n",
817 smp_processor_id(), idx);
818 } else {
819 counter = ARMV7_IDX_TO_COUNTER(idx);
820 ret = pmnc & BIT(counter);
821 }
822
823 return ret;
824} 815}
825 816
826static inline int armv7_pmnc_select_counter(int idx) 817static inline int armv7_pmnc_select_counter(int idx)
827{ 818{
828 u32 counter; 819 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
829
830 if (!armv7_pmnc_counter_valid(idx)) {
831 pr_err("CPU%u selecting wrong PMNC counter %d\n",
832 smp_processor_id(), idx);
833 return -EINVAL;
834 }
835
836 counter = ARMV7_IDX_TO_COUNTER(idx);
837 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); 820 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
838 isb(); 821 isb();
839 822
@@ -842,11 +825,12 @@ static inline int armv7_pmnc_select_counter(int idx)
842 825
843static inline u32 armv7pmu_read_counter(struct perf_event *event) 826static inline u32 armv7pmu_read_counter(struct perf_event *event)
844{ 827{
828 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
845 struct hw_perf_event *hwc = &event->hw; 829 struct hw_perf_event *hwc = &event->hw;
846 int idx = hwc->idx; 830 int idx = hwc->idx;
847 u32 value = 0; 831 u32 value = 0;
848 832
849 if (!armv7_pmnc_counter_valid(idx)) 833 if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
850 pr_err("CPU%u reading wrong counter %d\n", 834 pr_err("CPU%u reading wrong counter %d\n",
851 smp_processor_id(), idx); 835 smp_processor_id(), idx);
852 else if (idx == ARMV7_IDX_CYCLE_COUNTER) 836 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
@@ -859,10 +843,11 @@ static inline u32 armv7pmu_read_counter(struct perf_event *event)
859 843
860static inline void armv7pmu_write_counter(struct perf_event *event, u32 value) 844static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
861{ 845{
846 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
862 struct hw_perf_event *hwc = &event->hw; 847 struct hw_perf_event *hwc = &event->hw;
863 int idx = hwc->idx; 848 int idx = hwc->idx;
864 849
865 if (!armv7_pmnc_counter_valid(idx)) 850 if (!armv7_pmnc_counter_valid(cpu_pmu, idx))
866 pr_err("CPU%u writing wrong counter %d\n", 851 pr_err("CPU%u writing wrong counter %d\n",
867 smp_processor_id(), idx); 852 smp_processor_id(), idx);
868 else if (idx == ARMV7_IDX_CYCLE_COUNTER) 853 else if (idx == ARMV7_IDX_CYCLE_COUNTER)
@@ -881,60 +866,28 @@ static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
881 866
882static inline int armv7_pmnc_enable_counter(int idx) 867static inline int armv7_pmnc_enable_counter(int idx)
883{ 868{
884 u32 counter; 869 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
885
886 if (!armv7_pmnc_counter_valid(idx)) {
887 pr_err("CPU%u enabling wrong PMNC counter %d\n",
888 smp_processor_id(), idx);
889 return -EINVAL;
890 }
891
892 counter = ARMV7_IDX_TO_COUNTER(idx);
893 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); 870 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
894 return idx; 871 return idx;
895} 872}
896 873
897static inline int armv7_pmnc_disable_counter(int idx) 874static inline int armv7_pmnc_disable_counter(int idx)
898{ 875{
899 u32 counter; 876 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
900
901 if (!armv7_pmnc_counter_valid(idx)) {
902 pr_err("CPU%u disabling wrong PMNC counter %d\n",
903 smp_processor_id(), idx);
904 return -EINVAL;
905 }
906
907 counter = ARMV7_IDX_TO_COUNTER(idx);
908 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); 877 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
909 return idx; 878 return idx;
910} 879}
911 880
912static inline int armv7_pmnc_enable_intens(int idx) 881static inline int armv7_pmnc_enable_intens(int idx)
913{ 882{
914 u32 counter; 883 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
915
916 if (!armv7_pmnc_counter_valid(idx)) {
917 pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
918 smp_processor_id(), idx);
919 return -EINVAL;
920 }
921
922 counter = ARMV7_IDX_TO_COUNTER(idx);
923 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); 884 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
924 return idx; 885 return idx;
925} 886}
926 887
927static inline int armv7_pmnc_disable_intens(int idx) 888static inline int armv7_pmnc_disable_intens(int idx)
928{ 889{
929 u32 counter; 890 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
930
931 if (!armv7_pmnc_counter_valid(idx)) {
932 pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
933 smp_processor_id(), idx);
934 return -EINVAL;
935 }
936
937 counter = ARMV7_IDX_TO_COUNTER(idx);
938 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); 891 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
939 isb(); 892 isb();
940 /* Clear the overflow flag in case an interrupt is pending. */ 893 /* Clear the overflow flag in case an interrupt is pending. */
@@ -959,7 +912,7 @@ static inline u32 armv7_pmnc_getreset_flags(void)
959} 912}
960 913
961#ifdef DEBUG 914#ifdef DEBUG
962static void armv7_pmnc_dump_regs(void) 915static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
963{ 916{
964 u32 val; 917 u32 val;
965 unsigned int cnt; 918 unsigned int cnt;
@@ -984,7 +937,8 @@ static void armv7_pmnc_dump_regs(void)
984 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); 937 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
985 printk(KERN_INFO "CCNT =0x%08x\n", val); 938 printk(KERN_INFO "CCNT =0x%08x\n", val);
986 939
987 for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) { 940 for (cnt = ARMV7_IDX_COUNTER0;
941 cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
988 armv7_pmnc_select_counter(cnt); 942 armv7_pmnc_select_counter(cnt);
989 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); 943 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
990 printk(KERN_INFO "CNT[%d] count =0x%08x\n", 944 printk(KERN_INFO "CNT[%d] count =0x%08x\n",
@@ -1004,6 +958,12 @@ static void armv7pmu_enable_event(struct perf_event *event)
1004 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 958 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1005 int idx = hwc->idx; 959 int idx = hwc->idx;
1006 960
961 if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
962 pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
963 smp_processor_id(), idx);
964 return;
965 }
966
1007 /* 967 /*
1008 * Enable counter and interrupt, and set the counter to count 968 * Enable counter and interrupt, and set the counter to count
1009 * the event that we're interested in. 969 * the event that we're interested in.
@@ -1044,6 +1004,12 @@ static void armv7pmu_disable_event(struct perf_event *event)
1044 struct pmu_hw_events *events = cpu_pmu->get_hw_events(); 1004 struct pmu_hw_events *events = cpu_pmu->get_hw_events();
1045 int idx = hwc->idx; 1005 int idx = hwc->idx;
1046 1006
1007 if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
1008 pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
1009 smp_processor_id(), idx);
1010 return;
1011 }
1012
1047 /* 1013 /*
1048 * Disable counter and interrupt 1014 * Disable counter and interrupt
1049 */ 1015 */