aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMark Rutland <mark.rutland@arm.com>2017-04-11 04:39:56 -0400
committerWill Deacon <will.deacon@arm.com>2017-04-11 11:29:54 -0400
commitf1b36dcb5c316c276ca6faedc50d89d97f90d960 (patch)
tree504b808f89e3af152bbd6a8434978952cb459966
parent45736a72fb79b204c1fbdb08a1e1a2aa52c7281a (diff)
arm64: pmuv3: handle !PMUv3 when probing
When probing via ACPI, we won't know up-front whether a CPU has a PMUv3 compatible PMU. Thus we need to consult ID registers during probe time. This patch updates our PMUv3 probing code to test for the presence of PMUv3 functionality before touching any PMUv3-specific registers, and before updating the struct arm_pmu with PMUv3 data. When a PMUv3-compatible PMU is not present, probing will return -ENODEV. Signed-off-by: Mark Rutland <mark.rutland@arm.com> Cc: Will Deacon <will.deacon@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm64/kernel/perf_event.c87
1 file changed, 71 insertions, 16 deletions
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 57ae9d9ed9bb..53f235465fc4 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -957,11 +957,26 @@ static int armv8_vulcan_map_event(struct perf_event *event)
957 ARMV8_PMU_EVTYPE_EVENT); 957 ARMV8_PMU_EVTYPE_EVENT);
958} 958}
959 959
960struct armv8pmu_probe_info {
961 struct arm_pmu *pmu;
962 bool present;
963};
964
960static void __armv8pmu_probe_pmu(void *info) 965static void __armv8pmu_probe_pmu(void *info)
961{ 966{
962 struct arm_pmu *cpu_pmu = info; 967 struct armv8pmu_probe_info *probe = info;
968 struct arm_pmu *cpu_pmu = probe->pmu;
969 u64 dfr0, pmuver;
963 u32 pmceid[2]; 970 u32 pmceid[2];
964 971
972 dfr0 = read_sysreg(id_aa64dfr0_el1);
973 pmuver = cpuid_feature_extract_unsigned_field(dfr0,
974 ID_AA64DFR0_PMUVER_SHIFT);
975 if (pmuver != 1)
976 return;
977
978 probe->present = true;
979
965 /* Read the nb of CNTx counters supported from PMNC */ 980 /* Read the nb of CNTx counters supported from PMNC */
966 cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT) 981 cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
967 & ARMV8_PMU_PMCR_N_MASK; 982 & ARMV8_PMU_PMCR_N_MASK;
@@ -979,13 +994,27 @@ static void __armv8pmu_probe_pmu(void *info)
979 994
980static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu) 995static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
981{ 996{
982 return smp_call_function_any(&cpu_pmu->supported_cpus, 997 struct armv8pmu_probe_info probe = {
998 .pmu = cpu_pmu,
999 .present = false,
1000 };
1001 int ret;
1002
1003 ret = smp_call_function_any(&cpu_pmu->supported_cpus,
983 __armv8pmu_probe_pmu, 1004 __armv8pmu_probe_pmu,
984 cpu_pmu, 1); 1005 &probe, 1);
1006 if (ret)
1007 return ret;
1008
1009 return probe.present ? 0 : -ENODEV;
985} 1010}
986 1011
987static void armv8_pmu_init(struct arm_pmu *cpu_pmu) 1012static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
988{ 1013{
1014 int ret = armv8pmu_probe_pmu(cpu_pmu);
1015 if (ret)
1016 return ret;
1017
989 cpu_pmu->handle_irq = armv8pmu_handle_irq, 1018 cpu_pmu->handle_irq = armv8pmu_handle_irq,
990 cpu_pmu->enable = armv8pmu_enable_event, 1019 cpu_pmu->enable = armv8pmu_enable_event,
991 cpu_pmu->disable = armv8pmu_disable_event, 1020 cpu_pmu->disable = armv8pmu_disable_event,
@@ -997,78 +1026,104 @@ static void armv8_pmu_init(struct arm_pmu *cpu_pmu)
997 cpu_pmu->reset = armv8pmu_reset, 1026 cpu_pmu->reset = armv8pmu_reset,
998 cpu_pmu->max_period = (1LLU << 32) - 1, 1027 cpu_pmu->max_period = (1LLU << 32) - 1,
999 cpu_pmu->set_event_filter = armv8pmu_set_event_filter; 1028 cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
1029
1030 return 0;
1000} 1031}
1001 1032
1002static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu) 1033static int armv8_pmuv3_init(struct arm_pmu *cpu_pmu)
1003{ 1034{
1004 armv8_pmu_init(cpu_pmu); 1035 int ret = armv8_pmu_init(cpu_pmu);
1036 if (ret)
1037 return ret;
1038
1005 cpu_pmu->name = "armv8_pmuv3"; 1039 cpu_pmu->name = "armv8_pmuv3";
1006 cpu_pmu->map_event = armv8_pmuv3_map_event; 1040 cpu_pmu->map_event = armv8_pmuv3_map_event;
1007 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = 1041 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1008 &armv8_pmuv3_events_attr_group; 1042 &armv8_pmuv3_events_attr_group;
1009 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = 1043 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1010 &armv8_pmuv3_format_attr_group; 1044 &armv8_pmuv3_format_attr_group;
1011 return armv8pmu_probe_pmu(cpu_pmu); 1045
1046 return 0;
1012} 1047}
1013 1048
1014static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu) 1049static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
1015{ 1050{
1016 armv8_pmu_init(cpu_pmu); 1051 int ret = armv8_pmu_init(cpu_pmu);
1052 if (ret)
1053 return ret;
1054
1017 cpu_pmu->name = "armv8_cortex_a53"; 1055 cpu_pmu->name = "armv8_cortex_a53";
1018 cpu_pmu->map_event = armv8_a53_map_event; 1056 cpu_pmu->map_event = armv8_a53_map_event;
1019 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = 1057 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1020 &armv8_pmuv3_events_attr_group; 1058 &armv8_pmuv3_events_attr_group;
1021 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = 1059 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1022 &armv8_pmuv3_format_attr_group; 1060 &armv8_pmuv3_format_attr_group;
1023 return armv8pmu_probe_pmu(cpu_pmu); 1061
1062 return 0;
1024} 1063}
1025 1064
1026static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu) 1065static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
1027{ 1066{
1028 armv8_pmu_init(cpu_pmu); 1067 int ret = armv8_pmu_init(cpu_pmu);
1068 if (ret)
1069 return ret;
1070
1029 cpu_pmu->name = "armv8_cortex_a57"; 1071 cpu_pmu->name = "armv8_cortex_a57";
1030 cpu_pmu->map_event = armv8_a57_map_event; 1072 cpu_pmu->map_event = armv8_a57_map_event;
1031 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = 1073 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1032 &armv8_pmuv3_events_attr_group; 1074 &armv8_pmuv3_events_attr_group;
1033 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = 1075 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1034 &armv8_pmuv3_format_attr_group; 1076 &armv8_pmuv3_format_attr_group;
1035 return armv8pmu_probe_pmu(cpu_pmu); 1077
1078 return 0;
1036} 1079}
1037 1080
1038static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu) 1081static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
1039{ 1082{
1040 armv8_pmu_init(cpu_pmu); 1083 int ret = armv8_pmu_init(cpu_pmu);
1084 if (ret)
1085 return ret;
1086
1041 cpu_pmu->name = "armv8_cortex_a72"; 1087 cpu_pmu->name = "armv8_cortex_a72";
1042 cpu_pmu->map_event = armv8_a57_map_event; 1088 cpu_pmu->map_event = armv8_a57_map_event;
1043 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = 1089 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1044 &armv8_pmuv3_events_attr_group; 1090 &armv8_pmuv3_events_attr_group;
1045 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = 1091 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1046 &armv8_pmuv3_format_attr_group; 1092 &armv8_pmuv3_format_attr_group;
1047 return armv8pmu_probe_pmu(cpu_pmu); 1093
1094 return 0;
1048} 1095}
1049 1096
1050static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu) 1097static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
1051{ 1098{
1052 armv8_pmu_init(cpu_pmu); 1099 int ret = armv8_pmu_init(cpu_pmu);
1100 if (ret)
1101 return ret;
1102
1053 cpu_pmu->name = "armv8_cavium_thunder"; 1103 cpu_pmu->name = "armv8_cavium_thunder";
1054 cpu_pmu->map_event = armv8_thunder_map_event; 1104 cpu_pmu->map_event = armv8_thunder_map_event;
1055 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = 1105 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1056 &armv8_pmuv3_events_attr_group; 1106 &armv8_pmuv3_events_attr_group;
1057 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = 1107 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1058 &armv8_pmuv3_format_attr_group; 1108 &armv8_pmuv3_format_attr_group;
1059 return armv8pmu_probe_pmu(cpu_pmu); 1109
1110 return 0;
1060} 1111}
1061 1112
1062static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu) 1113static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
1063{ 1114{
1064 armv8_pmu_init(cpu_pmu); 1115 int ret = armv8_pmu_init(cpu_pmu);
1116 if (ret)
1117 return ret;
1118
1065 cpu_pmu->name = "armv8_brcm_vulcan"; 1119 cpu_pmu->name = "armv8_brcm_vulcan";
1066 cpu_pmu->map_event = armv8_vulcan_map_event; 1120 cpu_pmu->map_event = armv8_vulcan_map_event;
1067 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = 1121 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] =
1068 &armv8_pmuv3_events_attr_group; 1122 &armv8_pmuv3_events_attr_group;
1069 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = 1123 cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] =
1070 &armv8_pmuv3_format_attr_group; 1124 &armv8_pmuv3_format_attr_group;
1071 return armv8pmu_probe_pmu(cpu_pmu); 1125
1126 return 0;
1072} 1127}
1073 1128
1074static const struct of_device_id armv8_pmu_of_device_ids[] = { 1129static const struct of_device_id armv8_pmu_of_device_ids[] = {