about · summary · refs · log · tree · commit · diff · stats
diff options
context:
space:
mode:
authorWill Deacon <will.deacon@arm.com>2010-11-13 12:37:46 -0500
committerWill Deacon <will.deacon@arm.com>2010-11-25 11:52:07 -0500
commit3cb314bae2191b432a7e898abf865db880f6d07d (patch)
tree2e815f3f7277467c837963064684c5bb1cc4abf7
parent59a98a1e56edea4d7d9c5f4ce9d50e271a04993c (diff)
ARM: perf: add _init() functions to PMUs
In preparation for separating the PMU-specific code, this patch adds
self-contained init functions to each PMU, therefore removing any
PMU-specific knowledge from the PMU-agnostic init_hw_perf_events
function.

Acked-by: Jamie Iles <jamie@jamieiles.com>
Acked-by: Jean Pihet <j-pihet@ti.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
 arch/arm/kernel/perf_event.c | 65 +++++++++++++++++++++++++++++--------------
 1 file changed, 45 insertions(+), 20 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 35319b8e4d4..acc4e91dd30 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -1240,6 +1240,11 @@ static const struct arm_pmu armv6pmu = {
 	.max_period		= (1LLU << 32) - 1,
 };
 
+const struct arm_pmu *__init armv6pmu_init(void)
+{
+	return &armv6pmu;
+}
+
 /*
  * ARMv6mpcore is almost identical to single core ARMv6 with the exception
  * that some of the events have different enumerations and that there is no
@@ -1264,6 +1269,11 @@ static const struct arm_pmu armv6mpcore_pmu = {
 	.max_period		= (1LLU << 32) - 1,
 };
 
+const struct arm_pmu *__init armv6mpcore_pmu_init(void)
+{
+	return &armv6mpcore_pmu;
+}
+
 /*
  * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
  *
@@ -2136,6 +2146,25 @@ static u32 __init armv7_reset_read_pmnc(void)
 	return nb_cnt + 1;
 }
 
+const struct arm_pmu *__init armv7_a8_pmu_init(void)
+{
+	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
+	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
+	armv7pmu.event_map	= &armv7_a8_perf_map;
+	armv7pmu.num_events	= armv7_reset_read_pmnc();
+	return &armv7pmu;
+}
+
+const struct arm_pmu *__init armv7_a9_pmu_init(void)
+{
+	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
+	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
+	armv7pmu.event_map	= &armv7_a9_perf_map;
+	armv7pmu.num_events	= armv7_reset_read_pmnc();
+	return &armv7pmu;
+}
+
+
 /*
  * ARMv5 [xscale] Performance counter handling code.
  *
@@ -2564,6 +2593,11 @@ static const struct arm_pmu xscale1pmu = {
 	.max_period		= (1LLU << 32) - 1,
 };
 
+const struct arm_pmu *__init xscale1pmu_init(void)
+{
+	return &xscale1pmu;
+}
+
 #define XSCALE2_OVERFLOWED_MASK	0x01f
 #define XSCALE2_CCOUNT_OVERFLOW	0x001
 #define XSCALE2_COUNT0_OVERFLOW	0x002
@@ -2920,6 +2954,11 @@ static const struct arm_pmu xscale2pmu = {
 	.max_period		= (1LLU << 32) - 1,
 };
 
+const struct arm_pmu *__init xscale2pmu_init(void)
+{
+	return &xscale2pmu;
+}
+
 static int __init
 init_hw_perf_events(void)
 {
@@ -2933,30 +2972,16 @@ init_hw_perf_events(void)
 		case 0xB360:	/* ARM1136 */
 		case 0xB560:	/* ARM1156 */
 		case 0xB760:	/* ARM1176 */
-			armpmu = &armv6pmu;
+			armpmu = armv6pmu_init();
 			break;
 		case 0xB020:	/* ARM11mpcore */
-			armpmu = &armv6mpcore_pmu;
+			armpmu = armv6mpcore_pmu_init();
 			break;
 		case 0xC080:	/* Cortex-A8 */
-			armv7pmu.id	= ARM_PERF_PMU_ID_CA8;
-			armv7pmu.cache_map = &armv7_a8_perf_cache_map;
-			armv7pmu.event_map = &armv7_a8_perf_map;
-			armpmu = &armv7pmu;
-
-			/* Reset PMNC and read the nb of CNTx counters
-			    supported */
-			armv7pmu.num_events = armv7_reset_read_pmnc();
+			armpmu = armv7_a8_pmu_init();
 			break;
 		case 0xC090:	/* Cortex-A9 */
-			armv7pmu.id	= ARM_PERF_PMU_ID_CA9;
-			armv7pmu.cache_map = &armv7_a9_perf_cache_map;
-			armv7pmu.event_map = &armv7_a9_perf_map;
-			armpmu = &armv7pmu;
-
-			/* Reset PMNC and read the nb of CNTx counters
-			    supported */
-			armv7pmu.num_events = armv7_reset_read_pmnc();
+			armpmu = armv7_a9_pmu_init();
 			break;
 		}
 	/* Intel CPUs [xscale]. */
@@ -2964,10 +2989,10 @@ init_hw_perf_events(void)
 		part_number = (cpuid >> 13) & 0x7;
 		switch (part_number) {
 		case 1:
-			armpmu = &xscale1pmu;
+			armpmu = xscale1pmu_init();
 			break;
 		case 2:
-			armpmu = &xscale2pmu;
+			armpmu = xscale2pmu_init();
 			break;
 		}
 	}