 arch/arm/kernel/perf_event.c | 131 ++++++++++++-----------------------
 1 file changed, 38 insertions(+), 93 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 07a50357492a..c49e1701a2f6 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -84,14 +84,17 @@ struct arm_pmu {
 	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
 	void		(*enable)(struct hw_perf_event *evt, int idx);
 	void		(*disable)(struct hw_perf_event *evt, int idx);
-	int		(*event_map)(int evt);
-	u64		(*raw_event)(u64);
 	int		(*get_event_idx)(struct cpu_hw_events *cpuc,
 					 struct hw_perf_event *hwc);
 	u32		(*read_counter)(int idx);
 	void		(*write_counter)(int idx, u32 val);
 	void		(*start)(void);
 	void		(*stop)(void);
+	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
+				    [PERF_COUNT_HW_CACHE_OP_MAX]
+				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
+	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
+	u32		raw_event_mask;
 	int		num_events;
 	u64		max_period;
 };
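This hunk is the heart of the patch: the per-PMU event_map() and raw_event() callbacks become plain data. cache_map and event_map are pointers to whole const arrays, so each PMU implementation can simply point at its existing lookup tables, and raw_event_mask captures the one thing every removed raw_event() callback actually did (mask config down to the supported bits). The pointer-to-array declarator is easy to misread, so here is a minimal standalone sketch of the same idiom; the names and sizes are illustrative, not the kernel's:

    #include <stdio.h>

    #define DEMO_HW_MAX 4

    static const unsigned demo_map[DEMO_HW_MAX] = { 7, 8, 9, 10 };

    /* Pointer to the entire array, analogous to arm_pmu::event_map. */
    static const unsigned (*event_map)[DEMO_HW_MAX] = &demo_map;

    int main(void)
    {
        /* Dereference the pointer to reach the array, then index it. */
        printf("%u\n", (*event_map)[2]);  /* prints 9 */
        return 0;
    }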
@@ -136,10 +139,6 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
-static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
-				     [PERF_COUNT_HW_CACHE_OP_MAX]
-				     [PERF_COUNT_HW_CACHE_RESULT_MAX];
-
 static int
 armpmu_map_cache_event(u64 config)
 {
@@ -157,7 +156,7 @@ armpmu_map_cache_event(u64 config)
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
 
-	ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result];
+	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
 
 	if (ret == CACHE_OP_UNSUPPORTED)
 		return -ENOENT;
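With the writable global gone (previous hunk), the cache lookup now dereferences the const table owned by whichever PMU was probed. The behaviour is unchanged, but the per-CPU tables can stay in read-only data and no init-time copy is needed.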
@@ -166,6 +165,19 @@ armpmu_map_cache_event(u64 config)
 }
 
 static int
+armpmu_map_event(u64 config)
+{
+	int mapping = (*armpmu->event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+}
+
+static int
+armpmu_map_raw_event(u64 config)
+{
+	return (int)(config & armpmu->raw_event_mask);
+}
+
+static int
 armpmu_event_set_period(struct perf_event *event,
 			struct hw_perf_event *hwc,
 			int idx)
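These two generic helpers replace the eight nearly identical per-PMU functions removed later in the patch. Every removed raw_event() callback reduced to config & 0xff, which is why a single mask field suffices. A standalone sketch of the masking behaviour (illustrative, not kernel code):

    #include <assert.h>
    #include <stdint.h>

    static const uint32_t raw_event_mask = 0xFF;

    static int map_raw_event(uint64_t config)
    {
        /* Keep only the bits the PMU hardware actually decodes. */
        return (int)(config & raw_event_mask);
    }

    int main(void)
    {
        assert(map_raw_event(0x1234) == 0x34);  /* high bits are dropped */
        return 0;
    }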
@@ -458,11 +470,11 @@ __hw_perf_event_init(struct perf_event *event)
 
 	/* Decode the generic type into an ARM event identifier. */
 	if (PERF_TYPE_HARDWARE == event->attr.type) {
-		mapping = armpmu->event_map(event->attr.config);
+		mapping = armpmu_map_event(event->attr.config);
 	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
 		mapping = armpmu_map_cache_event(event->attr.config);
 	} else if (PERF_TYPE_RAW == event->attr.type) {
-		mapping = armpmu->raw_event(event->attr.config);
+		mapping = armpmu_map_raw_event(event->attr.config);
 	} else {
 		pr_debug("event type %x not supported\n", event->attr.type);
 		return -EOPNOTSUPP;
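Event-type dispatch in __hw_perf_event_init() now funnels through the shared helpers, so the indirect event_map()/raw_event() calls disappear; the only per-PMU variation left is the data those helpers consult.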
@@ -1121,30 +1133,6 @@ armv6pmu_stop(void)
 	spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
-static inline int
-armv6pmu_event_map(int config)
-{
-	int mapping = armv6_perf_map[config];
-	if (HW_OP_UNSUPPORTED == mapping)
-		mapping = -EOPNOTSUPP;
-	return mapping;
-}
-
-static inline int
-armv6mpcore_pmu_event_map(int config)
-{
-	int mapping = armv6mpcore_perf_map[config];
-	if (HW_OP_UNSUPPORTED == mapping)
-		mapping = -EOPNOTSUPP;
-	return mapping;
-}
-
-static u64
-armv6pmu_raw_event(u64 config)
-{
-	return config & 0xff;
-}
-
 static int
 armv6pmu_get_event_idx(struct cpu_hw_events *cpuc,
 		       struct hw_perf_event *event)
@@ -1240,13 +1228,14 @@ static const struct arm_pmu armv6pmu = {
 	.handle_irq	= armv6pmu_handle_irq,
 	.enable		= armv6pmu_enable_event,
 	.disable	= armv6pmu_disable_event,
-	.event_map	= armv6pmu_event_map,
-	.raw_event	= armv6pmu_raw_event,
 	.read_counter	= armv6pmu_read_counter,
 	.write_counter	= armv6pmu_write_counter,
 	.get_event_idx	= armv6pmu_get_event_idx,
 	.start		= armv6pmu_start,
 	.stop		= armv6pmu_stop,
+	.cache_map	= &armv6_perf_cache_map,
+	.event_map	= &armv6_perf_map,
+	.raw_event_mask	= 0xFF,
 	.num_events	= 3,
 	.max_period	= (1LLU << 32) - 1,
 };
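In the PMU descriptors the callbacks become plain data. Taking the address of each table (&armv6_perf_cache_map, &armv6_perf_map) yields exactly the pointer-to-array types declared in struct arm_pmu, so the initializers type-check without casts; the same three-line substitution repeats for every descriptor below.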
@@ -1263,13 +1252,14 @@ static const struct arm_pmu armv6mpcore_pmu = {
 	.handle_irq	= armv6pmu_handle_irq,
 	.enable		= armv6pmu_enable_event,
 	.disable	= armv6mpcore_pmu_disable_event,
-	.event_map	= armv6mpcore_pmu_event_map,
-	.raw_event	= armv6pmu_raw_event,
 	.read_counter	= armv6pmu_read_counter,
 	.write_counter	= armv6pmu_write_counter,
 	.get_event_idx	= armv6pmu_get_event_idx,
 	.start		= armv6pmu_start,
 	.stop		= armv6pmu_stop,
+	.cache_map	= &armv6mpcore_perf_cache_map,
+	.event_map	= &armv6mpcore_perf_map,
+	.raw_event_mask	= 0xFF,
 	.num_events	= 3,
 	.max_period	= (1LLU << 32) - 1,
 };
@@ -2093,27 +2083,6 @@ static void armv7pmu_stop(void)
 	spin_unlock_irqrestore(&pmu_lock, flags);
 }
 
-static inline int armv7_a8_pmu_event_map(int config)
-{
-	int mapping = armv7_a8_perf_map[config];
-	if (HW_OP_UNSUPPORTED == mapping)
-		mapping = -EOPNOTSUPP;
-	return mapping;
-}
-
-static inline int armv7_a9_pmu_event_map(int config)
-{
-	int mapping = armv7_a9_perf_map[config];
-	if (HW_OP_UNSUPPORTED == mapping)
-		mapping = -EOPNOTSUPP;
-	return mapping;
-}
-
-static u64 armv7pmu_raw_event(u64 config)
-{
-	return config & 0xff;
-}
-
 static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
 				  struct hw_perf_event *event)
 {
@@ -2144,12 +2113,12 @@ static struct arm_pmu armv7pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 	.enable			= armv7pmu_enable_event,
 	.disable		= armv7pmu_disable_event,
-	.raw_event		= armv7pmu_raw_event,
 	.read_counter		= armv7pmu_read_counter,
 	.write_counter		= armv7pmu_write_counter,
 	.get_event_idx		= armv7pmu_get_event_idx,
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
+	.raw_event_mask		= 0xFF,
 	.max_period		= (1LLU << 32) - 1,
 };
 
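Unlike the other descriptors, armv7pmu carries no .cache_map or .event_map initializer and is not declared const: Cortex-A8 and Cortex-A9 share this structure, and the probe code at the end of the patch fills in the core-specific maps at runtime.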
@@ -2318,21 +2287,6 @@ static const unsigned xscale_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 #define	XSCALE_PMU_RESET	(CCNT_RESET | PMN_RESET)
 #define XSCALE_PMU_CNT64	0x008
 
-static inline int
-xscalepmu_event_map(int config)
-{
-	int mapping = xscale_perf_map[config];
-	if (HW_OP_UNSUPPORTED == mapping)
-		mapping = -EOPNOTSUPP;
-	return mapping;
-}
-
-static u64
-xscalepmu_raw_event(u64 config)
-{
-	return config & 0xff;
-}
-
 #define XSCALE1_OVERFLOWED_MASK	0x700
 #define XSCALE1_CCOUNT_OVERFLOW	0x400
 #define XSCALE1_COUNT0_OVERFLOW	0x100
@@ -2598,13 +2552,14 @@ static const struct arm_pmu xscale1pmu = {
 	.handle_irq	= xscale1pmu_handle_irq,
 	.enable		= xscale1pmu_enable_event,
 	.disable	= xscale1pmu_disable_event,
-	.event_map	= xscalepmu_event_map,
-	.raw_event	= xscalepmu_raw_event,
 	.read_counter	= xscale1pmu_read_counter,
 	.write_counter	= xscale1pmu_write_counter,
 	.get_event_idx	= xscale1pmu_get_event_idx,
 	.start		= xscale1pmu_start,
 	.stop		= xscale1pmu_stop,
+	.cache_map	= &xscale_perf_cache_map,
+	.event_map	= &xscale_perf_map,
+	.raw_event_mask	= 0xFF,
 	.num_events	= 3,
 	.max_period	= (1LLU << 32) - 1,
 };
@@ -2953,13 +2908,14 @@ static const struct arm_pmu xscale2pmu = {
 	.handle_irq	= xscale2pmu_handle_irq,
 	.enable		= xscale2pmu_enable_event,
 	.disable	= xscale2pmu_disable_event,
-	.event_map	= xscalepmu_event_map,
-	.raw_event	= xscalepmu_raw_event,
 	.read_counter	= xscale2pmu_read_counter,
 	.write_counter	= xscale2pmu_write_counter,
 	.get_event_idx	= xscale2pmu_get_event_idx,
 	.start		= xscale2pmu_start,
 	.stop		= xscale2pmu_stop,
+	.cache_map	= &xscale_perf_cache_map,
+	.event_map	= &xscale_perf_map,
+	.raw_event_mask	= 0xFF,
 	.num_events	= 5,
 	.max_period	= (1LLU << 32) - 1,
 };
@@ -2978,20 +2934,14 @@ init_hw_perf_events(void)
 		case 0xB560:	/* ARM1156 */
 		case 0xB760:	/* ARM1176 */
 			armpmu = &armv6pmu;
-			memcpy(armpmu_perf_cache_map, armv6_perf_cache_map,
-			       sizeof(armv6_perf_cache_map));
 			break;
 		case 0xB020:	/* ARM11mpcore */
 			armpmu = &armv6mpcore_pmu;
-			memcpy(armpmu_perf_cache_map,
-			       armv6mpcore_perf_cache_map,
-			       sizeof(armv6mpcore_perf_cache_map));
 			break;
 		case 0xC080:	/* Cortex-A8 */
 			armv7pmu.id = ARM_PERF_PMU_ID_CA8;
-			memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map,
-			       sizeof(armv7_a8_perf_cache_map));
-			armv7pmu.event_map = armv7_a8_pmu_event_map;
+			armv7pmu.cache_map = &armv7_a8_perf_cache_map;
+			armv7pmu.event_map = &armv7_a8_perf_map;
 			armpmu = &armv7pmu;
 
 			/* Reset PMNC and read the nb of CNTx counters
@@ -3000,9 +2950,8 @@ init_hw_perf_events(void)
 			break;
 		case 0xC090:	/* Cortex-A9 */
 			armv7pmu.id = ARM_PERF_PMU_ID_CA9;
-			memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map,
-			       sizeof(armv7_a9_perf_cache_map));
-			armv7pmu.event_map = armv7_a9_pmu_event_map;
+			armv7pmu.cache_map = &armv7_a9_perf_cache_map;
+			armv7pmu.event_map = &armv7_a9_perf_map;
 			armpmu = &armv7pmu;
 
 			/* Reset PMNC and read the nb of CNTx counters
@@ -3016,13 +2965,9 @@ init_hw_perf_events(void)
 		switch (part_number) {
 		case 1:
 			armpmu = &xscale1pmu;
-			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
-			       sizeof(xscale_perf_cache_map));
 			break;
 		case 2:
 			armpmu = &xscale2pmu;
-			memcpy(armpmu_perf_cache_map, xscale_perf_cache_map,
-			       sizeof(xscale_perf_cache_map));
 			break;
 		}
 	}
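Finally, the probe code sheds its memcpy() calls. Previously every supported core copied its cache map into the writable global armpmu_perf_cache_map; now the static descriptors already point at their tables, and only the shared ARMv7 descriptor needs two pointer assignments per core. A hypothetical, condensed sketch of the resulting pattern; the names and table contents below are illustrative only:

    #include <stdio.h>

    /* Probing selects const tables by pointer instead of copying
     * them into a global array. */
    struct pmu_desc {
        const unsigned (*event_map)[4];
    };

    static const unsigned a8_map[4] = { 0, 1, 2, 3 };
    static const unsigned a9_map[4] = { 0, 1, 4, 5 };

    static struct pmu_desc v7pmu;   /* non-const: fixed up at probe time */

    static void probe(int is_a9)
    {
        v7pmu.event_map = is_a9 ? &a9_map : &a8_map;
    }

    int main(void)
    {
        probe(1);                               /* pretend we found an A9 */
        printf("%u\n", (*v7pmu.event_map)[2]);  /* prints 4 */
        return 0;
    }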
