-rw-r--r--  arch/arm/kernel/perf_event.c         67
-rw-r--r--  arch/arm/kernel/perf_event_v6.c      21
-rw-r--r--  arch/arm/kernel/perf_event_v7.c      37
-rw-r--r--  arch/arm/kernel/perf_event_xscale.c  14
4 files changed, 87 insertions, 52 deletions
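
In short: the per-PMU mapping tables (event_map, cache_map, raw_event_mask) move out of struct arm_pmu and behind a single map_event() callback, with the shared decode logic factored into a map_cpu_event() helper. The sketch below is a stand-alone, user-space model of that dispatch, using simplified stand-ins for the kernel types (the cache path is omitted for brevity); it is illustrative only, not the kernel code:

/* Build with: cc -o map-demo map-demo.c */
#include <errno.h>
#include <stdio.h>

#define HW_OP_UNSUPPORTED	0xFFFF
#define PERF_COUNT_HW_MAX	10

enum perf_type { PERF_TYPE_HARDWARE, PERF_TYPE_HW_CACHE, PERF_TYPE_RAW };

/* Stand-in for the kernel's struct perf_event / perf_event_attr. */
struct perf_event {
	enum perf_type type;
	unsigned long long config;
};

/* Mirrors armpmu_map_event(): table lookup, -ENOENT if unsupported. */
static int map_hw_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX],
			unsigned long long config)
{
	int mapping = (*event_map)[config];
	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
}

/* Mirrors armpmu_map_raw_event(): mask the raw event number. */
static int map_raw_event(unsigned raw_event_mask, unsigned long long config)
{
	return (int)(config & raw_event_mask);
}

/* Per-PMU table, now private to the backend rather than pointed at
 * from struct arm_pmu. */
static const unsigned demo_perf_map[PERF_COUNT_HW_MAX] = {
	[0] = 0x11,			/* cycles: made-up event number */
	[1] = HW_OP_UNSUPPORTED,	/* instructions: not counted here */
};

/* The map_event callback a backend would hang off struct arm_pmu. */
static int demo_map_event(struct perf_event *event)
{
	switch (event->type) {
	case PERF_TYPE_HARDWARE:
		return map_hw_event(&demo_perf_map, event->config);
	case PERF_TYPE_RAW:
		return map_raw_event(0xFF, event->config);
	default:
		return -ENOENT;	/* decline; perf core tries other PMUs */
	}
}

int main(void)
{
	struct perf_event cycles = { PERF_TYPE_HARDWARE, 0 };
	struct perf_event insns  = { PERF_TYPE_HARDWARE, 1 };

	printf("cycles -> %d\n", demo_map_event(&cycles));	/* 0x11 */
	printf("insns  -> %d\n", demo_map_event(&insns));	/* -ENOENT */
	return 0;
}

The -ENOENT convention matters here: armpmu_event_init() now probes map_event() and returns -ENOENT for events it cannot map, which tells the perf core to offer the event to another PMU rather than fail outright (the previous -EOPNOTSUPP did not).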
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 1a2ebbf07fb7..b13bf23ceba3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -75,11 +75,7 @@ struct arm_pmu {
 	void		(*start)(void);
 	void		(*stop)(void);
 	void		(*reset)(void *);
-	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-				    [PERF_COUNT_HW_CACHE_OP_MAX]
-				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
-	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
-	u32		raw_event_mask;
+	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
@@ -129,7 +125,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
 static int
-armpmu_map_cache_event(u64 config)
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
 {
 	unsigned int cache_type, cache_op, cache_result, ret;
 
@@ -145,7 +145,7 @@ armpmu_map_cache_event(u64 config)
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
 
-	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
 
 	if (ret == CACHE_OP_UNSUPPORTED)
 		return -ENOENT;
@@ -154,16 +154,38 @@ armpmu_map_cache_event(u64 config)
 }
 
 static int
-armpmu_map_event(u64 config)
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*armpmu->event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+	int mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
 static int
-armpmu_map_raw_event(u64 config)
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
 {
-	return (int)(config & armpmu->raw_event_mask);
+	return (int)(config & raw_event_mask);
+}
+
+static int map_cpu_event(struct perf_event *event,
+			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+			 const unsigned (*cache_map)
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX],
+			 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
 }
 
 static int
@@ -484,17 +506,7 @@ __hw_perf_event_init(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping, err;
 
-	/* Decode the generic type into an ARM event identifier. */
-	if (PERF_TYPE_HARDWARE == event->attr.type) {
-		mapping = armpmu_map_event(event->attr.config);
-	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
-		mapping = armpmu_map_cache_event(event->attr.config);
-	} else if (PERF_TYPE_RAW == event->attr.type) {
-		mapping = armpmu_map_raw_event(event->attr.config);
-	} else {
-		pr_debug("event type %x not supported\n", event->attr.type);
-		return -EOPNOTSUPP;
-	}
+	mapping = armpmu->map_event(event);
 
 	if (mapping < 0) {
 		pr_debug("event %x:%llx not supported\n", event->attr.type,
@@ -550,15 +562,8 @@ static int armpmu_event_init(struct perf_event *event)
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
-	switch (event->attr.type) {
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-		break;
-
-	default:
+	if (armpmu->map_event(event) == -ENOENT)
 		return -ENOENT;
-	}
 
 	event->destroy = hw_perf_event_destroy;
 
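
A note on the armpmu_map_cache_event() signature above: the config it decodes is the generic perf cache-event encoding, which packs the cache id, the operation and the result into the low three bytes of attr.config. That is standard perf ABI, not something introduced by this patch; a small stand-alone illustration of the packing:

#include <stdio.h>

/* Generic perf cache-event encoding: id | (op << 8) | (result << 16).
 * Values below follow the perf ABI (perf_hw_cache_* enums). */
#define CACHE_L1D		0	/* PERF_COUNT_HW_CACHE_L1D */
#define CACHE_OP_READ		0	/* PERF_COUNT_HW_CACHE_OP_READ */
#define CACHE_RESULT_MISS	1	/* PERF_COUNT_HW_CACHE_RESULT_MISS */

int main(void)
{
	unsigned long long config =
		CACHE_L1D | (CACHE_OP_READ << 8) | (CACHE_RESULT_MISS << 16);

	/* The same unpacking armpmu_map_cache_event() performs before
	 * indexing cache_map[type][op][result]. */
	unsigned type   = config & 0xff;
	unsigned op     = (config >> 8) & 0xff;
	unsigned result = (config >> 16) & 0xff;

	printf("L1D read miss: config=0x%llx -> [%u][%u][%u]\n",
	       config, type, op, result);
	return 0;
}
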
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index 68cf70425f2f..a4c5aa9baa44 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -657,6 +657,12 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
 	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
 }
 
+static int armv6_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6_perf_map,
+				&armv6_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv6pmu = {
 	.id			= ARM_PERF_PMU_ID_V6,
 	.name			= "v6",
@@ -668,9 +674,7 @@ static struct arm_pmu armv6pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6_perf_cache_map,
-	.event_map		= &armv6_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };
@@ -687,6 +691,13 @@ static struct arm_pmu *__init armv6pmu_init(void)
  * disable the interrupt reporting and update the event. When unthrottling we
  * reset the period and enable the interrupt reporting.
  */
+
+static int armv6mpcore_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv6mpcore_perf_map,
+				&armv6mpcore_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv6mpcore_pmu = {
 	.id			= ARM_PERF_PMU_ID_V6MP,
 	.name			= "v6mpcore",
@@ -698,9 +709,7 @@ static struct arm_pmu armv6mpcore_pmu = {
 	.get_event_idx		= armv6pmu_get_event_idx,
 	.start			= armv6pmu_start,
 	.stop			= armv6pmu_stop,
-	.cache_map		= &armv6mpcore_perf_cache_map,
-	.event_map		= &armv6mpcore_perf_map,
-	.raw_event_mask		= 0xFF,
+	.map_event		= armv6mpcore_map_event,
 	.num_events		= 3,
 	.max_period		= (1LLU << 32) - 1,
 };
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 68ac522fd940..be7b58a2cc6f 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1140,6 +1140,30 @@ static void armv7pmu_reset(void *info)
 	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
 }
 
+static int armv7_a8_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a8_perf_map,
+				&armv7_a8_perf_cache_map, 0xFF);
+}
+
+static int armv7_a9_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a9_perf_map,
+				&armv7_a9_perf_cache_map, 0xFF);
+}
+
+static int armv7_a5_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a5_perf_map,
+				&armv7_a5_perf_cache_map, 0xFF);
+}
+
+static int armv7_a15_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &armv7_a15_perf_map,
+				&armv7_a15_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu armv7pmu = {
 	.handle_irq		= armv7pmu_handle_irq,
 	.enable			= armv7pmu_enable_event,
@@ -1150,7 +1174,6 @@ static struct arm_pmu armv7pmu = {
 	.start			= armv7pmu_start,
 	.stop			= armv7pmu_stop,
 	.reset			= armv7pmu_reset,
-	.raw_event_mask		= 0xFF,
 	.max_period		= (1LLU << 32) - 1,
 };
 
@@ -1169,8 +1192,7 @@ static struct arm_pmu *__init armv7_a8_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
 	armv7pmu.name		= "ARMv7 Cortex-A8";
-	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a8_perf_map;
+	armv7pmu.map_event	= armv7_a8_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
@@ -1179,8 +1201,7 @@ static struct arm_pmu *__init armv7_a9_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
 	armv7pmu.name		= "ARMv7 Cortex-A9";
-	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a9_perf_map;
+	armv7pmu.map_event	= armv7_a9_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
@@ -1189,8 +1210,7 @@ static struct arm_pmu *__init armv7_a5_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA5;
 	armv7pmu.name		= "ARMv7 Cortex-A5";
-	armv7pmu.cache_map	= &armv7_a5_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a5_perf_map;
+	armv7pmu.map_event	= armv7_a5_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	return &armv7pmu;
 }
@@ -1199,8 +1219,7 @@ static struct arm_pmu *__init armv7_a15_pmu_init(void)
 {
 	armv7pmu.id		= ARM_PERF_PMU_ID_CA15;
 	armv7pmu.name		= "ARMv7 Cortex-A15";
-	armv7pmu.cache_map	= &armv7_a15_perf_cache_map;
-	armv7pmu.event_map	= &armv7_a15_perf_map;
+	armv7pmu.map_event	= armv7_a15_map_event;
 	armv7pmu.num_events	= armv7_read_num_pmnc_events();
 	armv7pmu.set_event_filter = armv7pmu_set_event_filter;
 	return &armv7pmu;
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 18e4823a0a62..d4c7610d25b9 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -425,6 +425,12 @@ xscale1pmu_write_counter(int counter, u32 val)
 	}
 }
 
+static int xscale_map_event(struct perf_event *event)
+{
+	return map_cpu_event(event, &xscale_perf_map,
+				&xscale_perf_cache_map, 0xFF);
+}
+
 static struct arm_pmu xscale1pmu = {
 	.id		= ARM_PERF_PMU_ID_XSCALE1,
 	.name		= "xscale1",
@@ -436,9 +442,7 @@ static struct arm_pmu xscale1pmu = {
 	.get_event_idx	= xscale1pmu_get_event_idx,
 	.start		= xscale1pmu_start,
 	.stop		= xscale1pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 3,
 	.max_period	= (1LLU << 32) - 1,
 };
@@ -799,9 +803,7 @@ static struct arm_pmu xscale2pmu = {
 	.get_event_idx	= xscale2pmu_get_event_idx,
 	.start		= xscale2pmu_start,
 	.stop		= xscale2pmu_stop,
-	.cache_map	= &xscale_perf_cache_map,
-	.event_map	= &xscale_perf_map,
-	.raw_event_mask	= 0xFF,
+	.map_event	= xscale_map_event,
 	.num_events	= 5,
 	.max_period	= (1LLU << 32) - 1,
 };
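
The net effect for backends: wiring up event mapping is now a one-line wrapper around map_cpu_event(), as the v6/v7/xscale hunks above show. A hypothetical new backend would follow the same pattern; the "mycpu" names below are illustrative, not from this patch, and the fragment assumes the usual arrangement where the per-CPU source files are #included into perf_event.c, so the static map_cpu_event() helper is in scope:

/* Illustrative sketch only -- "mycpu" is a made-up backend. */
static const unsigned mycpu_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	= 0x11,	/* made-up event number */
	/* real tables fill every slot; unsupported generic events
	 * hold HW_OP_UNSUPPORTED */
};

static const unsigned mycpu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	/* likewise: unsupported combinations hold CACHE_OP_UNSUPPORTED */
	[0][0][0] = CACHE_OP_UNSUPPORTED,
};

static int mycpu_map_event(struct perf_event *event)
{
	return map_cpu_event(event, &mycpu_perf_map,
				&mycpu_perf_cache_map, 0xFF);
}

static struct arm_pmu mycpu_pmu = {
	.name		= "mycpu",
	.map_event	= mycpu_map_event,
	/* .handle_irq, .enable, .disable, ... as for the other PMUs;
	 * untouched by this patch */
};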