author     Mark Rutland <mark.rutland@arm.com>    2011-04-28 10:47:10 -0400
committer  Will Deacon <will.deacon@arm.com>      2011-08-31 05:50:09 -0400
commit     e1f431b57ef9e4a68281540933fa74865cbb7a74 (patch)
tree       895e018faee0a504c82c02b69a8784433c3a057e /arch/arm/kernel/perf_event.c
parent     7ae18a5717cbbf1879bdd5b66d7009a9958e5aef (diff)
ARM: perf: refactor event mapping
Currently, mapping an event type to a hardware configuration value
depends on the data being pointed to from struct arm_pmu. These fields
(cache_map, event_map, raw_event_mask) are specific to CPU PMUs and do
not serve the general case well.

This patch replaces the event map pointers on struct arm_pmu with a
new 'map_event' function pointer. Small shim functions are used to
reuse the existing common code.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
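The backend half of the refactoring lives in the per-CPU PMU files
(perf_event_v6.c, perf_event_v7.c, perf_event_xscale.c), which this
diffstat does not cover: each backend keeps its event tables private
and exposes only a small shim through the new function pointer. A
minimal sketch of such a shim, assuming illustrative names
(armv6_map_event, armv6_perf_map, armv6_perf_cache_map, and a 0xFF
raw event mask):

static int armv6_map_event(struct perf_event *event)
{
	/* Reuse the common CPU mapping code via the map_cpu_event() shim. */
	return map_cpu_event(event, &armv6_perf_map,
			     &armv6_perf_cache_map, 0xFF);
}

static struct arm_pmu armv6pmu = {
	/* ... enable, disable, start, stop and other callbacks ... */
	.map_event	= armv6_map_event,
};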
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--  arch/arm/kernel/perf_event.c | 67
1 file changed, 36 insertions(+), 31 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 1a2ebbf07fb7..b13bf23ceba3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -75,11 +75,7 @@ struct arm_pmu {
 	void		(*start)(void);
 	void		(*stop)(void);
 	void		(*reset)(void *);
-	const unsigned	(*cache_map)[PERF_COUNT_HW_CACHE_MAX]
-				    [PERF_COUNT_HW_CACHE_OP_MAX]
-				    [PERF_COUNT_HW_CACHE_RESULT_MAX];
-	const unsigned	(*event_map)[PERF_COUNT_HW_MAX];
-	u32		raw_event_mask;
+	int		(*map_event)(struct perf_event *event);
 	int		num_events;
 	atomic_t	active_events;
 	struct mutex	reserve_mutex;
@@ -129,7 +125,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters);
 #define CACHE_OP_UNSUPPORTED		0xFFFF
 
 static int
-armpmu_map_cache_event(u64 config)
+armpmu_map_cache_event(const unsigned (*cache_map)
+				      [PERF_COUNT_HW_CACHE_MAX]
+				      [PERF_COUNT_HW_CACHE_OP_MAX]
+				      [PERF_COUNT_HW_CACHE_RESULT_MAX],
+		       u64 config)
 {
 	unsigned int cache_type, cache_op, cache_result, ret;
 
@@ -145,7 +145,7 @@ armpmu_map_cache_event(u64 config)
 	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
 		return -EINVAL;
 
-	ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];
+	ret = (int)(*cache_map)[cache_type][cache_op][cache_result];
 
 	if (ret == CACHE_OP_UNSUPPORTED)
 		return -ENOENT;
@@ -154,16 +154,38 @@ armpmu_map_cache_event(u64 config)
 }
 
 static int
-armpmu_map_event(u64 config)
+armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config)
 {
-	int mapping = (*armpmu->event_map)[config];
-	return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
+	int mapping = (*event_map)[config];
+	return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping;
 }
 
 static int
-armpmu_map_raw_event(u64 config)
+armpmu_map_raw_event(u32 raw_event_mask, u64 config)
 {
-	return (int)(config & armpmu->raw_event_mask);
+	return (int)(config & raw_event_mask);
+}
+
+static int map_cpu_event(struct perf_event *event,
+			 const unsigned (*event_map)[PERF_COUNT_HW_MAX],
+			 const unsigned (*cache_map)
+					[PERF_COUNT_HW_CACHE_MAX]
+					[PERF_COUNT_HW_CACHE_OP_MAX]
+					[PERF_COUNT_HW_CACHE_RESULT_MAX],
+			 u32 raw_event_mask)
+{
+	u64 config = event->attr.config;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_HARDWARE:
+		return armpmu_map_event(event_map, config);
+	case PERF_TYPE_HW_CACHE:
+		return armpmu_map_cache_event(cache_map, config);
+	case PERF_TYPE_RAW:
+		return armpmu_map_raw_event(raw_event_mask, config);
+	}
+
+	return -ENOENT;
 }
 
 static int
@@ -484,17 +506,7 @@ __hw_perf_event_init(struct perf_event *event)
 	struct hw_perf_event *hwc = &event->hw;
 	int mapping, err;
 
-	/* Decode the generic type into an ARM event identifier. */
-	if (PERF_TYPE_HARDWARE == event->attr.type) {
-		mapping = armpmu_map_event(event->attr.config);
-	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
-		mapping = armpmu_map_cache_event(event->attr.config);
-	} else if (PERF_TYPE_RAW == event->attr.type) {
-		mapping = armpmu_map_raw_event(event->attr.config);
-	} else {
-		pr_debug("event type %x not supported\n", event->attr.type);
-		return -EOPNOTSUPP;
-	}
+	mapping = armpmu->map_event(event);
 
 	if (mapping < 0) {
 		pr_debug("event %x:%llx not supported\n", event->attr.type,
@@ -550,15 +562,8 @@ static int armpmu_event_init(struct perf_event *event)
 	int err = 0;
 	atomic_t *active_events = &armpmu->active_events;
 
-	switch (event->attr.type) {
-	case PERF_TYPE_RAW:
-	case PERF_TYPE_HARDWARE:
-	case PERF_TYPE_HW_CACHE:
-		break;
-
-	default:
-		return -ENOENT;
-	}
+	if (armpmu->map_event(event) == -ENOENT)
+		return -ENOENT;
 
 	event->destroy = hw_perf_event_destroy;
 
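One detail worth calling out in the diff above: armpmu_map_event now
returns -ENOENT rather than -EOPNOTSUPP for unsupported hardware
events. In perf, -ENOENT tells the core that the event does not belong
to this PMU, which is what lets armpmu_event_init reject foreign
events with a single map_event call. It also means a PMU that is not a
CPU PMU can implement map_event without the CPU-style tables at all; a
hypothetical sketch (sys_pmu_map_event and SYS_PMU_EVENT_MASK are
made-up names, not part of this patch):

#define SYS_PMU_EVENT_MASK	0xFF	/* hypothetical raw config mask */

static int sys_pmu_map_event(struct perf_event *event)
{
	/* -ENOENT lets the perf core offer the event to another PMU. */
	if (event->attr.type != PERF_TYPE_RAW)
		return -ENOENT;

	return (int)(event->attr.config & SYS_PMU_EVENT_MASK);
}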