author     Will Deacon <will.deacon@arm.com>   2011-08-31 05:50:37 -0400
committer  Will Deacon <will.deacon@arm.com>   2011-08-31 05:50:37 -0400
commit     4fb0d2ea397ab207fdecbd88ad0e37b36ce68a62
tree       f0ebe12652b7ad1c2fc5016dfb31b633b7458a3f /arch/arm
parent     d12443363e590461655d4e9ccc31e40ad9078283
parent     7325eaec439cd0cc8c9b61b59d41d99abace1b23
Merge branches 'hwbreak', 'perf/updates' and 'perf/system-pmus' into for-rmk
Diffstat (limited to 'arch/arm')
 arch/arm/include/asm/pmu.h          |  93
 arch/arm/kernel/perf_event.c        | 475
 arch/arm/kernel/perf_event_v6.c     |  87
 arch/arm/kernel/perf_event_v7.c     | 395
 arch/arm/kernel/perf_event_xscale.c |  90
 arch/arm/kernel/pmu.c               | 182
 6 files changed, 638 insertions(+), 684 deletions(-)
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b7e82c4aced6..71d99b83cdb9 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -13,7 +13,12 @@ | |||
13 | #define __ARM_PMU_H__ | 13 | #define __ARM_PMU_H__ |
14 | 14 | ||
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/perf_event.h> | ||
16 | 17 | ||
18 | /* | ||
19 | * Types of PMUs that can be accessed directly and require mutual | ||
20 | * exclusion between profiling tools. | ||
21 | */ | ||
17 | enum arm_pmu_type { | 22 | enum arm_pmu_type { |
18 | ARM_PMU_DEVICE_CPU = 0, | 23 | ARM_PMU_DEVICE_CPU = 0, |
19 | ARM_NUM_PMU_DEVICES, | 24 | ARM_NUM_PMU_DEVICES, |
@@ -37,21 +42,17 @@ struct arm_pmu_platdata { | |||
37 | * reserve_pmu() - reserve the hardware performance counters | 42 | * reserve_pmu() - reserve the hardware performance counters |
38 | * | 43 | * |
39 | * Reserve the hardware performance counters in the system for exclusive use. | 44 | * Reserve the hardware performance counters in the system for exclusive use. |
40 | * The platform_device for the system is returned on success, ERR_PTR() | 45 | * Returns 0 on success or -EBUSY if the lock is already held. |
41 | * encoded error on failure. | ||
42 | */ | 46 | */ |
43 | extern struct platform_device * | 47 | extern int |
44 | reserve_pmu(enum arm_pmu_type type); | 48 | reserve_pmu(enum arm_pmu_type type); |
45 | 49 | ||
46 | /** | 50 | /** |
47 | * release_pmu() - Relinquish control of the performance counters | 51 | * release_pmu() - Relinquish control of the performance counters |
48 | * | 52 | * |
49 | * Release the performance counters and allow someone else to use them. | 53 | * Release the performance counters and allow someone else to use them. |
50 | * Callers must have disabled the counters and released IRQs before calling | ||
51 | * this. The platform_device returned from reserve_pmu() must be passed as | ||
52 | * a cookie. | ||
53 | */ | 54 | */ |
54 | extern int | 55 | extern void |
55 | release_pmu(enum arm_pmu_type type); | 56 | release_pmu(enum arm_pmu_type type); |
56 | 57 | ||
57 | /** | 58 | /** |
@@ -68,24 +69,78 @@ init_pmu(enum arm_pmu_type type); | |||
68 | 69 | ||
69 | #include <linux/err.h> | 70 | #include <linux/err.h> |
70 | 71 | ||
71 | static inline struct platform_device * | ||
72 | reserve_pmu(enum arm_pmu_type type) | ||
73 | { | ||
74 | return ERR_PTR(-ENODEV); | ||
75 | } | ||
76 | |||
77 | static inline int | 72 | static inline int |
78 | release_pmu(enum arm_pmu_type type) | 73 | reserve_pmu(enum arm_pmu_type type) |
79 | { | 74 | { |
80 | return -ENODEV; | 75 | return -ENODEV; |
81 | } | 76 | } |
82 | 77 | ||
83 | static inline int | 78 | static inline void |
84 | init_pmu(enum arm_pmu_type type) | 79 | release_pmu(enum arm_pmu_type type) { } |
85 | { | ||
86 | return -ENODEV; | ||
87 | } | ||
88 | 80 | ||
89 | #endif /* CONFIG_CPU_HAS_PMU */ | 81 | #endif /* CONFIG_CPU_HAS_PMU */ |
90 | 82 | ||
83 | #ifdef CONFIG_HW_PERF_EVENTS | ||
84 | |||
85 | /* The events for a given PMU register set. */ | ||
86 | struct pmu_hw_events { | ||
87 | /* | ||
88 | * The events that are active on the PMU for the given index. | ||
89 | */ | ||
90 | struct perf_event **events; | ||
91 | |||
92 | /* | ||
93 | * A 1 bit for an index indicates that the counter is being used for | ||
94 | * an event. A 0 means that the counter can be used. | ||
95 | */ | ||
96 | unsigned long *used_mask; | ||
97 | |||
98 | /* | ||
99 | * Hardware lock to serialize accesses to PMU registers. Needed for the | ||
100 | * read/modify/write sequences. | ||
101 | */ | ||
102 | raw_spinlock_t pmu_lock; | ||
103 | }; | ||
104 | |||
105 | struct arm_pmu { | ||
106 | struct pmu pmu; | ||
107 | enum arm_perf_pmu_ids id; | ||
108 | enum arm_pmu_type type; | ||
109 | cpumask_t active_irqs; | ||
110 | const char *name; | ||
111 | irqreturn_t (*handle_irq)(int irq_num, void *dev); | ||
112 | void (*enable)(struct hw_perf_event *evt, int idx); | ||
113 | void (*disable)(struct hw_perf_event *evt, int idx); | ||
114 | int (*get_event_idx)(struct pmu_hw_events *hw_events, | ||
115 | struct hw_perf_event *hwc); | ||
116 | int (*set_event_filter)(struct hw_perf_event *evt, | ||
117 | struct perf_event_attr *attr); | ||
118 | u32 (*read_counter)(int idx); | ||
119 | void (*write_counter)(int idx, u32 val); | ||
120 | void (*start)(void); | ||
121 | void (*stop)(void); | ||
122 | void (*reset)(void *); | ||
123 | int (*map_event)(struct perf_event *event); | ||
124 | int num_events; | ||
125 | atomic_t active_events; | ||
126 | struct mutex reserve_mutex; | ||
127 | u64 max_period; | ||
128 | struct platform_device *plat_device; | ||
129 | struct pmu_hw_events *(*get_hw_events)(void); | ||
130 | }; | ||
131 | |||
132 | #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) | ||
133 | |||
134 | int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type); | ||
135 | |||
136 | u64 armpmu_event_update(struct perf_event *event, | ||
137 | struct hw_perf_event *hwc, | ||
138 | int idx, int overflow); | ||
139 | |||
140 | int armpmu_event_set_period(struct perf_event *event, | ||
141 | struct hw_perf_event *hwc, | ||
142 | int idx); | ||
143 | |||
144 | #endif /* CONFIG_HW_PERF_EVENTS */ | ||
145 | |||
91 | #endif /* __ARM_PMU_H__ */ | 146 | #endif /* __ARM_PMU_H__ */ |
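[Note, not part of this patch: with the reworked header above, each PMU backend now describes itself with a single struct arm_pmu and hands it to armpmu_register(), instead of relying on the file-static tables that perf_event.c used to carry. The sketch below mirrors the shape the armv6 backend takes later in this diff; every example_* identifier is a placeholder, and the fragment is illustrative only rather than something that builds outside the kernel tree.]

    /* Illustrative backend skeleton against the new struct arm_pmu. */
    static struct pmu_hw_events *example_get_hw_events(void)
    {
            /* Hand back this CPU's counter/bookkeeping state. */
            return &__get_cpu_var(cpu_hw_events);
    }

    static int example_map_event(struct perf_event *event)
    {
            /*
             * Translate the generic perf config into a hardware event number
             * using this PMU's own event/cache maps (placeholders here).
             */
            return map_cpu_event(event, &example_perf_map,
                                 &example_perf_cache_map, 0xFF);
    }

    static struct arm_pmu example_pmu = {
            .name           = "example",
            .handle_irq     = example_handle_irq,      /* overflow interrupt */
            .enable         = example_enable_event,
            .disable        = example_disable_event,
            .get_event_idx  = example_get_event_idx,
            .read_counter   = example_read_counter,
            .write_counter  = example_write_counter,
            .start          = example_start,
            .stop           = example_stop,
            .map_event      = example_map_event,
            .num_events     = 3,
            .max_period     = (1LLU << 32) - 1,
            .get_hw_events  = example_get_hw_events,
    };

    /*
     * Registered the same way the CPU PMU is in perf_event.c below:
     *      armpmu_register(&example_pmu, "cpu", PERF_TYPE_RAW);
     */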
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 53c9c2610cbc..e6e5d7c84f1a 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -12,6 +12,7 @@ | |||
12 | */ | 12 | */ |
13 | #define pr_fmt(fmt) "hw perfevents: " fmt | 13 | #define pr_fmt(fmt) "hw perfevents: " fmt |
14 | 14 | ||
15 | #include <linux/bitmap.h> | ||
15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
16 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
17 | #include <linux/module.h> | 18 | #include <linux/module.h> |
@@ -26,16 +27,8 @@ | |||
26 | #include <asm/pmu.h> | 27 | #include <asm/pmu.h> |
27 | #include <asm/stacktrace.h> | 28 | #include <asm/stacktrace.h> |
28 | 29 | ||
29 | static struct platform_device *pmu_device; | ||
30 | |||
31 | /* | ||
32 | * Hardware lock to serialize accesses to PMU registers. Needed for the | ||
33 | * read/modify/write sequences. | ||
34 | */ | ||
35 | static DEFINE_RAW_SPINLOCK(pmu_lock); | ||
36 | |||
37 | /* | 30 | /* |
38 | * ARMv6 supports a maximum of 3 events, starting from index 1. If we add | 31 | * ARMv6 supports a maximum of 3 events, starting from index 0. If we add |
39 | * another platform that supports more, we need to increase this to be the | 32 | * another platform that supports more, we need to increase this to be the |
40 | * largest of all platforms. | 33 | * largest of all platforms. |
41 | * | 34 | * |
@@ -43,62 +36,24 @@ static DEFINE_RAW_SPINLOCK(pmu_lock); | |||
43 | * cycle counter CCNT + 31 events counters CNT0..30. | 36 | * cycle counter CCNT + 31 events counters CNT0..30. |
44 | * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. | 37 | * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. |
45 | */ | 38 | */ |
46 | #define ARMPMU_MAX_HWEVENTS 33 | 39 | #define ARMPMU_MAX_HWEVENTS 32 |
47 | 40 | ||
48 | /* The events for a given CPU. */ | 41 | static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events); |
49 | struct cpu_hw_events { | 42 | static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask); |
50 | /* | 43 | static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events); |
51 | * The events that are active on the CPU for the given index. Index 0 | ||
52 | * is reserved. | ||
53 | */ | ||
54 | struct perf_event *events[ARMPMU_MAX_HWEVENTS]; | ||
55 | |||
56 | /* | ||
57 | * A 1 bit for an index indicates that the counter is being used for | ||
58 | * an event. A 0 means that the counter can be used. | ||
59 | */ | ||
60 | unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; | ||
61 | 44 | ||
62 | /* | 45 | #define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu)) |
63 | * A 1 bit for an index indicates that the counter is actively being | ||
64 | * used. | ||
65 | */ | ||
66 | unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; | ||
67 | }; | ||
68 | static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | ||
69 | |||
70 | struct arm_pmu { | ||
71 | enum arm_perf_pmu_ids id; | ||
72 | const char *name; | ||
73 | irqreturn_t (*handle_irq)(int irq_num, void *dev); | ||
74 | void (*enable)(struct hw_perf_event *evt, int idx); | ||
75 | void (*disable)(struct hw_perf_event *evt, int idx); | ||
76 | int (*get_event_idx)(struct cpu_hw_events *cpuc, | ||
77 | struct hw_perf_event *hwc); | ||
78 | u32 (*read_counter)(int idx); | ||
79 | void (*write_counter)(int idx, u32 val); | ||
80 | void (*start)(void); | ||
81 | void (*stop)(void); | ||
82 | void (*reset)(void *); | ||
83 | const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX] | ||
84 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
85 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
86 | const unsigned (*event_map)[PERF_COUNT_HW_MAX]; | ||
87 | u32 raw_event_mask; | ||
88 | int num_events; | ||
89 | u64 max_period; | ||
90 | }; | ||
91 | 46 | ||
92 | /* Set at runtime when we know what CPU type we are. */ | 47 | /* Set at runtime when we know what CPU type we are. */ |
93 | static const struct arm_pmu *armpmu; | 48 | static struct arm_pmu *cpu_pmu; |
94 | 49 | ||
95 | enum arm_perf_pmu_ids | 50 | enum arm_perf_pmu_ids |
96 | armpmu_get_pmu_id(void) | 51 | armpmu_get_pmu_id(void) |
97 | { | 52 | { |
98 | int id = -ENODEV; | 53 | int id = -ENODEV; |
99 | 54 | ||
100 | if (armpmu != NULL) | 55 | if (cpu_pmu != NULL) |
101 | id = armpmu->id; | 56 | id = cpu_pmu->id; |
102 | 57 | ||
103 | return id; | 58 | return id; |
104 | } | 59 | } |
@@ -109,8 +64,8 @@ armpmu_get_max_events(void) | |||
109 | { | 64 | { |
110 | int max_events = 0; | 65 | int max_events = 0; |
111 | 66 | ||
112 | if (armpmu != NULL) | 67 | if (cpu_pmu != NULL) |
113 | max_events = armpmu->num_events; | 68 | max_events = cpu_pmu->num_events; |
114 | 69 | ||
115 | return max_events; | 70 | return max_events; |
116 | } | 71 | } |
@@ -130,7 +85,11 @@ EXPORT_SYMBOL_GPL(perf_num_counters); | |||
130 | #define CACHE_OP_UNSUPPORTED 0xFFFF | 85 | #define CACHE_OP_UNSUPPORTED 0xFFFF |
131 | 86 | ||
132 | static int | 87 | static int |
133 | armpmu_map_cache_event(u64 config) | 88 | armpmu_map_cache_event(const unsigned (*cache_map) |
89 | [PERF_COUNT_HW_CACHE_MAX] | ||
90 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
91 | [PERF_COUNT_HW_CACHE_RESULT_MAX], | ||
92 | u64 config) | ||
134 | { | 93 | { |
135 | unsigned int cache_type, cache_op, cache_result, ret; | 94 | unsigned int cache_type, cache_op, cache_result, ret; |
136 | 95 | ||
@@ -146,7 +105,7 @@ armpmu_map_cache_event(u64 config) | |||
146 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | 105 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) |
147 | return -EINVAL; | 106 | return -EINVAL; |
148 | 107 | ||
149 | ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result]; | 108 | ret = (int)(*cache_map)[cache_type][cache_op][cache_result]; |
150 | 109 | ||
151 | if (ret == CACHE_OP_UNSUPPORTED) | 110 | if (ret == CACHE_OP_UNSUPPORTED) |
152 | return -ENOENT; | 111 | return -ENOENT; |
@@ -155,23 +114,46 @@ armpmu_map_cache_event(u64 config) | |||
155 | } | 114 | } |
156 | 115 | ||
157 | static int | 116 | static int |
158 | armpmu_map_event(u64 config) | 117 | armpmu_map_event(const unsigned (*event_map)[PERF_COUNT_HW_MAX], u64 config) |
159 | { | 118 | { |
160 | int mapping = (*armpmu->event_map)[config]; | 119 | int mapping = (*event_map)[config]; |
161 | return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping; | 120 | return mapping == HW_OP_UNSUPPORTED ? -ENOENT : mapping; |
162 | } | 121 | } |
163 | 122 | ||
164 | static int | 123 | static int |
165 | armpmu_map_raw_event(u64 config) | 124 | armpmu_map_raw_event(u32 raw_event_mask, u64 config) |
166 | { | 125 | { |
167 | return (int)(config & armpmu->raw_event_mask); | 126 | return (int)(config & raw_event_mask); |
168 | } | 127 | } |
169 | 128 | ||
170 | static int | 129 | static int map_cpu_event(struct perf_event *event, |
130 | const unsigned (*event_map)[PERF_COUNT_HW_MAX], | ||
131 | const unsigned (*cache_map) | ||
132 | [PERF_COUNT_HW_CACHE_MAX] | ||
133 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
134 | [PERF_COUNT_HW_CACHE_RESULT_MAX], | ||
135 | u32 raw_event_mask) | ||
136 | { | ||
137 | u64 config = event->attr.config; | ||
138 | |||
139 | switch (event->attr.type) { | ||
140 | case PERF_TYPE_HARDWARE: | ||
141 | return armpmu_map_event(event_map, config); | ||
142 | case PERF_TYPE_HW_CACHE: | ||
143 | return armpmu_map_cache_event(cache_map, config); | ||
144 | case PERF_TYPE_RAW: | ||
145 | return armpmu_map_raw_event(raw_event_mask, config); | ||
146 | } | ||
147 | |||
148 | return -ENOENT; | ||
149 | } | ||
150 | |||
151 | int | ||
171 | armpmu_event_set_period(struct perf_event *event, | 152 | armpmu_event_set_period(struct perf_event *event, |
172 | struct hw_perf_event *hwc, | 153 | struct hw_perf_event *hwc, |
173 | int idx) | 154 | int idx) |
174 | { | 155 | { |
156 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | ||
175 | s64 left = local64_read(&hwc->period_left); | 157 | s64 left = local64_read(&hwc->period_left); |
176 | s64 period = hwc->sample_period; | 158 | s64 period = hwc->sample_period; |
177 | int ret = 0; | 159 | int ret = 0; |
@@ -202,11 +184,12 @@ armpmu_event_set_period(struct perf_event *event, | |||
202 | return ret; | 184 | return ret; |
203 | } | 185 | } |
204 | 186 | ||
205 | static u64 | 187 | u64 |
206 | armpmu_event_update(struct perf_event *event, | 188 | armpmu_event_update(struct perf_event *event, |
207 | struct hw_perf_event *hwc, | 189 | struct hw_perf_event *hwc, |
208 | int idx, int overflow) | 190 | int idx, int overflow) |
209 | { | 191 | { |
192 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | ||
210 | u64 delta, prev_raw_count, new_raw_count; | 193 | u64 delta, prev_raw_count, new_raw_count; |
211 | 194 | ||
212 | again: | 195 | again: |
@@ -246,11 +229,9 @@ armpmu_read(struct perf_event *event) | |||
246 | static void | 229 | static void |
247 | armpmu_stop(struct perf_event *event, int flags) | 230 | armpmu_stop(struct perf_event *event, int flags) |
248 | { | 231 | { |
232 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | ||
249 | struct hw_perf_event *hwc = &event->hw; | 233 | struct hw_perf_event *hwc = &event->hw; |
250 | 234 | ||
251 | if (!armpmu) | ||
252 | return; | ||
253 | |||
254 | /* | 235 | /* |
255 | * ARM pmu always has to update the counter, so ignore | 236 | * ARM pmu always has to update the counter, so ignore |
256 | * PERF_EF_UPDATE, see comments in armpmu_start(). | 237 | * PERF_EF_UPDATE, see comments in armpmu_start(). |
@@ -266,11 +247,9 @@ armpmu_stop(struct perf_event *event, int flags) | |||
266 | static void | 247 | static void |
267 | armpmu_start(struct perf_event *event, int flags) | 248 | armpmu_start(struct perf_event *event, int flags) |
268 | { | 249 | { |
250 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | ||
269 | struct hw_perf_event *hwc = &event->hw; | 251 | struct hw_perf_event *hwc = &event->hw; |
270 | 252 | ||
271 | if (!armpmu) | ||
272 | return; | ||
273 | |||
274 | /* | 253 | /* |
275 | * ARM pmu always has to reprogram the period, so ignore | 254 | * ARM pmu always has to reprogram the period, so ignore |
276 | * PERF_EF_RELOAD, see the comment below. | 255 | * PERF_EF_RELOAD, see the comment below. |
@@ -293,16 +272,16 @@ armpmu_start(struct perf_event *event, int flags) | |||
293 | static void | 272 | static void |
294 | armpmu_del(struct perf_event *event, int flags) | 273 | armpmu_del(struct perf_event *event, int flags) |
295 | { | 274 | { |
296 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 275 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
276 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); | ||
297 | struct hw_perf_event *hwc = &event->hw; | 277 | struct hw_perf_event *hwc = &event->hw; |
298 | int idx = hwc->idx; | 278 | int idx = hwc->idx; |
299 | 279 | ||
300 | WARN_ON(idx < 0); | 280 | WARN_ON(idx < 0); |
301 | 281 | ||
302 | clear_bit(idx, cpuc->active_mask); | ||
303 | armpmu_stop(event, PERF_EF_UPDATE); | 282 | armpmu_stop(event, PERF_EF_UPDATE); |
304 | cpuc->events[idx] = NULL; | 283 | hw_events->events[idx] = NULL; |
305 | clear_bit(idx, cpuc->used_mask); | 284 | clear_bit(idx, hw_events->used_mask); |
306 | 285 | ||
307 | perf_event_update_userpage(event); | 286 | perf_event_update_userpage(event); |
308 | } | 287 | } |
@@ -310,7 +289,8 @@ armpmu_del(struct perf_event *event, int flags) | |||
310 | static int | 289 | static int |
311 | armpmu_add(struct perf_event *event, int flags) | 290 | armpmu_add(struct perf_event *event, int flags) |
312 | { | 291 | { |
313 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 292 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
293 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); | ||
314 | struct hw_perf_event *hwc = &event->hw; | 294 | struct hw_perf_event *hwc = &event->hw; |
315 | int idx; | 295 | int idx; |
316 | int err = 0; | 296 | int err = 0; |
@@ -318,7 +298,7 @@ armpmu_add(struct perf_event *event, int flags) | |||
318 | perf_pmu_disable(event->pmu); | 298 | perf_pmu_disable(event->pmu); |
319 | 299 | ||
320 | /* If we don't have a space for the counter then finish early. */ | 300 | /* If we don't have a space for the counter then finish early. */ |
321 | idx = armpmu->get_event_idx(cpuc, hwc); | 301 | idx = armpmu->get_event_idx(hw_events, hwc); |
322 | if (idx < 0) { | 302 | if (idx < 0) { |
323 | err = idx; | 303 | err = idx; |
324 | goto out; | 304 | goto out; |
@@ -330,8 +310,7 @@ armpmu_add(struct perf_event *event, int flags) | |||
330 | */ | 310 | */ |
331 | event->hw.idx = idx; | 311 | event->hw.idx = idx; |
332 | armpmu->disable(hwc, idx); | 312 | armpmu->disable(hwc, idx); |
333 | cpuc->events[idx] = event; | 313 | hw_events->events[idx] = event; |
334 | set_bit(idx, cpuc->active_mask); | ||
335 | 314 | ||
336 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | 315 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; |
337 | if (flags & PERF_EF_START) | 316 | if (flags & PERF_EF_START) |
@@ -345,25 +324,25 @@ out: | |||
345 | return err; | 324 | return err; |
346 | } | 325 | } |
347 | 326 | ||
348 | static struct pmu pmu; | ||
349 | |||
350 | static int | 327 | static int |
351 | validate_event(struct cpu_hw_events *cpuc, | 328 | validate_event(struct pmu_hw_events *hw_events, |
352 | struct perf_event *event) | 329 | struct perf_event *event) |
353 | { | 330 | { |
331 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | ||
354 | struct hw_perf_event fake_event = event->hw; | 332 | struct hw_perf_event fake_event = event->hw; |
333 | struct pmu *leader_pmu = event->group_leader->pmu; | ||
355 | 334 | ||
356 | if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF) | 335 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) |
357 | return 1; | 336 | return 1; |
358 | 337 | ||
359 | return armpmu->get_event_idx(cpuc, &fake_event) >= 0; | 338 | return armpmu->get_event_idx(hw_events, &fake_event) >= 0; |
360 | } | 339 | } |
361 | 340 | ||
362 | static int | 341 | static int |
363 | validate_group(struct perf_event *event) | 342 | validate_group(struct perf_event *event) |
364 | { | 343 | { |
365 | struct perf_event *sibling, *leader = event->group_leader; | 344 | struct perf_event *sibling, *leader = event->group_leader; |
366 | struct cpu_hw_events fake_pmu; | 345 | struct pmu_hw_events fake_pmu; |
367 | 346 | ||
368 | memset(&fake_pmu, 0, sizeof(fake_pmu)); | 347 | memset(&fake_pmu, 0, sizeof(fake_pmu)); |
369 | 348 | ||
@@ -383,110 +362,119 @@ validate_group(struct perf_event *event) | |||
383 | 362 | ||
384 | static irqreturn_t armpmu_platform_irq(int irq, void *dev) | 363 | static irqreturn_t armpmu_platform_irq(int irq, void *dev) |
385 | { | 364 | { |
386 | struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev); | 365 | struct arm_pmu *armpmu = (struct arm_pmu *) dev; |
366 | struct platform_device *plat_device = armpmu->plat_device; | ||
367 | struct arm_pmu_platdata *plat = dev_get_platdata(&plat_device->dev); | ||
387 | 368 | ||
388 | return plat->handle_irq(irq, dev, armpmu->handle_irq); | 369 | return plat->handle_irq(irq, dev, armpmu->handle_irq); |
389 | } | 370 | } |
390 | 371 | ||
372 | static void | ||
373 | armpmu_release_hardware(struct arm_pmu *armpmu) | ||
374 | { | ||
375 | int i, irq, irqs; | ||
376 | struct platform_device *pmu_device = armpmu->plat_device; | ||
377 | |||
378 | irqs = min(pmu_device->num_resources, num_possible_cpus()); | ||
379 | |||
380 | for (i = 0; i < irqs; ++i) { | ||
381 | if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs)) | ||
382 | continue; | ||
383 | irq = platform_get_irq(pmu_device, i); | ||
384 | if (irq >= 0) | ||
385 | free_irq(irq, armpmu); | ||
386 | } | ||
387 | |||
388 | release_pmu(armpmu->type); | ||
389 | } | ||
390 | |||
391 | static int | 391 | static int |
392 | armpmu_reserve_hardware(void) | 392 | armpmu_reserve_hardware(struct arm_pmu *armpmu) |
393 | { | 393 | { |
394 | struct arm_pmu_platdata *plat; | 394 | struct arm_pmu_platdata *plat; |
395 | irq_handler_t handle_irq; | 395 | irq_handler_t handle_irq; |
396 | int i, err = -ENODEV, irq; | 396 | int i, err, irq, irqs; |
397 | struct platform_device *pmu_device = armpmu->plat_device; | ||
397 | 398 | ||
398 | pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU); | 399 | err = reserve_pmu(armpmu->type); |
399 | if (IS_ERR(pmu_device)) { | 400 | if (err) { |
400 | pr_warning("unable to reserve pmu\n"); | 401 | pr_warning("unable to reserve pmu\n"); |
401 | return PTR_ERR(pmu_device); | 402 | return err; |
402 | } | 403 | } |
403 | 404 | ||
404 | init_pmu(ARM_PMU_DEVICE_CPU); | ||
405 | |||
406 | plat = dev_get_platdata(&pmu_device->dev); | 405 | plat = dev_get_platdata(&pmu_device->dev); |
407 | if (plat && plat->handle_irq) | 406 | if (plat && plat->handle_irq) |
408 | handle_irq = armpmu_platform_irq; | 407 | handle_irq = armpmu_platform_irq; |
409 | else | 408 | else |
410 | handle_irq = armpmu->handle_irq; | 409 | handle_irq = armpmu->handle_irq; |
411 | 410 | ||
412 | if (pmu_device->num_resources < 1) { | 411 | irqs = min(pmu_device->num_resources, num_possible_cpus()); |
412 | if (irqs < 1) { | ||
413 | pr_err("no irqs for PMUs defined\n"); | 413 | pr_err("no irqs for PMUs defined\n"); |
414 | return -ENODEV; | 414 | return -ENODEV; |
415 | } | 415 | } |
416 | 416 | ||
417 | for (i = 0; i < pmu_device->num_resources; ++i) { | 417 | for (i = 0; i < irqs; ++i) { |
418 | err = 0; | ||
418 | irq = platform_get_irq(pmu_device, i); | 419 | irq = platform_get_irq(pmu_device, i); |
419 | if (irq < 0) | 420 | if (irq < 0) |
420 | continue; | 421 | continue; |
421 | 422 | ||
423 | /* | ||
424 | * If we have a single PMU interrupt that we can't shift, | ||
425 | * assume that we're running on a uniprocessor machine and | ||
426 | * continue. Otherwise, continue without this interrupt. | ||
427 | */ | ||
428 | if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) { | ||
429 | pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", | ||
430 | irq, i); | ||
431 | continue; | ||
432 | } | ||
433 | |||
422 | err = request_irq(irq, handle_irq, | 434 | err = request_irq(irq, handle_irq, |
423 | IRQF_DISABLED | IRQF_NOBALANCING, | 435 | IRQF_DISABLED | IRQF_NOBALANCING, |
424 | "armpmu", NULL); | 436 | "arm-pmu", armpmu); |
425 | if (err) { | 437 | if (err) { |
426 | pr_warning("unable to request IRQ%d for ARM perf " | 438 | pr_err("unable to request IRQ%d for ARM PMU counters\n", |
427 | "counters\n", irq); | 439 | irq); |
428 | break; | 440 | armpmu_release_hardware(armpmu); |
441 | return err; | ||
429 | } | 442 | } |
430 | } | ||
431 | 443 | ||
432 | if (err) { | 444 | cpumask_set_cpu(i, &armpmu->active_irqs); |
433 | for (i = i - 1; i >= 0; --i) { | ||
434 | irq = platform_get_irq(pmu_device, i); | ||
435 | if (irq >= 0) | ||
436 | free_irq(irq, NULL); | ||
437 | } | ||
438 | release_pmu(ARM_PMU_DEVICE_CPU); | ||
439 | pmu_device = NULL; | ||
440 | } | 445 | } |
441 | 446 | ||
442 | return err; | 447 | return 0; |
443 | } | 448 | } |
444 | 449 | ||
445 | static void | 450 | static void |
446 | armpmu_release_hardware(void) | 451 | hw_perf_event_destroy(struct perf_event *event) |
447 | { | 452 | { |
448 | int i, irq; | 453 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
454 | atomic_t *active_events = &armpmu->active_events; | ||
455 | struct mutex *pmu_reserve_mutex = &armpmu->reserve_mutex; | ||
449 | 456 | ||
450 | for (i = pmu_device->num_resources - 1; i >= 0; --i) { | 457 | if (atomic_dec_and_mutex_lock(active_events, pmu_reserve_mutex)) { |
451 | irq = platform_get_irq(pmu_device, i); | 458 | armpmu_release_hardware(armpmu); |
452 | if (irq >= 0) | 459 | mutex_unlock(pmu_reserve_mutex); |
453 | free_irq(irq, NULL); | ||
454 | } | 460 | } |
455 | armpmu->stop(); | ||
456 | |||
457 | release_pmu(ARM_PMU_DEVICE_CPU); | ||
458 | pmu_device = NULL; | ||
459 | } | 461 | } |
460 | 462 | ||
461 | static atomic_t active_events = ATOMIC_INIT(0); | 463 | static int |
462 | static DEFINE_MUTEX(pmu_reserve_mutex); | 464 | event_requires_mode_exclusion(struct perf_event_attr *attr) |
463 | |||
464 | static void | ||
465 | hw_perf_event_destroy(struct perf_event *event) | ||
466 | { | 465 | { |
467 | if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { | 466 | return attr->exclude_idle || attr->exclude_user || |
468 | armpmu_release_hardware(); | 467 | attr->exclude_kernel || attr->exclude_hv; |
469 | mutex_unlock(&pmu_reserve_mutex); | ||
470 | } | ||
471 | } | 468 | } |
472 | 469 | ||
473 | static int | 470 | static int |
474 | __hw_perf_event_init(struct perf_event *event) | 471 | __hw_perf_event_init(struct perf_event *event) |
475 | { | 472 | { |
473 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | ||
476 | struct hw_perf_event *hwc = &event->hw; | 474 | struct hw_perf_event *hwc = &event->hw; |
477 | int mapping, err; | 475 | int mapping, err; |
478 | 476 | ||
479 | /* Decode the generic type into an ARM event identifier. */ | 477 | mapping = armpmu->map_event(event); |
480 | if (PERF_TYPE_HARDWARE == event->attr.type) { | ||
481 | mapping = armpmu_map_event(event->attr.config); | ||
482 | } else if (PERF_TYPE_HW_CACHE == event->attr.type) { | ||
483 | mapping = armpmu_map_cache_event(event->attr.config); | ||
484 | } else if (PERF_TYPE_RAW == event->attr.type) { | ||
485 | mapping = armpmu_map_raw_event(event->attr.config); | ||
486 | } else { | ||
487 | pr_debug("event type %x not supported\n", event->attr.type); | ||
488 | return -EOPNOTSUPP; | ||
489 | } | ||
490 | 478 | ||
491 | if (mapping < 0) { | 479 | if (mapping < 0) { |
492 | pr_debug("event %x:%llx not supported\n", event->attr.type, | 480 | pr_debug("event %x:%llx not supported\n", event->attr.type, |
@@ -495,34 +483,31 @@ __hw_perf_event_init(struct perf_event *event) | |||
495 | } | 483 | } |
496 | 484 | ||
497 | /* | 485 | /* |
486 | * We don't assign an index until we actually place the event onto | ||
487 | * hardware. Use -1 to signify that we haven't decided where to put it | ||
488 | * yet. For SMP systems, each core has it's own PMU so we can't do any | ||
489 | * clever allocation or constraints checking at this point. | ||
490 | */ | ||
491 | hwc->idx = -1; | ||
492 | hwc->config_base = 0; | ||
493 | hwc->config = 0; | ||
494 | hwc->event_base = 0; | ||
495 | |||
496 | /* | ||
498 | * Check whether we need to exclude the counter from certain modes. | 497 | * Check whether we need to exclude the counter from certain modes. |
499 | * The ARM performance counters are on all of the time so if someone | ||
500 | * has asked us for some excludes then we have to fail. | ||
501 | */ | 498 | */ |
502 | if (event->attr.exclude_kernel || event->attr.exclude_user || | 499 | if ((!armpmu->set_event_filter || |
503 | event->attr.exclude_hv || event->attr.exclude_idle) { | 500 | armpmu->set_event_filter(hwc, &event->attr)) && |
501 | event_requires_mode_exclusion(&event->attr)) { | ||
504 | pr_debug("ARM performance counters do not support " | 502 | pr_debug("ARM performance counters do not support " |
505 | "mode exclusion\n"); | 503 | "mode exclusion\n"); |
506 | return -EPERM; | 504 | return -EPERM; |
507 | } | 505 | } |
508 | 506 | ||
509 | /* | 507 | /* |
510 | * We don't assign an index until we actually place the event onto | 508 | * Store the event encoding into the config_base field. |
511 | * hardware. Use -1 to signify that we haven't decided where to put it | ||
512 | * yet. For SMP systems, each core has it's own PMU so we can't do any | ||
513 | * clever allocation or constraints checking at this point. | ||
514 | */ | 509 | */ |
515 | hwc->idx = -1; | 510 | hwc->config_base |= (unsigned long)mapping; |
516 | |||
517 | /* | ||
518 | * Store the event encoding into the config_base field. config and | ||
519 | * event_base are unused as the only 2 things we need to know are | ||
520 | * the event mapping and the counter to use. The counter to use is | ||
521 | * also the indx and the config_base is the event type. | ||
522 | */ | ||
523 | hwc->config_base = (unsigned long)mapping; | ||
524 | hwc->config = 0; | ||
525 | hwc->event_base = 0; | ||
526 | 511 | ||
527 | if (!hwc->sample_period) { | 512 | if (!hwc->sample_period) { |
528 | hwc->sample_period = armpmu->max_period; | 513 | hwc->sample_period = armpmu->max_period; |
@@ -542,32 +527,23 @@ __hw_perf_event_init(struct perf_event *event) | |||
542 | 527 | ||
543 | static int armpmu_event_init(struct perf_event *event) | 528 | static int armpmu_event_init(struct perf_event *event) |
544 | { | 529 | { |
530 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | ||
545 | int err = 0; | 531 | int err = 0; |
532 | atomic_t *active_events = &armpmu->active_events; | ||
546 | 533 | ||
547 | switch (event->attr.type) { | 534 | if (armpmu->map_event(event) == -ENOENT) |
548 | case PERF_TYPE_RAW: | ||
549 | case PERF_TYPE_HARDWARE: | ||
550 | case PERF_TYPE_HW_CACHE: | ||
551 | break; | ||
552 | |||
553 | default: | ||
554 | return -ENOENT; | 535 | return -ENOENT; |
555 | } | ||
556 | |||
557 | if (!armpmu) | ||
558 | return -ENODEV; | ||
559 | 536 | ||
560 | event->destroy = hw_perf_event_destroy; | 537 | event->destroy = hw_perf_event_destroy; |
561 | 538 | ||
562 | if (!atomic_inc_not_zero(&active_events)) { | 539 | if (!atomic_inc_not_zero(active_events)) { |
563 | mutex_lock(&pmu_reserve_mutex); | 540 | mutex_lock(&armpmu->reserve_mutex); |
564 | if (atomic_read(&active_events) == 0) { | 541 | if (atomic_read(active_events) == 0) |
565 | err = armpmu_reserve_hardware(); | 542 | err = armpmu_reserve_hardware(armpmu); |
566 | } | ||
567 | 543 | ||
568 | if (!err) | 544 | if (!err) |
569 | atomic_inc(&active_events); | 545 | atomic_inc(active_events); |
570 | mutex_unlock(&pmu_reserve_mutex); | 546 | mutex_unlock(&armpmu->reserve_mutex); |
571 | } | 547 | } |
572 | 548 | ||
573 | if (err) | 549 | if (err) |
@@ -582,22 +558,9 @@ static int armpmu_event_init(struct perf_event *event) | |||
582 | 558 | ||
583 | static void armpmu_enable(struct pmu *pmu) | 559 | static void armpmu_enable(struct pmu *pmu) |
584 | { | 560 | { |
585 | /* Enable all of the perf events on hardware. */ | 561 | struct arm_pmu *armpmu = to_arm_pmu(pmu); |
586 | int idx, enabled = 0; | 562 | struct pmu_hw_events *hw_events = armpmu->get_hw_events(); |
587 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | 563 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); |
588 | |||
589 | if (!armpmu) | ||
590 | return; | ||
591 | |||
592 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
593 | struct perf_event *event = cpuc->events[idx]; | ||
594 | |||
595 | if (!event) | ||
596 | continue; | ||
597 | |||
598 | armpmu->enable(&event->hw, idx); | ||
599 | enabled = 1; | ||
600 | } | ||
601 | 564 | ||
602 | if (enabled) | 565 | if (enabled) |
603 | armpmu->start(); | 566 | armpmu->start(); |
@@ -605,20 +568,32 @@ static void armpmu_enable(struct pmu *pmu) | |||
605 | 568 | ||
606 | static void armpmu_disable(struct pmu *pmu) | 569 | static void armpmu_disable(struct pmu *pmu) |
607 | { | 570 | { |
608 | if (armpmu) | 571 | struct arm_pmu *armpmu = to_arm_pmu(pmu); |
609 | armpmu->stop(); | 572 | armpmu->stop(); |
610 | } | 573 | } |
611 | 574 | ||
612 | static struct pmu pmu = { | 575 | static void __init armpmu_init(struct arm_pmu *armpmu) |
613 | .pmu_enable = armpmu_enable, | 576 | { |
614 | .pmu_disable = armpmu_disable, | 577 | atomic_set(&armpmu->active_events, 0); |
615 | .event_init = armpmu_event_init, | 578 | mutex_init(&armpmu->reserve_mutex); |
616 | .add = armpmu_add, | 579 | |
617 | .del = armpmu_del, | 580 | armpmu->pmu = (struct pmu) { |
618 | .start = armpmu_start, | 581 | .pmu_enable = armpmu_enable, |
619 | .stop = armpmu_stop, | 582 | .pmu_disable = armpmu_disable, |
620 | .read = armpmu_read, | 583 | .event_init = armpmu_event_init, |
621 | }; | 584 | .add = armpmu_add, |
585 | .del = armpmu_del, | ||
586 | .start = armpmu_start, | ||
587 | .stop = armpmu_stop, | ||
588 | .read = armpmu_read, | ||
589 | }; | ||
590 | } | ||
591 | |||
592 | int __init armpmu_register(struct arm_pmu *armpmu, char *name, int type) | ||
593 | { | ||
594 | armpmu_init(armpmu); | ||
595 | return perf_pmu_register(&armpmu->pmu, name, type); | ||
596 | } | ||
622 | 597 | ||
623 | /* Include the PMU-specific implementations. */ | 598 | /* Include the PMU-specific implementations. */ |
624 | #include "perf_event_xscale.c" | 599 | #include "perf_event_xscale.c" |
@@ -630,14 +605,72 @@ static struct pmu pmu = { | |||
630 | * This requires SMP to be available, so exists as a separate initcall. | 605 | * This requires SMP to be available, so exists as a separate initcall. |
631 | */ | 606 | */ |
632 | static int __init | 607 | static int __init |
633 | armpmu_reset(void) | 608 | cpu_pmu_reset(void) |
609 | { | ||
610 | if (cpu_pmu && cpu_pmu->reset) | ||
611 | return on_each_cpu(cpu_pmu->reset, NULL, 1); | ||
612 | return 0; | ||
613 | } | ||
614 | arch_initcall(cpu_pmu_reset); | ||
615 | |||
616 | /* | ||
617 | * PMU platform driver and devicetree bindings. | ||
618 | */ | ||
619 | static struct of_device_id armpmu_of_device_ids[] = { | ||
620 | {.compatible = "arm,cortex-a9-pmu"}, | ||
621 | {.compatible = "arm,cortex-a8-pmu"}, | ||
622 | {.compatible = "arm,arm1136-pmu"}, | ||
623 | {.compatible = "arm,arm1176-pmu"}, | ||
624 | {}, | ||
625 | }; | ||
626 | |||
627 | static struct platform_device_id armpmu_plat_device_ids[] = { | ||
628 | {.name = "arm-pmu"}, | ||
629 | {}, | ||
630 | }; | ||
631 | |||
632 | static int __devinit armpmu_device_probe(struct platform_device *pdev) | ||
634 | { | 633 | { |
635 | if (armpmu && armpmu->reset) | 634 | cpu_pmu->plat_device = pdev; |
636 | return on_each_cpu(armpmu->reset, NULL, 1); | ||
637 | return 0; | 635 | return 0; |
638 | } | 636 | } |
639 | arch_initcall(armpmu_reset); | ||
640 | 637 | ||
638 | static struct platform_driver armpmu_driver = { | ||
639 | .driver = { | ||
640 | .name = "arm-pmu", | ||
641 | .of_match_table = armpmu_of_device_ids, | ||
642 | }, | ||
643 | .probe = armpmu_device_probe, | ||
644 | .id_table = armpmu_plat_device_ids, | ||
645 | }; | ||
646 | |||
647 | static int __init register_pmu_driver(void) | ||
648 | { | ||
649 | return platform_driver_register(&armpmu_driver); | ||
650 | } | ||
651 | device_initcall(register_pmu_driver); | ||
652 | |||
653 | static struct pmu_hw_events *armpmu_get_cpu_events(void) | ||
654 | { | ||
655 | return &__get_cpu_var(cpu_hw_events); | ||
656 | } | ||
657 | |||
658 | static void __init cpu_pmu_init(struct arm_pmu *armpmu) | ||
659 | { | ||
660 | int cpu; | ||
661 | for_each_possible_cpu(cpu) { | ||
662 | struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu); | ||
663 | events->events = per_cpu(hw_events, cpu); | ||
664 | events->used_mask = per_cpu(used_mask, cpu); | ||
665 | raw_spin_lock_init(&events->pmu_lock); | ||
666 | } | ||
667 | armpmu->get_hw_events = armpmu_get_cpu_events; | ||
668 | armpmu->type = ARM_PMU_DEVICE_CPU; | ||
669 | } | ||
670 | |||
671 | /* | ||
672 | * CPU PMU identification and registration. | ||
673 | */ | ||
641 | static int __init | 674 | static int __init |
642 | init_hw_perf_events(void) | 675 | init_hw_perf_events(void) |
643 | { | 676 | { |
@@ -651,22 +684,22 @@ init_hw_perf_events(void) | |||
651 | case 0xB360: /* ARM1136 */ | 684 | case 0xB360: /* ARM1136 */ |
652 | case 0xB560: /* ARM1156 */ | 685 | case 0xB560: /* ARM1156 */ |
653 | case 0xB760: /* ARM1176 */ | 686 | case 0xB760: /* ARM1176 */ |
654 | armpmu = armv6pmu_init(); | 687 | cpu_pmu = armv6pmu_init(); |
655 | break; | 688 | break; |
656 | case 0xB020: /* ARM11mpcore */ | 689 | case 0xB020: /* ARM11mpcore */ |
657 | armpmu = armv6mpcore_pmu_init(); | 690 | cpu_pmu = armv6mpcore_pmu_init(); |
658 | break; | 691 | break; |
659 | case 0xC080: /* Cortex-A8 */ | 692 | case 0xC080: /* Cortex-A8 */ |
660 | armpmu = armv7_a8_pmu_init(); | 693 | cpu_pmu = armv7_a8_pmu_init(); |
661 | break; | 694 | break; |
662 | case 0xC090: /* Cortex-A9 */ | 695 | case 0xC090: /* Cortex-A9 */ |
663 | armpmu = armv7_a9_pmu_init(); | 696 | cpu_pmu = armv7_a9_pmu_init(); |
664 | break; | 697 | break; |
665 | case 0xC050: /* Cortex-A5 */ | 698 | case 0xC050: /* Cortex-A5 */ |
666 | armpmu = armv7_a5_pmu_init(); | 699 | cpu_pmu = armv7_a5_pmu_init(); |
667 | break; | 700 | break; |
668 | case 0xC0F0: /* Cortex-A15 */ | 701 | case 0xC0F0: /* Cortex-A15 */ |
669 | armpmu = armv7_a15_pmu_init(); | 702 | cpu_pmu = armv7_a15_pmu_init(); |
670 | break; | 703 | break; |
671 | } | 704 | } |
672 | /* Intel CPUs [xscale]. */ | 705 | /* Intel CPUs [xscale]. */ |
@@ -674,23 +707,23 @@ init_hw_perf_events(void) | |||
674 | part_number = (cpuid >> 13) & 0x7; | 707 | part_number = (cpuid >> 13) & 0x7; |
675 | switch (part_number) { | 708 | switch (part_number) { |
676 | case 1: | 709 | case 1: |
677 | armpmu = xscale1pmu_init(); | 710 | cpu_pmu = xscale1pmu_init(); |
678 | break; | 711 | break; |
679 | case 2: | 712 | case 2: |
680 | armpmu = xscale2pmu_init(); | 713 | cpu_pmu = xscale2pmu_init(); |
681 | break; | 714 | break; |
682 | } | 715 | } |
683 | } | 716 | } |
684 | 717 | ||
685 | if (armpmu) { | 718 | if (cpu_pmu) { |
686 | pr_info("enabled with %s PMU driver, %d counters available\n", | 719 | pr_info("enabled with %s PMU driver, %d counters available\n", |
687 | armpmu->name, armpmu->num_events); | 720 | cpu_pmu->name, cpu_pmu->num_events); |
721 | cpu_pmu_init(cpu_pmu); | ||
722 | armpmu_register(cpu_pmu, "cpu", PERF_TYPE_RAW); | ||
688 | } else { | 723 | } else { |
689 | pr_info("no hardware support available\n"); | 724 | pr_info("no hardware support available\n"); |
690 | } | 725 | } |
691 | 726 | ||
692 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | ||
693 | |||
694 | return 0; | 727 | return 0; |
695 | } | 728 | } |
696 | early_initcall(init_hw_perf_events); | 729 | early_initcall(init_hw_perf_events); |
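[Note, not part of this patch: the counters these backends expose are consumed through the regular perf syscall interface. Below is a minimal userspace example, assuming a Linux system where one of the PMU drivers above has initialised; PERF_TYPE_HARDWARE events are mapped through armpmu_event_init()/map_event() as reworked above, and mode-exclusion attributes are refused with -EPERM on PMUs without a set_event_filter hook.]

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <string.h>
    #include <stdint.h>
    #include <stdio.h>

    static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                                int cpu, int group_fd, unsigned long flags)
    {
            return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
    }

    int main(void)
    {
            struct perf_event_attr attr;
            uint64_t count;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;          /* handled via map_event() */
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.disabled = 1;
            /* No exclude_* bits: pre-PMUv2 ARM PMUs reject mode exclusion. */

            fd = perf_event_open(&attr, 0, -1, -1, 0); /* this task, any CPU */
            if (fd < 0) {
                    perror("perf_event_open");
                    return 1;
            }

            ioctl(fd, PERF_EVENT_IOC_RESET, 0);
            ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
            /* ... workload under measurement ... */
            ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

            if (read(fd, &count, sizeof(count)) == sizeof(count))
                    printf("cycles: %llu\n", (unsigned long long)count);

            close(fd);
            return 0;
    }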
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index dd7f3b9f4cb3..e63d8115c01b 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -54,7 +54,7 @@ enum armv6_perf_types { | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | enum armv6_counters { | 56 | enum armv6_counters { |
57 | ARMV6_CYCLE_COUNTER = 1, | 57 | ARMV6_CYCLE_COUNTER = 0, |
58 | ARMV6_COUNTER0, | 58 | ARMV6_COUNTER0, |
59 | ARMV6_COUNTER1, | 59 | ARMV6_COUNTER1, |
60 | }; | 60 | }; |
@@ -433,6 +433,7 @@ armv6pmu_enable_event(struct hw_perf_event *hwc, | |||
433 | int idx) | 433 | int idx) |
434 | { | 434 | { |
435 | unsigned long val, mask, evt, flags; | 435 | unsigned long val, mask, evt, flags; |
436 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
436 | 437 | ||
437 | if (ARMV6_CYCLE_COUNTER == idx) { | 438 | if (ARMV6_CYCLE_COUNTER == idx) { |
438 | mask = 0; | 439 | mask = 0; |
@@ -454,12 +455,29 @@ armv6pmu_enable_event(struct hw_perf_event *hwc, | |||
454 | * Mask out the current event and set the counter to count the event | 455 | * Mask out the current event and set the counter to count the event |
455 | * that we're interested in. | 456 | * that we're interested in. |
456 | */ | 457 | */ |
457 | raw_spin_lock_irqsave(&pmu_lock, flags); | 458 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
458 | val = armv6_pmcr_read(); | 459 | val = armv6_pmcr_read(); |
459 | val &= ~mask; | 460 | val &= ~mask; |
460 | val |= evt; | 461 | val |= evt; |
461 | armv6_pmcr_write(val); | 462 | armv6_pmcr_write(val); |
462 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 463 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
464 | } | ||
465 | |||
466 | static int counter_is_active(unsigned long pmcr, int idx) | ||
467 | { | ||
468 | unsigned long mask = 0; | ||
469 | if (idx == ARMV6_CYCLE_COUNTER) | ||
470 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
471 | else if (idx == ARMV6_COUNTER0) | ||
472 | mask = ARMV6_PMCR_COUNT0_IEN; | ||
473 | else if (idx == ARMV6_COUNTER1) | ||
474 | mask = ARMV6_PMCR_COUNT1_IEN; | ||
475 | |||
476 | if (mask) | ||
477 | return pmcr & mask; | ||
478 | |||
479 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
480 | return 0; | ||
463 | } | 481 | } |
464 | 482 | ||
465 | static irqreturn_t | 483 | static irqreturn_t |
@@ -468,7 +486,7 @@ armv6pmu_handle_irq(int irq_num, | |||
468 | { | 486 | { |
469 | unsigned long pmcr = armv6_pmcr_read(); | 487 | unsigned long pmcr = armv6_pmcr_read(); |
470 | struct perf_sample_data data; | 488 | struct perf_sample_data data; |
471 | struct cpu_hw_events *cpuc; | 489 | struct pmu_hw_events *cpuc; |
472 | struct pt_regs *regs; | 490 | struct pt_regs *regs; |
473 | int idx; | 491 | int idx; |
474 | 492 | ||
@@ -487,11 +505,11 @@ armv6pmu_handle_irq(int irq_num, | |||
487 | perf_sample_data_init(&data, 0); | 505 | perf_sample_data_init(&data, 0); |
488 | 506 | ||
489 | cpuc = &__get_cpu_var(cpu_hw_events); | 507 | cpuc = &__get_cpu_var(cpu_hw_events); |
490 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | 508 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
491 | struct perf_event *event = cpuc->events[idx]; | 509 | struct perf_event *event = cpuc->events[idx]; |
492 | struct hw_perf_event *hwc; | 510 | struct hw_perf_event *hwc; |
493 | 511 | ||
494 | if (!test_bit(idx, cpuc->active_mask)) | 512 | if (!counter_is_active(pmcr, idx)) |
495 | continue; | 513 | continue; |
496 | 514 | ||
497 | /* | 515 | /* |
@@ -508,7 +526,7 @@ armv6pmu_handle_irq(int irq_num, | |||
508 | continue; | 526 | continue; |
509 | 527 | ||
510 | if (perf_event_overflow(event, &data, regs)) | 528 | if (perf_event_overflow(event, &data, regs)) |
511 | armpmu->disable(hwc, idx); | 529 | cpu_pmu->disable(hwc, idx); |
512 | } | 530 | } |
513 | 531 | ||
514 | /* | 532 | /* |
@@ -527,28 +545,30 @@ static void | |||
527 | armv6pmu_start(void) | 545 | armv6pmu_start(void) |
528 | { | 546 | { |
529 | unsigned long flags, val; | 547 | unsigned long flags, val; |
548 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
530 | 549 | ||
531 | raw_spin_lock_irqsave(&pmu_lock, flags); | 550 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
532 | val = armv6_pmcr_read(); | 551 | val = armv6_pmcr_read(); |
533 | val |= ARMV6_PMCR_ENABLE; | 552 | val |= ARMV6_PMCR_ENABLE; |
534 | armv6_pmcr_write(val); | 553 | armv6_pmcr_write(val); |
535 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 554 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
536 | } | 555 | } |
537 | 556 | ||
538 | static void | 557 | static void |
539 | armv6pmu_stop(void) | 558 | armv6pmu_stop(void) |
540 | { | 559 | { |
541 | unsigned long flags, val; | 560 | unsigned long flags, val; |
561 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
542 | 562 | ||
543 | raw_spin_lock_irqsave(&pmu_lock, flags); | 563 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
544 | val = armv6_pmcr_read(); | 564 | val = armv6_pmcr_read(); |
545 | val &= ~ARMV6_PMCR_ENABLE; | 565 | val &= ~ARMV6_PMCR_ENABLE; |
546 | armv6_pmcr_write(val); | 566 | armv6_pmcr_write(val); |
547 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 567 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
548 | } | 568 | } |
549 | 569 | ||
550 | static int | 570 | static int |
551 | armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, | 571 | armv6pmu_get_event_idx(struct pmu_hw_events *cpuc, |
552 | struct hw_perf_event *event) | 572 | struct hw_perf_event *event) |
553 | { | 573 | { |
554 | /* Always place a cycle counter into the cycle counter. */ | 574 | /* Always place a cycle counter into the cycle counter. */ |
@@ -578,6 +598,7 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, | |||
578 | int idx) | 598 | int idx) |
579 | { | 599 | { |
580 | unsigned long val, mask, evt, flags; | 600 | unsigned long val, mask, evt, flags; |
601 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
581 | 602 | ||
582 | if (ARMV6_CYCLE_COUNTER == idx) { | 603 | if (ARMV6_CYCLE_COUNTER == idx) { |
583 | mask = ARMV6_PMCR_CCOUNT_IEN; | 604 | mask = ARMV6_PMCR_CCOUNT_IEN; |
@@ -598,12 +619,12 @@ armv6pmu_disable_event(struct hw_perf_event *hwc, | |||
598 | * of ETM bus signal assertion cycles. The external reporting should | 619 | * of ETM bus signal assertion cycles. The external reporting should |
599 | * be disabled and so this should never increment. | 620 | * be disabled and so this should never increment. |
600 | */ | 621 | */ |
601 | raw_spin_lock_irqsave(&pmu_lock, flags); | 622 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
602 | val = armv6_pmcr_read(); | 623 | val = armv6_pmcr_read(); |
603 | val &= ~mask; | 624 | val &= ~mask; |
604 | val |= evt; | 625 | val |= evt; |
605 | armv6_pmcr_write(val); | 626 | armv6_pmcr_write(val); |
606 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 627 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
607 | } | 628 | } |
608 | 629 | ||
609 | static void | 630 | static void |
@@ -611,6 +632,7 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | |||
611 | int idx) | 632 | int idx) |
612 | { | 633 | { |
613 | unsigned long val, mask, flags, evt = 0; | 634 | unsigned long val, mask, flags, evt = 0; |
635 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
614 | 636 | ||
615 | if (ARMV6_CYCLE_COUNTER == idx) { | 637 | if (ARMV6_CYCLE_COUNTER == idx) { |
616 | mask = ARMV6_PMCR_CCOUNT_IEN; | 638 | mask = ARMV6_PMCR_CCOUNT_IEN; |
@@ -627,15 +649,21 @@ armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | |||
627 | * Unlike UP ARMv6, we don't have a way of stopping the counters. We | 649 | * Unlike UP ARMv6, we don't have a way of stopping the counters. We |
628 | * simply disable the interrupt reporting. | 650 | * simply disable the interrupt reporting. |
629 | */ | 651 | */ |
630 | raw_spin_lock_irqsave(&pmu_lock, flags); | 652 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
631 | val = armv6_pmcr_read(); | 653 | val = armv6_pmcr_read(); |
632 | val &= ~mask; | 654 | val &= ~mask; |
633 | val |= evt; | 655 | val |= evt; |
634 | armv6_pmcr_write(val); | 656 | armv6_pmcr_write(val); |
635 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 657 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
658 | } | ||
659 | |||
660 | static int armv6_map_event(struct perf_event *event) | ||
661 | { | ||
662 | return map_cpu_event(event, &armv6_perf_map, | ||
663 | &armv6_perf_cache_map, 0xFF); | ||
636 | } | 664 | } |
637 | 665 | ||
638 | static const struct arm_pmu armv6pmu = { | 666 | static struct arm_pmu armv6pmu = { |
639 | .id = ARM_PERF_PMU_ID_V6, | 667 | .id = ARM_PERF_PMU_ID_V6, |
640 | .name = "v6", | 668 | .name = "v6", |
641 | .handle_irq = armv6pmu_handle_irq, | 669 | .handle_irq = armv6pmu_handle_irq, |
@@ -646,14 +674,12 @@ static const struct arm_pmu armv6pmu = { | |||
646 | .get_event_idx = armv6pmu_get_event_idx, | 674 | .get_event_idx = armv6pmu_get_event_idx, |
647 | .start = armv6pmu_start, | 675 | .start = armv6pmu_start, |
648 | .stop = armv6pmu_stop, | 676 | .stop = armv6pmu_stop, |
649 | .cache_map = &armv6_perf_cache_map, | 677 | .map_event = armv6_map_event, |
650 | .event_map = &armv6_perf_map, | ||
651 | .raw_event_mask = 0xFF, | ||
652 | .num_events = 3, | 678 | .num_events = 3, |
653 | .max_period = (1LLU << 32) - 1, | 679 | .max_period = (1LLU << 32) - 1, |
654 | }; | 680 | }; |
655 | 681 | ||
656 | static const struct arm_pmu *__init armv6pmu_init(void) | 682 | static struct arm_pmu *__init armv6pmu_init(void) |
657 | { | 683 | { |
658 | return &armv6pmu; | 684 | return &armv6pmu; |
659 | } | 685 | } |
@@ -665,7 +691,14 @@ static const struct arm_pmu *__init armv6pmu_init(void) | |||
665 | * disable the interrupt reporting and update the event. When unthrottling we | 691 | * disable the interrupt reporting and update the event. When unthrottling we |
666 | * reset the period and enable the interrupt reporting. | 692 | * reset the period and enable the interrupt reporting. |
667 | */ | 693 | */ |
668 | static const struct arm_pmu armv6mpcore_pmu = { | 694 | |
695 | static int armv6mpcore_map_event(struct perf_event *event) | ||
696 | { | ||
697 | return map_cpu_event(event, &armv6mpcore_perf_map, | ||
698 | &armv6mpcore_perf_cache_map, 0xFF); | ||
699 | } | ||
700 | |||
701 | static struct arm_pmu armv6mpcore_pmu = { | ||
669 | .id = ARM_PERF_PMU_ID_V6MP, | 702 | .id = ARM_PERF_PMU_ID_V6MP, |
670 | .name = "v6mpcore", | 703 | .name = "v6mpcore", |
671 | .handle_irq = armv6pmu_handle_irq, | 704 | .handle_irq = armv6pmu_handle_irq, |
@@ -676,24 +709,22 @@ static const struct arm_pmu armv6mpcore_pmu = { | |||
676 | .get_event_idx = armv6pmu_get_event_idx, | 709 | .get_event_idx = armv6pmu_get_event_idx, |
677 | .start = armv6pmu_start, | 710 | .start = armv6pmu_start, |
678 | .stop = armv6pmu_stop, | 711 | .stop = armv6pmu_stop, |
679 | .cache_map = &armv6mpcore_perf_cache_map, | 712 | .map_event = armv6mpcore_map_event, |
680 | .event_map = &armv6mpcore_perf_map, | ||
681 | .raw_event_mask = 0xFF, | ||
682 | .num_events = 3, | 713 | .num_events = 3, |
683 | .max_period = (1LLU << 32) - 1, | 714 | .max_period = (1LLU << 32) - 1, |
684 | }; | 715 | }; |
685 | 716 | ||
686 | static const struct arm_pmu *__init armv6mpcore_pmu_init(void) | 717 | static struct arm_pmu *__init armv6mpcore_pmu_init(void) |
687 | { | 718 | { |
688 | return &armv6mpcore_pmu; | 719 | return &armv6mpcore_pmu; |
689 | } | 720 | } |
690 | #else | 721 | #else |
691 | static const struct arm_pmu *__init armv6pmu_init(void) | 722 | static struct arm_pmu *__init armv6pmu_init(void) |
692 | { | 723 | { |
693 | return NULL; | 724 | return NULL; |
694 | } | 725 | } |
695 | 726 | ||
696 | static const struct arm_pmu *__init armv6mpcore_pmu_init(void) | 727 | static struct arm_pmu *__init armv6mpcore_pmu_init(void) |
697 | { | 728 | { |
698 | return NULL; | 729 | return NULL; |
699 | } | 730 | } |
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 4c851834f68e..98b75738345e 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -17,6 +17,9 @@ | |||
17 | */ | 17 | */ |
18 | 18 | ||
19 | #ifdef CONFIG_CPU_V7 | 19 | #ifdef CONFIG_CPU_V7 |
20 | |||
21 | static struct arm_pmu armv7pmu; | ||
22 | |||
20 | /* | 23 | /* |
21 | * Common ARMv7 event types | 24 | * Common ARMv7 event types |
22 | * | 25 | * |
@@ -676,23 +679,24 @@ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | |||
676 | }; | 679 | }; |
677 | 680 | ||
678 | /* | 681 | /* |
679 | * Perf Events counters | 682 | * Perf Events' indices |
680 | */ | 683 | */ |
681 | enum armv7_counters { | 684 | #define ARMV7_IDX_CYCLE_COUNTER 0 |
682 | ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ | 685 | #define ARMV7_IDX_COUNTER0 1 |
683 | ARMV7_COUNTER0 = 2, /* First event counter */ | 686 | #define ARMV7_IDX_COUNTER_LAST (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1) |
684 | }; | 687 | |
688 | #define ARMV7_MAX_COUNTERS 32 | ||
689 | #define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1) | ||
685 | 690 | ||
686 | /* | 691 | /* |
687 | * The cycle counter is ARMV7_CYCLE_COUNTER. | 692 | * ARMv7 low level PMNC access |
688 | * The first event counter is ARMV7_COUNTER0. | ||
689 | * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1). | ||
690 | */ | 693 | */ |
691 | #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1) | ||
692 | 694 | ||
693 | /* | 695 | /* |
694 | * ARMv7 low level PMNC access | 696 | * Perf Event to low level counters mapping |
695 | */ | 697 | */ |
698 | #define ARMV7_IDX_TO_COUNTER(x) \ | ||
699 | (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK) | ||
696 | 700 | ||
697 | /* | 701 | /* |
698 | * Per-CPU PMNC: config reg | 702 | * Per-CPU PMNC: config reg |
@@ -708,103 +712,76 @@ enum armv7_counters { | |||
708 | #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ | 712 | #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ |
709 | 713 | ||
710 | /* | 714 | /* |
711 | * Available counters | 715 | * FLAG: counters overflow flag status reg |
712 | */ | ||
713 | #define ARMV7_CNT0 0 /* First event counter */ | ||
714 | #define ARMV7_CCNT 31 /* Cycle counter */ | ||
715 | |||
716 | /* Perf Event to low level counters mapping */ | ||
717 | #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0) | ||
718 | |||
719 | /* | ||
720 | * CNTENS: counters enable reg | ||
721 | */ | ||
722 | #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
723 | #define ARMV7_CNTENS_C (1 << ARMV7_CCNT) | ||
724 | |||
725 | /* | ||
726 | * CNTENC: counters disable reg | ||
727 | */ | ||
728 | #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
729 | #define ARMV7_CNTENC_C (1 << ARMV7_CCNT) | ||
730 | |||
731 | /* | ||
732 | * INTENS: counters overflow interrupt enable reg | ||
733 | */ | ||
734 | #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
735 | #define ARMV7_INTENS_C (1 << ARMV7_CCNT) | ||
736 | |||
737 | /* | ||
738 | * INTENC: counters overflow interrupt disable reg | ||
739 | */ | ||
740 | #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
741 | #define ARMV7_INTENC_C (1 << ARMV7_CCNT) | ||
742 | |||
743 | /* | ||
744 | * EVTSEL: Event selection reg | ||
745 | */ | 716 | */ |
746 | #define ARMV7_EVTSEL_MASK 0xff /* Mask for writable bits */ | 717 | #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ |
718 | #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK | ||
747 | 719 | ||
748 | /* | 720 | /* |
749 | * SELECT: Counter selection reg | 721 | * PMXEVTYPER: Event selection reg |
750 | */ | 722 | */ |
751 | #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ | 723 | #define ARMV7_EVTYPE_MASK 0xc00000ff /* Mask for writable bits */ |
724 | #define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */ | ||
752 | 725 | ||
753 | /* | 726 | /* |
754 | * FLAG: counters overflow flag status reg | 727 | * Event filters for PMUv2 |
755 | */ | 728 | */ |
756 | #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | 729 | #define ARMV7_EXCLUDE_PL1 (1 << 31) |
757 | #define ARMV7_FLAG_C (1 << ARMV7_CCNT) | 730 | #define ARMV7_EXCLUDE_USER (1 << 30) |
758 | #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ | 731 | #define ARMV7_INCLUDE_HYP (1 << 27) |
759 | #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK | ||
760 | 732 | ||
761 | static inline unsigned long armv7_pmnc_read(void) | 733 | static inline u32 armv7_pmnc_read(void) |
762 | { | 734 | { |
763 | u32 val; | 735 | u32 val; |
764 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); | 736 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); |
765 | return val; | 737 | return val; |
766 | } | 738 | } |
767 | 739 | ||
768 | static inline void armv7_pmnc_write(unsigned long val) | 740 | static inline void armv7_pmnc_write(u32 val) |
769 | { | 741 | { |
770 | val &= ARMV7_PMNC_MASK; | 742 | val &= ARMV7_PMNC_MASK; |
771 | isb(); | 743 | isb(); |
772 | asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); | 744 | asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); |
773 | } | 745 | } |
774 | 746 | ||
775 | static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) | 747 | static inline int armv7_pmnc_has_overflowed(u32 pmnc) |
776 | { | 748 | { |
777 | return pmnc & ARMV7_OVERFLOWED_MASK; | 749 | return pmnc & ARMV7_OVERFLOWED_MASK; |
778 | } | 750 | } |
779 | 751 | ||
780 | static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, | 752 | static inline int armv7_pmnc_counter_valid(int idx) |
781 | enum armv7_counters counter) | 753 | { |
754 | return idx >= ARMV7_IDX_CYCLE_COUNTER && idx <= ARMV7_IDX_COUNTER_LAST; | ||
755 | } | ||
756 | |||
757 | static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx) | ||
782 | { | 758 | { |
783 | int ret = 0; | 759 | int ret = 0; |
760 | u32 counter; | ||
784 | 761 | ||
785 | if (counter == ARMV7_CYCLE_COUNTER) | 762 | if (!armv7_pmnc_counter_valid(idx)) { |
786 | ret = pmnc & ARMV7_FLAG_C; | ||
787 | else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST)) | ||
788 | ret = pmnc & ARMV7_FLAG_P(counter); | ||
789 | else | ||
790 | pr_err("CPU%u checking wrong counter %d overflow status\n", | 763 | pr_err("CPU%u checking wrong counter %d overflow status\n", |
791 | smp_processor_id(), counter); | 764 | smp_processor_id(), idx); |
765 | } else { | ||
766 | counter = ARMV7_IDX_TO_COUNTER(idx); | ||
767 | ret = pmnc & BIT(counter); | ||
768 | } | ||
792 | 769 | ||
793 | return ret; | 770 | return ret; |
794 | } | 771 | } |
795 | 772 | ||
796 | static inline int armv7_pmnc_select_counter(unsigned int idx) | 773 | static inline int armv7_pmnc_select_counter(int idx) |
797 | { | 774 | { |
798 | u32 val; | 775 | u32 counter; |
799 | 776 | ||
800 | if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { | 777 | if (!armv7_pmnc_counter_valid(idx)) { |
801 | pr_err("CPU%u selecting wrong PMNC counter" | 778 | pr_err("CPU%u selecting wrong PMNC counter %d\n", |
802 | " %d\n", smp_processor_id(), idx); | 779 | smp_processor_id(), idx); |
803 | return -1; | 780 | return -EINVAL; |
804 | } | 781 | } |
805 | 782 | ||
806 | val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; | 783 | counter = ARMV7_IDX_TO_COUNTER(idx); |
807 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); | 784 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter)); |
808 | isb(); | 785 | isb(); |
809 | 786 | ||
810 | return idx; | 787 | return idx; |
@@ -812,124 +789,95 @@ static inline int armv7_pmnc_select_counter(unsigned int idx) | |||
812 | 789 | ||
813 | static inline u32 armv7pmu_read_counter(int idx) | 790 | static inline u32 armv7pmu_read_counter(int idx) |
814 | { | 791 | { |
815 | unsigned long value = 0; | 792 | u32 value = 0; |
816 | 793 | ||
817 | if (idx == ARMV7_CYCLE_COUNTER) | 794 | if (!armv7_pmnc_counter_valid(idx)) |
818 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); | ||
819 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
820 | if (armv7_pmnc_select_counter(idx) == idx) | ||
821 | asm volatile("mrc p15, 0, %0, c9, c13, 2" | ||
822 | : "=r" (value)); | ||
823 | } else | ||
824 | pr_err("CPU%u reading wrong counter %d\n", | 795 | pr_err("CPU%u reading wrong counter %d\n", |
825 | smp_processor_id(), idx); | 796 | smp_processor_id(), idx); |
797 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) | ||
798 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); | ||
799 | else if (armv7_pmnc_select_counter(idx) == idx) | ||
800 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value)); | ||
826 | 801 | ||
827 | return value; | 802 | return value; |
828 | } | 803 | } |
829 | 804 | ||
830 | static inline void armv7pmu_write_counter(int idx, u32 value) | 805 | static inline void armv7pmu_write_counter(int idx, u32 value) |
831 | { | 806 | { |
832 | if (idx == ARMV7_CYCLE_COUNTER) | 807 | if (!armv7_pmnc_counter_valid(idx)) |
833 | asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); | ||
834 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
835 | if (armv7_pmnc_select_counter(idx) == idx) | ||
836 | asm volatile("mcr p15, 0, %0, c9, c13, 2" | ||
837 | : : "r" (value)); | ||
838 | } else | ||
839 | pr_err("CPU%u writing wrong counter %d\n", | 808 | pr_err("CPU%u writing wrong counter %d\n", |
840 | smp_processor_id(), idx); | 809 | smp_processor_id(), idx); |
810 | else if (idx == ARMV7_IDX_CYCLE_COUNTER) | ||
811 | asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); | ||
812 | else if (armv7_pmnc_select_counter(idx) == idx) | ||
813 | asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value)); | ||
841 | } | 814 | } |
842 | 815 | ||
843 | static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) | 816 | static inline void armv7_pmnc_write_evtsel(int idx, u32 val) |
844 | { | 817 | { |
845 | if (armv7_pmnc_select_counter(idx) == idx) { | 818 | if (armv7_pmnc_select_counter(idx) == idx) { |
846 | val &= ARMV7_EVTSEL_MASK; | 819 | val &= ARMV7_EVTYPE_MASK; |
847 | asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); | 820 | asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); |
848 | } | 821 | } |
849 | } | 822 | } |
850 | 823 | ||
851 | static inline u32 armv7_pmnc_enable_counter(unsigned int idx) | 824 | static inline int armv7_pmnc_enable_counter(int idx) |
852 | { | 825 | { |
853 | u32 val; | 826 | u32 counter; |
854 | 827 | ||
855 | if ((idx != ARMV7_CYCLE_COUNTER) && | 828 | if (!armv7_pmnc_counter_valid(idx)) { |
856 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | 829 | pr_err("CPU%u enabling wrong PMNC counter %d\n", |
857 | pr_err("CPU%u enabling wrong PMNC counter" | 830 | smp_processor_id(), idx); |
858 | " %d\n", smp_processor_id(), idx); | 831 | return -EINVAL; |
859 | return -1; | ||
860 | } | 832 | } |
861 | 833 | ||
862 | if (idx == ARMV7_CYCLE_COUNTER) | 834 | counter = ARMV7_IDX_TO_COUNTER(idx); |
863 | val = ARMV7_CNTENS_C; | 835 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter))); |
864 | else | ||
865 | val = ARMV7_CNTENS_P(idx); | ||
866 | |||
867 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val)); | ||
868 | |||
869 | return idx; | 836 | return idx; |
870 | } | 837 | } |
871 | 838 | ||
872 | static inline u32 armv7_pmnc_disable_counter(unsigned int idx) | 839 | static inline int armv7_pmnc_disable_counter(int idx) |
873 | { | 840 | { |
874 | u32 val; | 841 | u32 counter; |
875 | |||
876 | 842 | ||
877 | if ((idx != ARMV7_CYCLE_COUNTER) && | 843 | if (!armv7_pmnc_counter_valid(idx)) { |
878 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | 844 | pr_err("CPU%u disabling wrong PMNC counter %d\n", |
879 | pr_err("CPU%u disabling wrong PMNC counter" | 845 | smp_processor_id(), idx); |
880 | " %d\n", smp_processor_id(), idx); | 846 | return -EINVAL; |
881 | return -1; | ||
882 | } | 847 | } |
883 | 848 | ||
884 | if (idx == ARMV7_CYCLE_COUNTER) | 849 | counter = ARMV7_IDX_TO_COUNTER(idx); |
885 | val = ARMV7_CNTENC_C; | 850 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter))); |
886 | else | ||
887 | val = ARMV7_CNTENC_P(idx); | ||
888 | |||
889 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val)); | ||
890 | |||
891 | return idx; | 851 | return idx; |
892 | } | 852 | } |
893 | 853 | ||
894 | static inline u32 armv7_pmnc_enable_intens(unsigned int idx) | 854 | static inline int armv7_pmnc_enable_intens(int idx) |
895 | { | 855 | { |
896 | u32 val; | 856 | u32 counter; |
897 | 857 | ||
898 | if ((idx != ARMV7_CYCLE_COUNTER) && | 858 | if (!armv7_pmnc_counter_valid(idx)) { |
899 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | 859 | pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n", |
900 | pr_err("CPU%u enabling wrong PMNC counter" | 860 | smp_processor_id(), idx); |
901 | " interrupt enable %d\n", smp_processor_id(), idx); | 861 | return -EINVAL; |
902 | return -1; | ||
903 | } | 862 | } |
904 | 863 | ||
905 | if (idx == ARMV7_CYCLE_COUNTER) | 864 | counter = ARMV7_IDX_TO_COUNTER(idx); |
906 | val = ARMV7_INTENS_C; | 865 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter))); |
907 | else | ||
908 | val = ARMV7_INTENS_P(idx); | ||
909 | |||
910 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val)); | ||
911 | |||
912 | return idx; | 866 | return idx; |
913 | } | 867 | } |
914 | 868 | ||
915 | static inline u32 armv7_pmnc_disable_intens(unsigned int idx) | 869 | static inline int armv7_pmnc_disable_intens(int idx) |
916 | { | 870 | { |
917 | u32 val; | 871 | u32 counter; |
918 | 872 | ||
919 | if ((idx != ARMV7_CYCLE_COUNTER) && | 873 | if (!armv7_pmnc_counter_valid(idx)) { |
920 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | 874 | pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n", |
921 | pr_err("CPU%u disabling wrong PMNC counter" | 875 | smp_processor_id(), idx); |
922 | " interrupt enable %d\n", smp_processor_id(), idx); | 876 | return -EINVAL; |
923 | return -1; | ||
924 | } | 877 | } |
925 | 878 | ||
926 | if (idx == ARMV7_CYCLE_COUNTER) | 879 | counter = ARMV7_IDX_TO_COUNTER(idx); |
927 | val = ARMV7_INTENC_C; | 880 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter))); |
928 | else | ||
929 | val = ARMV7_INTENC_P(idx); | ||
930 | |||
931 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val)); | ||
932 | |||
933 | return idx; | 881 | return idx; |
934 | } | 882 | } |
935 | 883 | ||
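For readers decoding the mrc/mcr instructions in the helpers above, the cp15 encodings used throughout this file correspond to the following ARMv7 PMU registers (architectural assignments, listed here for convenience rather than taken from this patch):

	/*
	 *   c9, c12, 0   PMCR         control register (armv7_pmnc_read/write)
	 *   c9, c12, 1   PMCNTENSET   counter enable set
	 *   c9, c12, 2   PMCNTENCLR   counter enable clear
	 *   c9, c12, 5   PMSELR       counter select
	 *   c9, c13, 0   PMCCNTR      cycle count register
	 *   c9, c13, 1   PMXEVTYPER   event type for the selected counter
	 *   c9, c13, 2   PMXEVCNTR    value of the selected counter
	 *   c9, c14, 1   PMINTENSET   overflow interrupt enable set
	 *   c9, c14, 2   PMINTENCLR   overflow interrupt enable clear
	 */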
@@ -973,14 +921,14 @@ static void armv7_pmnc_dump_regs(void) | |||
973 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); | 921 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); |
974 | printk(KERN_INFO "CCNT =0x%08x\n", val); | 922 | printk(KERN_INFO "CCNT =0x%08x\n", val); |
975 | 923 | ||
976 | for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) { | 924 | for (cnt = ARMV7_IDX_COUNTER0; cnt <= ARMV7_IDX_COUNTER_LAST; cnt++) { |
977 | armv7_pmnc_select_counter(cnt); | 925 | armv7_pmnc_select_counter(cnt); |
978 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); | 926 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); |
979 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", | 927 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", |
980 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | 928 | ARMV7_IDX_TO_COUNTER(cnt), val); |
981 | asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); | 929 | asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); |
982 | printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", | 930 | printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", |
983 | cnt-ARMV7_EVENT_CNT_TO_CNTx, val); | 931 | ARMV7_IDX_TO_COUNTER(cnt), val); |
984 | } | 932 | } |
985 | } | 933 | } |
986 | #endif | 934 | #endif |
@@ -988,12 +936,13 @@ static void armv7_pmnc_dump_regs(void) | |||
988 | static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | 936 | static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) |
989 | { | 937 | { |
990 | unsigned long flags; | 938 | unsigned long flags; |
939 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
991 | 940 | ||
992 | /* | 941 | /* |
993 | * Enable counter and interrupt, and set the counter to count | 942 | * Enable counter and interrupt, and set the counter to count |
994 | * the event that we're interested in. | 943 | * the event that we're interested in. |
995 | */ | 944 | */ |
996 | raw_spin_lock_irqsave(&pmu_lock, flags); | 945 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
997 | 946 | ||
998 | /* | 947 | /* |
999 | * Disable counter | 948 | * Disable counter |
@@ -1002,9 +951,10 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
1002 | 951 | ||
1003 | /* | 952 | /* |
1004 | * Set event (if destined for PMNx counters) | 953 | * Set event (if destined for PMNx counters) |
1005 | * We don't need to set the event if it's a cycle count | 954 | * We only need to set the event for the cycle counter if we |
955 | * have the ability to perform event filtering. | ||
1006 | */ | 956 | */ |
1007 | if (idx != ARMV7_CYCLE_COUNTER) | 957 | if (armv7pmu.set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER) |
1008 | armv7_pmnc_write_evtsel(idx, hwc->config_base); | 958 | armv7_pmnc_write_evtsel(idx, hwc->config_base); |
1009 | 959 | ||
1010 | /* | 960 | /* |
@@ -1017,17 +967,18 @@ static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
1017 | */ | 967 | */ |
1018 | armv7_pmnc_enable_counter(idx); | 968 | armv7_pmnc_enable_counter(idx); |
1019 | 969 | ||
1020 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 970 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1021 | } | 971 | } |
1022 | 972 | ||
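armv7pmu_enable_event()/armv7pmu_disable_event() now take the lock out of the pmu_hw_events structure returned by cpu_pmu->get_hw_events() instead of a file-scoped pmu_lock, so the locking travels with each PMU's hardware-events state rather than being shared by every PMU in the system. One plausible shape for that hook, sketched from the __get_cpu_var(cpu_hw_events) lookups visible in the interrupt handlers in this patch (the real definition sits in perf_event.c, outside this section, and the helper name here is an assumption):

	#include <linux/percpu.h>
	#include <asm/pmu.h>

	static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);

	static struct pmu_hw_events *armpmu_get_cpu_events(void)
	{
		/* Hand back this CPU's events/used_mask/pmu_lock bundle. */
		return &__get_cpu_var(cpu_hw_events);
	}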
1023 | static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) | 973 | static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) |
1024 | { | 974 | { |
1025 | unsigned long flags; | 975 | unsigned long flags; |
976 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
1026 | 977 | ||
1027 | /* | 978 | /* |
1028 | * Disable counter and interrupt | 979 | * Disable counter and interrupt |
1029 | */ | 980 | */ |
1030 | raw_spin_lock_irqsave(&pmu_lock, flags); | 981 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
1031 | 982 | ||
1032 | /* | 983 | /* |
1033 | * Disable counter | 984 | * Disable counter |
@@ -1039,14 +990,14 @@ static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
1039 | */ | 990 | */ |
1040 | armv7_pmnc_disable_intens(idx); | 991 | armv7_pmnc_disable_intens(idx); |
1041 | 992 | ||
1042 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 993 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1043 | } | 994 | } |
1044 | 995 | ||
1045 | static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | 996 | static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) |
1046 | { | 997 | { |
1047 | unsigned long pmnc; | 998 | u32 pmnc; |
1048 | struct perf_sample_data data; | 999 | struct perf_sample_data data; |
1049 | struct cpu_hw_events *cpuc; | 1000 | struct pmu_hw_events *cpuc; |
1050 | struct pt_regs *regs; | 1001 | struct pt_regs *regs; |
1051 | int idx; | 1002 | int idx; |
1052 | 1003 | ||
@@ -1069,13 +1020,10 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1069 | perf_sample_data_init(&data, 0); | 1020 | perf_sample_data_init(&data, 0); |
1070 | 1021 | ||
1071 | cpuc = &__get_cpu_var(cpu_hw_events); | 1022 | cpuc = &__get_cpu_var(cpu_hw_events); |
1072 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | 1023 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
1073 | struct perf_event *event = cpuc->events[idx]; | 1024 | struct perf_event *event = cpuc->events[idx]; |
1074 | struct hw_perf_event *hwc; | 1025 | struct hw_perf_event *hwc; |
1075 | 1026 | ||
1076 | if (!test_bit(idx, cpuc->active_mask)) | ||
1077 | continue; | ||
1078 | |||
1079 | /* | 1027 | /* |
1080 | * We have a single interrupt for all counters. Check that | 1028 | * We have a single interrupt for all counters. Check that |
1081 | * each counter has overflowed before we process it. | 1029 | * each counter has overflowed before we process it. |
@@ -1090,7 +1038,7 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1090 | continue; | 1038 | continue; |
1091 | 1039 | ||
1092 | if (perf_event_overflow(event, &data, regs)) | 1040 | if (perf_event_overflow(event, &data, regs)) |
1093 | armpmu->disable(hwc, idx); | 1041 | cpu_pmu->disable(hwc, idx); |
1094 | } | 1042 | } |
1095 | 1043 | ||
1096 | /* | 1044 | /* |
@@ -1108,61 +1056,114 @@ static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | |||
1108 | static void armv7pmu_start(void) | 1056 | static void armv7pmu_start(void) |
1109 | { | 1057 | { |
1110 | unsigned long flags; | 1058 | unsigned long flags; |
1059 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
1111 | 1060 | ||
1112 | raw_spin_lock_irqsave(&pmu_lock, flags); | 1061 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
1113 | /* Enable all counters */ | 1062 | /* Enable all counters */ |
1114 | armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); | 1063 | armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); |
1115 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 1064 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1116 | } | 1065 | } |
1117 | 1066 | ||
1118 | static void armv7pmu_stop(void) | 1067 | static void armv7pmu_stop(void) |
1119 | { | 1068 | { |
1120 | unsigned long flags; | 1069 | unsigned long flags; |
1070 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
1121 | 1071 | ||
1122 | raw_spin_lock_irqsave(&pmu_lock, flags); | 1072 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
1123 | /* Disable all counters */ | 1073 | /* Disable all counters */ |
1124 | armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); | 1074 | armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); |
1125 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 1075 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
1126 | } | 1076 | } |
1127 | 1077 | ||
1128 | static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, | 1078 | static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc, |
1129 | struct hw_perf_event *event) | 1079 | struct hw_perf_event *event) |
1130 | { | 1080 | { |
1131 | int idx; | 1081 | int idx; |
1082 | unsigned long evtype = event->config_base & ARMV7_EVTYPE_EVENT; | ||
1132 | 1083 | ||
1133 | /* Always place a cycle counter into the cycle counter. */ | 1084 | /* Always place a cycle counter into the cycle counter. */ |
1134 | if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { | 1085 | if (evtype == ARMV7_PERFCTR_CPU_CYCLES) { |
1135 | if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) | 1086 | if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask)) |
1136 | return -EAGAIN; | 1087 | return -EAGAIN; |
1137 | 1088 | ||
1138 | return ARMV7_CYCLE_COUNTER; | 1089 | return ARMV7_IDX_CYCLE_COUNTER; |
1139 | } else { | 1090 | } |
1140 | /* | ||
1141 | * For anything other than a cycle counter, try and use | ||
1142 | * the events counters | ||
1143 | */ | ||
1144 | for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) { | ||
1145 | if (!test_and_set_bit(idx, cpuc->used_mask)) | ||
1146 | return idx; | ||
1147 | } | ||
1148 | 1091 | ||
1149 | /* The counters are all in use. */ | 1092 | /* |
1150 | return -EAGAIN; | 1093 | * For anything other than a cycle counter, try and use |
1094 | * the events counters | ||
1095 | */ | ||
1096 | for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) { | ||
1097 | if (!test_and_set_bit(idx, cpuc->used_mask)) | ||
1098 | return idx; | ||
1151 | } | 1099 | } |
1100 | |||
1101 | /* The counters are all in use. */ | ||
1102 | return -EAGAIN; | ||
1103 | } | ||
1104 | |||
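Masking with ARMV7_EVTYPE_EVENT before the cycle-counter comparison matters because, with the PMUv2 filters introduced below, hwc->config_base now carries exclusion bits in its upper byte as well as the event number. A small illustration using the constants from this hunk (the example_ name is hypothetical):

	static int example_is_cycle_event(unsigned long config_base)
	{
		/*
		 * With filtering, config_base may be e.g.
		 * ARMV7_EXCLUDE_USER | ARMV7_PERFCTR_CPU_CYCLES, so comparing
		 * the raw value against ARMV7_PERFCTR_CPU_CYCLES would never
		 * match. Stripping the filter bits first, as the code above
		 * does, works for filtered and unfiltered events alike.
		 */
		return (config_base & ARMV7_EVTYPE_EVENT) == ARMV7_PERFCTR_CPU_CYCLES;
	}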
1105 | /* | ||
1106 | * Add an event filter to a given event. This will only work for PMUv2 PMUs. | ||
1107 | */ | ||
1108 | static int armv7pmu_set_event_filter(struct hw_perf_event *event, | ||
1109 | struct perf_event_attr *attr) | ||
1110 | { | ||
1111 | unsigned long config_base = 0; | ||
1112 | |||
1113 | if (attr->exclude_idle) | ||
1114 | return -EPERM; | ||
1115 | if (attr->exclude_user) | ||
1116 | config_base |= ARMV7_EXCLUDE_USER; | ||
1117 | if (attr->exclude_kernel) | ||
1118 | config_base |= ARMV7_EXCLUDE_PL1; | ||
1119 | if (!attr->exclude_hv) | ||
1120 | config_base |= ARMV7_INCLUDE_HYP; | ||
1121 | |||
1122 | /* | ||
1123 | * Install the filter into config_base as this is used to | ||
1124 | * construct the event type. | ||
1125 | */ | ||
1126 | event->config_base = config_base; | ||
1127 | |||
1128 | return 0; | ||
1152 | } | 1129 | } |
1153 | 1130 | ||
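A hypothetical caller's view of the filter above: the standard perf_event_attr exclusion bits are what end up translated into the ARMV7_EXCLUDE_USER / ARMV7_EXCLUDE_PL1 / ARMV7_INCLUDE_HYP bits of the event type register, and exclude_idle has no hardware equivalent, hence the -EPERM.

	#include <linux/perf_event.h>

	/* Illustrative attribute set-up; field names are the standard perf ABI. */
	static struct perf_event_attr example_attr = {
		.type		= PERF_TYPE_HARDWARE,
		.config		= PERF_COUNT_HW_CPU_CYCLES,
		.exclude_user	= 1,	/* sets ARMV7_EXCLUDE_USER (bit 30)      */
		.exclude_kernel	= 0,	/* PL1 counting stays enabled            */
		.exclude_hv	= 1,	/* ARMV7_INCLUDE_HYP (bit 27) is not set */
		.exclude_idle	= 0,	/* must be 0: the PMU cannot filter idle */
	};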
1154 | static void armv7pmu_reset(void *info) | 1131 | static void armv7pmu_reset(void *info) |
1155 | { | 1132 | { |
1156 | u32 idx, nb_cnt = armpmu->num_events; | 1133 | u32 idx, nb_cnt = cpu_pmu->num_events; |
1157 | 1134 | ||
1158 | /* The counter and interrupt enable registers are unknown at reset. */ | 1135 | /* The counter and interrupt enable registers are unknown at reset. */ |
1159 | for (idx = 1; idx < nb_cnt; ++idx) | 1136 | for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) |
1160 | armv7pmu_disable_event(NULL, idx); | 1137 | armv7pmu_disable_event(NULL, idx); |
1161 | 1138 | ||
1162 | /* Initialize & Reset PMNC: C and P bits */ | 1139 | /* Initialize & Reset PMNC: C and P bits */ |
1163 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); | 1140 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); |
1164 | } | 1141 | } |
1165 | 1142 | ||
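Because counters are now indexed from zero, the reset loop above can start at ARMV7_IDX_CYCLE_COUNTER and sweep every counter, cycle counter included. For reference, a sketch of the bring-up ordering these hooks imply; the PMCR bit positions (E = bit 0 global enable, P = bit 1 event-counter reset, C = bit 2 cycle-counter reset) are architectural and assumed rather than spelled out in this hunk:

	static void example_pmu_bringup(void)
	{
		/* Quiesce every counter and zero the counts (PMCR.P | PMCR.C). */
		armv7pmu_reset(NULL);

		/* Program individual events here via armv7pmu_enable_event()... */

		/* ...then set PMCR.E so the enabled counters start ticking. */
		armv7pmu_start();
	}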
1143 | static int armv7_a8_map_event(struct perf_event *event) | ||
1144 | { | ||
1145 | return map_cpu_event(event, &armv7_a8_perf_map, | ||
1146 | &armv7_a8_perf_cache_map, 0xFF); | ||
1147 | } | ||
1148 | |||
1149 | static int armv7_a9_map_event(struct perf_event *event) | ||
1150 | { | ||
1151 | return map_cpu_event(event, &armv7_a9_perf_map, | ||
1152 | &armv7_a9_perf_cache_map, 0xFF); | ||
1153 | } | ||
1154 | |||
1155 | static int armv7_a5_map_event(struct perf_event *event) | ||
1156 | { | ||
1157 | return map_cpu_event(event, &armv7_a5_perf_map, | ||
1158 | &armv7_a5_perf_cache_map, 0xFF); | ||
1159 | } | ||
1160 | |||
1161 | static int armv7_a15_map_event(struct perf_event *event) | ||
1162 | { | ||
1163 | return map_cpu_event(event, &armv7_a15_perf_map, | ||
1164 | &armv7_a15_perf_cache_map, 0xFF); | ||
1165 | } | ||
1166 | |||
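The per-CPU map_event callbacks above replace the old cache_map/event_map/raw_event_mask fields on struct arm_pmu; the literal 0xFF passed to map_cpu_event() is the raw event mask that used to live in .raw_event_mask. The shared map_cpu_event() helper is defined in perf_event.c, outside this section; an assumed, simplified sketch of what such a mapping step does (names prefixed example_ are hypothetical, and the cache-map handling is omitted):

	#include <linux/perf_event.h>
	#include <linux/errno.h>

	static int example_map_event(struct perf_event *event,
				     const unsigned (*event_map)[PERF_COUNT_HW_MAX],
				     u32 raw_event_mask)
	{
		u64 config = event->attr.config;

		switch (event->attr.type) {
		case PERF_TYPE_HARDWARE:
			/* Generic hardware event -> CPU-specific event number. */
			if (config >= PERF_COUNT_HW_MAX)
				return -ENOENT;
			return (*event_map)[config];
		case PERF_TYPE_RAW:
			/* Raw events are simply masked to the writable EVT bits. */
			return config & raw_event_mask;
		default:
			return -ENOENT;
		}
	}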
1166 | static struct arm_pmu armv7pmu = { | 1167 | static struct arm_pmu armv7pmu = { |
1167 | .handle_irq = armv7pmu_handle_irq, | 1168 | .handle_irq = armv7pmu_handle_irq, |
1168 | .enable = armv7pmu_enable_event, | 1169 | .enable = armv7pmu_enable_event, |
@@ -1173,7 +1174,6 @@ static struct arm_pmu armv7pmu = { | |||
1173 | .start = armv7pmu_start, | 1174 | .start = armv7pmu_start, |
1174 | .stop = armv7pmu_stop, | 1175 | .stop = armv7pmu_stop, |
1175 | .reset = armv7pmu_reset, | 1176 | .reset = armv7pmu_reset, |
1176 | .raw_event_mask = 0xFF, | ||
1177 | .max_period = (1LLU << 32) - 1, | 1177 | .max_period = (1LLU << 32) - 1, |
1178 | }; | 1178 | }; |
1179 | 1179 | ||
@@ -1188,62 +1188,59 @@ static u32 __init armv7_read_num_pmnc_events(void) | |||
1188 | return nb_cnt + 1; | 1188 | return nb_cnt + 1; |
1189 | } | 1189 | } |
1190 | 1190 | ||
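The nb_cnt + 1 above is the N event counters reported by the core plus the dedicated cycle counter. The part of the function elided by this hunk reads N from PMCR; a sketch under the assumption that PMCR[15:11] holds the counter count (the ARMv7 architectural layout) and with the shift/mask constants named locally since the real macros are not visible here:

	#define EXAMPLE_PMNC_N_SHIFT	11	/* assumed, per the ARMv7 PMCR layout */
	#define EXAMPLE_PMNC_N_MASK	0x1f	/* assumed */

	static u32 __init example_read_num_pmnc_events(void)
	{
		u32 nb_cnt;

		/* Number of CNTx event counters implemented by this CPU. */
		nb_cnt = (armv7_pmnc_read() >> EXAMPLE_PMNC_N_SHIFT) &
			 EXAMPLE_PMNC_N_MASK;

		/* Add one for the cycle counter. */
		return nb_cnt + 1;
	}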
1191 | static const struct arm_pmu *__init armv7_a8_pmu_init(void) | 1191 | static struct arm_pmu *__init armv7_a8_pmu_init(void) |
1192 | { | 1192 | { |
1193 | armv7pmu.id = ARM_PERF_PMU_ID_CA8; | 1193 | armv7pmu.id = ARM_PERF_PMU_ID_CA8; |
1194 | armv7pmu.name = "ARMv7 Cortex-A8"; | 1194 | armv7pmu.name = "ARMv7 Cortex-A8"; |
1195 | armv7pmu.cache_map = &armv7_a8_perf_cache_map; | 1195 | armv7pmu.map_event = armv7_a8_map_event; |
1196 | armv7pmu.event_map = &armv7_a8_perf_map; | ||
1197 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1196 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
1198 | return &armv7pmu; | 1197 | return &armv7pmu; |
1199 | } | 1198 | } |
1200 | 1199 | ||
1201 | static const struct arm_pmu *__init armv7_a9_pmu_init(void) | 1200 | static struct arm_pmu *__init armv7_a9_pmu_init(void) |
1202 | { | 1201 | { |
1203 | armv7pmu.id = ARM_PERF_PMU_ID_CA9; | 1202 | armv7pmu.id = ARM_PERF_PMU_ID_CA9; |
1204 | armv7pmu.name = "ARMv7 Cortex-A9"; | 1203 | armv7pmu.name = "ARMv7 Cortex-A9"; |
1205 | armv7pmu.cache_map = &armv7_a9_perf_cache_map; | 1204 | armv7pmu.map_event = armv7_a9_map_event; |
1206 | armv7pmu.event_map = &armv7_a9_perf_map; | ||
1207 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1205 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
1208 | return &armv7pmu; | 1206 | return &armv7pmu; |
1209 | } | 1207 | } |
1210 | 1208 | ||
1211 | static const struct arm_pmu *__init armv7_a5_pmu_init(void) | 1209 | static struct arm_pmu *__init armv7_a5_pmu_init(void) |
1212 | { | 1210 | { |
1213 | armv7pmu.id = ARM_PERF_PMU_ID_CA5; | 1211 | armv7pmu.id = ARM_PERF_PMU_ID_CA5; |
1214 | armv7pmu.name = "ARMv7 Cortex-A5"; | 1212 | armv7pmu.name = "ARMv7 Cortex-A5"; |
1215 | armv7pmu.cache_map = &armv7_a5_perf_cache_map; | 1213 | armv7pmu.map_event = armv7_a5_map_event; |
1216 | armv7pmu.event_map = &armv7_a5_perf_map; | ||
1217 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1214 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
1218 | return &armv7pmu; | 1215 | return &armv7pmu; |
1219 | } | 1216 | } |
1220 | 1217 | ||
1221 | static const struct arm_pmu *__init armv7_a15_pmu_init(void) | 1218 | static struct arm_pmu *__init armv7_a15_pmu_init(void) |
1222 | { | 1219 | { |
1223 | armv7pmu.id = ARM_PERF_PMU_ID_CA15; | 1220 | armv7pmu.id = ARM_PERF_PMU_ID_CA15; |
1224 | armv7pmu.name = "ARMv7 Cortex-A15"; | 1221 | armv7pmu.name = "ARMv7 Cortex-A15"; |
1225 | armv7pmu.cache_map = &armv7_a15_perf_cache_map; | 1222 | armv7pmu.map_event = armv7_a15_map_event; |
1226 | armv7pmu.event_map = &armv7_a15_perf_map; | ||
1227 | armv7pmu.num_events = armv7_read_num_pmnc_events(); | 1223 | armv7pmu.num_events = armv7_read_num_pmnc_events(); |
1224 | armv7pmu.set_event_filter = armv7pmu_set_event_filter; | ||
1228 | return &armv7pmu; | 1225 | return &armv7pmu; |
1229 | } | 1226 | } |
1230 | #else | 1227 | #else |
1231 | static const struct arm_pmu *__init armv7_a8_pmu_init(void) | 1228 | static struct arm_pmu *__init armv7_a8_pmu_init(void) |
1232 | { | 1229 | { |
1233 | return NULL; | 1230 | return NULL; |
1234 | } | 1231 | } |
1235 | 1232 | ||
1236 | static const struct arm_pmu *__init armv7_a9_pmu_init(void) | 1233 | static struct arm_pmu *__init armv7_a9_pmu_init(void) |
1237 | { | 1234 | { |
1238 | return NULL; | 1235 | return NULL; |
1239 | } | 1236 | } |
1240 | 1237 | ||
1241 | static const struct arm_pmu *__init armv7_a5_pmu_init(void) | 1238 | static struct arm_pmu *__init armv7_a5_pmu_init(void) |
1242 | { | 1239 | { |
1243 | return NULL; | 1240 | return NULL; |
1244 | } | 1241 | } |
1245 | 1242 | ||
1246 | static const struct arm_pmu *__init armv7_a15_pmu_init(void) | 1243 | static struct arm_pmu *__init armv7_a15_pmu_init(void) |
1247 | { | 1244 | { |
1248 | return NULL; | 1245 | return NULL; |
1249 | } | 1246 | } |
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c index 3c4397491d08..e0cca10a8411 100644 --- a/arch/arm/kernel/perf_event_xscale.c +++ b/arch/arm/kernel/perf_event_xscale.c | |||
@@ -40,7 +40,7 @@ enum xscale_perf_types { | |||
40 | }; | 40 | }; |
41 | 41 | ||
42 | enum xscale_counters { | 42 | enum xscale_counters { |
43 | XSCALE_CYCLE_COUNTER = 1, | 43 | XSCALE_CYCLE_COUNTER = 0, |
44 | XSCALE_COUNTER0, | 44 | XSCALE_COUNTER0, |
45 | XSCALE_COUNTER1, | 45 | XSCALE_COUNTER1, |
46 | XSCALE_COUNTER2, | 46 | XSCALE_COUNTER2, |
@@ -222,7 +222,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
222 | { | 222 | { |
223 | unsigned long pmnc; | 223 | unsigned long pmnc; |
224 | struct perf_sample_data data; | 224 | struct perf_sample_data data; |
225 | struct cpu_hw_events *cpuc; | 225 | struct pmu_hw_events *cpuc; |
226 | struct pt_regs *regs; | 226 | struct pt_regs *regs; |
227 | int idx; | 227 | int idx; |
228 | 228 | ||
@@ -249,13 +249,10 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
249 | perf_sample_data_init(&data, 0); | 249 | perf_sample_data_init(&data, 0); |
250 | 250 | ||
251 | cpuc = &__get_cpu_var(cpu_hw_events); | 251 | cpuc = &__get_cpu_var(cpu_hw_events); |
252 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | 252 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
253 | struct perf_event *event = cpuc->events[idx]; | 253 | struct perf_event *event = cpuc->events[idx]; |
254 | struct hw_perf_event *hwc; | 254 | struct hw_perf_event *hwc; |
255 | 255 | ||
256 | if (!test_bit(idx, cpuc->active_mask)) | ||
257 | continue; | ||
258 | |||
259 | if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) | 256 | if (!xscale1_pmnc_counter_has_overflowed(pmnc, idx)) |
260 | continue; | 257 | continue; |
261 | 258 | ||
@@ -266,7 +263,7 @@ xscale1pmu_handle_irq(int irq_num, void *dev) | |||
266 | continue; | 263 | continue; |
267 | 264 | ||
268 | if (perf_event_overflow(event, &data, regs)) | 265 | if (perf_event_overflow(event, &data, regs)) |
269 | armpmu->disable(hwc, idx); | 266 | cpu_pmu->disable(hwc, idx); |
270 | } | 267 | } |
271 | 268 | ||
272 | irq_work_run(); | 269 | irq_work_run(); |
@@ -284,6 +281,7 @@ static void | |||
284 | xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) | 281 | xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) |
285 | { | 282 | { |
286 | unsigned long val, mask, evt, flags; | 283 | unsigned long val, mask, evt, flags; |
284 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
287 | 285 | ||
288 | switch (idx) { | 286 | switch (idx) { |
289 | case XSCALE_CYCLE_COUNTER: | 287 | case XSCALE_CYCLE_COUNTER: |
@@ -305,18 +303,19 @@ xscale1pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
305 | return; | 303 | return; |
306 | } | 304 | } |
307 | 305 | ||
308 | raw_spin_lock_irqsave(&pmu_lock, flags); | 306 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
309 | val = xscale1pmu_read_pmnc(); | 307 | val = xscale1pmu_read_pmnc(); |
310 | val &= ~mask; | 308 | val &= ~mask; |
311 | val |= evt; | 309 | val |= evt; |
312 | xscale1pmu_write_pmnc(val); | 310 | xscale1pmu_write_pmnc(val); |
313 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 311 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
314 | } | 312 | } |
315 | 313 | ||
316 | static void | 314 | static void |
317 | xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) | 315 | xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) |
318 | { | 316 | { |
319 | unsigned long val, mask, evt, flags; | 317 | unsigned long val, mask, evt, flags; |
318 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
320 | 319 | ||
321 | switch (idx) { | 320 | switch (idx) { |
322 | case XSCALE_CYCLE_COUNTER: | 321 | case XSCALE_CYCLE_COUNTER: |
@@ -336,16 +335,16 @@ xscale1pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
336 | return; | 335 | return; |
337 | } | 336 | } |
338 | 337 | ||
339 | raw_spin_lock_irqsave(&pmu_lock, flags); | 338 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
340 | val = xscale1pmu_read_pmnc(); | 339 | val = xscale1pmu_read_pmnc(); |
341 | val &= ~mask; | 340 | val &= ~mask; |
342 | val |= evt; | 341 | val |= evt; |
343 | xscale1pmu_write_pmnc(val); | 342 | xscale1pmu_write_pmnc(val); |
344 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 343 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
345 | } | 344 | } |
346 | 345 | ||
347 | static int | 346 | static int |
348 | xscale1pmu_get_event_idx(struct cpu_hw_events *cpuc, | 347 | xscale1pmu_get_event_idx(struct pmu_hw_events *cpuc, |
349 | struct hw_perf_event *event) | 348 | struct hw_perf_event *event) |
350 | { | 349 | { |
351 | if (XSCALE_PERFCTR_CCNT == event->config_base) { | 350 | if (XSCALE_PERFCTR_CCNT == event->config_base) { |
@@ -368,24 +367,26 @@ static void | |||
368 | xscale1pmu_start(void) | 367 | xscale1pmu_start(void) |
369 | { | 368 | { |
370 | unsigned long flags, val; | 369 | unsigned long flags, val; |
370 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
371 | 371 | ||
372 | raw_spin_lock_irqsave(&pmu_lock, flags); | 372 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
373 | val = xscale1pmu_read_pmnc(); | 373 | val = xscale1pmu_read_pmnc(); |
374 | val |= XSCALE_PMU_ENABLE; | 374 | val |= XSCALE_PMU_ENABLE; |
375 | xscale1pmu_write_pmnc(val); | 375 | xscale1pmu_write_pmnc(val); |
376 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 376 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
377 | } | 377 | } |
378 | 378 | ||
379 | static void | 379 | static void |
380 | xscale1pmu_stop(void) | 380 | xscale1pmu_stop(void) |
381 | { | 381 | { |
382 | unsigned long flags, val; | 382 | unsigned long flags, val; |
383 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
383 | 384 | ||
384 | raw_spin_lock_irqsave(&pmu_lock, flags); | 385 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
385 | val = xscale1pmu_read_pmnc(); | 386 | val = xscale1pmu_read_pmnc(); |
386 | val &= ~XSCALE_PMU_ENABLE; | 387 | val &= ~XSCALE_PMU_ENABLE; |
387 | xscale1pmu_write_pmnc(val); | 388 | xscale1pmu_write_pmnc(val); |
388 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 389 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
389 | } | 390 | } |
390 | 391 | ||
391 | static inline u32 | 392 | static inline u32 |
@@ -424,7 +425,13 @@ xscale1pmu_write_counter(int counter, u32 val) | |||
424 | } | 425 | } |
425 | } | 426 | } |
426 | 427 | ||
427 | static const struct arm_pmu xscale1pmu = { | 428 | static int xscale_map_event(struct perf_event *event) |
429 | { | ||
430 | return map_cpu_event(event, &xscale_perf_map, | ||
431 | &xscale_perf_cache_map, 0xFF); | ||
432 | } | ||
433 | |||
434 | static struct arm_pmu xscale1pmu = { | ||
428 | .id = ARM_PERF_PMU_ID_XSCALE1, | 435 | .id = ARM_PERF_PMU_ID_XSCALE1, |
429 | .name = "xscale1", | 436 | .name = "xscale1", |
430 | .handle_irq = xscale1pmu_handle_irq, | 437 | .handle_irq = xscale1pmu_handle_irq, |
@@ -435,14 +442,12 @@ static const struct arm_pmu xscale1pmu = { | |||
435 | .get_event_idx = xscale1pmu_get_event_idx, | 442 | .get_event_idx = xscale1pmu_get_event_idx, |
436 | .start = xscale1pmu_start, | 443 | .start = xscale1pmu_start, |
437 | .stop = xscale1pmu_stop, | 444 | .stop = xscale1pmu_stop, |
438 | .cache_map = &xscale_perf_cache_map, | 445 | .map_event = xscale_map_event, |
439 | .event_map = &xscale_perf_map, | ||
440 | .raw_event_mask = 0xFF, | ||
441 | .num_events = 3, | 446 | .num_events = 3, |
442 | .max_period = (1LLU << 32) - 1, | 447 | .max_period = (1LLU << 32) - 1, |
443 | }; | 448 | }; |
444 | 449 | ||
445 | static const struct arm_pmu *__init xscale1pmu_init(void) | 450 | static struct arm_pmu *__init xscale1pmu_init(void) |
446 | { | 451 | { |
447 | return &xscale1pmu; | 452 | return &xscale1pmu; |
448 | } | 453 | } |
@@ -560,7 +565,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
560 | { | 565 | { |
561 | unsigned long pmnc, of_flags; | 566 | unsigned long pmnc, of_flags; |
562 | struct perf_sample_data data; | 567 | struct perf_sample_data data; |
563 | struct cpu_hw_events *cpuc; | 568 | struct pmu_hw_events *cpuc; |
564 | struct pt_regs *regs; | 569 | struct pt_regs *regs; |
565 | int idx; | 570 | int idx; |
566 | 571 | ||
@@ -581,13 +586,10 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
581 | perf_sample_data_init(&data, 0); | 586 | perf_sample_data_init(&data, 0); |
582 | 587 | ||
583 | cpuc = &__get_cpu_var(cpu_hw_events); | 588 | cpuc = &__get_cpu_var(cpu_hw_events); |
584 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | 589 | for (idx = 0; idx < cpu_pmu->num_events; ++idx) { |
585 | struct perf_event *event = cpuc->events[idx]; | 590 | struct perf_event *event = cpuc->events[idx]; |
586 | struct hw_perf_event *hwc; | 591 | struct hw_perf_event *hwc; |
587 | 592 | ||
588 | if (!test_bit(idx, cpuc->active_mask)) | ||
589 | continue; | ||
590 | |||
591 | if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx)) | 593 | if (!xscale2_pmnc_counter_has_overflowed(pmnc, idx)) |
592 | continue; | 594 | continue; |
593 | 595 | ||
@@ -598,7 +600,7 @@ xscale2pmu_handle_irq(int irq_num, void *dev) | |||
598 | continue; | 600 | continue; |
599 | 601 | ||
600 | if (perf_event_overflow(event, &data, regs)) | 602 | if (perf_event_overflow(event, &data, regs)) |
601 | armpmu->disable(hwc, idx); | 603 | cpu_pmu->disable(hwc, idx); |
602 | } | 604 | } |
603 | 605 | ||
604 | irq_work_run(); | 606 | irq_work_run(); |
@@ -616,6 +618,7 @@ static void | |||
616 | xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | 618 | xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) |
617 | { | 619 | { |
618 | unsigned long flags, ien, evtsel; | 620 | unsigned long flags, ien, evtsel; |
621 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
619 | 622 | ||
620 | ien = xscale2pmu_read_int_enable(); | 623 | ien = xscale2pmu_read_int_enable(); |
621 | evtsel = xscale2pmu_read_event_select(); | 624 | evtsel = xscale2pmu_read_event_select(); |
@@ -649,16 +652,17 @@ xscale2pmu_enable_event(struct hw_perf_event *hwc, int idx) | |||
649 | return; | 652 | return; |
650 | } | 653 | } |
651 | 654 | ||
652 | raw_spin_lock_irqsave(&pmu_lock, flags); | 655 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
653 | xscale2pmu_write_event_select(evtsel); | 656 | xscale2pmu_write_event_select(evtsel); |
654 | xscale2pmu_write_int_enable(ien); | 657 | xscale2pmu_write_int_enable(ien); |
655 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 658 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
656 | } | 659 | } |
657 | 660 | ||
658 | static void | 661 | static void |
659 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | 662 | xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) |
660 | { | 663 | { |
661 | unsigned long flags, ien, evtsel; | 664 | unsigned long flags, ien, evtsel; |
665 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
662 | 666 | ||
663 | ien = xscale2pmu_read_int_enable(); | 667 | ien = xscale2pmu_read_int_enable(); |
664 | evtsel = xscale2pmu_read_event_select(); | 668 | evtsel = xscale2pmu_read_event_select(); |
@@ -692,14 +696,14 @@ xscale2pmu_disable_event(struct hw_perf_event *hwc, int idx) | |||
692 | return; | 696 | return; |
693 | } | 697 | } |
694 | 698 | ||
695 | raw_spin_lock_irqsave(&pmu_lock, flags); | 699 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
696 | xscale2pmu_write_event_select(evtsel); | 700 | xscale2pmu_write_event_select(evtsel); |
697 | xscale2pmu_write_int_enable(ien); | 701 | xscale2pmu_write_int_enable(ien); |
698 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 702 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
699 | } | 703 | } |
700 | 704 | ||
701 | static int | 705 | static int |
702 | xscale2pmu_get_event_idx(struct cpu_hw_events *cpuc, | 706 | xscale2pmu_get_event_idx(struct pmu_hw_events *cpuc, |
703 | struct hw_perf_event *event) | 707 | struct hw_perf_event *event) |
704 | { | 708 | { |
705 | int idx = xscale1pmu_get_event_idx(cpuc, event); | 709 | int idx = xscale1pmu_get_event_idx(cpuc, event); |
@@ -718,24 +722,26 @@ static void | |||
718 | xscale2pmu_start(void) | 722 | xscale2pmu_start(void) |
719 | { | 723 | { |
720 | unsigned long flags, val; | 724 | unsigned long flags, val; |
725 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
721 | 726 | ||
722 | raw_spin_lock_irqsave(&pmu_lock, flags); | 727 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
723 | val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; | 728 | val = xscale2pmu_read_pmnc() & ~XSCALE_PMU_CNT64; |
724 | val |= XSCALE_PMU_ENABLE; | 729 | val |= XSCALE_PMU_ENABLE; |
725 | xscale2pmu_write_pmnc(val); | 730 | xscale2pmu_write_pmnc(val); |
726 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 731 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
727 | } | 732 | } |
728 | 733 | ||
729 | static void | 734 | static void |
730 | xscale2pmu_stop(void) | 735 | xscale2pmu_stop(void) |
731 | { | 736 | { |
732 | unsigned long flags, val; | 737 | unsigned long flags, val; |
738 | struct pmu_hw_events *events = cpu_pmu->get_hw_events(); | ||
733 | 739 | ||
734 | raw_spin_lock_irqsave(&pmu_lock, flags); | 740 | raw_spin_lock_irqsave(&events->pmu_lock, flags); |
735 | val = xscale2pmu_read_pmnc(); | 741 | val = xscale2pmu_read_pmnc(); |
736 | val &= ~XSCALE_PMU_ENABLE; | 742 | val &= ~XSCALE_PMU_ENABLE; |
737 | xscale2pmu_write_pmnc(val); | 743 | xscale2pmu_write_pmnc(val); |
738 | raw_spin_unlock_irqrestore(&pmu_lock, flags); | 744 | raw_spin_unlock_irqrestore(&events->pmu_lock, flags); |
739 | } | 745 | } |
740 | 746 | ||
741 | static inline u32 | 747 | static inline u32 |
@@ -786,7 +792,7 @@ xscale2pmu_write_counter(int counter, u32 val) | |||
786 | } | 792 | } |
787 | } | 793 | } |
788 | 794 | ||
789 | static const struct arm_pmu xscale2pmu = { | 795 | static struct arm_pmu xscale2pmu = { |
790 | .id = ARM_PERF_PMU_ID_XSCALE2, | 796 | .id = ARM_PERF_PMU_ID_XSCALE2, |
791 | .name = "xscale2", | 797 | .name = "xscale2", |
792 | .handle_irq = xscale2pmu_handle_irq, | 798 | .handle_irq = xscale2pmu_handle_irq, |
@@ -797,24 +803,22 @@ static const struct arm_pmu xscale2pmu = { | |||
797 | .get_event_idx = xscale2pmu_get_event_idx, | 803 | .get_event_idx = xscale2pmu_get_event_idx, |
798 | .start = xscale2pmu_start, | 804 | .start = xscale2pmu_start, |
799 | .stop = xscale2pmu_stop, | 805 | .stop = xscale2pmu_stop, |
800 | .cache_map = &xscale_perf_cache_map, | 806 | .map_event = xscale_map_event, |
801 | .event_map = &xscale_perf_map, | ||
802 | .raw_event_mask = 0xFF, | ||
803 | .num_events = 5, | 807 | .num_events = 5, |
804 | .max_period = (1LLU << 32) - 1, | 808 | .max_period = (1LLU << 32) - 1, |
805 | }; | 809 | }; |
806 | 810 | ||
807 | static const struct arm_pmu *__init xscale2pmu_init(void) | 811 | static struct arm_pmu *__init xscale2pmu_init(void) |
808 | { | 812 | { |
809 | return &xscale2pmu; | 813 | return &xscale2pmu; |
810 | } | 814 | } |
811 | #else | 815 | #else |
812 | static const struct arm_pmu *__init xscale1pmu_init(void) | 816 | static struct arm_pmu *__init xscale1pmu_init(void) |
813 | { | 817 | { |
814 | return NULL; | 818 | return NULL; |
815 | } | 819 | } |
816 | 820 | ||
817 | static const struct arm_pmu *__init xscale2pmu_init(void) | 821 | static struct arm_pmu *__init xscale2pmu_init(void) |
818 | { | 822 | { |
819 | return NULL; | 823 | return NULL; |
820 | } | 824 | } |
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c index c53474fe84df..2c3407ee8576 100644 --- a/arch/arm/kernel/pmu.c +++ b/arch/arm/kernel/pmu.c | |||
@@ -10,192 +10,26 @@ | |||
10 | * | 10 | * |
11 | */ | 11 | */ |
12 | 12 | ||
13 | #define pr_fmt(fmt) "PMU: " fmt | ||
14 | |||
15 | #include <linux/cpumask.h> | ||
16 | #include <linux/err.h> | 13 | #include <linux/err.h> |
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/kernel.h> | 14 | #include <linux/kernel.h> |
19 | #include <linux/module.h> | 15 | #include <linux/module.h> |
20 | #include <linux/of_device.h> | ||
21 | #include <linux/platform_device.h> | ||
22 | 16 | ||
23 | #include <asm/pmu.h> | 17 | #include <asm/pmu.h> |
24 | 18 | ||
25 | static volatile long pmu_lock; | 19 | /* |
26 | 20 | * PMU locking to ensure mutual exclusion between different subsystems. | |
27 | static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES]; | 21 | */ |
28 | 22 | static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)]; | |
29 | static int __devinit pmu_register(struct platform_device *pdev, | ||
30 | enum arm_pmu_type type) | ||
31 | { | ||
32 | if (type < 0 || type >= ARM_NUM_PMU_DEVICES) { | ||
33 | pr_warning("received registration request for unknown " | ||
34 | "PMU device type %d\n", type); | ||
35 | return -EINVAL; | ||
36 | } | ||
37 | |||
38 | if (pmu_devices[type]) { | ||
39 | pr_warning("rejecting duplicate registration of PMU device " | ||
40 | "type %d.", type); | ||
41 | return -ENOSPC; | ||
42 | } | ||
43 | |||
44 | pr_info("registered new PMU device of type %d\n", type); | ||
45 | pmu_devices[type] = pdev; | ||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | #define OF_MATCH_PMU(_name, _type) { \ | ||
50 | .compatible = _name, \ | ||
51 | .data = (void *)_type, \ | ||
52 | } | ||
53 | |||
54 | #define OF_MATCH_CPU(name) OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU) | ||
55 | |||
56 | static struct of_device_id armpmu_of_device_ids[] = { | ||
57 | OF_MATCH_CPU("arm,cortex-a9-pmu"), | ||
58 | OF_MATCH_CPU("arm,cortex-a8-pmu"), | ||
59 | OF_MATCH_CPU("arm,arm1136-pmu"), | ||
60 | OF_MATCH_CPU("arm,arm1176-pmu"), | ||
61 | {}, | ||
62 | }; | ||
63 | |||
64 | #define PLAT_MATCH_PMU(_name, _type) { \ | ||
65 | .name = _name, \ | ||
66 | .driver_data = _type, \ | ||
67 | } | ||
68 | |||
69 | #define PLAT_MATCH_CPU(_name) PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU) | ||
70 | |||
71 | static struct platform_device_id armpmu_plat_device_ids[] = { | ||
72 | PLAT_MATCH_CPU("arm-pmu"), | ||
73 | {}, | ||
74 | }; | ||
75 | |||
76 | enum arm_pmu_type armpmu_device_type(struct platform_device *pdev) | ||
77 | { | ||
78 | const struct of_device_id *of_id; | ||
79 | const struct platform_device_id *pdev_id; | ||
80 | |||
81 | /* provided by of_device_id table */ | ||
82 | if (pdev->dev.of_node) { | ||
83 | of_id = of_match_device(armpmu_of_device_ids, &pdev->dev); | ||
84 | BUG_ON(!of_id); | ||
85 | return (enum arm_pmu_type)of_id->data; | ||
86 | } | ||
87 | |||
88 | /* Provided by platform_device_id table */ | ||
89 | pdev_id = platform_get_device_id(pdev); | ||
90 | BUG_ON(!pdev_id); | ||
91 | return pdev_id->driver_data; | ||
92 | } | ||
93 | |||
94 | static int __devinit armpmu_device_probe(struct platform_device *pdev) | ||
95 | { | ||
96 | return pmu_register(pdev, armpmu_device_type(pdev)); | ||
97 | } | ||
98 | |||
99 | static struct platform_driver armpmu_driver = { | ||
100 | .driver = { | ||
101 | .name = "arm-pmu", | ||
102 | .of_match_table = armpmu_of_device_ids, | ||
103 | }, | ||
104 | .probe = armpmu_device_probe, | ||
105 | .id_table = armpmu_plat_device_ids, | ||
106 | }; | ||
107 | |||
108 | static int __init register_pmu_driver(void) | ||
109 | { | ||
110 | return platform_driver_register(&armpmu_driver); | ||
111 | } | ||
112 | device_initcall(register_pmu_driver); | ||
113 | 23 | ||
114 | struct platform_device * | 24 | int |
115 | reserve_pmu(enum arm_pmu_type type) | 25 | reserve_pmu(enum arm_pmu_type type) |
116 | { | 26 | { |
117 | struct platform_device *pdev; | 27 | return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0; |
118 | |||
119 | if (test_and_set_bit_lock(type, &pmu_lock)) { | ||
120 | pdev = ERR_PTR(-EBUSY); | ||
121 | } else if (pmu_devices[type] == NULL) { | ||
122 | clear_bit_unlock(type, &pmu_lock); | ||
123 | pdev = ERR_PTR(-ENODEV); | ||
124 | } else { | ||
125 | pdev = pmu_devices[type]; | ||
126 | } | ||
127 | |||
128 | return pdev; | ||
129 | } | 28 | } |
130 | EXPORT_SYMBOL_GPL(reserve_pmu); | 29 | EXPORT_SYMBOL_GPL(reserve_pmu); |
131 | 30 | ||
132 | int | 31 | void |
133 | release_pmu(enum arm_pmu_type type) | 32 | release_pmu(enum arm_pmu_type type) |
134 | { | 33 | { |
135 | if (WARN_ON(!pmu_devices[type])) | 34 | clear_bit_unlock(type, pmu_lock); |
136 | return -EINVAL; | ||
137 | clear_bit_unlock(type, &pmu_lock); | ||
138 | return 0; | ||
139 | } | ||
140 | EXPORT_SYMBOL_GPL(release_pmu); | ||
141 | |||
142 | static int | ||
143 | set_irq_affinity(int irq, | ||
144 | unsigned int cpu) | ||
145 | { | ||
146 | #ifdef CONFIG_SMP | ||
147 | int err = irq_set_affinity(irq, cpumask_of(cpu)); | ||
148 | if (err) | ||
149 | pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", | ||
150 | irq, cpu); | ||
151 | return err; | ||
152 | #else | ||
153 | return -EINVAL; | ||
154 | #endif | ||
155 | } | ||
156 | |||
157 | static int | ||
158 | init_cpu_pmu(void) | ||
159 | { | ||
160 | int i, irqs, err = 0; | ||
161 | struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU]; | ||
162 | |||
163 | if (!pdev) | ||
164 | return -ENODEV; | ||
165 | |||
166 | irqs = pdev->num_resources; | ||
167 | |||
168 | /* | ||
169 | * If we have a single PMU interrupt that we can't shift, assume that | ||
170 | * we're running on a uniprocessor machine and continue. | ||
171 | */ | ||
172 | if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0))) | ||
173 | return 0; | ||
174 | |||
175 | for (i = 0; i < irqs; ++i) { | ||
176 | err = set_irq_affinity(platform_get_irq(pdev, i), i); | ||
177 | if (err) | ||
178 | break; | ||
179 | } | ||
180 | |||
181 | return err; | ||
182 | } | ||
183 | |||
184 | int | ||
185 | init_pmu(enum arm_pmu_type type) | ||
186 | { | ||
187 | int err = 0; | ||
188 | |||
189 | switch (type) { | ||
190 | case ARM_PMU_DEVICE_CPU: | ||
191 | err = init_cpu_pmu(); | ||
192 | break; | ||
193 | default: | ||
194 | pr_warning("attempt to initialise PMU of unknown " | ||
195 | "type %d\n", type); | ||
196 | err = -EINVAL; | ||
197 | } | ||
198 | |||
199 | return err; | ||
200 | } | 35 | } |
201 | EXPORT_SYMBOL_GPL(init_pmu); | ||
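With pmu.c reduced to the bitmap above, reservation becomes a plain claim/release pair: reserve_pmu() returns 0 or -EBUSY and release_pmu() returns nothing, so callers no longer receive or hand back a platform_device cookie. A minimal, hypothetical caller:

	#include <asm/pmu.h>

	static int example_claim_cpu_pmu(void)
	{
		int err = reserve_pmu(ARM_PMU_DEVICE_CPU);

		if (err) {
			/* Another profiling agent (e.g. OProfile) holds the PMU. */
			return err;
		}

		/* ... program and read the counters ... */

		release_pmu(ARM_PMU_DEVICE_CPU);
		return 0;
	}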