author	Punit Agrawal <punit.agrawal@arm.com>	2013-08-22 09:41:51 -0400
committer	Will Deacon <will.deacon@arm.com>	2013-09-17 07:02:16 -0400
commit	b91c8f284acc2cb2aa43a1ce58322573ad983a14 (patch)
tree	5697906c0add1e6e2631dc14e22cdaf05f06b307 /drivers/bus/arm-cci.c
parent	83bc10a2754bf9f2e373fb884dd63ac061453187 (diff)
drivers: CCI: add ARM CCI PMU support
Extend the existing CCI driver to support the PMU by registering a perf
backend for it.
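As a usage sketch: once registered, the PMU shows up as a dynamic perf event
source, and an event is selected by an 8-bit config value - bits 7:5 pick the
port, bits 4:0 the event code (0xff counts cycles). A minimal userspace
example, assuming the dynamic type id is exported under the PMU name
"CCI_400" in sysfs; the port and event code below are purely illustrative:

/*
 * Sketch: count one CCI PMU event system-wide via perf_event_open(2).
 * Assumes the dynamic PMU type id is readable at the sysfs path below;
 * the chosen port/event code is illustrative only.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;

	FILE *f = fopen("/sys/bus/event_source/devices/CCI_400/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	/* config[7:5] = port, config[4:0] = event code */
	attr.config = (0 << 5) | 0x01;	/* slave port S0, code 0x01 */

	/* Uncore counters are counted system-wide: pid = -1, one CPU */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	ioctl(fd, PERF_EVENT_IOC_RESET, 0);
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	read(fd, &count, sizeof(count));
	printf("CCI event 0x%02x: %llu\n",
	       (unsigned)attr.config, (unsigned long long)count);

	close(fd);
	return 0;
}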
Cc: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
Cc: Nicolas Pitre <nico@linaro.org>
Cc: Dave Martin <dave.martin@linaro.org>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Punit Agrawal <punit.agrawal@arm.com>
[will: removed broken __init annotations]
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'drivers/bus/arm-cci.c')
-rw-r--r--	drivers/bus/arm-cci.c	| 636
1 file changed, 632 insertions, 4 deletions
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 200926699778..dc6528e8b8fb 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -18,11 +18,21 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
 #include <asm/smp_plat.h>
 
+#define DRIVER_NAME		"CCI-400"
+#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"
+#define PMU_NAME		"CCI_400"
+
 #define CCI_PORT_CTRL		0x0
 #define CCI_CTRL_STATUS		0xc
 
@@ -54,6 +64,587 @@ static unsigned int nb_cci_ports;
 static void __iomem *cci_ctrl_base;
 static unsigned long cci_ctrl_phys;
 
+#ifdef CONFIG_HW_PERF_EVENTS
+
+#define CCI_PMCR		0x0100
+#define CCI_PID2		0x0fe8
+
+#define CCI_PMCR_CEN		0x00000001
+#define CCI_PMCR_NCNT_MASK	0x0000f800
+#define CCI_PMCR_NCNT_SHIFT	11
+
+#define CCI_PID2_REV_MASK	0xf0
+#define CCI_PID2_REV_SHIFT	4
+
+/* Port ids */
+#define CCI_PORT_S0	0
+#define CCI_PORT_S1	1
+#define CCI_PORT_S2	2
+#define CCI_PORT_S3	3
+#define CCI_PORT_S4	4
+#define CCI_PORT_M0	5
+#define CCI_PORT_M1	6
+#define CCI_PORT_M2	7
+
+#define CCI_REV_R0	0
+#define CCI_REV_R1	1
+#define CCI_REV_R0_P4	4
+#define CCI_REV_R1_P2	6
+
+#define CCI_PMU_EVT_SEL		0x000
+#define CCI_PMU_CNTR		0x004
+#define CCI_PMU_CNTR_CTRL	0x008
+#define CCI_PMU_OVRFLW		0x00c
+
+#define CCI_PMU_OVRFLW_FLAG	1
+
+#define CCI_PMU_CNTR_BASE(idx)	((idx) * SZ_4K)
+
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+	CCI_PMU_CYCLES = 0xff
+};
+
+#define CCI_PMU_EVENT_MASK		0xff
+#define CCI_PMU_EVENT_SOURCE(event)	((event >> 5) & 0x7)
+#define CCI_PMU_EVENT_CODE(event)	(event & 0x1f)
+
+#define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */
+
+#define CCI_PMU_CYCLE_CNTR_IDX		0
+#define CCI_PMU_CNTR0_IDX		1
+#define CCI_PMU_CNTR_LAST(cci_pmu)	(CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
+
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
+
+#define CCI_REV_R0_SLAVE_PORT_MIN_EV	0x00
+#define CCI_REV_R0_SLAVE_PORT_MAX_EV	0x13
+#define CCI_REV_R0_MASTER_PORT_MIN_EV	0x14
+#define CCI_REV_R0_MASTER_PORT_MAX_EV	0x1a
+
+#define CCI_REV_R1_SLAVE_PORT_MIN_EV	0x00
+#define CCI_REV_R1_SLAVE_PORT_MAX_EV	0x14
+#define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
+#define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11
+
+struct pmu_port_event_ranges {
+	u8 slave_min;
+	u8 slave_max;
+	u8 master_min;
+	u8 master_max;
+};
+
+static struct pmu_port_event_ranges port_event_range[] = {
+	[CCI_REV_R0] = {
+		.slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
+		.slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
+		.master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
+		.master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
+	},
+	[CCI_REV_R1] = {
+		.slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
+		.slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
+		.master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
+		.master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
+	},
+};
+
+struct cci_pmu_drv_data {
+	void __iomem *base;
+	struct arm_pmu *cci_pmu;
+	int nr_irqs;
+	int irqs[CCI_PMU_MAX_HW_EVENTS];
+	unsigned long active_irqs;
+	struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
+	unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
+	struct pmu_port_event_ranges *port_ranges;
+	struct pmu_hw_events hw_events;
+};
+static struct cci_pmu_drv_data *pmu;
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+	int i;
+
+	for (i = 0; i < nr_irqs; i++)
+		if (irq == irqs[i])
+			return true;
+
+	return false;
+}
+
+static int probe_cci_revision(void)
+{
+	int rev;
+	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+	rev >>= CCI_PID2_REV_SHIFT;
+
+	if (rev <= CCI_REV_R0_P4)
+		return CCI_REV_R0;
+	else if (rev <= CCI_REV_R1_P2)
+		return CCI_REV_R1;
+
+	return -ENOENT;
+}
+
+static struct pmu_port_event_ranges *port_range_by_rev(void)
+{
+	int rev = probe_cci_revision();
+
+	if (rev < 0)
+		return NULL;
+
+	return &port_event_range[rev];
+}
+
+static int pmu_is_valid_slave_event(u8 ev_code)
+{
+	return pmu->port_ranges->slave_min <= ev_code &&
+		ev_code <= pmu->port_ranges->slave_max;
+}
+
+static int pmu_is_valid_master_event(u8 ev_code)
+{
+	return pmu->port_ranges->master_min <= ev_code &&
+		ev_code <= pmu->port_ranges->master_max;
+}
+
+static int pmu_validate_hw_event(u8 hw_event)
+{
+	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
+	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
+
+	switch (ev_source) {
+	case CCI_PORT_S0:
+	case CCI_PORT_S1:
+	case CCI_PORT_S2:
+	case CCI_PORT_S3:
+	case CCI_PORT_S4:
+		/* Slave Interface */
+		if (pmu_is_valid_slave_event(ev_code))
+			return hw_event;
+		break;
+	case CCI_PORT_M0:
+	case CCI_PORT_M1:
+	case CCI_PORT_M2:
+		/* Master Interface */
+		if (pmu_is_valid_master_event(ev_code))
+			return hw_event;
+		break;
+	}
+
+	return -ENOENT;
+}
+
+static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx)
+{
+	return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
+		idx <= CCI_PMU_CNTR_LAST(cci_pmu);
+}
+
+static u32 pmu_read_register(int idx, unsigned int offset)
+{
+	return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_write_register(u32 value, int idx, unsigned int offset)
+{
+	return writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_disable_counter(int idx)
+{
+	pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_enable_counter(int idx)
+{
+	pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_set_event(int idx, unsigned long event)
+{
+	event &= CCI_PMU_EVENT_MASK;
+	pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
+}
+
+static u32 pmu_get_max_counters(void)
+{
+	u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
+		      CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
+
+	/* add 1 for cycle counter */
+	return n_cnts + 1;
+}
+
+static struct pmu_hw_events *pmu_get_hw_events(void)
+{
+	return &pmu->hw_events;
+}
+
+static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
+{
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hw_event = &event->hw;
+	unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
+	int idx;
+
+	if (cci_event == CCI_PMU_CYCLES) {
+		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
+			return -EAGAIN;
+
+		return CCI_PMU_CYCLE_CNTR_IDX;
+	}
+
+	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+		if (!test_and_set_bit(idx, hw->used_mask))
+			return idx;
+
+	/* No counters available */
+	return -EAGAIN;
+}
+
+static int pmu_map_event(struct perf_event *event)
+{
+	int mapping;
+	u8 config = event->attr.config & CCI_PMU_EVENT_MASK;
+
+	if (event->attr.type < PERF_TYPE_MAX)
+		return -ENOENT;
+
+	if (config == CCI_PMU_CYCLES)
+		mapping = config;
+	else
+		mapping = pmu_validate_hw_event(config);
+
+	return mapping;
+}
+
+static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
+{
+	int i;
+	struct platform_device *pmu_device = cci_pmu->plat_device;
+
+	if (unlikely(!pmu_device))
+		return -ENODEV;
+
+	if (pmu->nr_irqs < 1) {
+		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Register all available CCI PMU interrupts. In the interrupt handler
+	 * we iterate over the counters checking for interrupt source (the
+	 * overflowing counter) and clear it.
+	 *
+	 * This should allow handling of non-unique interrupt for the counters.
+	 */
+	for (i = 0; i < pmu->nr_irqs; i++) {
+		int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
+				"arm-cci-pmu", cci_pmu);
+		if (err) {
+			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+				pmu->irqs[i]);
+			return err;
+		}
+
+		set_bit(i, &pmu->active_irqs);
+	}
+
+	return 0;
+}
+
+static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long flags;
+	struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+	struct perf_sample_data data;
+	struct pt_regs *regs;
+	int idx, handled = IRQ_NONE;
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	regs = get_irq_regs();
+	/*
+	 * Iterate over counters and update the corresponding perf events.
+	 * This should work regardless of whether we have per-counter overflow
+	 * interrupt or a combined overflow interrupt.
+	 */
+	for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+		struct perf_event *event = events->events[idx];
+		struct hw_perf_event *hw_counter;
+
+		if (!event)
+			continue;
+
+		hw_counter = &event->hw;
+
+		/* Did this counter overflow? */
+		if (!pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG)
+			continue;
+
+		pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
+
+		handled = IRQ_HANDLED;
+
+		armpmu_event_update(event);
+		perf_sample_data_init(&data, 0, hw_counter->last_period);
+		if (!armpmu_event_set_period(event))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			cci_pmu->disable(event);
+	}
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static void pmu_free_irq(struct arm_pmu *cci_pmu)
+{
+	int i;
+
+	for (i = 0; i < pmu->nr_irqs; i++) {
+		if (!test_and_clear_bit(i, &pmu->active_irqs))
+			continue;
+
+		free_irq(pmu->irqs[i], cci_pmu);
+	}
+}
+
+static void pmu_enable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return;
+	}
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Configure the event to count, unless you are counting cycles */
+	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
+		pmu_set_event(idx, hw_counter->config_base);
+
+	pmu_enable_counter(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_disable_event(struct perf_event *event)
+{
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return;
+	}
+
+	pmu_disable_counter(idx);
+}
+
+static void pmu_start(struct arm_pmu *cci_pmu)
+{
+	u32 val;
+	unsigned long flags;
+	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Enable all the PMU counters. */
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_stop(struct arm_pmu *cci_pmu)
+{
+	u32 val;
+	unsigned long flags;
+	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable all the PMU counters. */
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+	u32 value;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return 0;
+	}
+	value = pmu_read_register(idx, CCI_PMU_CNTR);
+
+	return value;
+}
+
+static void pmu_write_counter(struct perf_event *event, u32 value)
+{
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+	else
+		pmu_write_register(value, idx, CCI_PMU_CNTR);
+}
+
+static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev)
+{
+	*cci_pmu = (struct arm_pmu){
+		.name		= PMU_NAME,
+		.max_period	= (1LLU << 32) - 1,
+		.get_hw_events	= pmu_get_hw_events,
+		.get_event_idx	= pmu_get_event_idx,
+		.map_event	= pmu_map_event,
+		.request_irq	= pmu_request_irq,
+		.handle_irq	= pmu_handle_irq,
+		.free_irq	= pmu_free_irq,
+		.enable		= pmu_enable_event,
+		.disable	= pmu_disable_event,
+		.start		= pmu_start,
+		.stop		= pmu_stop,
+		.read_counter	= pmu_read_counter,
+		.write_counter	= pmu_write_counter,
+	};
+
+	cci_pmu->plat_device = pdev;
+	cci_pmu->num_events = pmu_get_max_counters();
+
+	return armpmu_register(cci_pmu, -1);
+}
+
+static const struct of_device_id arm_cci_pmu_matches[] = {
+	{
+		.compatible = "arm,cci-400-pmu",
+	},
+	{},
+};
+
+static int cci_pmu_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int i, ret, irq;
+
+	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+	if (!pmu)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_warn(&pdev->dev, "Failed to get mem resource\n");
+		ret = -EINVAL;
+		goto memalloc_err;
+	};
+
+	pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (!pmu->base) {
+		dev_warn(&pdev->dev, "Failed to ioremap\n");
+		ret = -ENOMEM;
+		goto memalloc_err;
+	}
+
+	/*
+	 * CCI PMU has 5 overflow signals - one per counter; but some may be tied
+	 * together to a common interrupt.
+	 */
+	pmu->nr_irqs = 0;
+	for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			break;
+
+		if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
+			continue;
+
+		pmu->irqs[pmu->nr_irqs++] = irq;
+	}
+
+	/*
+	 * Ensure that the device tree has as many interrupts as the number
+	 * of counters.
+	 */
+	if (i < CCI_PMU_MAX_HW_EVENTS) {
+		dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n",
+			i, CCI_PMU_MAX_HW_EVENTS);
+		ret = -EINVAL;
+		goto memalloc_err;
+	}
+
+	pmu->port_ranges = port_range_by_rev();
+	if (!pmu->port_ranges) {
+		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+		ret = -EINVAL;
+		goto memalloc_err;
+	}
+
+	pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL);
+	if (!pmu->cci_pmu) {
+		ret = -ENOMEM;
+		goto memalloc_err;
+	}
+
+	pmu->hw_events.events = pmu->events;
+	pmu->hw_events.used_mask = pmu->used_mask;
+	raw_spin_lock_init(&pmu->hw_events.pmu_lock);
+
+	ret = cci_pmu_init(pmu->cci_pmu, pdev);
+	if (ret)
+		goto pmuinit_err;
+
+	return 0;
+
+pmuinit_err:
+	kfree(pmu->cci_pmu);
+memalloc_err:
+	kfree(pmu);
+	return ret;
+}
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+	if (!cci_probed())
+		return -ENODEV;
+
+	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
 struct cpu_port {
 	u64 mpidr;
 	u32 port;
@@ -120,7 +711,7 @@ int cci_ace_get_port(struct device_node *dn)
 }
 EXPORT_SYMBOL_GPL(cci_ace_get_port);
 
-static void __init cci_ace_init_ports(void)
+static void cci_ace_init_ports(void)
 {
 	int port, cpu;
 	struct device_node *cpun;
@@ -386,7 +977,7 @@ static const struct of_device_id arm_cci_ctrl_if_matches[] = {
 	{},
 };
 
-static int __init cci_probe(void)
+static int cci_probe(void)
 {
 	struct cci_nb_ports const *cci_config;
 	int ret, i, nb_ace = 0, nb_ace_lite = 0;
@@ -490,7 +1081,7 @@ memalloc_err:
 static int cci_init_status = -EAGAIN;
 static DEFINE_MUTEX(cci_probing);
 
-static int __init cci_init(void)
+static int cci_init(void)
 {
 	if (cci_init_status != -EAGAIN)
 		return cci_init_status;
@@ -502,18 +1093,55 @@ static int __init cci_init(void)
 	return cci_init_status;
 }
 
+#ifdef CONFIG_HW_PERF_EVENTS
+static struct platform_driver cci_pmu_driver = {
+	.driver = {
+		.name = DRIVER_NAME_PMU,
+		.of_match_table = arm_cci_pmu_matches,
+	},
+	.probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+	.driver = {
+		.name = DRIVER_NAME,
+		.of_match_table = arm_cci_matches,
+	},
+	.probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&cci_pmu_driver);
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&cci_platform_driver);
+}
+
+#else
+
+static int __init cci_platform_init(void)
+{
+	return 0;
+}
+
+#endif
 /*
  * To sort out early init calls ordering a helper function is provided to
  * check if the CCI driver has beed initialized. Function check if the driver
  * has been initialized, if not it calls the init function that probes
  * the driver and updates the return value.
  */
-bool __init cci_probed(void)
+bool cci_probed(void)
 {
 	return cci_init() == 0;
 }
 EXPORT_SYMBOL_GPL(cci_probed);
 
 early_initcall(cci_init);
+core_initcall(cci_platform_init);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("ARM CCI support");
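
To make the event-id scheme in the patch concrete (bits 7:5 select one of
eight ports, bits 4:0 the event code, with per-revision valid ranges), here
is a small standalone sketch outside the kernel; the helper names are
hypothetical and the port/event choice is illustrative:

/*
 * Standalone sketch of the CCI-400 PMU event-id encoding:
 * bits 7:5 select the port, bits 4:0 the event code, and the valid
 * code range depends on port type and CCI revision.
 */
#include <stdint.h>
#include <stdio.h>

#define CCI_PORT_M0	5			/* first master port */

static uint8_t cci_event(uint8_t port, uint8_t code)
{
	return ((port & 0x7) << 5) | (code & 0x1f);
}

/* Mirrors the Rev0 ranges above: 0x00-0x13 slave, 0x14-0x1a master */
static int cci_event_valid_r0(uint8_t event)
{
	uint8_t port = (event >> 5) & 0x7;
	uint8_t code = event & 0x1f;

	if (port < CCI_PORT_M0)			/* slave ports S0-S4 */
		return code <= 0x13;
	return code >= 0x14 && code <= 0x1a;	/* master ports M0-M2 */
}

int main(void)
{
	uint8_t ev = cci_event(CCI_PORT_M0, 0x14);

	printf("event id 0x%02x, valid on Rev0: %d\n",
	       ev, cci_event_valid_r0(ev));
	return 0;
}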