author		Linus Torvalds <torvalds@linux-foundation.org>	2013-11-11 03:05:37 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-11-11 03:05:37 -0500
commit		53575aa99dc1584484b99c8173042d8370f6ed88 (patch)
tree		06fd13c8847c1e97cd5080ea31cec47c4ad54963 /drivers/bus
parent		d5aabbcaee6bb5fb57ea8c67714516af4d8238ce (diff)
parent		3316dee245ef297155fa45b8d14263dfd6a9164b (diff)
Merge tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc

Pull ARM driver updates from Olof Johansson:
 "Updates of SoC-near drivers and other driver updates that make more
  sense to take through our tree. In this case it's involved:

   - Some Davinci driver updates that have required corresponding
     platform code changes (gpio mostly)

   - CCI bindings and a few driver updates

   - Marvell mvebu patches for PCI MSI support (could have gone through
     the PCI tree for this release, but they were acked by Bjorn for
     3.12 so we kept them through arm-soc)

   - Marvell dove switch-over to DT-based PCIe configuration

   - Misc updates for Samsung platform dmaengine drivers"

* tag 'drivers-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/arm/arm-soc: (32 commits)
  ARM: S3C24XX: add dma pdata for s3c2410, s3c2440 and s3c2442
  dmaengine: s3c24xx-dma: add support for the s3c2410 type of controller
  ARM: S3C24XX: Fix possible dma selection warning
  PCI: mvebu: make local functions static
  PCI: mvebu: add I/O access wrappers
  PCI: mvebu: Dynamically detect if the PEX link is up to enable hot plug
  ARM: mvebu: fix gated clock documentation
  ARM: dove: remove legacy pcie and clock init
  ARM: dove: switch to DT probed mbus address windows
  ARM: SAMSUNG: set s3c24xx_dma_filter for s3c64xx-spi0 device
  ARM: S3C24XX: add platform-devices for new dma driver for s3c2412 and s3c2443
  dmaengine: add driver for Samsung s3c24xx SoCs
  ARM: S3C24XX: number the dma clocks
  PCI: mvebu: add support for Marvell Dove SoCs
  PCI: mvebu: add support for reset on GPIO
  PCI: mvebu: remove subsys_initcall
  PCI: mvebu: increment nports only for registered ports
  PCI: mvebu: move clock enable before register access
  PCI: mvebu: add support for MSI
  irqchip: armada-370-xp: implement MSI support
  ...
Diffstat (limited to 'drivers/bus')
 -rw-r--r--  drivers/bus/arm-cci.c | 617
 1 file changed, 613 insertions(+), 4 deletions(-)
diff --git a/drivers/bus/arm-cci.c b/drivers/bus/arm-cci.c
index 200926699778..bb5b90e8e768 100644
--- a/drivers/bus/arm-cci.c
+++ b/drivers/bus/arm-cci.c
@@ -18,11 +18,21 @@
 #include <linux/io.h>
 #include <linux/module.h>
 #include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
 
 #include <asm/cacheflush.h>
+#include <asm/irq_regs.h>
+#include <asm/pmu.h>
 #include <asm/smp_plat.h>
 
+#define DRIVER_NAME		"CCI-400"
+#define DRIVER_NAME_PMU		DRIVER_NAME " PMU"
+#define PMU_NAME		"CCI_400"
+
 #define CCI_PORT_CTRL		0x0
 #define CCI_CTRL_STATUS		0xc
 
@@ -54,6 +64,568 @@ static unsigned int nb_cci_ports;
 static void __iomem *cci_ctrl_base;
 static unsigned long cci_ctrl_phys;
 
+#ifdef CONFIG_HW_PERF_EVENTS
+
+#define CCI_PMCR		0x0100
+#define CCI_PID2		0x0fe8
+
+#define CCI_PMCR_CEN		0x00000001
+#define CCI_PMCR_NCNT_MASK	0x0000f800
+#define CCI_PMCR_NCNT_SHIFT	11
+
+#define CCI_PID2_REV_MASK	0xf0
+#define CCI_PID2_REV_SHIFT	4
+
+/* Port ids */
+#define CCI_PORT_S0	0
+#define CCI_PORT_S1	1
+#define CCI_PORT_S2	2
+#define CCI_PORT_S3	3
+#define CCI_PORT_S4	4
+#define CCI_PORT_M0	5
+#define CCI_PORT_M1	6
+#define CCI_PORT_M2	7
+
+#define CCI_REV_R0	0
+#define CCI_REV_R1	1
+#define CCI_REV_R0_P4	4
+#define CCI_REV_R1_P2	6
+
+#define CCI_PMU_EVT_SEL		0x000
+#define CCI_PMU_CNTR		0x004
+#define CCI_PMU_CNTR_CTRL	0x008
+#define CCI_PMU_OVRFLW		0x00c
+
+#define CCI_PMU_OVRFLW_FLAG	1
+
+#define CCI_PMU_CNTR_BASE(idx)	((idx) * SZ_4K)
+
+/*
+ * Instead of an event id to monitor CCI cycles, a dedicated counter is
+ * provided. Use 0xff to represent CCI cycles and hope that no future revisions
+ * make use of this event in hardware.
+ */
+enum cci400_perf_events {
+	CCI_PMU_CYCLES = 0xff
+};
+
+#define CCI_PMU_EVENT_MASK		0xff
+#define CCI_PMU_EVENT_SOURCE(event)	((event >> 5) & 0x7)
+#define CCI_PMU_EVENT_CODE(event)	(event & 0x1f)
+
+#define CCI_PMU_MAX_HW_EVENTS 5   /* CCI PMU has 4 counters + 1 cycle counter */
+
+#define CCI_PMU_CYCLE_CNTR_IDX		0
+#define CCI_PMU_CNTR0_IDX		1
+#define CCI_PMU_CNTR_LAST(cci_pmu)	(CCI_PMU_CYCLE_CNTR_IDX + cci_pmu->num_events - 1)
+
+/*
+ * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
+ * ports and bits 4:0 are event codes. There are different event codes
+ * associated with each port type.
+ *
+ * Additionally, the range of events associated with the port types changed
+ * between Rev0 and Rev1.
+ *
+ * The constants below define the range of valid codes for each port type for
+ * the different revisions and are used to validate the event to be monitored.
+ */
+
+#define CCI_REV_R0_SLAVE_PORT_MIN_EV	0x00
+#define CCI_REV_R0_SLAVE_PORT_MAX_EV	0x13
+#define CCI_REV_R0_MASTER_PORT_MIN_EV	0x14
+#define CCI_REV_R0_MASTER_PORT_MAX_EV	0x1a
+
+#define CCI_REV_R1_SLAVE_PORT_MIN_EV	0x00
+#define CCI_REV_R1_SLAVE_PORT_MAX_EV	0x14
+#define CCI_REV_R1_MASTER_PORT_MIN_EV	0x00
+#define CCI_REV_R1_MASTER_PORT_MAX_EV	0x11
+
+struct pmu_port_event_ranges {
+	u8 slave_min;
+	u8 slave_max;
+	u8 master_min;
+	u8 master_max;
+};
+
+static struct pmu_port_event_ranges port_event_range[] = {
+	[CCI_REV_R0] = {
+		.slave_min = CCI_REV_R0_SLAVE_PORT_MIN_EV,
+		.slave_max = CCI_REV_R0_SLAVE_PORT_MAX_EV,
+		.master_min = CCI_REV_R0_MASTER_PORT_MIN_EV,
+		.master_max = CCI_REV_R0_MASTER_PORT_MAX_EV,
+	},
+	[CCI_REV_R1] = {
+		.slave_min = CCI_REV_R1_SLAVE_PORT_MIN_EV,
+		.slave_max = CCI_REV_R1_SLAVE_PORT_MAX_EV,
+		.master_min = CCI_REV_R1_MASTER_PORT_MIN_EV,
+		.master_max = CCI_REV_R1_MASTER_PORT_MAX_EV,
+	},
+};
+
+struct cci_pmu_drv_data {
+	void __iomem *base;
+	struct arm_pmu *cci_pmu;
+	int nr_irqs;
+	int irqs[CCI_PMU_MAX_HW_EVENTS];
+	unsigned long active_irqs;
+	struct perf_event *events[CCI_PMU_MAX_HW_EVENTS];
+	unsigned long used_mask[BITS_TO_LONGS(CCI_PMU_MAX_HW_EVENTS)];
+	struct pmu_port_event_ranges *port_ranges;
+	struct pmu_hw_events hw_events;
+};
+static struct cci_pmu_drv_data *pmu;
+
+static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs)
+{
+	int i;
+
+	for (i = 0; i < nr_irqs; i++)
+		if (irq == irqs[i])
+			return true;
+
+	return false;
+}
+
+static int probe_cci_revision(void)
+{
+	int rev;
+	rev = readl_relaxed(cci_ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
+	rev >>= CCI_PID2_REV_SHIFT;
+
+	if (rev <= CCI_REV_R0_P4)
+		return CCI_REV_R0;
+	else if (rev <= CCI_REV_R1_P2)
+		return CCI_REV_R1;
+
+	return -ENOENT;
+}
+
+static struct pmu_port_event_ranges *port_range_by_rev(void)
+{
+	int rev = probe_cci_revision();
+
+	if (rev < 0)
+		return NULL;
+
+	return &port_event_range[rev];
+}
+
+static int pmu_is_valid_slave_event(u8 ev_code)
+{
+	return pmu->port_ranges->slave_min <= ev_code &&
+		ev_code <= pmu->port_ranges->slave_max;
+}
+
+static int pmu_is_valid_master_event(u8 ev_code)
+{
+	return pmu->port_ranges->master_min <= ev_code &&
+		ev_code <= pmu->port_ranges->master_max;
+}
+
+static int pmu_validate_hw_event(u8 hw_event)
+{
+	u8 ev_source = CCI_PMU_EVENT_SOURCE(hw_event);
+	u8 ev_code = CCI_PMU_EVENT_CODE(hw_event);
+
+	switch (ev_source) {
+	case CCI_PORT_S0:
+	case CCI_PORT_S1:
+	case CCI_PORT_S2:
+	case CCI_PORT_S3:
+	case CCI_PORT_S4:
+		/* Slave Interface */
+		if (pmu_is_valid_slave_event(ev_code))
+			return hw_event;
+		break;
+	case CCI_PORT_M0:
+	case CCI_PORT_M1:
+	case CCI_PORT_M2:
+		/* Master Interface */
+		if (pmu_is_valid_master_event(ev_code))
+			return hw_event;
+		break;
+	}
+
+	return -ENOENT;
+}
+
+static int pmu_is_valid_counter(struct arm_pmu *cci_pmu, int idx)
+{
+	return CCI_PMU_CYCLE_CNTR_IDX <= idx &&
+		idx <= CCI_PMU_CNTR_LAST(cci_pmu);
+}
+
+static u32 pmu_read_register(int idx, unsigned int offset)
+{
+	return readl_relaxed(pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_write_register(u32 value, int idx, unsigned int offset)
+{
+	return writel_relaxed(value, pmu->base + CCI_PMU_CNTR_BASE(idx) + offset);
+}
+
+static void pmu_disable_counter(int idx)
+{
+	pmu_write_register(0, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_enable_counter(int idx)
+{
+	pmu_write_register(1, idx, CCI_PMU_CNTR_CTRL);
+}
+
+static void pmu_set_event(int idx, unsigned long event)
+{
+	event &= CCI_PMU_EVENT_MASK;
+	pmu_write_register(event, idx, CCI_PMU_EVT_SEL);
+}
+
+static u32 pmu_get_max_counters(void)
+{
+	u32 n_cnts = (readl_relaxed(cci_ctrl_base + CCI_PMCR) &
+		      CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
+
+	/* add 1 for cycle counter */
+	return n_cnts + 1;
+}
+
+static struct pmu_hw_events *pmu_get_hw_events(void)
+{
+	return &pmu->hw_events;
+}
+
+static int pmu_get_event_idx(struct pmu_hw_events *hw, struct perf_event *event)
+{
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hw_event = &event->hw;
+	unsigned long cci_event = hw_event->config_base & CCI_PMU_EVENT_MASK;
+	int idx;
+
+	if (cci_event == CCI_PMU_CYCLES) {
+		if (test_and_set_bit(CCI_PMU_CYCLE_CNTR_IDX, hw->used_mask))
+			return -EAGAIN;
+
+		return CCI_PMU_CYCLE_CNTR_IDX;
+	}
+
+	for (idx = CCI_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
+		if (!test_and_set_bit(idx, hw->used_mask))
+			return idx;
+
+	/* No counters available */
+	return -EAGAIN;
+}
+
+static int pmu_map_event(struct perf_event *event)
+{
+	int mapping;
+	u8 config = event->attr.config & CCI_PMU_EVENT_MASK;
+
+	if (event->attr.type < PERF_TYPE_MAX)
+		return -ENOENT;
+
+	if (config == CCI_PMU_CYCLES)
+		mapping = config;
+	else
+		mapping = pmu_validate_hw_event(config);
+
+	return mapping;
+}
+
+static int pmu_request_irq(struct arm_pmu *cci_pmu, irq_handler_t handler)
+{
+	int i;
+	struct platform_device *pmu_device = cci_pmu->plat_device;
+
+	if (unlikely(!pmu_device))
+		return -ENODEV;
+
+	if (pmu->nr_irqs < 1) {
+		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
+		return -ENODEV;
+	}
+
+	/*
+	 * Register all available CCI PMU interrupts. In the interrupt handler
+	 * we iterate over the counters checking for interrupt source (the
+	 * overflowing counter) and clear it.
+	 *
+	 * This should allow handling of non-unique interrupt for the counters.
+	 */
+	for (i = 0; i < pmu->nr_irqs; i++) {
+		int err = request_irq(pmu->irqs[i], handler, IRQF_SHARED,
+				"arm-cci-pmu", cci_pmu);
+		if (err) {
+			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
+				pmu->irqs[i]);
+			return err;
+		}
+
+		set_bit(i, &pmu->active_irqs);
+	}
+
+	return 0;
+}
+
+static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
+{
+	unsigned long flags;
+	struct arm_pmu *cci_pmu = (struct arm_pmu *)dev;
+	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+	struct perf_sample_data data;
+	struct pt_regs *regs;
+	int idx, handled = IRQ_NONE;
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+	regs = get_irq_regs();
+	/*
+	 * Iterate over counters and update the corresponding perf events.
+	 * This should work regardless of whether we have per-counter overflow
+	 * interrupt or a combined overflow interrupt.
+	 */
+	for (idx = CCI_PMU_CYCLE_CNTR_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
+		struct perf_event *event = events->events[idx];
+		struct hw_perf_event *hw_counter;
+
+		if (!event)
+			continue;
+
+		hw_counter = &event->hw;
+
+		/* Did this counter overflow? */
+		if (!(pmu_read_register(idx, CCI_PMU_OVRFLW) & CCI_PMU_OVRFLW_FLAG))
+			continue;
+
+		pmu_write_register(CCI_PMU_OVRFLW_FLAG, idx, CCI_PMU_OVRFLW);
+
+		handled = IRQ_HANDLED;
+
+		armpmu_event_update(event);
+		perf_sample_data_init(&data, 0, hw_counter->last_period);
+		if (!armpmu_event_set_period(event))
+			continue;
+
+		if (perf_event_overflow(event, &data, regs))
+			cci_pmu->disable(event);
+	}
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+
+	return IRQ_RETVAL(handled);
+}
+
+static void pmu_free_irq(struct arm_pmu *cci_pmu)
+{
+	int i;
+
+	for (i = 0; i < pmu->nr_irqs; i++) {
+		if (!test_and_clear_bit(i, &pmu->active_irqs))
+			continue;
+
+		free_irq(pmu->irqs[i], cci_pmu);
+	}
+}
+
+static void pmu_enable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return;
+	}
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Configure the event to count, unless you are counting cycles */
+	if (idx != CCI_PMU_CYCLE_CNTR_IDX)
+		pmu_set_event(idx, hw_counter->config_base);
+
+	pmu_enable_counter(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_disable_event(struct perf_event *event)
+{
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return;
+	}
+
+	pmu_disable_counter(idx);
+}
+
+static void pmu_start(struct arm_pmu *cci_pmu)
+{
+	u32 val;
+	unsigned long flags;
+	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Enable all the PMU counters. */
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void pmu_stop(struct arm_pmu *cci_pmu)
+{
+	u32 val;
+	unsigned long flags;
+	struct pmu_hw_events *events = cci_pmu->get_hw_events();
+
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable all the PMU counters. */
+	val = readl_relaxed(cci_ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
+	writel(val, cci_ctrl_base + CCI_PMCR);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static u32 pmu_read_counter(struct perf_event *event)
+{
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+	u32 value;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+		return 0;
+	}
+	value = pmu_read_register(idx, CCI_PMU_CNTR);
+
+	return value;
+}
+
+static void pmu_write_counter(struct perf_event *event, u32 value)
+{
+	struct arm_pmu *cci_pmu = to_arm_pmu(event->pmu);
+	struct hw_perf_event *hw_counter = &event->hw;
+	int idx = hw_counter->idx;
+
+	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx)))
+		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
+	else
+		pmu_write_register(value, idx, CCI_PMU_CNTR);
+}
+
+static int cci_pmu_init(struct arm_pmu *cci_pmu, struct platform_device *pdev)
+{
+	*cci_pmu = (struct arm_pmu){
+		.name             = PMU_NAME,
+		.max_period       = (1LLU << 32) - 1,
+		.get_hw_events    = pmu_get_hw_events,
+		.get_event_idx    = pmu_get_event_idx,
+		.map_event        = pmu_map_event,
+		.request_irq      = pmu_request_irq,
+		.handle_irq       = pmu_handle_irq,
+		.free_irq         = pmu_free_irq,
+		.enable           = pmu_enable_event,
+		.disable          = pmu_disable_event,
+		.start            = pmu_start,
+		.stop             = pmu_stop,
+		.read_counter     = pmu_read_counter,
+		.write_counter    = pmu_write_counter,
+	};
+
+	cci_pmu->plat_device = pdev;
+	cci_pmu->num_events = pmu_get_max_counters();
+
+	return armpmu_register(cci_pmu, -1);
+}
+
+static const struct of_device_id arm_cci_pmu_matches[] = {
+	{
+		.compatible = "arm,cci-400-pmu",
+	},
+	{},
+};
+
+static int cci_pmu_probe(struct platform_device *pdev)
+{
+	struct resource *res;
+	int i, ret, irq;
+
+	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
+	if (!pmu)
+		return -ENOMEM;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pmu->base = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(pmu->base))
+		return -ENOMEM;
+
+	/*
+	 * CCI PMU has 5 overflow signals - one per counter; but some may be tied
+	 * together to a common interrupt.
+	 */
+	pmu->nr_irqs = 0;
+	for (i = 0; i < CCI_PMU_MAX_HW_EVENTS; i++) {
+		irq = platform_get_irq(pdev, i);
+		if (irq < 0)
+			break;
+
+		if (is_duplicate_irq(irq, pmu->irqs, pmu->nr_irqs))
+			continue;
+
+		pmu->irqs[pmu->nr_irqs++] = irq;
+	}
+
+	/*
+	 * Ensure that the device tree has as many interrupts as the number
+	 * of counters.
+	 */
+	if (i < CCI_PMU_MAX_HW_EVENTS) {
+		dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n",
+			i, CCI_PMU_MAX_HW_EVENTS);
+		return -EINVAL;
+	}
+
+	pmu->port_ranges = port_range_by_rev();
+	if (!pmu->port_ranges) {
+		dev_warn(&pdev->dev, "CCI PMU version not supported\n");
+		return -EINVAL;
+	}
+
+	pmu->cci_pmu = devm_kzalloc(&pdev->dev, sizeof(*(pmu->cci_pmu)), GFP_KERNEL);
+	if (!pmu->cci_pmu)
+		return -ENOMEM;
+
+	pmu->hw_events.events = pmu->events;
+	pmu->hw_events.used_mask = pmu->used_mask;
+	raw_spin_lock_init(&pmu->hw_events.pmu_lock);
+
+	ret = cci_pmu_init(pmu->cci_pmu, pdev);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int cci_platform_probe(struct platform_device *pdev)
+{
+	if (!cci_probed())
+		return -ENODEV;
+
+	return of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+}
+
+#endif /* CONFIG_HW_PERF_EVENTS */
+
 struct cpu_port {
 	u64 mpidr;
 	u32 port;
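
The event encoding that pmu_validate_hw_event() checks above is simple to decode by hand: bits 7:5 pick one of the eight ports and bits 4:0 carry the event code. The short standalone sketch below is not part of the patch; it mirrors the driver's two masks (with extra parentheses for macro hygiene), and the sample id 0x63 is an arbitrary illustration:

#include <stdint.h>
#include <stdio.h>

/* Same split the driver performs: bits 7:5 = port, bits 4:0 = event code. */
#define CCI_PMU_EVENT_SOURCE(event)	(((event) >> 5) & 0x7)
#define CCI_PMU_EVENT_CODE(event)	((event) & 0x1f)

int main(void)
{
	uint8_t event = 0x63;	/* 0b011'00011 */

	/* Ports 0-4 are slave interfaces S0-S4, ports 5-7 masters M0-M2. */
	printf("port:  %u\n", CCI_PMU_EVENT_SOURCE(event));	/* prints 3, i.e. S3 */
	printf("event: 0x%02x\n", CCI_PMU_EVENT_CODE(event));	/* prints 0x03 */
	return 0;
}

On a Rev0 CCI-400, code 0x03 on a slave port falls inside the 0x00-0x13 range that port_event_range[] declares valid.
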
@@ -120,7 +692,7 @@ int cci_ace_get_port(struct device_node *dn)
 }
 EXPORT_SYMBOL_GPL(cci_ace_get_port);
 
-static void __init cci_ace_init_ports(void)
+static void cci_ace_init_ports(void)
 {
 	int port, cpu;
 	struct device_node *cpun;
@@ -386,7 +958,7 @@ static const struct of_device_id arm_cci_ctrl_if_matches[] = {
 	{},
 };
 
-static int __init cci_probe(void)
+static int cci_probe(void)
 {
 	struct cci_nb_ports const *cci_config;
 	int ret, i, nb_ace = 0, nb_ace_lite = 0;
@@ -490,7 +1062,7 @@ memalloc_err:
 static int cci_init_status = -EAGAIN;
 static DEFINE_MUTEX(cci_probing);
 
-static int __init cci_init(void)
+static int cci_init(void)
 {
 	if (cci_init_status != -EAGAIN)
 		return cci_init_status;
@@ -502,18 +1074,55 @@ static int __init cci_init(void)
 	return cci_init_status;
 }
 
+#ifdef CONFIG_HW_PERF_EVENTS
+static struct platform_driver cci_pmu_driver = {
+	.driver = {
+		   .name = DRIVER_NAME_PMU,
+		   .of_match_table = arm_cci_pmu_matches,
+		  },
+	.probe = cci_pmu_probe,
+};
+
+static struct platform_driver cci_platform_driver = {
+	.driver = {
+		   .name = DRIVER_NAME,
+		   .of_match_table = arm_cci_matches,
+		  },
+	.probe = cci_platform_probe,
+};
+
+static int __init cci_platform_init(void)
+{
+	int ret;
+
+	ret = platform_driver_register(&cci_pmu_driver);
+	if (ret)
+		return ret;
+
+	return platform_driver_register(&cci_platform_driver);
+}
+
+#else
+
+static int __init cci_platform_init(void)
+{
+	return 0;
+}
+
+#endif
 /*
  * To sort out early init calls ordering, a helper function is provided to
  * check if the CCI driver has been initialized. The function checks if the
  * driver has been initialized; if not, it calls the init function that
  * probes the driver and updates the return value.
  */
-bool __init cci_probed(void)
+bool cci_probed(void)
 {
 	return cci_init() == 0;
 }
 EXPORT_SYMBOL_GPL(cci_probed);
 
 early_initcall(cci_init);
+core_initcall(cci_platform_init);
 MODULE_LICENSE("GPL");
 MODULE_DESCRIPTION("ARM CCI support");
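
A usage note that follows from pmu_map_event(): since any attr.type below PERF_TYPE_MAX is rejected with -ENOENT, a counter can only be opened against the dynamic type id the perf core assigns when armpmu_register() runs. The sketch below is an illustration, not part of the patch; it assumes the PMU is visible in sysfs under its PMU_NAME ("CCI_400"), and the pid/cpu choice reflects the usual convention for uncore-style PMUs rather than anything this driver itself guarantees:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	long long count;
	int type, fd;
	FILE *f;

	/* Dynamic PMU type id published by the perf core (assumed sysfs path). */
	f = fopen("/sys/bus/event_source/devices/CCI_400/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0xff;	/* CCI_PMU_CYCLES: the dedicated cycle counter */

	/* Uncore-style counting: all tasks (pid = -1) on one cpu (here cpu 0). */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("CCI cycles: %lld\n", count);
	close(fd);
	return 0;
}

For port events, attr.config would instead carry the 8-bit id described in the patch, e.g. a slave-port-3 event code packed as (3 << 5) | code.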