-rw-r--r--  arch/arm/include/asm/pmu.h     29
-rw-r--r--  arch/arm/kernel/perf_event.c   74
-rw-r--r--  arch/arm/kernel/pmu.c         182
3 files changed, 80 insertions, 205 deletions
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index b7e82c4aced..a06ba8773cd 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -14,6 +14,10 @@
 
 #include <linux/interrupt.h>
 
+/*
+ * Types of PMUs that can be accessed directly and require mutual
+ * exclusion between profiling tools.
+ */
 enum arm_pmu_type {
 	ARM_PMU_DEVICE_CPU = 0,
 	ARM_NUM_PMU_DEVICES,
@@ -37,21 +41,17 @@ struct arm_pmu_platdata {
  * reserve_pmu() - reserve the hardware performance counters
  *
  * Reserve the hardware performance counters in the system for exclusive use.
- * The platform_device for the system is returned on success, ERR_PTR()
- * encoded error on failure.
+ * Returns 0 on success or -EBUSY if the lock is already held.
  */
-extern struct platform_device *
+extern int
 reserve_pmu(enum arm_pmu_type type);
 
 /**
  * release_pmu() - Relinquish control of the performance counters
  *
  * Release the performance counters and allow someone else to use them.
- * Callers must have disabled the counters and released IRQs before calling
- * this. The platform_device returned from reserve_pmu() must be passed as
- * a cookie.
  */
-extern int
+extern void
 release_pmu(enum arm_pmu_type type);
 
 /**
@@ -68,23 +68,14 @@ init_pmu(enum arm_pmu_type type);
 
 #include <linux/err.h>
 
-static inline struct platform_device *
-reserve_pmu(enum arm_pmu_type type)
-{
-	return ERR_PTR(-ENODEV);
-}
-
 static inline int
-release_pmu(enum arm_pmu_type type)
+reserve_pmu(enum arm_pmu_type type)
 {
 	return -ENODEV;
 }
 
-static inline int
-init_pmu(enum arm_pmu_type type)
-{
-	return -ENODEV;
-}
+static inline void
+release_pmu(enum arm_pmu_type type) { }
 
 #endif /* CONFIG_CPU_HAS_PMU */
 
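The interface change above means reserve_pmu() now reports success with 0 or failure with -EBUSY instead of returning a platform_device pointer, and release_pmu() no longer returns a value. A minimal illustrative caller, assuming only the declarations in this header (the helper below is hypothetical and not part of the patch):

/* Illustrative sketch only; not part of the patch. Shows how a caller
 * would use the revised reserve_pmu()/release_pmu() return types. */
#include <linux/kernel.h>
#include <asm/pmu.h>

static int example_claim_cpu_pmu(void)		/* hypothetical helper */
{
	int err = reserve_pmu(ARM_PMU_DEVICE_CPU);

	if (err) {
		pr_warning("CPU PMU is already in use\n");
		return err;		/* -EBUSY from reserve_pmu() */
	}

	/* ... program and read the counters here ... */

	release_pmu(ARM_PMU_DEVICE_CPU);	/* no return value to check */
	return 0;
}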
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 73049963bf4..8514855ffc2 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -393,15 +393,15 @@ armpmu_reserve_hardware(void)
 {
 	struct arm_pmu_platdata *plat;
 	irq_handler_t handle_irq;
-	int i, err = -ENODEV, irq;
+	int i, err, irq, irqs;
 
-	pmu_device = reserve_pmu(ARM_PMU_DEVICE_CPU);
-	if (IS_ERR(pmu_device)) {
+	err = reserve_pmu(ARM_PMU_DEVICE_CPU);
+	if (err) {
 		pr_warning("unable to reserve pmu\n");
-		return PTR_ERR(pmu_device);
+		return err;
 	}
 
-	init_pmu(ARM_PMU_DEVICE_CPU);
+	irqs = pmu_device->num_resources;
 
 	plat = dev_get_platdata(&pmu_device->dev);
 	if (plat && plat->handle_irq)
@@ -409,22 +409,34 @@ armpmu_reserve_hardware(void)
 	else
 		handle_irq = armpmu->handle_irq;
 
-	if (pmu_device->num_resources < 1) {
+	if (irqs < 1) {
 		pr_err("no irqs for PMUs defined\n");
 		return -ENODEV;
 	}
 
-	for (i = 0; i < pmu_device->num_resources; ++i) {
+	for (i = 0; i < irqs; ++i) {
 		irq = platform_get_irq(pmu_device, i);
 		if (irq < 0)
 			continue;
 
+		/*
+		 * If we have a single PMU interrupt that we can't shift,
+		 * assume that we're running on a uniprocessor machine and
+		 * continue.
+		 */
+		err = irq_set_affinity(irq, cpumask_of(i));
+		if (err && irqs > 1) {
+			pr_err("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				irq, i);
+			break;
+		}
+
 		err = request_irq(irq, handle_irq,
 				  IRQF_DISABLED | IRQF_NOBALANCING,
-				  "armpmu", NULL);
+				  "arm-pmu", NULL);
 		if (err) {
-			pr_warning("unable to request IRQ%d for ARM perf "
-				   "counters\n", irq);
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+				irq);
 			break;
 		}
 	}
@@ -436,7 +448,6 @@ armpmu_reserve_hardware(void)
 				free_irq(irq, NULL);
 		}
 		release_pmu(ARM_PMU_DEVICE_CPU);
-		pmu_device = NULL;
 	}
 
 	return err;
@@ -455,7 +466,6 @@ armpmu_release_hardware(void)
 	armpmu->stop();
 
 	release_pmu(ARM_PMU_DEVICE_CPU);
-	pmu_device = NULL;
 }
 
 static atomic_t active_events = ATOMIC_INIT(0);
@@ -638,6 +648,46 @@ armpmu_reset(void)
 }
 arch_initcall(armpmu_reset);
 
+/*
+ * PMU platform driver and devicetree bindings.
+ */
+static struct of_device_id armpmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a9-pmu"},
+	{.compatible = "arm,cortex-a8-pmu"},
+	{.compatible = "arm,arm1136-pmu"},
+	{.compatible = "arm,arm1176-pmu"},
+	{},
+};
+
+static struct platform_device_id armpmu_plat_device_ids[] = {
+	{.name = "arm-pmu"},
+	{},
+};
+
+static int __devinit armpmu_device_probe(struct platform_device *pdev)
+{
+	pmu_device = pdev;
+	return 0;
+}
+
+static struct platform_driver armpmu_driver = {
+	.driver = {
+		.name = "arm-pmu",
+		.of_match_table = armpmu_of_device_ids,
+	},
+	.probe = armpmu_device_probe,
+	.id_table = armpmu_plat_device_ids,
+};
+
+static int __init register_pmu_driver(void)
+{
+	return platform_driver_register(&armpmu_driver);
+}
+device_initcall(register_pmu_driver);
+
+/*
+ * CPU PMU identification and registration.
+ */
 static int __init
 init_hw_perf_events(void)
 {
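The new platform driver binds either to a devicetree node matching one of the compatible strings above or to a platform device registered with the name "arm-pmu". A rough board-file sketch of the latter, with a made-up IRQ number and hypothetical names, purely for illustration:

/* Illustrative board-file sketch; the IRQ number and the example_* names
 * are made up for this example and are not part of the patch. */
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <asm/pmu.h>

static struct resource example_pmu_resources[] = {	/* hypothetical */
	{
		.start	= 100,		/* PMU interrupt, example value */
		.end	= 100,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device example_pmu_device = {	/* hypothetical */
	.name		= "arm-pmu",	/* matches armpmu_plat_device_ids[] */
	.id		= ARM_PMU_DEVICE_CPU,
	.num_resources	= ARRAY_SIZE(example_pmu_resources),
	.resource	= example_pmu_resources,
};

/* A board init function would then call:
 *	platform_device_register(&example_pmu_device);
 */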
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c
index c53474fe84d..2c3407ee857 100644
--- a/arch/arm/kernel/pmu.c
+++ b/arch/arm/kernel/pmu.c
@@ -10,192 +10,26 @@
  *
  */
 
-#define pr_fmt(fmt) "PMU: " fmt
-
-#include <linux/cpumask.h>
 #include <linux/err.h>
-#include <linux/interrupt.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/platform_device.h>
 
 #include <asm/pmu.h>
 
-static volatile long pmu_lock;
-
-static struct platform_device *pmu_devices[ARM_NUM_PMU_DEVICES];
-
-static int __devinit pmu_register(struct platform_device *pdev,
-					enum arm_pmu_type type)
-{
-	if (type < 0 || type >= ARM_NUM_PMU_DEVICES) {
-		pr_warning("received registration request for unknown "
-				"PMU device type %d\n", type);
-		return -EINVAL;
-	}
-
-	if (pmu_devices[type]) {
-		pr_warning("rejecting duplicate registration of PMU device "
-			"type %d.", type);
-		return -ENOSPC;
-	}
-
-	pr_info("registered new PMU device of type %d\n", type);
-	pmu_devices[type] = pdev;
-	return 0;
-}
-
-#define OF_MATCH_PMU(_name, _type) {	\
-	.compatible = _name,		\
-	.data = (void *)_type,		\
-}
-
-#define OF_MATCH_CPU(name)	OF_MATCH_PMU(name, ARM_PMU_DEVICE_CPU)
-
-static struct of_device_id armpmu_of_device_ids[] = {
-	OF_MATCH_CPU("arm,cortex-a9-pmu"),
-	OF_MATCH_CPU("arm,cortex-a8-pmu"),
-	OF_MATCH_CPU("arm,arm1136-pmu"),
-	OF_MATCH_CPU("arm,arm1176-pmu"),
-	{},
-};
-
-#define PLAT_MATCH_PMU(_name, _type) {	\
-	.name = _name,			\
-	.driver_data = _type,		\
-}
-
-#define PLAT_MATCH_CPU(_name)	PLAT_MATCH_PMU(_name, ARM_PMU_DEVICE_CPU)
-
-static struct platform_device_id armpmu_plat_device_ids[] = {
-	PLAT_MATCH_CPU("arm-pmu"),
-	{},
-};
-
-enum arm_pmu_type armpmu_device_type(struct platform_device *pdev)
-{
-	const struct of_device_id *of_id;
-	const struct platform_device_id *pdev_id;
-
-	/* provided by of_device_id table */
-	if (pdev->dev.of_node) {
-		of_id = of_match_device(armpmu_of_device_ids, &pdev->dev);
-		BUG_ON(!of_id);
-		return (enum arm_pmu_type)of_id->data;
-	}
-
-	/* Provided by platform_device_id table */
-	pdev_id = platform_get_device_id(pdev);
-	BUG_ON(!pdev_id);
-	return pdev_id->driver_data;
-}
-
-static int __devinit armpmu_device_probe(struct platform_device *pdev)
-{
-	return pmu_register(pdev, armpmu_device_type(pdev));
-}
-
-static struct platform_driver armpmu_driver = {
-	.driver = {
-		.name = "arm-pmu",
-		.of_match_table = armpmu_of_device_ids,
-	},
-	.probe = armpmu_device_probe,
-	.id_table = armpmu_plat_device_ids,
-};
-
-static int __init register_pmu_driver(void)
-{
-	return platform_driver_register(&armpmu_driver);
-}
-device_initcall(register_pmu_driver);
+/*
+ * PMU locking to ensure mutual exclusion between different subsystems.
+ */
+static unsigned long pmu_lock[BITS_TO_LONGS(ARM_NUM_PMU_DEVICES)];
 
-struct platform_device *
+int
 reserve_pmu(enum arm_pmu_type type)
 {
-	struct platform_device *pdev;
-
-	if (test_and_set_bit_lock(type, &pmu_lock)) {
-		pdev = ERR_PTR(-EBUSY);
-	} else if (pmu_devices[type] == NULL) {
-		clear_bit_unlock(type, &pmu_lock);
-		pdev = ERR_PTR(-ENODEV);
-	} else {
-		pdev = pmu_devices[type];
-	}
-
-	return pdev;
+	return test_and_set_bit_lock(type, pmu_lock) ? -EBUSY : 0;
 }
 EXPORT_SYMBOL_GPL(reserve_pmu);
 
-int
+void
 release_pmu(enum arm_pmu_type type)
 {
-	if (WARN_ON(!pmu_devices[type]))
-		return -EINVAL;
-	clear_bit_unlock(type, &pmu_lock);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(release_pmu);
-
-static int
-set_irq_affinity(int irq,
-		 unsigned int cpu)
-{
-#ifdef CONFIG_SMP
-	int err = irq_set_affinity(irq, cpumask_of(cpu));
-	if (err)
-		pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
-			   irq, cpu);
-	return err;
-#else
-	return -EINVAL;
-#endif
-}
-
-static int
-init_cpu_pmu(void)
-{
-	int i, irqs, err = 0;
-	struct platform_device *pdev = pmu_devices[ARM_PMU_DEVICE_CPU];
-
-	if (!pdev)
-		return -ENODEV;
-
-	irqs = pdev->num_resources;
-
-	/*
-	 * If we have a single PMU interrupt that we can't shift, assume that
-	 * we're running on a uniprocessor machine and continue.
-	 */
-	if (irqs == 1 && !irq_can_set_affinity(platform_get_irq(pdev, 0)))
-		return 0;
-
-	for (i = 0; i < irqs; ++i) {
-		err = set_irq_affinity(platform_get_irq(pdev, i), i);
-		if (err)
-			break;
-	}
-
-	return err;
-}
-
-int
-init_pmu(enum arm_pmu_type type)
-{
-	int err = 0;
-
-	switch (type) {
-	case ARM_PMU_DEVICE_CPU:
-		err = init_cpu_pmu();
-		break;
-	default:
-		pr_warning("attempt to initialise PMU of unknown "
-			   "type %d\n", type);
-		err = -EINVAL;
-	}
-
-	return err;
+	clear_bit_unlock(type, pmu_lock);
 }
-EXPORT_SYMBOL_GPL(init_pmu);
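After this change, pmu.c reduces PMU reservation to a per-type try-lock built on the kernel's atomic bitops. A standalone sketch of the same pattern, with hypothetical names, for illustration only:

/* Illustrative sketch of the bitmap try-lock pattern used above; the
 * example_* names are hypothetical and not part of the patch.
 * test_and_set_bit_lock() atomically sets a bit and returns its previous
 * value, so a non-zero result means the resource was already claimed;
 * clear_bit_unlock() releases it with unlock ordering. */
#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_NR_RESOURCES	4	/* hypothetical */
static unsigned long example_lock[BITS_TO_LONGS(EXAMPLE_NR_RESOURCES)];

static int example_try_claim(unsigned int idx)
{
	return test_and_set_bit_lock(idx, example_lock) ? -EBUSY : 0;
}

static void example_release(unsigned int idx)
{
	clear_bit_unlock(idx, example_lock);
}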