author    Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>  2012-07-31 05:34:25 -0400
committer Will Deacon <will.deacon@arm.com>                      2012-08-23 06:35:52 -0400
commit    051f1b13144dd8553d5a5104dde94c7263ae3ba7 (patch)
tree      b9f10b81ace1d986a01c28afddec1f2cc8028e66 /arch/arm/kernel/perf_event_cpu.c
parent    5505b206ca006d0506d1d3b3c494aa86234f66e2 (diff)
ARM: perf: move irq registration into pmu implementation
This patch moves the CPU-specific IRQ registration and parsing code
into the CPU PMU backend. This is required because a PMU may have more
than one interrupt, which in turn can be either PPI (per-cpu) or SPI
(requiring strict affinity setting at the interrupt distributor).

Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>
[will: cosmetic edits and reworked interrupt dispatching]
Signed-off-by: Will Deacon <will.deacon@arm.com>
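For context, the core layer (arch/arm/kernel/perf_event.c, not covered
by this diffstat) consumes the two new callbacks roughly as sketched
below. The callback wiring matches this diff, but the caller names
(armpmu_reserve_hardware, armpmu_dispatch_irq) come from the wider
patch and are assumptions here, not shown in the text below:

	/*
	 * Sketch of the core-layer caller, assuming struct arm_pmu now
	 * carries the request_irq/free_irq function pointers installed
	 * by this diff. armpmu_reserve_hardware and armpmu_dispatch_irq
	 * are assumed names from the surrounding patch.
	 */
	static int armpmu_reserve_hardware(struct arm_pmu *armpmu)
	{
		int err;

		if (!armpmu->request_irq)
			return -ENODEV;

		/* IRQ parsing, affinity and registration now live in the backend. */
		err = armpmu->request_irq(armpmu_dispatch_irq);
		if (err)
			armpmu->free_irq();	/* frees only IRQs marked in active_irqs */

		return err;
	}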
Diffstat (limited to 'arch/arm/kernel/perf_event_cpu.c')
 arch/arm/kernel/perf_event_cpu.c | 66 ++++++++++++++++++++++++++++++++++++++-
 1 file changed, 65 insertions(+), 1 deletion(-)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 56ddc989c909..8d7d8d4de9d6 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -70,6 +70,67 @@ static struct pmu_hw_events *cpu_pmu_get_cpu_events(void)
 	return &__get_cpu_var(cpu_hw_events);
 }
 
+static void cpu_pmu_free_irq(void)
+{
+	int i, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+
+	for (i = 0; i < irqs; ++i) {
+		if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+			continue;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq >= 0)
+			free_irq(irq, cpu_pmu);
+	}
+}
+
+static int cpu_pmu_request_irq(irq_handler_t handler)
+{
+	int i, err, irq, irqs;
+	struct platform_device *pmu_device = cpu_pmu->plat_device;
+
+	if (!pmu_device)
+		return -ENODEV;
+
+	irqs = min(pmu_device->num_resources, num_possible_cpus());
+	if (irqs < 1) {
+		pr_err("no irqs for PMUs defined\n");
+		return -ENODEV;
+	}
+
+	for (i = 0; i < irqs; ++i) {
+		err = 0;
+		irq = platform_get_irq(pmu_device, i);
+		if (irq < 0)
+			continue;
+
+		/*
+		 * If we have a single PMU interrupt that we can't shift,
+		 * assume that we're running on a uniprocessor machine and
+		 * continue. Otherwise, continue without this interrupt.
+		 */
+		if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
+				   irq, i);
+			continue;
+		}
+
+		err = request_irq(irq, handler, IRQF_NOBALANCING, "arm-pmu",
+				  cpu_pmu);
+		if (err) {
+			pr_err("unable to request IRQ%d for ARM PMU counters\n",
+			       irq);
+			return err;
+		}
+
+		cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+	}
+
+	return 0;
+}
+
 static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	int cpu;
@@ -79,7 +140,10 @@ static void __devinit cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
 	}
-	cpu_pmu->get_hw_events = cpu_pmu_get_cpu_events;
+
+	cpu_pmu->get_hw_events	= cpu_pmu_get_cpu_events;
+	cpu_pmu->request_irq	= cpu_pmu_request_irq;
+	cpu_pmu->free_irq	= cpu_pmu_free_irq;
 
 	/* Ensure the PMU has sane values out of reset. */
 	if (cpu_pmu && cpu_pmu->reset)
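A note on the PPI case called out in the commit message: the
request_irq()/irq_set_affinity() pattern above only fits SPIs. A
backend whose single interrupt is a PPI would instead register it once
for all CPUs via the percpu IRQ API. A hypothetical sketch, not part of
this patch (the per-cpu token and helper name are invented for
illustration):

	/*
	 * Hypothetical sketch: handling a PPI instead of per-cpu SPIs.
	 * A PPI fires on every core from one registration, so affinity
	 * setting does not apply.
	 */
	static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);	/* invented token */

	static int cpu_pmu_request_ppi(int irq, irq_handler_t handler)
	{
		int err = request_percpu_irq(irq, handler, "arm-pmu", &percpu_pmu);

		if (!err)
			/*
			 * Enables the PPI on the calling CPU only; every
			 * other CPU must do the same locally, e.g. from a
			 * CPU hotplug notifier.
			 */
			enable_percpu_irq(irq, IRQ_TYPE_NONE);

		return err;
	}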