author	Mark Rutland <mark.rutland@arm.com>	2014-10-23 10:23:35 -0400
committer	Will Deacon <will.deacon@arm.com>	2014-10-30 08:17:01 -0400
commit	af66abfe2ec8bd82211e9e4f036a64c902ff4cdb (patch)
tree	6240919ba10cc0f2dedfc7ac57d465cbd7174f08
parent	abdf655a30b6464fe86c8369de60ccf92f73f589 (diff)
arm: perf: fold hotplug notifier into arm_pmu
Handling multiple PMUs using a single hotplug notifier requires a list of PMUs to be maintained, with synchronisation in the probe, remove, and notify paths. This is error-prone and makes the code much harder to maintain.

Instead of using a single notifier, we can dynamically allocate a notifier block per-PMU. The end result is the same, but the list of PMUs is implicit in the hotplug notifier list rather than within a perf-local data structure, which makes the code far easier to handle.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--	arch/arm/include/asm/pmu.h	1
-rw-r--r--	arch/arm/kernel/perf_event_cpu.c	69
2 files changed, 35 insertions, 35 deletions
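The core idea of the patch is that each arm_pmu embeds its own notifier_block, and the callback recovers the owning PMU with container_of() instead of consulting a global pointer or list. Below is a minimal userspace sketch of that pattern, not the kernel code itself: the notifier chain and register_notifier() here are simplified, hypothetical stand-ins for the kernel's CPU notifier machinery, with struct and function names chosen to mirror the patch.

	#include <stddef.h>
	#include <stdio.h>

	/* Recover the enclosing structure from a pointer to one of its members,
	 * like the kernel's container_of(). */
	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct notifier_block {
		int (*notifier_call)(struct notifier_block *nb, unsigned long action);
		struct notifier_block *next;	/* chain link, owned by the registrar */
	};

	struct arm_pmu {
		const char *name;
		struct notifier_block hotplug_nb;	/* embedded: one notifier per PMU */
	};

	/* The callback receives only the notifier_block, so it uses container_of()
	 * to find which PMU it belongs to -- no global PMU list required. */
	static int cpu_pmu_notify(struct notifier_block *b, unsigned long action)
	{
		struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);

		printf("resetting %s after hotplug action %lu\n", pmu->name, action);
		return 0;
	}

	/* Toy notifier chain standing in for the kernel's CPU notifier list. */
	static struct notifier_block *chain;

	static void register_notifier(struct notifier_block *nb)
	{
		nb->next = chain;
		chain = nb;
	}

	int main(void)
	{
		struct arm_pmu a = { .name = "pmu0" }, b = { .name = "pmu1" };

		a.hotplug_nb.notifier_call = cpu_pmu_notify;
		b.hotplug_nb.notifier_call = cpu_pmu_notify;
		register_notifier(&a.hotplug_nb);
		register_notifier(&b.hotplug_nb);

		/* Simulate one notification reaching every registered PMU: the "list
		 * of PMUs" is implicit in the notifier chain itself. */
		for (struct notifier_block *nb = chain; nb; nb = nb->next)
			nb->notifier_call(nb, 0);

		return 0;
	}

This is why the patch can delete the static cpu_pmu_hotplug_notifier and the registration in register_pmu_driver(): registration moves into cpu_pmu_init(), once per probed PMU.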
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index cc0149835507..b1596bd59129 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -116,6 +116,7 @@ struct arm_pmu {
 	u64		max_period;
 	struct platform_device	*plat_device;
 	struct pmu_hw_events	__percpu *hw_events;
+	struct notifier_block	hotplug_nb;
 };
 
 #define to_arm_pmu(p)	(container_of(p, struct arm_pmu, pmu))
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index f0f6c5ef41b0..dd9acc95ebc0 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -160,8 +160,31 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 	return 0;
 }
 
+/*
+ * PMU hardware loses all context when a CPU goes offline.
+ * When a CPU is hotplugged back in, since some hardware registers are
+ * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
+ * junk values out of them.
+ */
+static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
+			  void *hcpu)
+{
+	struct arm_pmu *pmu = container_of(b, struct arm_pmu, hotplug_nb);
+
+	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
+		return NOTIFY_DONE;
+
+	if (pmu->reset)
+		pmu->reset(pmu);
+	else
+		return NOTIFY_DONE;
+
+	return NOTIFY_OK;
+}
+
 static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 {
+	int err;
 	int cpu;
 	struct pmu_hw_events __percpu *cpu_hw_events;
 
@@ -169,6 +192,11 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	if (!cpu_hw_events)
 		return -ENOMEM;
 
+	cpu_pmu->hotplug_nb.notifier_call = cpu_pmu_notify;
+	err = register_cpu_notifier(&cpu_pmu->hotplug_nb);
+	if (err)
+		goto out_hw_events;
+
 	for_each_possible_cpu(cpu) {
 		struct pmu_hw_events *events = per_cpu_ptr(cpu_hw_events, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
@@ -188,38 +216,19 @@ static int cpu_pmu_init(struct arm_pmu *cpu_pmu)
 		cpu_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
 	return 0;
+
+out_hw_events:
+	free_percpu(cpu_hw_events);
+	return err;
 }
 
 static void cpu_pmu_destroy(struct arm_pmu *cpu_pmu)
 {
+	unregister_cpu_notifier(&cpu_pmu->hotplug_nb);
 	free_percpu(cpu_pmu->hw_events);
 }
 
 /*
- * PMU hardware loses all context when a CPU goes offline.
- * When a CPU is hotplugged back in, since some hardware registers are
- * UNKNOWN at reset, the PMU must be explicitly reset to avoid reading
- * junk values out of them.
- */
-static int cpu_pmu_notify(struct notifier_block *b, unsigned long action,
-			  void *hcpu)
-{
-	if ((action & ~CPU_TASKS_FROZEN) != CPU_STARTING)
-		return NOTIFY_DONE;
-
-	if (cpu_pmu && cpu_pmu->reset)
-		cpu_pmu->reset(cpu_pmu);
-	else
-		return NOTIFY_DONE;
-
-	return NOTIFY_OK;
-}
-
-static struct notifier_block cpu_pmu_hotplug_notifier = {
-	.notifier_call = cpu_pmu_notify,
-};
-
-/*
  * PMU platform driver and devicetree bindings.
  */
 static struct of_device_id cpu_pmu_of_device_ids[] = {
@@ -344,16 +353,6 @@ static struct platform_driver cpu_pmu_driver = {
 
 static int __init register_pmu_driver(void)
 {
-	int err;
-
-	err = register_cpu_notifier(&cpu_pmu_hotplug_notifier);
-	if (err)
-		return err;
-
-	err = platform_driver_register(&cpu_pmu_driver);
-	if (err)
-		unregister_cpu_notifier(&cpu_pmu_hotplug_notifier);
-
-	return err;
+	return platform_driver_register(&cpu_pmu_driver);
 }
 device_initcall(register_pmu_driver);