summaryrefslogtreecommitdiffstats
path: root/drivers/perf/arm-ccn.c
diff options
context:
space:
mode:
authorRobin Murphy <robin.murphy@arm.com>2019-04-16 11:24:25 -0400
committerWill Deacon <will.deacon@arm.com>2019-04-23 07:29:37 -0400
commit9bcb929f969e4054732158908b1d70e787ef780f (patch)
tree296a14430067d951ca84e92cf9c8b254a62d3d68 /drivers/perf/arm-ccn.c
parent0d2e2a82d4de298d006bf8eddc86829e3c7da820 (diff)
perf/arm-ccn: Clean up CPU hotplug handling
Like arm-cci, arm-ccn has the same issue of disabling preemption around operations which can take mutexes. Again, remove the definite bug by simply not trying to fight the theoretical races. And since we are touching the hotplug handling code, take the opportunity to streamline it, as there's really no need to store a full-sized cpumask to keep track of a single CPU ID.

Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'drivers/perf/arm-ccn.c')
-rw-r--r--drivers/perf/arm-ccn.c25
1 files changed, 13 insertions, 12 deletions
diff --git a/drivers/perf/arm-ccn.c b/drivers/perf/arm-ccn.c
index 2ae76026e947..0bb52d9bdcf7 100644
--- a/drivers/perf/arm-ccn.c
+++ b/drivers/perf/arm-ccn.c
@@ -167,7 +167,7 @@ struct arm_ccn_dt {
167 167
168 struct hrtimer hrtimer; 168 struct hrtimer hrtimer;
169 169
170 cpumask_t cpu; 170 unsigned int cpu;
171 struct hlist_node node; 171 struct hlist_node node;
172 172
173 struct pmu pmu; 173 struct pmu pmu;
@@ -559,7 +559,7 @@ static ssize_t arm_ccn_pmu_cpumask_show(struct device *dev,
559{ 559{
560 struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev)); 560 struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
561 561
562 return cpumap_print_to_pagebuf(true, buf, &ccn->dt.cpu); 562 return cpumap_print_to_pagebuf(true, buf, cpumask_of(ccn->dt.cpu));
563} 563}
564 564
565static struct device_attribute arm_ccn_pmu_cpumask_attr = 565static struct device_attribute arm_ccn_pmu_cpumask_attr =
@@ -759,7 +759,7 @@ static int arm_ccn_pmu_event_init(struct perf_event *event)
759 * mitigate this, we enforce CPU assignment to one, selected 759 * mitigate this, we enforce CPU assignment to one, selected
760 * processor (the one described in the "cpumask" attribute). 760 * processor (the one described in the "cpumask" attribute).
761 */ 761 */
762 event->cpu = cpumask_first(&ccn->dt.cpu); 762 event->cpu = ccn->dt.cpu;
763 763
764 node_xp = CCN_CONFIG_NODE(event->attr.config); 764 node_xp = CCN_CONFIG_NODE(event->attr.config);
765 type = CCN_CONFIG_TYPE(event->attr.config); 765 type = CCN_CONFIG_TYPE(event->attr.config);
@@ -1215,15 +1215,15 @@ static int arm_ccn_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
1215 struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt); 1215 struct arm_ccn *ccn = container_of(dt, struct arm_ccn, dt);
1216 unsigned int target; 1216 unsigned int target;
1217 1217
1218 if (!cpumask_test_and_clear_cpu(cpu, &dt->cpu)) 1218 if (cpu != dt->cpu)
1219 return 0; 1219 return 0;
1220 target = cpumask_any_but(cpu_online_mask, cpu); 1220 target = cpumask_any_but(cpu_online_mask, cpu);
1221 if (target >= nr_cpu_ids) 1221 if (target >= nr_cpu_ids)
1222 return 0; 1222 return 0;
1223 perf_pmu_migrate_context(&dt->pmu, cpu, target); 1223 perf_pmu_migrate_context(&dt->pmu, cpu, target);
1224 cpumask_set_cpu(target, &dt->cpu); 1224 dt->cpu = target;
1225 if (ccn->irq) 1225 if (ccn->irq)
1226 WARN_ON(irq_set_affinity_hint(ccn->irq, &dt->cpu) != 0); 1226 WARN_ON(irq_set_affinity_hint(ccn->irq, cpumask_of(dt->cpu)));
1227 return 0; 1227 return 0;
1228} 1228}
1229 1229
@@ -1299,29 +1299,30 @@ static int arm_ccn_pmu_init(struct arm_ccn *ccn)
1299 } 1299 }
1300 1300
1301 /* Pick one CPU which we will use to collect data from CCN... */ 1301 /* Pick one CPU which we will use to collect data from CCN... */
1302 cpumask_set_cpu(get_cpu(), &ccn->dt.cpu); 1302 ccn->dt.cpu = raw_smp_processor_id();
1303 1303
1304 /* Also make sure that the overflow interrupt is handled by this CPU */ 1304 /* Also make sure that the overflow interrupt is handled by this CPU */
1305 if (ccn->irq) { 1305 if (ccn->irq) {
1306 err = irq_set_affinity_hint(ccn->irq, &ccn->dt.cpu); 1306 err = irq_set_affinity_hint(ccn->irq, cpumask_of(ccn->dt.cpu));
1307 if (err) { 1307 if (err) {
1308 dev_err(ccn->dev, "Failed to set interrupt affinity!\n"); 1308 dev_err(ccn->dev, "Failed to set interrupt affinity!\n");
1309 goto error_set_affinity; 1309 goto error_set_affinity;
1310 } 1310 }
1311 } 1311 }
1312 1312
1313 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1314 &ccn->dt.node);
1315
1313 err = perf_pmu_register(&ccn->dt.pmu, name, -1); 1316 err = perf_pmu_register(&ccn->dt.pmu, name, -1);
1314 if (err) 1317 if (err)
1315 goto error_pmu_register; 1318 goto error_pmu_register;
1316 1319
1317 cpuhp_state_add_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1318 &ccn->dt.node);
1319 put_cpu();
1320 return 0; 1320 return 0;
1321 1321
1322error_pmu_register: 1322error_pmu_register:
1323 cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_CCN_ONLINE,
1324 &ccn->dt.node);
1323error_set_affinity: 1325error_set_affinity:
1324 put_cpu();
1325error_choose_name: 1326error_choose_name:
1326 ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id); 1327 ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
1327 for (i = 0; i < ccn->num_xps; i++) 1328 for (i = 0; i < ccn->num_xps; i++)