author		Marc Zyngier <marc.zyngier@arm.com>	2018-10-31 04:41:34 -0400
committer	Olof Johansson <olof@lixom.net>		2018-11-02 14:22:09 -0400
commit		832ad0e3da4510fd17f98804abe512ea9a747035 (patch)
tree		8e97645f71329fbcff5612148028583ba247d366 /drivers
parent		4b42745211af552f170f38a1b97f4a112b5da6b2 (diff)
soc: ti: QMSS: Fix usage of irq_set_affinity_hint
The Keystone QMSS driver is pretty damaged, in the sense that it
does things like this:

	irq_set_affinity_hint(irq, to_cpumask(&cpu_map));

where cpu_map is a local variable. As we leave the function, this
will point to nowhere-land, and things will end up badly.

Instead, let's use a proper cpumask that gets allocated, giving
the driver a chance to actually work with things like irqbalance
as well as have a hypothetical 64bit future.

Cc: stable@vger.kernel.org
Acked-by: Santosh Shilimkar <ssantosh@kernel.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Olof Johansson <olof@lixom.net>
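To make the failure mode concrete, here is a minimal, hypothetical sketch of the broken pattern next to the allocated-cpumask approach the patch adopts. The function names are illustrative only and do not appear in the driver; the fix below instead stores the allocated mask in struct knav_irq_info.

	/* Broken: cpu_map is a stack variable, so the pointer handed to the
	 * IRQ core via to_cpumask() dangles once this function returns.
	 */
	static int hypothetical_irq_setup_broken(int irq, u32 hw_map)
	{
		unsigned long cpu_map = hw_map;

		return irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
	}

	/* Fixed: allocate a cpumask whose lifetime is tied to the device,
	 * so the affinity hint stays valid after this function returns.
	 */
	static int hypothetical_irq_setup_fixed(struct device *dev, int irq,
						u32 hw_map)
	{
		unsigned long bits = hw_map;
		struct cpumask *mask;
		int bit;

		mask = devm_kzalloc(dev, cpumask_size(), GFP_KERNEL);
		if (!mask)
			return -ENOMEM;

		for_each_set_bit(bit, &bits, BITS_PER_LONG)
			cpumask_set_cpu(bit, mask);

		return irq_set_affinity_hint(irq, mask);
	}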
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/soc/ti/knav_qmss.h	 4
-rw-r--r--	drivers/soc/ti/knav_qmss_acc.c	10
-rw-r--r--	drivers/soc/ti/knav_qmss_queue.c	22
3 files changed, 22 insertions(+), 14 deletions(-)
diff --git a/drivers/soc/ti/knav_qmss.h b/drivers/soc/ti/knav_qmss.h
index 7c128132799e..4c28fa938ac7 100644
--- a/drivers/soc/ti/knav_qmss.h
+++ b/drivers/soc/ti/knav_qmss.h
@@ -329,8 +329,8 @@ struct knav_range_ops {
 };
 
 struct knav_irq_info {
 	int		irq;
-	u32		cpu_map;
+	struct cpumask	*cpu_mask;
 };
 
 struct knav_range_info {
diff --git a/drivers/soc/ti/knav_qmss_acc.c b/drivers/soc/ti/knav_qmss_acc.c
index 316e82e46f6c..2f7fb2dcc1d6 100644
--- a/drivers/soc/ti/knav_qmss_acc.c
+++ b/drivers/soc/ti/knav_qmss_acc.c
@@ -205,18 +205,18 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range,
 {
 	struct knav_device *kdev = range->kdev;
 	struct knav_acc_channel *acc;
-	unsigned long cpu_map;
+	struct cpumask *cpu_mask;
 	int ret = 0, irq;
 	u32 old, new;
 
 	if (range->flags & RANGE_MULTI_QUEUE) {
 		acc = range->acc;
 		irq = range->irqs[0].irq;
-		cpu_map = range->irqs[0].cpu_map;
+		cpu_mask = range->irqs[0].cpu_mask;
 	} else {
 		acc = range->acc + queue;
 		irq = range->irqs[queue].irq;
-		cpu_map = range->irqs[queue].cpu_map;
+		cpu_mask = range->irqs[queue].cpu_mask;
 	}
 
 	old = acc->open_mask;
@@ -239,8 +239,8 @@ static int knav_range_setup_acc_irq(struct knav_range_info *range,
 			acc->name, acc->name);
 		ret = request_irq(irq, knav_acc_int_handler, 0, acc->name,
 				  range);
-		if (!ret && cpu_map) {
-			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+		if (!ret && cpu_mask) {
+			ret = irq_set_affinity_hint(irq, cpu_mask);
 			if (ret) {
 				dev_warn(range->kdev->dev,
 					 "Failed to set IRQ affinity\n");
diff --git a/drivers/soc/ti/knav_qmss_queue.c b/drivers/soc/ti/knav_qmss_queue.c
index b5d5673c255c..8b418379272d 100644
--- a/drivers/soc/ti/knav_qmss_queue.c
+++ b/drivers/soc/ti/knav_qmss_queue.c
@@ -118,19 +118,17 @@ static int knav_queue_setup_irq(struct knav_range_info *range,
 			  struct knav_queue_inst *inst)
 {
 	unsigned queue = inst->id - range->queue_base;
-	unsigned long cpu_map;
 	int ret = 0, irq;
 
 	if (range->flags & RANGE_HAS_IRQ) {
 		irq = range->irqs[queue].irq;
-		cpu_map = range->irqs[queue].cpu_map;
 		ret = request_irq(irq, knav_queue_int_handler, 0,
 				  inst->irq_name, inst);
 		if (ret)
 			return ret;
 		disable_irq(irq);
-		if (cpu_map) {
-			ret = irq_set_affinity_hint(irq, to_cpumask(&cpu_map));
+		if (range->irqs[queue].cpu_mask) {
+			ret = irq_set_affinity_hint(irq, range->irqs[queue].cpu_mask);
 			if (ret) {
 				dev_warn(range->kdev->dev,
 					 "Failed to set IRQ affinity\n");
@@ -1262,9 +1260,19 @@ static int knav_setup_queue_range(struct knav_device *kdev,
 
 		range->num_irqs++;
 
-		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3)
-			range->irqs[i].cpu_map =
-				(oirq.args[2] & 0x0000ff00) >> 8;
+		if (IS_ENABLED(CONFIG_SMP) && oirq.args_count == 3) {
+			unsigned long mask;
+			int bit;
+
+			range->irqs[i].cpu_mask = devm_kzalloc(dev,
+							cpumask_size(), GFP_KERNEL);
+			if (!range->irqs[i].cpu_mask)
+				return -ENOMEM;
+
+			mask = (oirq.args[2] & 0x0000ff00) >> 8;
+			for_each_set_bit(bit, &mask, BITS_PER_LONG)
+				cpumask_set_cpu(bit, range->irqs[i].cpu_mask);
+		}
 	}
 
 	range->num_irqs = min(range->num_irqs, range->num_queues);