path: root/kernel/irq/manage.c
author    Thomas Gleixner <tglx@linutronix.de>  2017-06-19 19:37:21 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2017-06-22 12:21:14 -0400
commit    cba4235e6031e9318d68186f6d765c531cbea4e1 (patch)
tree      ec9f8f0663028e4fbd2c64f5a3b7065c2b902bef /kernel/irq/manage.c
parent    8e7b632237df8b17526411d1d98f838580bb6aa3 (diff)
genirq: Remove mask argument from setup_affinity()
No point to have this alloc/free dance of cpumasks. Provide a static mask
for setup_affinity() and protect it proper.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Christoph Hellwig <hch@lst.de>
Link: http://lkml.kernel.org/r/20170619235444.851571573@linutronix.de
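Before the diff, a minimal sketch of the two patterns the commit message contrasts, assuming only standard kernel primitives (alloc_cpumask_var(), DEFINE_RAW_SPINLOCK()). The demo_* names, including the demo_compute_affinity() helper, are hypothetical stand-ins, not the kernel code itself:

#include <linux/cpumask.h>
#include <linux/spinlock.h>

/* Hypothetical helper standing in for the real affinity computation. */
static int demo_compute_affinity(struct irq_desc *desc, struct cpumask *mask);

/*
 * Sketch of the "alloc/free dance" the patch removes: every call pays
 * for a cpumask allocation and carries an -ENOMEM failure path.
 */
static int demo_affinity_old(struct irq_desc *desc)
{
	cpumask_var_t mask;
	int ret;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;
	ret = demo_compute_affinity(desc, mask);	/* mask is scratch only */
	free_cpumask_var(mask);
	return ret;
}

/*
 * Sketch of the replacement: one static scratch mask, serialized by a
 * dedicated raw spinlock so concurrent callers cannot clobber it.
 */
static int demo_affinity_new(struct irq_desc *desc)
{
	static DEFINE_RAW_SPINLOCK(mask_lock);
	static struct cpumask mask;
	int ret;

	raw_spin_lock(&mask_lock);
	ret = demo_compute_affinity(desc, &mask);
	raw_spin_unlock(&mask_lock);
	return ret;
}

The design point is that the mask is pure scratch space: a single static buffer behind a dedicated lock removes both the GFP_KERNEL allocation and the -ENOMEM failure path from interrupt setup. A plain raw_spin_lock() (no _irqsave variant) is enough in the actual patch because, as the hunks below show, both callers already hold desc->lock with interrupts disabled.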
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 53
1 file changed, 23 insertions(+), 30 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 284f4eb1ffbe..e2f20d553d60 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -345,15 +345,18 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
 /*
  * Generic version of the affinity autoselector.
  */
-static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
+static int irq_setup_affinity(struct irq_desc *desc)
 {
 	struct cpumask *set = irq_default_affinity;
-	int node = irq_desc_get_node(desc);
+	int ret, node = irq_desc_get_node(desc);
+	static DEFINE_RAW_SPINLOCK(mask_lock);
+	static struct cpumask mask;
 
 	/* Excludes PER_CPU and NO_BALANCE interrupts */
 	if (!__irq_can_set_affinity(desc))
 		return 0;
 
+	raw_spin_lock(&mask_lock);
 	/*
 	 * Preserve the managed affinity setting and a userspace affinity
 	 * setup, but make sure that one of the targets is online.
@@ -367,43 +370,42 @@ static int setup_affinity(struct irq_desc *desc, struct cpumask *mask)
 		irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
 	}
 
-	cpumask_and(mask, cpu_online_mask, set);
+	cpumask_and(&mask, cpu_online_mask, set);
 	if (node != NUMA_NO_NODE) {
 		const struct cpumask *nodemask = cpumask_of_node(node);
 
 		/* make sure at least one of the cpus in nodemask is online */
-		if (cpumask_intersects(mask, nodemask))
-			cpumask_and(mask, mask, nodemask);
+		if (cpumask_intersects(&mask, nodemask))
+			cpumask_and(&mask, &mask, nodemask);
 	}
-	irq_do_set_affinity(&desc->irq_data, mask, false);
-	return 0;
+	ret = irq_do_set_affinity(&desc->irq_data, &mask, false);
+	raw_spin_unlock(&mask_lock);
+	return ret;
 }
 #else
 /* Wrapper for ALPHA specific affinity selector magic */
-static inline int setup_affinity(struct irq_desc *d, struct cpumask *mask)
+int irq_setup_affinity(struct irq_desc *desc)
 {
-	return irq_select_affinity(irq_desc_get_irq(d));
+	return irq_select_affinity(irq_desc_get_irq(desc));
 }
 #endif
 
 /*
- * Called when affinity is set via /proc/irq
+ * Called when a bogus affinity is set via /proc/irq
  */
-int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
+int irq_select_affinity_usr(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	unsigned long flags;
 	int ret;
 
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	ret = setup_affinity(desc, mask);
+	ret = irq_setup_affinity(desc);
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 	return ret;
 }
-
 #else
-static inline int
-setup_affinity(struct irq_desc *desc, struct cpumask *mask)
+static inline int setup_affinity(struct irq_desc *desc)
 {
 	return 0;
 }
@@ -1128,7 +1130,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	struct irqaction *old, **old_ptr;
 	unsigned long flags, thread_mask = 0;
 	int ret, nested, shared = 0;
-	cpumask_var_t mask;
 
 	if (!desc)
 		return -EINVAL;
@@ -1187,11 +1188,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		}
 	}
 
-	if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
-		ret = -ENOMEM;
-		goto out_thread;
-	}
-
 	/*
 	 * Drivers are often written to work w/o knowledge about the
 	 * underlying irq chip implementation, so a request for a
@@ -1256,7 +1252,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		if (thread_mask == ~0UL) {
 			ret = -EBUSY;
-			goto out_mask;
+			goto out_unlock;
 		}
 		/*
 		 * The thread_mask for the action is or'ed to
@@ -1300,7 +1296,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
 		       irq);
 		ret = -EINVAL;
-		goto out_mask;
+		goto out_unlock;
 	}
 
 	if (!shared) {
@@ -1308,7 +1304,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		if (ret) {
 			pr_err("Failed to request resources for %s (irq %d) on irqchip %s\n",
 			       new->name, irq, desc->irq_data.chip->name);
-			goto out_mask;
+			goto out_unlock;
 		}
 
 		init_waitqueue_head(&desc->wait_for_threads);
@@ -1320,7 +1316,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 		if (ret) {
 			irq_release_resources(desc);
-			goto out_mask;
+			goto out_unlock;
 		}
 	}
 
@@ -1357,7 +1353,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		}
 
 		/* Set default affinity mask once everything is setup */
-		setup_affinity(desc, mask);
+		irq_setup_affinity(desc);
 
 	} else if (new->flags & IRQF_TRIGGER_MASK) {
 		unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
@@ -1401,8 +1397,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	irq_add_debugfs_entry(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
-	free_cpumask_var(mask);
-
 	return 0;
 
 mismatch:
@@ -1415,9 +1409,8 @@ mismatch:
 	}
 	ret = -EBUSY;
 
-out_mask:
+out_unlock:
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
-	free_cpumask_var(mask);
 
 out_thread:
 	if (new->thread) {
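A closing editorial note on why the static mask needs its own lock at all, sketched in comment form; the interleaving shown is hypothetical and derived only from the hunks above:

/*
 * Without mask_lock, two CPUs autoselecting affinity for different
 * interrupts could interleave on the shared static cpumask:
 *
 *   CPU0                                  CPU1
 *   cpumask_and(&mask, cpu_online_mask, setA);
 *                                         cpumask_and(&mask, cpu_online_mask, setB);
 *   irq_do_set_affinity(.., &mask, ..);   // applies setB's result to CPU0's irq
 *
 * mask_lock makes the compute-then-apply sequence atomic with respect
 * to other autoselect calls, which is all the protection a scratch
 * buffer needs. The rename of the __setup_irq() error label from
 * out_mask to out_unlock reflects that the function no longer owns a
 * cpumask to free on that path; it only drops desc->lock.
 */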