author    Linus Torvalds <torvalds@linux-foundation.org>  2015-07-12 12:15:02 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-07-12 12:15:02 -0400
commit    c4bc680cf7bcd257865dac97dd2debbc9dcffad4
tree      ceb5df05f889df160d04ebf952fc4c3e50de7981
parent    59c3cb553f5fc4ed6868eeaae6ffd8e1daf6d93e
parent    a899418167264c7bac574b1a0f1b2c26c5b0995a
Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq fix from Thomas Gleixner:
 "A single fix for a cpu hotplug race vs. interrupt descriptors:

  Prevent irq setup/teardown across the cpu starting/dying parts of cpu
  hotplug so that the starting/dying cpu has a stable view of the
  descriptor space. This has been an issue for all architectures in the
  cpu dying phase, where interrupts are migrated away from the dying
  cpu. In the starting phase it's mostly an x86 issue vs. the vector
  space update"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  hotplug: Prevent alloc/free of irq descriptors during cpu up/down
 include/linux/irqdesc.h |  7 ++++++-
 kernel/cpu.c            | 22 +++++++++++++++++++++--
 kernel/irq/internals.h  |  4 ----
 3 files changed, 27 insertions(+), 6 deletions(-)
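For context: the helpers taken below by _cpu_up()/_cpu_down() are not part of this diff. They already exist in kernel/irq/irqdesc.c (the internals.h hunk at the end only removes their private declarations); this merge just makes them visible outside kernel/irq/. A minimal sketch of those helpers, assuming the preexisting sparse_irq_lock mutex in that file:

/* kernel/irq/irqdesc.c (preexisting, shown for context; not in this diff) */
static DEFINE_MUTEX(sparse_irq_lock);

/* Taken by the hotplug path to get a stable view of the descriptor space */
void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}

The same mutex already serializes irq_alloc_descs()/irq_free_descs(), so holding it excludes any concurrent alloc/free of descriptors.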
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 624a668e61f1..fcea4e48e21f 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -87,7 +87,12 @@ struct irq_desc {
 	const char		*name;
 } ____cacheline_internodealigned_in_smp;
 
-#ifndef CONFIG_SPARSE_IRQ
+#ifdef CONFIG_SPARSE_IRQ
+extern void irq_lock_sparse(void);
+extern void irq_unlock_sparse(void);
+#else
+static inline void irq_lock_sparse(void) { }
+static inline void irq_unlock_sparse(void) { }
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
 
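The effect of this hunk: the declarations become visible to code outside kernel/irq/, and kernels built without CONFIG_SPARSE_IRQ get empty inline stubs, so callers need no #ifdefs. A hypothetical caller (names are illustrative, not from the patch):

#include <linux/irqdesc.h>

/* Hypothetical example: bracketing a descriptor walk for a stable view */
static void stable_desc_walk_example(void)
{
	irq_lock_sparse();	/* mutex under CONFIG_SPARSE_IRQ, no-op otherwise */
	/* ... inspect irq descriptors without racing irq_free_descs() ... */
	irq_unlock_sparse();
}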
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 9c9c9fab16cc..6a374544d495 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -21,6 +21,7 @@
 #include <linux/suspend.h>
 #include <linux/lockdep.h>
 #include <linux/tick.h>
+#include <linux/irq.h>
 #include <trace/events/power.h>
 
 #include "smpboot.h"
@@ -392,13 +393,19 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	smpboot_park_threads(cpu);
 
 	/*
-	 * So now all preempt/rcu users must observe !cpu_active().
+	 * Prevent irq alloc/free while the dying cpu reorganizes the
+	 * interrupt affinities.
 	 */
+	irq_lock_sparse();
 
+	/*
+	 * So now all preempt/rcu users must observe !cpu_active().
+	 */
 	err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
 	if (err) {
 		/* CPU didn't die: tell everyone.  Can't complain. */
 		cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
+		irq_unlock_sparse();
 		goto out_release;
 	}
 	BUG_ON(cpu_online(cpu));
@@ -415,6 +422,9 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
 	smp_mb(); /* Read from cpu_dead_idle before __cpu_die(). */
 	per_cpu(cpu_dead_idle, cpu) = false;
 
+	/* Interrupts are moved away from the dying cpu, reenable alloc/free */
+	irq_unlock_sparse();
+
 	hotplug_cpu__broadcast_tick_pull(cpu);
 	/* This actually kills the CPU. */
 	__cpu_die(cpu);
@@ -517,8 +527,18 @@ static int _cpu_up(unsigned int cpu, int tasks_frozen)
 		goto out_notify;
 	}
 
+	/*
+	 * Some architectures have to walk the irq descriptors to
+	 * setup the vector space for the cpu which comes online.
+	 * Prevent irq alloc/free across the bringup.
+	 */
+	irq_lock_sparse();
+
 	/* Arch-specific enabling code. */
 	ret = __cpu_up(cpu, idle);
+
+	irq_unlock_sparse();
+
 	if (ret != 0)
 		goto out_notify;
 	BUG_ON(!cpu_online(cpu));
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 4834ee828c41..61008b8433ab 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -76,12 +76,8 @@ extern void unmask_threaded_irq(struct irq_desc *desc);
 
 #ifdef CONFIG_SPARSE_IRQ
 static inline void irq_mark_irq(unsigned int irq) { }
-extern void irq_lock_sparse(void);
-extern void irq_unlock_sparse(void);
 #else
 extern void irq_mark_irq(unsigned int irq);
-static inline void irq_lock_sparse(void) { }
-static inline void irq_unlock_sparse(void) { }
 #endif
 
 extern void init_kstat_irqs(struct irq_desc *desc, int node, int nr);
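To illustrate the race the new locking closes: during bringup or teardown the starting/dying cpu walks the irq descriptors while another cpu may concurrently run irq_alloc_descs()/irq_free_descs(). A purely illustrative sketch of such a walk; the helper name and body are hypothetical, only for_each_irq_desc() is real kernel API:

#include <linux/irqdesc.h>
#include <linux/irqnr.h>

/* Hypothetical sketch of an arch-side descriptor walk of the kind
 * _cpu_up()/_cpu_down() now bracket with irq_lock_sparse(). */
static void setup_vector_space_for_cpu(unsigned int cpu)
{
	unsigned int irq;
	struct irq_desc *desc;

	/*
	 * The caller holds irq_lock_sparse(); without it, a concurrent
	 * irq_free_descs() on another cpu could free 'desc' while this
	 * loop is still dereferencing it.
	 */
	for_each_irq_desc(irq, desc) {
		/* per-cpu vector assignment for 'cpu' would go here */
	}
}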