commit    217d453d473c5ddfd140a06bf9d8575218551020
tree      7cc69c16b02e64faf46344e0f4e6cb3afa43ac7c
parent    a78afccbbaa6d2df49768cabad8af28a0a84181d
author    Yang Yingliang <yangyingliang@huawei.com>    2015-09-24 05:32:14 -0400
committer Catalin Marinas <catalin.marinas@arm.com>    2015-10-09 12:40:35 -0400

arm64: fix a migrating irq bug when hotplug cpu
When a CPU is disabled, all of its IRQs are migrated to another CPU.
In some cases the new affinity differs from the old one, so the stored
affinity mask must be updated; but when irq_set_affinity() returns
IRQ_SET_MASK_OK_DONE, the old affinity is never updated. Fix this by
using irq_do_set_affinity() instead. Since migrating interrupts is a
core-code matter, use the generic helper irq_migrate_all_off_this_cpu()
from kernel/irq/migration.c to migrate the interrupts.
Cc: Jiang Liu <jiang.liu@linux.intel.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Russell King - ARM Linux <linux@arm.linux.org.uk>
Cc: Hanjun Guo <hanjun.guo@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
 arch/arm64/Kconfig           |  1 +
 arch/arm64/include/asm/irq.h |  1 -
 arch/arm64/kernel/irq.c      | 62 ----------------------------------
 arch/arm64/kernel/smp.c      |  3 ++-
 4 files changed, 3 insertions(+), 64 deletions(-)
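For context on the IRQ_SET_MASK_OK_DONE problem described above: the old arm64
migrate_one_irq() (removed in the irq.c hunk below) only copied the new mask back
into the descriptor when the chip callback returned IRQ_SET_MASK_OK, so a chip
reporting IRQ_SET_MASK_OK_DONE left the stored affinity stale, while the generic
irq_do_set_affinity() path copies the mask for both return codes. The standalone
C sketch below models that difference; it is illustrative only, not kernel code:
fake_irq_desc, chip_set_affinity() and the integer masks are made-up stand-ins,
and only the enum names mirror <linux/irq.h>.

	/*
	 * Standalone model of the affinity-update bug (illustration only,
	 * not kernel code). Build with: cc -Wall -o affinity-demo demo.c
	 */
	#include <stdio.h>

	enum irq_set_mask_result {
		IRQ_SET_MASK_OK,	/* core code should copy the new mask */
		IRQ_SET_MASK_OK_NOCOPY,	/* chip already updated the mask itself */
		IRQ_SET_MASK_OK_DONE,	/* like OK, but skip further chip work */
	};

	struct fake_irq_desc {
		unsigned int stored_affinity;	/* stand-in for the irq_desc cpumask */
	};

	/* A chip callback that reports success with IRQ_SET_MASK_OK_DONE. */
	static enum irq_set_mask_result chip_set_affinity(unsigned int new_mask)
	{
		(void)new_mask;			/* hardware programming elided */
		return IRQ_SET_MASK_OK_DONE;
	}

	/* Old arm64 behaviour: only IRQ_SET_MASK_OK triggered the copy-back. */
	static void old_migrate_one_irq(struct fake_irq_desc *desc, unsigned int new_mask)
	{
		if (chip_set_affinity(new_mask) == IRQ_SET_MASK_OK)
			desc->stored_affinity = new_mask;	/* never reached here */
	}

	/* irq_do_set_affinity()-style handling: OK and OK_DONE both copy back. */
	static void generic_migrate_one_irq(struct fake_irq_desc *desc, unsigned int new_mask)
	{
		switch (chip_set_affinity(new_mask)) {
		case IRQ_SET_MASK_OK:
		case IRQ_SET_MASK_OK_DONE:
			desc->stored_affinity = new_mask;
			break;
		case IRQ_SET_MASK_OK_NOCOPY:
			break;			/* chip updated the mask itself */
		}
	}

	int main(void)
	{
		struct fake_irq_desc a = { .stored_affinity = 0x1 };
		struct fake_irq_desc b = { .stored_affinity = 0x1 };

		old_migrate_one_irq(&a, 0x2);
		generic_migrate_one_irq(&b, 0x2);

		printf("old arm64 path: stored affinity 0x%x (stale)\n", a.stored_affinity);
		printf("generic path:   stored affinity 0x%x (updated)\n", b.stored_affinity);
		return 0;
	}

This is the behaviour the patch picks up by routing __cpu_disable() through the
generic irq_migrate_all_off_this_cpu() helper, as shown in the smp.c hunk below.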
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 98b4f98a13de..1b35bdbd5d74 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -427,6 +427,7 @@ config NR_CPUS
 
 config HOTPLUG_CPU
 	bool "Support for hot-pluggable CPUs"
+	select GENERIC_IRQ_MIGRATION
 	help
 	  Say Y here to experiment with turning CPUs off and on.  CPUs
 	  can be controlled through /sys/devices/system/cpu.
diff --git a/arch/arm64/include/asm/irq.h b/arch/arm64/include/asm/irq.h
index bbb251b14746..09169296c3cc 100644
--- a/arch/arm64/include/asm/irq.h
+++ b/arch/arm64/include/asm/irq.h
@@ -7,7 +7,6 @@
 
 struct pt_regs;
 
-extern void migrate_irqs(void);
 extern void set_handle_irq(void (*handle_irq)(struct pt_regs *));
 
 static inline void acpi_irq_init(void)
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 11dc3fd47853..9f17ec071ee0 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -27,7 +27,6 @@
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/seq_file.h>
-#include <linux/ratelimit.h>
 
 unsigned long irq_err_count;
 
@@ -54,64 +53,3 @@ void __init init_IRQ(void)
 	if (!handle_arch_irq)
 		panic("No interrupt controller found.");
 }
-
-#ifdef CONFIG_HOTPLUG_CPU
-static bool migrate_one_irq(struct irq_desc *desc)
-{
-	struct irq_data *d = irq_desc_get_irq_data(desc);
-	const struct cpumask *affinity = irq_data_get_affinity_mask(d);
-	struct irq_chip *c;
-	bool ret = false;
-
-	/*
-	 * If this is a per-CPU interrupt, or the affinity does not
-	 * include this CPU, then we have nothing to do.
-	 */
-	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
-		return false;
-
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
-		affinity = cpu_online_mask;
-		ret = true;
-	}
-
-	c = irq_data_get_irq_chip(d);
-	if (!c->irq_set_affinity)
-		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
-		cpumask_copy(irq_data_get_affinity_mask(d), affinity);
-
-	return ret;
-}
-
-/*
- * The current CPU has been marked offline.  Migrate IRQs off this CPU.
- * If the affinity settings do not allow other CPUs, force them onto any
- * available CPU.
- *
- * Note: we must iterate over all IRQs, whether they have an attached
- * action structure or not, as we need to get chained interrupts too.
- */
-void migrate_irqs(void)
-{
-	unsigned int i;
-	struct irq_desc *desc;
-	unsigned long flags;
-
-	local_irq_save(flags);
-
-	for_each_irq_desc(i, desc) {
-		bool affinity_broken;
-
-		raw_spin_lock(&desc->lock);
-		affinity_broken = migrate_one_irq(desc);
-		raw_spin_unlock(&desc->lock);
-
-		if (affinity_broken)
-			pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
-					    i, smp_processor_id());
-	}
-
-	local_irq_restore(flags);
-}
-#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index 03b0aa28ea61..b7a973d6861e 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -231,7 +231,8 @@ int __cpu_disable(void)
 	/*
 	 * OK - migrate IRQs away from this CPU
 	 */
-	migrate_irqs();
+	irq_migrate_all_off_this_cpu();
+
 	return 0;
 }
 