author     Russell King <rmk+kernel@arm.linux.org.uk>  2011-07-21 10:14:21 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>  2011-07-21 10:14:21 -0400
commit     78359cb86b8c4c8946f6732eac2757fa5e1d4de4 (patch)
tree       750a896b26ca01b98e8106bef9f2b8a03ac2d32e /arch/arm/kernel/irq.c
parent     ca15af19ac07908c8ca386f6d944a18aa343b868 (diff)
ARM: CPU hotplug: ensure we migrate all IRQs off a downed CPU
Our selection of interrupts to consider for IRQ migration is sub-
standard. We were potentially including per-CPU interrupts in our
migration strategy, but omitting chained interrupts. This caused
some interrupts to remain on a downed CPU.
We were also trying to migrate interrupts which were not migratable,
resulting in an OOPS.
Instead, iterate over all interrupts, skipping per-CPU interrupts
and interrupts whose affinity does not include the downed CPU, and
attempt to set the affinity of each remaining interrupt, provided
its chip implements irq_set_affinity().
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm/kernel/irq.c')
-rw-r--r--  arch/arm/kernel/irq.c  39
1 file changed, 28 insertions(+), 11 deletions(-)
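
To make the change easier to review, here is migrate_one_irq() as it reads
after the patch, reassembled from the hunks below with comments condensed
(the diff remains the authoritative text):

static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);
	const struct cpumask *affinity = d->affinity;
	struct irq_chip *c;
	bool ret = false;

	/* Per-CPU IRQs cannot migrate; IRQs not affine to this CPU need no work. */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	/* No online CPU left in the affinity mask: fall back to any online CPU. */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;	/* tell the caller the affinity was broken */
	}

	/* Migrate only if the chip actually implements irq_set_affinity(). */
	c = irq_data_get_irq_chip(d);
	if (c->irq_set_affinity)
		c->irq_set_affinity(d, affinity, true);
	else
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);

	return ret;
}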
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index ab63c05290e5..0f928a131af8 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -131,46 +131,63 @@ int __init arch_probe_nr_irqs(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static bool migrate_one_irq(struct irq_data *d)
+static bool migrate_one_irq(struct irq_desc *desc)
 {
+	struct irq_data *d = irq_desc_get_irq_data(desc);
 	const struct cpumask *affinity = d->affinity;
+	struct irq_chip *c;
 	bool ret = false;
 
+	/*
+	 * If this is a per-CPU interrupt, or the affinity does not
+	 * include this CPU, then we have nothing to do.
+	 */
+	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+		return false;
+
 	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
 		affinity = cpu_online_mask;
 		ret = true;
 	}
 
-	d->chip->irq_set_affinity(d, affinity, true);
+	c = irq_data_get_irq_chip(d);
+	if (c->irq_set_affinity)
+		c->irq_set_affinity(d, affinity, true);
+	else
+		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 
 	return ret;
 }
 
 /*
- * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
- * the affinity settings do not allow other CPUs, force them onto any
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
  * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
  */
 void migrate_irqs(void)
 {
-	unsigned int i, cpu = smp_processor_id();
+	unsigned int i;
 	struct irq_desc *desc;
 	unsigned long flags;
 
 	local_irq_save(flags);
 
 	for_each_irq_desc(i, desc) {
-		struct irq_data *d = &desc->irq_data;
 		bool affinity_broken = false;
 
+		if (!desc)
+			continue;
+
 		raw_spin_lock(&desc->lock);
-		if (desc->action != NULL &&
-		    cpumask_test_cpu(smp_processor_id(), d->affinity))
-			affinity_broken = migrate_one_irq(d);
+		affinity_broken = migrate_one_irq(desc);
 		raw_spin_unlock(&desc->lock);
 
 		if (affinity_broken && printk_ratelimit())
-			pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
+			pr_warning("IRQ%u no longer affine to CPU%u\n", i,
+				   smp_processor_id());
 	}
 
 	local_irq_restore(flags);
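
For context on the call site, which this commit does not touch: migrate_irqs()
runs on the CPU being taken down, late in the hotplug teardown path. A
simplified sketch, assuming the __cpu_disable() of arch/arm/kernel/smp.c from
this era, with platform-specific steps elided:

int __cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	/*
	 * Remove this CPU from the online mask first, so that the
	 * cpu_online_mask checks in migrate_one_irq() above no longer
	 * consider it a valid migration target.
	 */
	set_cpu_online(cpu, false);

	/* Point every IRQ still affine to this CPU at an online CPU. */
	migrate_irqs();

	/* ... cache flush, TLB flush and other per-CPU teardown elided ... */
	return 0;
}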