diff options
| -rw-r--r-- | arch/arm/kernel/irq.c | 50 |
1 file changed, 31 insertions(+), 19 deletions(-)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index 2f19aa5f3391..3535d3793e65 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
| @@ -179,14 +179,21 @@ int __init arch_probe_nr_irqs(void) | |||
| 179 | 179 | ||
| 180 | #ifdef CONFIG_HOTPLUG_CPU | 180 | #ifdef CONFIG_HOTPLUG_CPU |
| 181 | 181 | ||
| 182 | static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu) | 182 | static bool migrate_one_irq(struct irq_data *d) |
| 183 | { | 183 | { |
| 184 | pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->irq_data.node, cpu); | 184 | unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask); |
| 185 | bool ret = false; | ||
| 185 | 186 | ||
| 186 | raw_spin_lock_irq(&desc->lock); | 187 | if (cpu >= nr_cpu_ids) { |
| 187 | desc->irq_data.chip->irq_set_affinity(&desc->irq_data, | 188 | cpu = cpumask_any(cpu_online_mask); |
| 188 | cpumask_of(cpu), true); | 189 | ret = true; |
| 189 | raw_spin_unlock_irq(&desc->lock); | 190 | } |
| 191 | |||
| 192 | pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu); | ||
| 193 | |||
| 194 | d->chip->irq_set_affinity(d, cpumask_of(cpu), true); | ||
| 195 | |||
| 196 | return ret; | ||
| 190 | } | 197 | } |
| 191 | 198 | ||
| 192 | /* | 199 | /* |
| @@ -198,25 +205,30 @@ void migrate_irqs(void) | |||
| 198 | { | 205 | { |
| 199 | unsigned int i, cpu = smp_processor_id(); | 206 | unsigned int i, cpu = smp_processor_id(); |
| 200 | struct irq_desc *desc; | 207 | struct irq_desc *desc; |
| 208 | unsigned long flags; | ||
| 209 | |||
| 210 | local_irq_save(flags); | ||
| 201 | 211 | ||
| 202 | for_each_irq_desc(i, desc) { | 212 | for_each_irq_desc(i, desc) { |
| 203 | struct irq_data *d = &desc->irq_data; | 213 | struct irq_data *d = &desc->irq_data; |
| 214 | bool affinity_broken = false; | ||
| 204 | 215 | ||
| 205 | if (d->node == cpu) { | 216 | raw_spin_lock(&desc->lock); |
| 206 | unsigned int newcpu = cpumask_any_and(d->affinity, | 217 | do { |
| 207 | cpu_online_mask); | 218 | if (desc->action == NULL) |
| 208 | if (newcpu >= nr_cpu_ids) { | 219 | break; |
| 209 | if (printk_ratelimit()) | ||
| 210 | printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n", | ||
| 211 | i, cpu); | ||
| 212 | 220 | ||
| 213 | cpumask_setall(d->affinity); | 221 | if (d->node != cpu) |
| 214 | newcpu = cpumask_any_and(d->affinity, | 222 | break; |
| 215 | cpu_online_mask); | ||
| 216 | } | ||
| 217 | 223 | ||
| 218 | route_irq(desc, i, newcpu); | 224 | affinity_broken = migrate_one_irq(d); |
| 219 | } | 225 | } while (0); |
| 226 | raw_spin_unlock(&desc->lock); | ||
| 227 | |||
| 228 | if (affinity_broken && printk_ratelimit()) | ||
| 229 | pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu); | ||
| 220 | } | 230 | } |
| 231 | |||
| 232 | local_irq_restore(flags); | ||
| 221 | } | 233 | } |
| 222 | #endif /* CONFIG_HOTPLUG_CPU */ | 234 | #endif /* CONFIG_HOTPLUG_CPU */ |
