diff options
| -rw-r--r-- | arch/arm/kernel/irq.c | 39 |
1 files changed, 28 insertions, 11 deletions
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c index ab63c05290e5..0f928a131af8 100644 --- a/arch/arm/kernel/irq.c +++ b/arch/arm/kernel/irq.c | |||
| @@ -131,46 +131,63 @@ int __init arch_probe_nr_irqs(void) | |||
| 131 | 131 | ||
| 132 | #ifdef CONFIG_HOTPLUG_CPU | 132 | #ifdef CONFIG_HOTPLUG_CPU |
| 133 | 133 | ||
| 134 | static bool migrate_one_irq(struct irq_data *d) | 134 | static bool migrate_one_irq(struct irq_desc *desc) |
| 135 | { | 135 | { |
| 136 | struct irq_data *d = irq_desc_get_irq_data(desc); | ||
| 136 | const struct cpumask *affinity = d->affinity; | 137 | const struct cpumask *affinity = d->affinity; |
| 138 | struct irq_chip *c; | ||
| 137 | bool ret = false; | 139 | bool ret = false; |
| 138 | 140 | ||
| 141 | /* | ||
| 142 | * If this is a per-CPU interrupt, or the affinity does not | ||
| 143 | * include this CPU, then we have nothing to do. | ||
| 144 | */ | ||
| 145 | if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity)) | ||
| 146 | return false; | ||
| 147 | |||
| 139 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { | 148 | if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) { |
| 140 | affinity = cpu_online_mask; | 149 | affinity = cpu_online_mask; |
| 141 | ret = true; | 150 | ret = true; |
| 142 | } | 151 | } |
| 143 | 152 | ||
| 144 | d->chip->irq_set_affinity(d, affinity, true); | 153 | c = irq_data_get_irq_chip(d); |
| 154 | if (c->irq_set_affinity) | ||
| 155 | c->irq_set_affinity(d, affinity, true); | ||
| 156 | else | ||
| 157 | pr_debug("IRQ%u: unable to set affinity\n", d->irq); | ||
| 145 | 158 | ||
| 146 | return ret; | 159 | return ret; |
| 147 | } | 160 | } |
| 148 | 161 | ||
| 149 | /* | 162 | /* |
| 150 | * The CPU has been marked offline. Migrate IRQs off this CPU. If | 163 | * The current CPU has been marked offline. Migrate IRQs off this CPU. |
| 151 | * the affinity settings do not allow other CPUs, force them onto any | 164 | * If the affinity settings do not allow other CPUs, force them onto any |
| 152 | * available CPU. | 165 | * available CPU. |
| 166 | * | ||
| 167 | * Note: we must iterate over all IRQs, whether they have an attached | ||
| 168 | * action structure or not, as we need to get chained interrupts too. | ||
| 153 | */ | 169 | */ |
| 154 | void migrate_irqs(void) | 170 | void migrate_irqs(void) |
| 155 | { | 171 | { |
| 156 | unsigned int i, cpu = smp_processor_id(); | 172 | unsigned int i; |
| 157 | struct irq_desc *desc; | 173 | struct irq_desc *desc; |
| 158 | unsigned long flags; | 174 | unsigned long flags; |
| 159 | 175 | ||
| 160 | local_irq_save(flags); | 176 | local_irq_save(flags); |
| 161 | 177 | ||
| 162 | for_each_irq_desc(i, desc) { | 178 | for_each_irq_desc(i, desc) { |
| 163 | struct irq_data *d = &desc->irq_data; | ||
| 164 | bool affinity_broken = false; | 179 | bool affinity_broken = false; |
| 165 | 180 | ||
| 181 | if (!desc) | ||
| 182 | continue; | ||
| 183 | |||
| 166 | raw_spin_lock(&desc->lock); | 184 | raw_spin_lock(&desc->lock); |
| 167 | if (desc->action != NULL && | 185 | affinity_broken = migrate_one_irq(desc); |
| 168 | cpumask_test_cpu(smp_processor_id(), d->affinity)) | ||
| 169 | affinity_broken = migrate_one_irq(d); | ||
| 170 | raw_spin_unlock(&desc->lock); | 186 | raw_spin_unlock(&desc->lock); |
| 171 | 187 | ||
| 172 | if (affinity_broken && printk_ratelimit()) | 188 | if (affinity_broken && printk_ratelimit()) |
| 173 | pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu); | 189 | pr_warning("IRQ%u no longer affine to CPU%u\n", i, |
| 190 | smp_processor_id()); | ||
| 174 | } | 191 | } |
| 175 | 192 | ||
| 176 | local_irq_restore(flags); | 193 | local_irq_restore(flags); |
