Diffstat (limited to 'arch/arm64/kernel/irq.c')
 arch/arm64/kernel/irq.c | 12 ++++--------
 1 file changed, 4 insertions(+), 8 deletions(-)
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 0f08dfd69ebc..dfa6e3e74fdd 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -97,19 +97,15 @@ static bool migrate_one_irq(struct irq_desc *desc)
 	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
 		return false;
 
-	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids)
+	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+		affinity = cpu_online_mask;
 		ret = true;
+	}
 
-	/*
-	 * when using forced irq_set_affinity we must ensure that the cpu
-	 * being offlined is not present in the affinity mask, it may be
-	 * selected as the target CPU otherwise
-	 */
-	affinity = cpu_online_mask;
 	c = irq_data_get_irq_chip(d);
 	if (!c->irq_set_affinity)
 		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
-	else if (c->irq_set_affinity(d, affinity, true) == IRQ_SET_MASK_OK && ret)
+	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
 		cpumask_copy(d->affinity, affinity);
 
 	return ret;
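
For readability, here is a sketch of migrate_one_irq() as it reads after this patch, with explanatory comments added. Only the hunk body comes from the diff above; the local declarations sit above the hunk and are reproduced purely as an assumption about the surrounding context, not as part of the change.

/*
 * Sketch of migrate_one_irq() after this patch. The declarations at the
 * top of the function are assumed context, not taken from the diff.
 */
static bool migrate_one_irq(struct irq_desc *desc)
{
	struct irq_data *d = irq_desc_get_irq_data(desc);	/* assumed */
	const struct cpumask *affinity = d->affinity;		/* assumed */
	struct irq_chip *c;					/* assumed */
	bool ret = false;					/* assumed */

	/* Skip per-CPU IRQs and IRQs not targeting the CPU going offline. */
	if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
		return false;

	/*
	 * If no online CPU remains in the affinity mask, fall back to
	 * cpu_online_mask and record that the original affinity was broken.
	 */
	if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
		affinity = cpu_online_mask;
		ret = true;
	}

	/* Retarget the IRQ; note the non-forced (false) irq_set_affinity call. */
	c = irq_data_get_irq_chip(d);
	if (!c->irq_set_affinity)
		pr_debug("IRQ%u: unable to set affinity\n", d->irq);
	else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
		cpumask_copy(d->affinity, affinity);

	return ret;
}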