Diffstat (limited to 'arch/arm/kernel/irq.c')
 arch/arm/kernel/irq.c | 70 ++++++++++++++++++++++++++++++++++++++++++++--------------------------
 1 file changed, 44 insertions(+), 26 deletions(-)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 83bbad03fcc..de3dcab8610 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -67,12 +67,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
 }
 
 /*
- * do_IRQ handles all hardware IRQ's. Decoded IRQs should not
- * come via this function. Instead, they should provide their
- * own 'handler'
+ * handle_IRQ handles all hardware IRQ's. Decoded IRQs should
+ * not come via this function. Instead, they should provide their
+ * own 'handler'. Used by platform code implementing C-based 1st
+ * level decoding.
  */
-asmlinkage void __exception_irq_entry
-asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+void handle_IRQ(unsigned int irq, struct pt_regs *regs)
 {
         struct pt_regs *old_regs = set_irq_regs(regs);
 
@@ -97,6 +97,15 @@ asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
         set_irq_regs(old_regs);
 }
 
+/*
+ * asm_do_IRQ is the interface to be used from assembly code.
+ */
+asmlinkage void __exception_irq_entry
+asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
+{
+        handle_IRQ(irq, regs);
+}
+
 void set_irq_flags(unsigned int irq, unsigned int iflags)
 {
         unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
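
The comment above says handle_IRQ() is intended for platform code doing C-based first-level decoding. As a rough illustration of what that means (a hypothetical decoder, not part of this commit: the controller base, the INTC_PENDING register offset, and the function name are all invented for the example), such a platform hook might look like this:

/*
 * Hypothetical sketch of a C-based first-level decoder built on the
 * handle_IRQ() interface introduced above.  The register layout
 * (INTC_PENDING), the mapped base (intc_base) and the function name
 * are assumptions for illustration only.
 */
#include <linux/bitops.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <asm/irq.h>

#define INTC_PENDING    0x10            /* assumed pending-status register */

static void __iomem *intc_base;         /* assumed: set up via ioremap() */

static void example_handle_irq(struct pt_regs *regs)
{
        u32 pending;

        /* Decode each pending source and dispatch it through handle_IRQ(). */
        while ((pending = readl_relaxed(intc_base + INTC_PENDING)) != 0) {
                unsigned int irq = ffs(pending) - 1;

                handle_IRQ(irq, regs);
        }
}

A machine would then install such a function as its first-level handler (e.g. via the .handle_irq field of struct machine_desc on kernels of this era) instead of funnelling everything through asm_do_IRQ.
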
@@ -131,54 +140,63 @@ int __init arch_probe_nr_irqs(void)
 
 #ifdef CONFIG_HOTPLUG_CPU
 
-static bool migrate_one_irq(struct irq_data *d)
+static bool migrate_one_irq(struct irq_desc *desc)
 {
-        unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
+        struct irq_data *d = irq_desc_get_irq_data(desc);
+        const struct cpumask *affinity = d->affinity;
+        struct irq_chip *c;
         bool ret = false;
 
-        if (cpu >= nr_cpu_ids) {
-                cpu = cpumask_any(cpu_online_mask);
+        /*
+         * If this is a per-CPU interrupt, or the affinity does not
+         * include this CPU, then we have nothing to do.
+         */
+        if (irqd_is_per_cpu(d) || !cpumask_test_cpu(smp_processor_id(), affinity))
+                return false;
+
+        if (cpumask_any_and(affinity, cpu_online_mask) >= nr_cpu_ids) {
+                affinity = cpu_online_mask;
                 ret = true;
         }
 
-        pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);
-
-        d->chip->irq_set_affinity(d, cpumask_of(cpu), true);
+        c = irq_data_get_irq_chip(d);
+        if (c->irq_set_affinity)
+                c->irq_set_affinity(d, affinity, true);
+        else
+                pr_debug("IRQ%u: unable to set affinity\n", d->irq);
 
         return ret;
 }
 
 /*
- * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
- * the affinity settings do not allow other CPUs, force them onto any
+ * The current CPU has been marked offline.  Migrate IRQs off this CPU.
+ * If the affinity settings do not allow other CPUs, force them onto any
  * available CPU.
+ *
+ * Note: we must iterate over all IRQs, whether they have an attached
+ * action structure or not, as we need to get chained interrupts too.
  */
 void migrate_irqs(void)
 {
-        unsigned int i, cpu = smp_processor_id();
+        unsigned int i;
         struct irq_desc *desc;
         unsigned long flags;
 
         local_irq_save(flags);
 
         for_each_irq_desc(i, desc) {
-                struct irq_data *d = &desc->irq_data;
                 bool affinity_broken = false;
 
-                raw_spin_lock(&desc->lock);
-                do {
-                        if (desc->action == NULL)
-                                break;
-
-                        if (d->node != cpu)
-                                break;
+                if (!desc)
+                        continue;
 
-                        affinity_broken = migrate_one_irq(d);
-                } while (0);
+                raw_spin_lock(&desc->lock);
+                affinity_broken = migrate_one_irq(desc);
                 raw_spin_unlock(&desc->lock);
 
                 if (affinity_broken && printk_ratelimit())
-                        pr_warning("IRQ%u no longer affine to CPU%u\n", i, cpu);
+                        pr_warning("IRQ%u no longer affine to CPU%u\n", i,
+                                smp_processor_id());
         }
 
         local_irq_restore(flags);
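
For context on how migrate_irqs() is meant to be called: it runs on the CPU going down, with interrupts disabled, from the architecture's hotplug teardown. A simplified, hypothetical sketch of that caller side (modelled on ARM's __cpu_disable() of this era; the real teardown of caches, TLBs and clockevents is elided, and the function name is changed to mark it as an illustration):

/*
 * Simplified, hypothetical sketch of a hotplug teardown path using
 * migrate_irqs().  Everything except the online-mask update and the
 * IRQ migration itself is elided.
 */
#include <linux/cpumask.h>
#include <linux/smp.h>

extern void migrate_irqs(void);

static int example_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();

        /* Remove this CPU from the online mask first... */
        set_cpu_online(cpu, false);

        /*
         * ...then push its IRQs elsewhere.  With this CPU already
         * offline, migrate_one_irq() finds no online CPU left in an
         * affinity mask that named only us and falls back to
         * cpu_online_mask.
         */
        migrate_irqs();

        return 0;
}

This ordering is what makes the cpumask_any_and(affinity, cpu_online_mask) test in migrate_one_irq() work: an IRQ bound solely to the dying CPU no longer intersects the online mask, so its affinity is forced onto cpu_online_mask, which is exactly the "affinity broken" case the pr_warning reports.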