Diffstat (limited to 'arch/arm/kernel/irq.c')
-rw-r--r--	arch/arm/kernel/irq.c	28
1 file changed, 8 insertions(+), 20 deletions(-)
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 6874c7dca75a..b7c3490eaa24 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -98,17 +98,6 @@ unlock:
 	return 0;
 }
 
-/* Handle bad interrupts */
-static struct irq_desc bad_irq_desc = {
-	.handle_irq = handle_bad_irq,
-	.lock = __SPIN_LOCK_UNLOCKED(bad_irq_desc.lock),
-};
-
-#ifdef CONFIG_CPUMASK_OFFSTACK
-/* We are not allocating bad_irq_desc.affinity or .pending_mask */
-#error "ARM architecture does not support CONFIG_CPUMASK_OFFSTACK."
-#endif
-
 /*
  * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
  * come via this function.  Instead, they should provide their
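The static bad_irq_desc above goes away entirely. It existed only so that out-of-range IRQ numbers had a descriptor to hand to handle_bad_irq(), and it is what forced the CONFIG_CPUMASK_OFFSTACK #error: with offstack cpumasks, the descriptor's affinity field becomes a cpumask_var_t pointer that must be allocated at runtime, which a static definition never does. A minimal sketch of that distinction, using the cpumask_var_t API of this kernel era (the helper name example_set_affinity is hypothetical):

#include <linux/cpumask.h>
#include <linux/gfp.h>

/*
 * Hypothetical helper, for illustration only.  With
 * CONFIG_CPUMASK_OFFSTACK=y, cpumask_var_t is a bare pointer and
 * alloc_cpumask_var() really allocates; with it disabled, it is a
 * one-element array and the call is a cheap no-op returning true.
 * A statically defined irq_desc skips this step, so its affinity
 * mask would be NULL on OFFSTACK kernels -- hence the old #error.
 */
static int example_set_affinity(cpumask_var_t *mask)
{
	if (!alloc_cpumask_var(mask, GFP_KERNEL))
		return -ENOMEM;
	cpumask_setall(*mask);
	return 0;
}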
@@ -124,10 +113,13 @@ asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
 	 * Some hardware gives randomly wrong interrupts.  Rather
 	 * than crashing, do something sensible.
 	 */
-	if (irq >= NR_IRQS)
-		handle_bad_irq(irq, &bad_irq_desc);
-	else
+	if (unlikely(irq >= NR_IRQS)) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING "Bad IRQ%u\n", irq);
+		ack_bad_irq(irq);
+	} else {
 		generic_handle_irq(irq);
+	}
 
 	/* AT91 specific workaround */
 	irq_finish(irq);
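With the descriptor gone, the out-of-range check is now self-contained: a rate-limited warning plus ack_bad_irq(), which on ARM just bumps the spurious-interrupt counter, instead of detouring through handle_bad_irq() with a fake descriptor. For readability, here is the whole of asm_do_IRQ() as it reads after the patch; the lines outside the hunk are reconstructed from the same-era file and should be treated as an approximation, not as part of this diff:

asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);	/* reconstructed */

	irq_enter();					/* reconstructed */

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= NR_IRQS)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);	/* count it as spurious, nothing more */
	} else {
		generic_handle_irq(irq);
	}

	/* AT91 specific workaround */
	irq_finish(irq);

	irq_exit();					/* reconstructed */
	set_irq_regs(old_regs);				/* reconstructed */
}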
@@ -165,10 +157,6 @@ void __init init_IRQ(void)
 	for (irq = 0; irq < NR_IRQS; irq++)
 		irq_desc[irq].status |= IRQ_NOREQUEST | IRQ_NOPROBE;
 
-#ifdef CONFIG_SMP
-	cpumask_setall(bad_irq_desc.affinity);
-	bad_irq_desc.cpu = smp_processor_id();
-#endif
 	init_arch_irq();
 }
 
@@ -176,7 +164,7 @@ void __init init_IRQ(void)
 
 static void route_irq(struct irq_desc *desc, unsigned int irq, unsigned int cpu)
 {
-	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->cpu, cpu);
+	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", irq, desc->node, cpu);
 
 	spin_lock_irq(&desc->lock);
 	desc->chip->set_affinity(irq, cpumask_of(cpu));
@@ -195,7 +183,7 @@ void migrate_irqs(void)
 	for (i = 0; i < NR_IRQS; i++) {
 		struct irq_desc *desc = irq_desc + i;
 
-		if (desc->cpu == cpu) {
+		if (desc->node == cpu) {
 			unsigned int newcpu = cpumask_any_and(desc->affinity,
 							      cpu_online_mask);
 			if (newcpu >= nr_cpu_ids) {
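The last two hunks track a generic-IRQ-layer rename: the per-descriptor field recording which CPU a descriptor is tied to changed from desc->cpu to desc->node, and ARM's CPU-hotplug migration code is updated to match; behaviour is unchanged. For context, migrate_irqs() as a whole looks roughly like this after the patch. The hunk above ends at the opening brace of the "no online CPU left in the affinity mask" branch, so the fallback shown here (log, widen the mask, pick again) is reconstructed from the same-era source and is an assumption:

void migrate_irqs(void)
{
	unsigned int i, cpu = smp_processor_id();

	for (i = 0; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_desc + i;

		if (desc->node == cpu) {
			unsigned int newcpu = cpumask_any_and(desc->affinity,
							      cpu_online_mask);
			if (newcpu >= nr_cpu_ids) {
				/* Assumed fallback: no online CPU remains in
				 * the mask, so allow any CPU and re-pick. */
				printk(KERN_INFO "IRQ%u no longer affine to CPU%u\n",
				       i, cpu);

				cpumask_setall(desc->affinity);
				newcpu = cpumask_any_and(desc->affinity,
							 cpu_online_mask);
			}

			route_irq(desc, i, newcpu);
		}
	}
}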