-rw-r--r--	include/linux/irq.h	 1
-rw-r--r--	kernel/irq/manage.c	49
-rw-r--r--	kernel/irq/spurious.c	 4
3 files changed, 35 insertions, 19 deletions
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1883a85625dd..552e0ec269c9 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -61,6 +61,7 @@ typedef void (*irq_flow_handler_t)(unsigned int irq,
 #define IRQ_WAKEUP		0x00100000	/* IRQ triggers system wakeup */
 #define IRQ_MOVE_PENDING	0x00200000	/* need to re-target IRQ destination */
 #define IRQ_NO_BALANCING	0x00400000	/* IRQ is excluded from balancing */
+#define IRQ_SPURIOUS_DISABLED	0x00800000	/* IRQ was disabled by the spurious trap */
 
 #ifdef CONFIG_IRQ_PER_CPU
 # define CHECK_IRQ_PER_CPU(var) ((var) & IRQ_PER_CPU)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 46e4ad1723f0..46d6611a33bb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -150,6 +150,26 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
+static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+{
+	switch (desc->depth) {
+	case 0:
+		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+		WARN_ON(1);
+		break;
+	case 1: {
+		unsigned int status = desc->status & ~IRQ_DISABLED;
+
+		/* Prevent probing on this irq: */
+		desc->status = status | IRQ_NOPROBE;
+		check_irq_resend(desc, irq);
+		/* fall-through */
+	}
+	default:
+		desc->depth--;
+	}
+}
+
 /**
  * enable_irq - enable handling of an irq
  * @irq:	Interrupt to enable
@@ -169,22 +189,7 @@ void enable_irq(unsigned int irq)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	switch (desc->depth) {
-	case 0:
-		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
-		WARN_ON(1);
-		break;
-	case 1: {
-		unsigned int status = desc->status & ~IRQ_DISABLED;
-
-		/* Prevent probing on this irq: */
-		desc->status = status | IRQ_NOPROBE;
-		check_irq_resend(desc, irq);
-		/* fall-through */
-	}
-	default:
-		desc->depth--;
-	}
+	__enable_irq(desc, irq);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(enable_irq);
@@ -365,7 +370,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 		compat_irq_chip_set_default_handler(desc);
 
 		desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
-				  IRQ_INPROGRESS);
+				  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
 
 		if (!(desc->status & IRQ_NOAUTOEN)) {
 			desc->depth = 0;
@@ -381,6 +386,16 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
+
+	/*
+	 * Check whether we disabled the irq via the spurious handler
+	 * before. Reenable it and give it another chance.
+	 */
+	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
+		desc->status &= ~IRQ_SPURIOUS_DISABLED;
+		__enable_irq(desc, irq);
+	}
+
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	new->irq = irq;
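
The depth-counting switch that used to sit inline in enable_irq() is now shared through __enable_irq(), so setup_irq() can undo a spurious-trap disable without duplicating the unbalanced-enable warning or the resend handling. As a rough illustration of what that counter provides, here is a minimal userspace sketch of nested disable/enable accounting; struct fake_desc and the FAKE_IRQ_DISABLED constant are stand-ins invented for the example, not kernel definitions:

/* Sketch only: models the nested disable/enable counting that
 * desc->depth implements. Not kernel code. */
#include <stdio.h>

#define FAKE_IRQ_DISABLED 0x1

struct fake_desc {
	unsigned int status;
	unsigned int depth;	/* nesting count of outstanding disables */
};

static void fake_disable(struct fake_desc *desc)
{
	if (desc->depth++ == 0)
		desc->status |= FAKE_IRQ_DISABLED;	/* first disable masks the line */
}

static void fake_enable(struct fake_desc *desc)
{
	switch (desc->depth) {
	case 0:
		fprintf(stderr, "unbalanced enable\n");	/* more enables than disables */
		break;
	case 1:
		desc->status &= ~FAKE_IRQ_DISABLED;	/* last enable unmasks the line */
		/* fall-through */
	default:
		desc->depth--;
	}
}

int main(void)
{
	struct fake_desc d = { 0, 0 };

	fake_disable(&d);
	fake_disable(&d);	/* nested disable: depth == 2, still masked */
	fake_enable(&d);	/* depth == 1, still masked */
	fake_enable(&d);	/* depth == 0, unmasked */
	printf("status=%#x depth=%u\n", d.status, d.depth);
	return 0;
}
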
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 088dabbf2d6a..c66d3f10e853 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -209,8 +209,8 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		 * Now kill the IRQ
 		 */
 		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
-		desc->status |= IRQ_DISABLED;
-		desc->depth = 1;
+		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+		desc->depth++;
 		desc->chip->disable(irq);
 	}
 	desc->irqs_unhandled = 0;
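
On the spurious.c change: forcing desc->depth = 1 could discard disables a driver still has outstanding, so the very next enable_irq() would unmask a line its owner believes is off. Incrementing the count keeps disable/enable nesting balanced, and IRQ_SPURIOUS_DISABLED records that exactly one of those levels belongs to the spurious detector, which is the level setup_irq() later pops via __enable_irq(). A tiny sketch of the difference, with plain integers standing in for desc->depth (illustrative only):

/* Illustrative only: why the spurious path increments the depth
 * instead of assigning 1. Plain integers stand in for desc->depth. */
#include <stdio.h>

int main(void)
{
	/* A driver has the line disabled once when the spurious trap fires. */
	unsigned int depth_forced = 1;
	unsigned int depth_bumped = 1;

	depth_forced = 1;	/* old: overwrite, the driver's disable is lost */
	depth_bumped++;		/* new: record a second, independent disable */

	/* One enable_irq() from the driver: */
	depth_forced--;		/* 0: line unmasked too early */
	depth_bumped--;		/* 1: still masked until the spurious level is undone */

	printf("forced=%u bumped=%u\n", depth_forced, depth_bumped);
	return 0;
}
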