Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c | 50
1 file changed, 33 insertions(+), 17 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 438a01464287..46d6611a33bb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -11,6 +11,7 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 
 #include "internals.h"
 
@@ -149,6 +150,26 @@ void disable_irq(unsigned int irq)
 }
 EXPORT_SYMBOL(disable_irq);
 
+static void __enable_irq(struct irq_desc *desc, unsigned int irq)
+{
+	switch (desc->depth) {
+	case 0:
+		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
+		WARN_ON(1);
+		break;
+	case 1: {
+		unsigned int status = desc->status & ~IRQ_DISABLED;
+
+		/* Prevent probing on this irq: */
+		desc->status = status | IRQ_NOPROBE;
+		check_irq_resend(desc, irq);
+		/* fall-through */
+	}
+	default:
+		desc->depth--;
+	}
+}
+
 /**
  *	enable_irq - enable handling of an irq
  *	@irq:	Interrupt to enable
@@ -168,22 +189,7 @@ void enable_irq(unsigned int irq)
 		return;
 
 	spin_lock_irqsave(&desc->lock, flags);
-	switch (desc->depth) {
-	case 0:
-		printk(KERN_WARNING "Unbalanced enable for IRQ %d\n", irq);
-		WARN_ON(1);
-		break;
-	case 1: {
-		unsigned int status = desc->status & ~IRQ_DISABLED;
-
-		/* Prevent probing on this irq: */
-		desc->status = status | IRQ_NOPROBE;
-		check_irq_resend(desc, irq);
-		/* fall-through */
-	}
-	default:
-		desc->depth--;
-	}
+	__enable_irq(desc, irq);
 	spin_unlock_irqrestore(&desc->lock, flags);
 }
 EXPORT_SYMBOL(enable_irq);
@@ -364,7 +370,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	compat_irq_chip_set_default_handler(desc);
 
 	desc->status &= ~(IRQ_AUTODETECT | IRQ_WAITING |
-			  IRQ_INPROGRESS);
+			  IRQ_INPROGRESS | IRQ_SPURIOUS_DISABLED);
 
 	if (!(desc->status & IRQ_NOAUTOEN)) {
 		desc->depth = 0;
@@ -380,6 +386,16 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	/* Reset broken irq detection when installing new handler */
 	desc->irq_count = 0;
 	desc->irqs_unhandled = 0;
+
+	/*
+	 * Check whether we disabled the irq via the spurious handler
+	 * before. Reenable it and give it another chance.
+	 */
+	if (shared && (desc->status & IRQ_SPURIOUS_DISABLED)) {
+		desc->status &= ~IRQ_SPURIOUS_DISABLED;
+		__enable_irq(desc, irq);
+	}
+
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	new->irq = irq;
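
An illustrative sketch, not part of the patch: the case this change handles is a shared interrupt line that the spurious-IRQ detector has shut down (IRQ_SPURIOUS_DISABLED) because an earlier handler kept misbehaving. When another driver later registers a handler for the same line with request_irq() and IRQF_SHARED, setup_irq() now clears the flag and calls __enable_irq(), so the newcomer gets a working interrupt. The irq number, device name, and function names below are made up for illustration.

#include <linux/interrupt.h>

/*
 * Hypothetical second driver attaching to an already shared line.
 * If that line was previously disabled by the spurious-IRQ detector,
 * the request_irq() call below now re-enables it via setup_irq().
 */
static irqreturn_t demo_handler(int irq, void *dev_id)
{
	/*
	 * Check whether our device actually raised the interrupt;
	 * returning IRQ_NONE for interrupts we did not cause is what
	 * feeds the spurious-IRQ detection in the first place.
	 */
	return IRQ_HANDLED;
}

static int demo_attach(void *dev_id)
{
	/* irq 17 and the "demo" name are placeholders. */
	return request_irq(17, demo_handler, IRQF_SHARED, "demo", dev_id);
}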