author		Thomas Gleixner <tglx@linutronix.de>	2011-02-07 04:34:30 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2011-02-19 06:58:09 -0500
commit		fe200ae48ef5c79bf7941fe8046ff9505c570ff6
tree		767d2cf011437a266a655ce2ec39360cb85f7f28	/kernel/irq/spurious.c
parent		d05c65fff0ef672be75429266751f0e015b54d94
genirq: Mark polled irqs and defer the real handler
With the chip.end() function gone we might run into a situation where
a poll call runs and the real interrupt comes in, sees IRQ_INPROGRESS
and disables the line. That might be a perfectly working line, which
will then be masked forever.
So mark interrupts polled while the poll runs. When the real handler
sees IRQ_INPROGRESS it checks the poll flag and waits for the polling
to complete. Add the necessary sanity checks to avoid deadlocks.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
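
This diff covers only the poller side in kernel/irq/spurious.c; the consumer
side -- the real handler noticing IRQ_INPROGRESS and deferring -- lives in the
flow handlers (kernel/irq/chip.c) and is not shown in this diffstat-limited
view. A minimal C sketch of what that check could look like, assuming only the
IRQ_POLL_INPROGRESS flag and the irq_wait_for_poll() helper introduced below
(the irq_check_poll() name and the out_unlock label are illustrative):

/*
 * Sketch of the consumer side -- not part of this diff. Assumes the
 * IRQ_POLL_INPROGRESS flag and irq_wait_for_poll() added by this patch;
 * irq_check_poll() and out_unlock are illustrative names.
 */
static bool irq_check_poll(struct irq_desc *desc)
{
	/* Not a poll: genuine reentrance, handle it the normal way */
	if (!(desc->status & IRQ_POLL_INPROGRESS))
		return false;
	/* Wait (dropping desc->lock) until the poller is done, then recheck */
	return irq_wait_for_poll(desc);
}

/* In a flow handler, with desc->lock held: */
	if (unlikely(desc->status & IRQ_INPROGRESS))
		if (!irq_check_poll(desc))
			goto out_unlock; /* poll on this CPU, or line went away */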
Diffstat (limited to 'kernel/irq/spurious.c')
-rw-r--r--	kernel/irq/spurious.c	51
1 file changed, 39 insertions(+), 12 deletions(-)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 56ff8fffb8b0..f749d29bfd81 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -25,12 +25,44 @@ static int irq_poll_cpu;
 static atomic_t irq_poll_active;
 
 /*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck disabled and
+ * action (about to be disabled). Only if it's still active, we return
+ * true and let the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+		      "irq poll in progress on cpu %d for irq %d\n",
+		      smp_processor_id(), desc->irq_data.irq))
+		return false;
+
+#ifdef CONFIG_SMP
+	do {
+		raw_spin_unlock(&desc->lock);
+		while (desc->status & IRQ_INPROGRESS)
+			cpu_relax();
+		raw_spin_lock(&desc->lock);
+	} while (desc->status & IRQ_INPROGRESS);
+	/* Might have been disabled in meantime */
+	return !(desc->status & IRQ_DISABLED) && desc->action;
+#else
+	return false;
+#endif
+}
+
+/*
  * Recovery handler for misrouted interrupts.
  */
 static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 {
 	struct irqaction *action;
-	int ok = 0, work = 0;
+	int ok = 0;
 
 	raw_spin_lock(&desc->lock);
 
@@ -64,10 +96,9 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 		goto out;
 	}
 
-	/* Honour the normal IRQ locking */
-	desc->status |= IRQ_INPROGRESS;
+	/* Honour the normal IRQ locking and mark it poll in progress */
+	desc->status |= IRQ_INPROGRESS | IRQ_POLL_INPROGRESS;
 	do {
-		work++;
 		desc->status &= ~IRQ_PENDING;
 		raw_spin_unlock(&desc->lock);
 		if (handle_IRQ_event(irq, action) != IRQ_NONE)
@@ -76,14 +107,7 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 		action = desc->action;
 	} while ((desc->status & IRQ_PENDING) && action);
 
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * If we did actual work for the real IRQ line we must let the
-	 * IRQ controller clean up too
-	 */
-	if (work > 1)
-		irq_end(irq, desc);
-
+	desc->status &= ~(IRQ_INPROGRESS | IRQ_POLL_INPROGRESS);
 out:
 	raw_spin_unlock(&desc->lock);
 	return ok;
@@ -238,6 +262,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		    irqreturn_t action_ret)
 {
+	if (desc->status & IRQ_POLL_INPROGRESS)
+		return;
+
 	if (unlikely(action_ret != IRQ_HANDLED)) {
 		/*
 		 * If we are seeing only the odd spurious IRQ caused by
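
Two details in the diff above are worth noting. note_interrupt() now returns
early while IRQ_POLL_INPROGRESS is set, so the results of a poll cannot feed
the spurious-interrupt accounting and disable a healthy line -- exactly the
failure mode this patch addresses. And irq_wait_for_poll() refuses to wait
when invoked on the CPU that is doing the polling (the WARN_ONCE path):
spinning there would mean waiting on itself, i.e. a deadlock. On non-SMP
kernels the wait path cannot apply at all, so it returns false as well.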