diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2011-02-07 03:10:39 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2011-02-19 06:58:08 -0500 |
commit | fa27271bc8d230355c1f24ddea103824fdc12de6 (patch) | |
tree | 311a8b2cb337f9de83047290f72cd511f81b4eae /kernel/irq | |
parent | b738a50a202639614c98b5763b01bf9201779e50 (diff) |
genirq: Fixup poll handling
try_one_irq() contains redundant code and lots of useless checks for
shared interrupts. Check for shared before setting IRQ_INPROGRESS and
then call handle_IRQ_event() while pending. Shorter version with the
same functionality.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq')
-rw-r--r-- | kernel/irq/spurious.c | 50 |
1 file changed, 19 insertions(+), 31 deletions(-)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index 2fbfda2716e1..0af9e59c82eb 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -42,48 +42,36 @@ static int try_one_irq(int irq, struct irq_desc *desc) | |||
42 | raw_spin_unlock(&desc->lock); | 42 | raw_spin_unlock(&desc->lock); |
43 | return ok; | 43 | return ok; |
44 | } | 44 | } |
45 | /* Honour the normal IRQ locking */ | ||
46 | desc->status |= IRQ_INPROGRESS; | ||
47 | action = desc->action; | ||
48 | raw_spin_unlock(&desc->lock); | ||
49 | |||
50 | while (action) { | ||
51 | /* Only shared IRQ handlers are safe to call */ | ||
52 | if (action->flags & IRQF_SHARED) { | ||
53 | if (action->handler(irq, action->dev_id) == | ||
54 | IRQ_HANDLED) | ||
55 | ok = 1; | ||
56 | } | ||
57 | action = action->next; | ||
58 | } | ||
59 | local_irq_disable(); | ||
60 | /* Now clean up the flags */ | ||
61 | raw_spin_lock(&desc->lock); | ||
62 | action = desc->action; | ||
63 | |||
64 | /* | 45 | /* |
65 | * While we were looking for a fixup someone queued a real | 46 | * All handlers must agree on IRQF_SHARED, so we test just the |
66 | * IRQ clashing with our walk: | 47 | * first. Check for action->next as well. |
67 | */ | 48 | */ |
68 | while ((desc->status & IRQ_PENDING) && action) { | 49 | action = desc->action; |
69 | /* | 50 | if (!action || !(action->flags & IRQF_SHARED) || !action->next) |
70 | * Perform real IRQ processing for the IRQ we deferred | 51 | goto out; |
71 | */ | 52 | |
72 | work = 1; | 53 | /* Honour the normal IRQ locking */ |
54 | desc->status |= IRQ_INPROGRESS; | ||
55 | do { | ||
56 | work++; | ||
57 | desc->status &= ~IRQ_PENDING; | ||
73 | raw_spin_unlock(&desc->lock); | 58 | raw_spin_unlock(&desc->lock); |
74 | handle_IRQ_event(irq, action); | 59 | if (handle_IRQ_event(irq, action) != IRQ_NONE) |
60 | ok = 1; | ||
75 | raw_spin_lock(&desc->lock); | 61 | raw_spin_lock(&desc->lock); |
76 | desc->status &= ~IRQ_PENDING; | 62 | action = desc->action; |
77 | } | 63 | } while ((desc->status & IRQ_PENDING) && action); |
64 | |||
78 | desc->status &= ~IRQ_INPROGRESS; | 65 | desc->status &= ~IRQ_INPROGRESS; |
79 | /* | 66 | /* |
80 | * If we did actual work for the real IRQ line we must let the | 67 | * If we did actual work for the real IRQ line we must let the |
81 | * IRQ controller clean up too | 68 | * IRQ controller clean up too |
82 | */ | 69 | */ |
83 | if (work) | 70 | if (work > 1) |
84 | irq_end(irq, desc); | 71 | irq_end(irq, desc); |
85 | raw_spin_unlock(&desc->lock); | ||
86 | 72 | ||
73 | out: | ||
74 | raw_spin_unlock(&desc->lock); | ||
87 | return ok; | 75 | return ok; |
88 | } | 76 | } |
89 | 77 | ||