diff options
author | Thomas Gleixner <tglx@linutronix.de> | 2011-02-07 08:31:37 -0500 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2011-02-19 06:58:09 -0500 |
commit | d05c65fff0ef672be75429266751f0e015b54d94 (patch) | |
tree | 96b6b8ec9d8ab6d94e4f84bb8bcf32def1fe9e2a /kernel/irq/spurious.c | |
parent | c7259cd7af757ddcd65701c37099dcddae2054f0 (diff) |
genirq: spurious: Run only one poller at a time
No point in running concurrent pollers which confuse each other by
setting PENDING.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'kernel/irq/spurious.c')
-rw-r--r-- | kernel/irq/spurious.c | 16 |
1 file changed, 15 insertions, 1 deletion
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c index bd0e42d3e0ba..56ff8fffb8b0 100644 --- a/kernel/irq/spurious.c +++ b/kernel/irq/spurious.c | |||
@@ -21,6 +21,8 @@ static int irqfixup __read_mostly; | |||
21 | #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) | 21 | #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) |
22 | static void poll_spurious_irqs(unsigned long dummy); | 22 | static void poll_spurious_irqs(unsigned long dummy); |
23 | static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); | 23 | static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0); |
24 | static int irq_poll_cpu; | ||
25 | static atomic_t irq_poll_active; | ||
24 | 26 | ||
25 | /* | 27 | /* |
26 | * Recovery handler for misrouted interrupts. | 28 | * Recovery handler for misrouted interrupts. |
@@ -92,6 +94,11 @@ static int misrouted_irq(int irq) | |||
92 | struct irq_desc *desc; | 94 | struct irq_desc *desc; |
93 | int i, ok = 0; | 95 | int i, ok = 0; |
94 | 96 | ||
97 | if (atomic_inc_return(&irq_poll_active) != 1) | ||
98 | goto out; | ||
99 | |||
100 | irq_poll_cpu = smp_processor_id(); | ||
101 | |||
95 | for_each_irq_desc(i, desc) { | 102 | for_each_irq_desc(i, desc) { |
96 | if (!i) | 103 | if (!i) |
97 | continue; | 104 | continue; |
@@ -102,6 +109,8 @@ static int misrouted_irq(int irq) | |||
102 | if (try_one_irq(i, desc, false)) | 109 | if (try_one_irq(i, desc, false)) |
103 | ok = 1; | 110 | ok = 1; |
104 | } | 111 | } |
112 | out: | ||
113 | atomic_dec(&irq_poll_active); | ||
105 | /* So the caller can adjust the irq error counts */ | 114 | /* So the caller can adjust the irq error counts */ |
106 | return ok; | 115 | return ok; |
107 | } | 116 | } |
@@ -111,6 +120,10 @@ static void poll_spurious_irqs(unsigned long dummy) | |||
111 | struct irq_desc *desc; | 120 | struct irq_desc *desc; |
112 | int i; | 121 | int i; |
113 | 122 | ||
123 | if (atomic_inc_return(&irq_poll_active) != 1) | ||
124 | goto out; | ||
125 | irq_poll_cpu = smp_processor_id(); | ||
126 | |||
114 | for_each_irq_desc(i, desc) { | 127 | for_each_irq_desc(i, desc) { |
115 | unsigned int status; | 128 | unsigned int status; |
116 | 129 | ||
@@ -127,7 +140,8 @@ static void poll_spurious_irqs(unsigned long dummy) | |||
127 | try_one_irq(i, desc, true); | 140 | try_one_irq(i, desc, true); |
128 | local_irq_enable(); | 141 | local_irq_enable(); |
129 | } | 142 | } |
130 | 143 | out: | |
144 | atomic_dec(&irq_poll_active); | ||
131 | mod_timer(&poll_spurious_irq_timer, | 145 | mod_timer(&poll_spurious_irq_timer, |
132 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); | 146 | jiffies + POLL_SPURIOUS_IRQ_INTERVAL); |
133 | } | 147 | } |