Diffstat (limited to 'arch/ppc64')

-rw-r--r--  arch/ppc64/kernel/irq.c   | 108
-rw-r--r--  arch/ppc64/kernel/misc.S  |   4

2 files changed, 4 insertions, 108 deletions
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c
index b61497d5be28..bd6a95a5914d 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/ppc64/kernel/irq.c
@@ -144,110 +144,6 @@ void fixup_irqs(cpumask_t map)
 }
 #endif
 
-extern int noirqdebug;
-
-/*
- * Eventually, this should take an array of interrupts and an array size
- * so it can dispatch multiple interrupts.
- */
-void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
-{
-	int status;
-	struct irqaction *action;
-	int cpu = smp_processor_id();
-	irq_desc_t *desc = get_irq_desc(irq);
-	irqreturn_t action_ret;
-
-	kstat_cpu(cpu).irqs[irq]++;
-
-	if (desc->status & IRQ_PER_CPU) {
-		/* no locking required for CPU-local interrupts: */
-		ack_irq(irq);
-		action_ret = handle_IRQ_event(irq, regs, desc->action);
-		desc->handler->end(irq);
-		return;
-	}
-
-	spin_lock(&desc->lock);
-	ack_irq(irq);
-	/*
-	   REPLAY is when Linux resends an IRQ that was dropped earlier
-	   WAITING is used by probe to mark irqs that are being tested
-	 */
-	status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
-	status |= IRQ_PENDING; /* we _want_ to handle it */
-
-	/*
-	 * If the IRQ is disabled for whatever reason, we cannot
-	 * use the action we have.
-	 */
-	action = NULL;
-	if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
-		action = desc->action;
-		if (!action || !action->handler) {
-			ppc_spurious_interrupts++;
-			printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
-			/* We can't call disable_irq here, it would deadlock */
-			if (!desc->depth)
-				desc->depth = 1;
-			desc->status |= IRQ_DISABLED;
-			/* This is not a real spurrious interrupt, we
-			 * have to eoi it, so we jump to out
-			 */
-			mask_irq(irq);
-			goto out;
-		}
-		status &= ~IRQ_PENDING; /* we commit to handling */
-		status |= IRQ_INPROGRESS; /* we are handling it */
-	}
-	desc->status = status;
-
-	/*
-	 * If there is no IRQ handler or it was disabled, exit early.
-	   Since we set PENDING, if another processor is handling
-	   a different instance of this same irq, the other processor
-	   will take care of it.
-	 */
-	if (unlikely(!action))
-		goto out;
-
-	/*
-	 * Edge triggered interrupts need to remember
-	 * pending events.
-	 * This applies to any hw interrupts that allow a second
-	 * instance of the same irq to arrive while we are in do_IRQ
-	 * or in the handler. But the code here only handles the _second_
-	 * instance of the irq, not the third or fourth. So it is mostly
-	 * useful for irq hardware that does not mask cleanly in an
-	 * SMP environment.
-	 */
-	for (;;) {
-		spin_unlock(&desc->lock);
-
-		action_ret = handle_IRQ_event(irq, regs, action);
-
-		spin_lock(&desc->lock);
-		if (!noirqdebug)
-			note_interrupt(irq, desc, action_ret, regs);
-		if (likely(!(desc->status & IRQ_PENDING)))
-			break;
-		desc->status &= ~IRQ_PENDING;
-	}
-out:
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * The ->end() handler has to deal with interrupts which got
-	 * disabled while the handler was running.
-	 */
-	if (desc->handler) {
-		if (desc->handler->end)
-			desc->handler->end(irq);
-		else if (desc->handler->enable)
-			desc->handler->enable(irq);
-	}
-	spin_unlock(&desc->lock);
-}
-
 #ifdef CONFIG_PPC_ISERIES
 void do_IRQ(struct pt_regs *regs)
 {
@@ -325,13 +221,13 @@ void do_IRQ(struct pt_regs *regs)
 		if (curtp != irqtp) {
 			irqtp->task = curtp->task;
 			irqtp->flags = 0;
-			call_ppc_irq_dispatch_handler(regs, irq, irqtp);
+			call___do_IRQ(irq, regs, irqtp);
 			irqtp->task = NULL;
 			if (irqtp->flags)
 				set_bits(irqtp->flags, &curtp->flags);
 		} else
 #endif
-			ppc_irq_dispatch_handler(regs, irq);
+			__do_IRQ(irq, regs);
 	} else
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;
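
The 104 lines deleted above duplicated, almost line for line, the generic __do_IRQ() flow in kernel/irq/handle.c (ack the interrupt, take desc->lock, juggle IRQ_PENDING/IRQ_INPROGRESS, run handle_IRQ_event() in a loop, then let ->end() finish), which is why ppc64 can simply call the generic code instead. Below is a rough, self-contained illustration of just that PENDING/INPROGRESS handshake; it is plain C with no kernel headers, and fake_desc, dispatch() and noisy_action() are invented names, not kernel API.

#include <stdio.h>

/*
 * Toy model of the IRQ_PENDING/IRQ_INPROGRESS handshake used by both
 * the removed ppc_irq_dispatch_handler() and the generic __do_IRQ().
 * Everything here is illustrative only.
 */
#define IRQ_INPROGRESS	0x1	/* a handler invocation is already running */
#define IRQ_PENDING	0x2	/* another instance arrived in the meantime */

struct fake_desc {
	unsigned int status;
};

static int runs;

static void dispatch(struct fake_desc *desc, void (*action)(struct fake_desc *));

/* Stands in for the driver's irqaction handler. */
static void noisy_action(struct fake_desc *desc)
{
	runs++;
	if (runs == 1)
		/* pretend a second instance of the irq arrives mid-handler */
		dispatch(desc, noisy_action);
}

/* Called once per hardware instance of the interrupt. */
static void dispatch(struct fake_desc *desc, void (*action)(struct fake_desc *))
{
	if (desc->status & IRQ_INPROGRESS) {
		/* someone is already in the handler: fold into their loop */
		desc->status |= IRQ_PENDING;
		return;
	}

	desc->status |= IRQ_INPROGRESS;
	do {
		desc->status &= ~IRQ_PENDING;
		action(desc);			/* handle_IRQ_event() stand-in */
	} while (desc->status & IRQ_PENDING);	/* rerun for folded instances */
	desc->status &= ~IRQ_INPROGRESS;
}

int main(void)
{
	struct fake_desc d = { 0 };

	dispatch(&d, noisy_action);
	printf("action ran %d times, final status %#x\n", runs, d.status);
	return 0;
}

In the kernel the second arrival happens on another CPU under desc->lock; this single-threaded model only shows why the loop reruns the action instead of re-entering the handler or dropping the event.
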
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index d82a30dc26f8..492bca6137eb 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -78,12 +78,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_ppc_irq_dispatch_handler)
+_GLOBAL(call___do_IRQ)
 	mflr	r0
 	std	r0,16(r1)
 	stdu	r1,THREAD_SIZE-112(r5)
 	mr	r1,r5
-	bl	.ppc_irq_dispatch_handler
+	bl	.__do_IRQ
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0
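
call___do_IRQ, like call_ppc_irq_dispatch_handler before it, is only a stack-switch trampoline: it saves the link register in the caller's frame, uses stdu to build a frame near the top of the hardirq stack passed in r5 with the old r1 stored as the back-chain, points r1 at that frame, calls __do_IRQ, then restores r1 and the LR from the back-chain. A loose userspace analogue of "run this handler on its own stack" can be sketched with <ucontext.h>; irq_stack and fake_do_IRQ below are invented for the illustration, and STACK_SIZE merely stands in for THREAD_SIZE.

#include <stdio.h>
#include <ucontext.h>

#define STACK_SIZE (64 * 1024)		/* stands in for THREAD_SIZE */

static ucontext_t main_ctx, irq_ctx;

/* Stands in for __do_IRQ(); its frame lives inside irq_stack. */
static void fake_do_IRQ(void)
{
	int on_irq_stack;
	printf("handler frame at %p\n", (void *)&on_irq_stack);
}

int main(void)
{
	static char irq_stack[STACK_SIZE];	/* the dedicated "hardirq" stack */
	int on_main_stack;

	getcontext(&irq_ctx);
	irq_ctx.uc_stack.ss_sp = irq_stack;
	irq_ctx.uc_stack.ss_size = sizeof(irq_stack);
	irq_ctx.uc_link = &main_ctx;		/* come back here, like ld r1,0(r1); blr */
	makecontext(&irq_ctx, fake_do_IRQ, 0);

	printf("caller frame at  %p\n", (void *)&on_main_stack);
	swapcontext(&main_ctx, &irq_ctx);	/* switch stacks, run the handler, return */
	return 0;
}

The two addresses printed come from different stacks, which is the whole point of the trampoline: interrupt handling gets its own, fixed-size stack instead of eating into whatever kernel stack the interrupted task happened to be on.
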