diff options
author | Michael Ellerman <michael@ellerman.id.au> | 2009-04-27 21:57:43 -0400 |
---|---|---|
committer | Benjamin Herrenschmidt <benh@kernel.crashing.org> | 2009-05-21 01:43:58 -0400 |
commit | f2694ba56808a3a0fa45d9bb45289575f31e48d2 (patch) | |
tree | a5b24b590f83c42e7c224f69d0f43867caa743bb /arch/powerpc/kernel/irq.c | |
parent | fb94fc2b89ea0422950cb1220f275622246bd66d (diff) |
powerpc/irq: Move #ifdef'ed body of do_IRQ() into a separate function
Rather than a giant ifdef in the body of do_IRQ(), including a
dangling else, move the irq stack logic into a separate routine and
do the ifdef there.
Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r-- | arch/powerpc/kernel/irq.c | 96 |
1 file changed, 56 insertions, 40 deletions
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 8c1a4966867e..3d3658d0b7b9 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -248,13 +248,63 @@ void fixup_irqs(cpumask_t map) | |||
248 | } | 248 | } |
249 | #endif | 249 | #endif |
250 | 250 | ||
251 | #ifdef CONFIG_IRQSTACKS | ||
252 | static inline void handle_one_irq(unsigned int irq) | ||
253 | { | ||
254 | struct thread_info *curtp, *irqtp; | ||
255 | unsigned long saved_sp_limit; | ||
256 | struct irq_desc *desc; | ||
257 | void *handler; | ||
258 | |||
259 | /* Switch to the irq stack to handle this */ | ||
260 | curtp = current_thread_info(); | ||
261 | irqtp = hardirq_ctx[smp_processor_id()]; | ||
262 | |||
263 | if (curtp == irqtp) { | ||
264 | /* We're already on the irq stack, just handle it */ | ||
265 | generic_handle_irq(irq); | ||
266 | return; | ||
267 | } | ||
268 | |||
269 | desc = irq_desc + irq; | ||
270 | saved_sp_limit = current->thread.ksp_limit; | ||
271 | |||
272 | handler = desc->handle_irq; | ||
273 | if (handler == NULL) | ||
274 | handler = &__do_IRQ; | ||
275 | |||
276 | irqtp->task = curtp->task; | ||
277 | irqtp->flags = 0; | ||
278 | |||
279 | /* Copy the softirq bits in preempt_count so that the | ||
280 | * softirq checks work in the hardirq context. */ | ||
281 | irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) | | ||
282 | (curtp->preempt_count & SOFTIRQ_MASK); | ||
283 | |||
284 | current->thread.ksp_limit = (unsigned long)irqtp + | ||
285 | _ALIGN_UP(sizeof(struct thread_info), 16); | ||
286 | |||
287 | call_handle_irq(irq, desc, irqtp, handler); | ||
288 | current->thread.ksp_limit = saved_sp_limit; | ||
289 | irqtp->task = NULL; | ||
290 | |||
291 | /* Set any flag that may have been set on the | ||
292 | * alternate stack | ||
293 | */ | ||
294 | if (irqtp->flags) | ||
295 | set_bits(irqtp->flags, &curtp->flags); | ||
296 | } | ||
297 | #else | ||
298 | static inline void handle_one_irq(unsigned int irq) | ||
299 | { | ||
300 | generic_handle_irq(irq); | ||
301 | } | ||
302 | #endif | ||
303 | |||
251 | void do_IRQ(struct pt_regs *regs) | 304 | void do_IRQ(struct pt_regs *regs) |
252 | { | 305 | { |
253 | struct pt_regs *old_regs = set_irq_regs(regs); | 306 | struct pt_regs *old_regs = set_irq_regs(regs); |
254 | unsigned int irq; | 307 | unsigned int irq; |
255 | #ifdef CONFIG_IRQSTACKS | ||
256 | struct thread_info *curtp, *irqtp; | ||
257 | #endif | ||
258 | 308 | ||
259 | irq_enter(); | 309 | irq_enter(); |
260 | 310 | ||
@@ -282,43 +332,9 @@ void do_IRQ(struct pt_regs *regs) | |||
282 | */ | 332 | */ |
283 | irq = ppc_md.get_irq(); | 333 | irq = ppc_md.get_irq(); |
284 | 334 | ||
285 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { | 335 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) |
286 | #ifdef CONFIG_IRQSTACKS | 336 | handle_one_irq(irq); |
287 | /* Switch to the irq stack to handle this */ | 337 | else if (irq != NO_IRQ_IGNORE) |
288 | curtp = current_thread_info(); | ||
289 | irqtp = hardirq_ctx[smp_processor_id()]; | ||
290 | if (curtp != irqtp) { | ||
291 | struct irq_desc *desc = irq_desc + irq; | ||
292 | void *handler = desc->handle_irq; | ||
293 | unsigned long saved_sp_limit = current->thread.ksp_limit; | ||
294 | if (handler == NULL) | ||
295 | handler = &__do_IRQ; | ||
296 | irqtp->task = curtp->task; | ||
297 | irqtp->flags = 0; | ||
298 | |||
299 | /* Copy the softirq bits in preempt_count so that the | ||
300 | * softirq checks work in the hardirq context. | ||
301 | */ | ||
302 | irqtp->preempt_count = | ||
303 | (irqtp->preempt_count & ~SOFTIRQ_MASK) | | ||
304 | (curtp->preempt_count & SOFTIRQ_MASK); | ||
305 | |||
306 | current->thread.ksp_limit = (unsigned long)irqtp + | ||
307 | _ALIGN_UP(sizeof(struct thread_info), 16); | ||
308 | call_handle_irq(irq, desc, irqtp, handler); | ||
309 | current->thread.ksp_limit = saved_sp_limit; | ||
310 | irqtp->task = NULL; | ||
311 | |||
312 | |||
313 | /* Set any flag that may have been set on the | ||
314 | * alternate stack | ||
315 | */ | ||
316 | if (irqtp->flags) | ||
317 | set_bits(irqtp->flags, &curtp->flags); | ||
318 | } else | ||
319 | #endif | ||
320 | generic_handle_irq(irq); | ||
321 | } else if (irq != NO_IRQ_IGNORE) | ||
322 | /* That's not SMP safe ... but who cares ? */ | 338 | /* That's not SMP safe ... but who cares ? */ |
323 | ppc_spurious_interrupts++; | 339 | ppc_spurious_interrupts++; |
324 | 340 | ||