author		Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-09-23 00:29:11 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2013-09-25 00:15:36 -0400
commit		0366a1c70b89efed4f9d590216bb004a16effbed (patch)
tree		5090ad127863968876119f1bb022a7bce32842ab /arch/powerpc/kernel/irq.c
parent		4a10c2ac2f368583138b774ca41fac4207911983 (diff)
powerpc/irq: Run softirqs off the top of the irq stack
Nowadays, irq_exit() calls __do_softirq() pretty much directly
instead of calling do_softirq(), which switches to the dedicated
softirq stack.

This has led to observed stack overflows on powerpc, since we call
irq_enter() and irq_exit() outside of the scope that switches to
the irq stack.

Fix this by moving the stack switch up a level, so that
irq_enter() and irq_exit() also run off the irq stack.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
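In outline, the patch below splits the powerpc interrupt entry in two: do_IRQ() now only decides whether a stack switch is needed and jumps onto the per-CPU hardirq stack, while the new __do_irq() runs the whole irq_enter() / handler / irq_exit() sequence on that stack, so the __do_softirq() invoked from irq_exit() inherits the irq stack's headroom instead of piling onto the interrupted task's stack. A minimal sketch of the resulting flow, condensed from the diff further down (set_irq_regs(), the ksp_limit and thread_info bookkeeping, and the spurious-interrupt path are omitted):

/* Sketch only -- condensed from the patch below, not the literal kernel code. */

void __do_irq(struct pt_regs *regs)
{
	struct irq_desc *desc;
	unsigned int irq;

	irq_enter();			/* hardirq accounting now happens on the irq stack */
	irq = ppc_md.get_irq();		/* ask the platform which interrupt fired */
	desc = irq_to_desc(irq);
	if (desc)
		desc->handle_irq(irq, desc);
	irq_exit();			/* may run __do_softirq(), still on the irq stack */
}

void do_IRQ(struct pt_regs *regs)
{
	struct thread_info *curtp = current_thread_info();
	struct thread_info *irqtp = hardirq_ctx[raw_smp_processor_id()];

	if (curtp == irqtp)		/* already on the irq stack */
		__do_irq(regs);
	else
		call_do_irq(regs, irqtp);	/* asm helper: switch stack, then call __do_irq() */
}

The key point is that irq_enter() and irq_exit() both execute after the stack switch, which is exactly what the commit message describes.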
Diffstat (limited to 'arch/powerpc/kernel/irq.c')
-rw-r--r--	arch/powerpc/kernel/irq.c	104
1 file changed, 52 insertions(+), 52 deletions(-)
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..2234a1276a77 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit;
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	if (!desc)
-		return;
-
-	/* Switch to the irq stack to handle this */
-	curtp = current_thread_info();
-	irqtp = hardirq_ctx[smp_processor_id()];
-
-	if (curtp == irqtp) {
-		/* We're already on the irq stack, just handle it */
-		desc->handle_irq(irq, desc);
-		return;
-	}
-
-	saved_sp_limit = current->thread.ksp_limit;
-
-	irqtp->task = curtp->task;
-	irqtp->flags = 0;
-
-	/* Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context. */
-	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-		(curtp->preempt_count & SOFTIRQ_MASK);
-
-	current->thread.ksp_limit = (unsigned long)irqtp +
-		_ALIGN_UP(sizeof(struct thread_info), 16);
-
-	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-	current->thread.ksp_limit = saved_sp_limit;
-	irqtp->task = NULL;
-
-	/* Set any flag that may have been set on the
-	 * alternate stack
-	 */
-	if (irqtp->flags)
-		set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq_enter();
@@ -519,18 +475,64 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq();
 
-	/* We can hard enable interrupts now */
+	/* We can hard enable interrupts now to allow perf interrupts */
 	may_hard_irq_enable();
 
 	/* And finally process it */
-	if (irq != NO_IRQ)
-		handle_one_irq(irq);
-	else
+	if (unlikely(irq == NO_IRQ))
 		__get_cpu_var(irq_stat).spurious_irqs++;
+	else {
+		desc = irq_to_desc(irq);
+		if (likely(desc))
+			desc->handle_irq(irq, desc);
+	}
 
 	trace_irq_exit(regs);
 
 	irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct thread_info *curtp, *irqtp;
+	unsigned long saved_sp_limit;
+
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+	/* Already there ? */
+	if (unlikely(curtp == irqtp)) {
+		__do_irq(regs);
+		set_irq_regs(old_regs);
+		return;
+	}
+
+	/* Adjust the stack limit */
+	saved_sp_limit = current->thread.ksp_limit;
+	current->thread.ksp_limit = (unsigned long)irqtp +
+		_ALIGN_UP(sizeof(struct thread_info), 16);
+
+
+	/* Prepare the thread_info in the irq stack */
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqtp->preempt_count = curtp->preempt_count;
+
+	/* Switch stack and call */
+	call_do_irq(regs, irqtp);
+
+	/* Restore stack limit */
+	current->thread.ksp_limit = saved_sp_limit;
+	irqtp->task = NULL;
+
+	/* Copy back updates to the thread_info */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+
 	set_irq_regs(old_regs);
 }
 
@@ -592,12 +594,10 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = 0;
 
 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = HARDIRQ_OFFSET;
 	}
 }
 
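The final hunk is the subtle one: irq_ctx_init() no longer pre-seeds preempt_count in the per-CPU softirq/hardirq thread_info (previously 0 and HARDIRQ_OFFSET). That pre-seeding becomes unnecessary once do_IRQ() copies the interrupted context's preempt_count into the irq stack's thread_info on every entry, and the usual irq_enter()/irq_exit() accounting then runs on that stack. A hedged sketch of the accounting, with the helper name invented purely for illustration:

/*
 * Sketch only: how preempt_count reaches the irq stack after this patch.
 * irq_stack_entry_sketch() is a made-up name; the real code is inline in
 * do_IRQ() above.
 */
static void irq_stack_entry_sketch(struct thread_info *curtp,
				   struct thread_info *irqtp,
				   struct pt_regs *regs)
{
	/* Inherit the interrupted context's count (soft and hard irq bits). */
	irqtp->preempt_count = curtp->preempt_count;

	/*
	 * Switch stacks and run __do_irq(): irq_enter() then adds
	 * HARDIRQ_OFFSET while on the irq stack, and irq_exit() removes it
	 * and may run __do_softirq() right there.
	 */
	call_do_irq(regs, irqtp);
}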