author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2013-09-23 00:29:11 -0400
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2013-09-25 00:15:36 -0400
commit     0366a1c70b89efed4f9d590216bb004a16effbed
tree       5090ad127863968876119f1bb022a7bce32842ab
parent     4a10c2ac2f368583138b774ca41fac4207911983
powerpc/irq: Run softirqs off the top of the irq stack
Nowadays, irq_exit() calls __do_softirq() pretty much directly instead of calling do_softirq() which switches to the dedicated softirq stack. This has led to observed stack overflows on powerpc since we call irq_enter() and irq_exit() outside of the scope that switches to the irq stack. This fixes it by moving the stack switching up a level, making irq_enter() and irq_exit() run off the irq stack.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--  arch/powerpc/include/asm/irq.h |   4
-rw-r--r--  arch/powerpc/kernel/irq.c      | 104
-rw-r--r--  arch/powerpc/kernel/misc_32.S  |   9
-rw-r--r--  arch/powerpc/kernel/misc_64.S  |  10
4 files changed, 62 insertions(+), 65 deletions(-)
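Before the patch itself, a minimal runnable C sketch of the control-flow change the message describes. This is a userspace toy model, not kernel code: every name below (do_IRQ_old, do_IRQ_new, do_irq_model, cur_stack, ...) is invented for illustration; only the ordering of the stack switch relative to irq_enter()/irq_exit() mirrors what the patch does.

/*
 * Toy model: "task" and "irq" stand for the two kernel stacks; the
 * printfs show which stack each phase would execute on.
 */
#include <stdio.h>

static const char *cur_stack = "task";	/* which stack we are "running" on */

static void run_softirqs(void)   { printf("  softirqs run on the %s stack\n", cur_stack); }
static void irq_exit_model(void) { run_softirqs(); }	/* irq_exit() -> __do_softirq() */
static void handler(void)        { printf("  handler runs on the %s stack\n", cur_stack); }

/* Old flow: only the handler call is bracketed by the stack switch. */
static void do_IRQ_old(void)
{
	/* irq_enter() happens here, on the interrupted (task) stack */
	cur_stack = "irq";	/* call_handle_irq() switched stacks... */
	handler();
	cur_stack = "task";	/* ...and switched back */
	irq_exit_model();	/* softirqs run on the task stack: overflow risk */
}

/* New flow: switch first, then run the whole sequence (models __do_irq()). */
static void do_irq_model(void)
{
	/* irq_enter() happens here, already on the irq stack */
	handler();
	irq_exit_model();	/* softirqs now run on the irq stack */
}

static void do_IRQ_new(void)
{
	cur_stack = "irq";	/* call_do_irq() switches stacks */
	do_irq_model();
	cur_stack = "task";
}

int main(void)
{
	puts("before the patch:");
	do_IRQ_old();
	puts("after the patch:");
	do_IRQ_new();
	return 0;
}

Compiled and run, the old flow reports softirqs executing on the task stack while the new flow keeps them on the irq stack, which is exactly the overflow scenario the commit message describes.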
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index 0e40843a1c6e..41f13cec8a8f 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -69,9 +69,9 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
 
 extern void irq_ctx_init(void);
 extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_irq(int irq, void *p1,
-			   struct thread_info *tp, void *func);
+extern void call_do_irq(struct pt_regs *regs, struct thread_info *tp);
 extern void do_IRQ(struct pt_regs *regs);
+extern void __do_irq(struct pt_regs *regs);
 
 int irq_choose_cpu(const struct cpumask *mask);
 
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c69440cef7af..2234a1276a77 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -441,50 +441,6 @@ void migrate_irqs(void)
 }
 #endif
 
-static inline void handle_one_irq(unsigned int irq)
-{
-	struct thread_info *curtp, *irqtp;
-	unsigned long saved_sp_limit;
-	struct irq_desc *desc;
-
-	desc = irq_to_desc(irq);
-	if (!desc)
-		return;
-
-	/* Switch to the irq stack to handle this */
-	curtp = current_thread_info();
-	irqtp = hardirq_ctx[smp_processor_id()];
-
-	if (curtp == irqtp) {
-		/* We're already on the irq stack, just handle it */
-		desc->handle_irq(irq, desc);
-		return;
-	}
-
-	saved_sp_limit = current->thread.ksp_limit;
-
-	irqtp->task = curtp->task;
-	irqtp->flags = 0;
-
-	/* Copy the softirq bits in preempt_count so that the
-	 * softirq checks work in the hardirq context. */
-	irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
-			       (curtp->preempt_count & SOFTIRQ_MASK);
-
-	current->thread.ksp_limit = (unsigned long)irqtp +
-		_ALIGN_UP(sizeof(struct thread_info), 16);
-
-	call_handle_irq(irq, desc, irqtp, desc->handle_irq);
-	current->thread.ksp_limit = saved_sp_limit;
-	irqtp->task = NULL;
-
-	/* Set any flag that may have been set on the
-	 * alternate stack
-	 */
-	if (irqtp->flags)
-		set_bits(irqtp->flags, &curtp->flags);
-}
-
 static inline void check_stack_overflow(void)
 {
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
@@ -501,9 +457,9 @@ static inline void check_stack_overflow(void)
 #endif
 }
 
-void do_IRQ(struct pt_regs *regs)
+void __do_irq(struct pt_regs *regs)
 {
-	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct irq_desc *desc;
 	unsigned int irq;
 
 	irq_enter();
@@ -519,18 +475,64 @@ void do_IRQ(struct pt_regs *regs)
 	 */
 	irq = ppc_md.get_irq();
 
-	/* We can hard enable interrupts now */
+	/* We can hard enable interrupts now to allow perf interrupts */
 	may_hard_irq_enable();
 
 	/* And finally process it */
-	if (irq != NO_IRQ)
-		handle_one_irq(irq);
-	else
+	if (unlikely(irq == NO_IRQ))
 		__get_cpu_var(irq_stat).spurious_irqs++;
+	else {
+		desc = irq_to_desc(irq);
+		if (likely(desc))
+			desc->handle_irq(irq, desc);
+	}
 
 	trace_irq_exit(regs);
 
 	irq_exit();
+}
+
+void do_IRQ(struct pt_regs *regs)
+{
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	struct thread_info *curtp, *irqtp;
+	unsigned long saved_sp_limit;
+
+	/* Switch to the irq stack to handle this */
+	curtp = current_thread_info();
+	irqtp = hardirq_ctx[raw_smp_processor_id()];
+
+	/* Already there ? */
+	if (unlikely(curtp == irqtp)) {
+		__do_irq(regs);
+		set_irq_regs(old_regs);
+		return;
+	}
+
+	/* Adjust the stack limit */
+	saved_sp_limit = current->thread.ksp_limit;
+	current->thread.ksp_limit = (unsigned long)irqtp +
+		_ALIGN_UP(sizeof(struct thread_info), 16);
+
+
+	/* Prepare the thread_info in the irq stack */
+	irqtp->task = curtp->task;
+	irqtp->flags = 0;
+
+	/* Copy the preempt_count so that the [soft]irq checks work. */
+	irqtp->preempt_count = curtp->preempt_count;
+
+	/* Switch stack and call */
+	call_do_irq(regs, irqtp);
+
+	/* Restore stack limit */
+	current->thread.ksp_limit = saved_sp_limit;
+	irqtp->task = NULL;
+
+	/* Copy back updates to the thread_info */
+	if (irqtp->flags)
+		set_bits(irqtp->flags, &curtp->flags);
+
 	set_irq_regs(old_regs);
 }
 
@@ -592,12 +594,10 @@ void irq_ctx_init(void)
 		memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
 		tp = softirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = 0;
 
 		memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
 		tp = hardirq_ctx[i];
 		tp->cpu = i;
-		tp->preempt_count = HARDIRQ_OFFSET;
 	}
 }
 
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 777d999f563b..7da3882a3622 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -47,13 +47,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_irq)
+_GLOBAL(call_do_irq)
 	mflr	r0
 	stw	r0,4(r1)
-	mtctr	r6
-	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-	mr	r1,r5
-	bctrl
+	stwu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+	mr	r1,r4
+	bl	__do_irq
 	lwz	r1,0(r1)
 	lwz	r0,4(r1)
 	mtlr	r0
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index 971d7e78aff2..e59caf874d05 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -40,14 +40,12 @@ _GLOBAL(call_do_softirq)
 	mtlr	r0
 	blr
 
-_GLOBAL(call_handle_irq)
-	ld	r8,0(r6)
+_GLOBAL(call_do_irq)
 	mflr	r0
 	std	r0,16(r1)
-	mtctr	r8
-	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r5)
-	mr	r1,r5
-	bctrl
+	stdu	r1,THREAD_SIZE-STACK_FRAME_OVERHEAD(r4)
+	mr	r1,r4
+	bl	.__do_irq
 	ld	r1,0(r1)
 	ld	r0,16(r1)
 	mtlr	r0