path: root/arch/powerpc/kernel/entry_64.S
author    Michael Ellerman <michael@ellerman.id.au>  2008-07-16 00:21:34 -0400
committer Paul Mackerras <paulus@samba.org>          2008-08-20 02:34:57 -0400
commit    01f3880dd8a7fa78c419da2db740cba511ca7798
tree      6a1e6ab37fa42ae1847f2ff44f26270dc6848c7c  /arch/powerpc/kernel/entry_64.S
parent    cd5aeb9f6cf7ada6baa218e01b4299e201497cde
powerpc: Streamline ret_from_except_lite for non-iSeries platforms
There is a small passage of code in ret_from_except_lite which is only required on iSeries. For a multi-platform kernel on non-iSeries machines this means we end up executing ~15 nops in ret_from_except_lite.

It would be nicer if non-iSeries could skip the code entirely, and on iSeries we can jump out of line to execute the code.

I have no performance numbers to justify this, other than the assertion that executing 15 nops takes longer than executing 0.

Signed-off-by: Michael Ellerman <michael@ellerman.id.au>
Signed-off-by: Paul Mackerras <paulus@samba.org>
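The change is the classic "move the rare case out of line" optimisation: the hot exception-return path keeps only a firmware-feature alternative (BEGIN_FW_FTR_SECTION / FW_FTR_SECTION_ELSE), and the iSeries-only pending-interrupt check moves to a separate block that is only reached via a branch patched in at boot on iSeries firmware. As a rough illustration only, the following is a minimal C sketch of the same shape; firmware_is_iseries(), check_iseries_pending_irqs() and restore_path() are hypothetical stand-ins, not kernel interfaces, and the real patch does this in powerpc assembly rather than with a runtime conditional.

/*
 * Minimal C sketch of the "rare platform case out of line" idea in
 * this patch -- illustration only, not kernel code.  All names here
 * are hypothetical; the actual change uses firmware-feature sections
 * so the non-iSeries path pays no cost at all at run time.
 */
#include <stdbool.h>
#include <stdio.h>

static bool firmware_is_iseries(void)
{
	return false;			/* common case: not an iSeries LPAR */
}

/* Out-of-line slow path: only ever reached on iSeries. */
static void check_iseries_pending_irqs(void)
{
	printf("iSeries: checking hypervisor-pending interrupts\n");
}

/* Hot path: analogous to the restore code in ret_from_except_lite. */
static void restore_path(void)
{
	if (__builtin_expect(firmware_is_iseries(), 0))
		check_iseries_pending_irqs();	/* branch out of line */
	/* ... restore state and return from the exception ... */
}

int main(void)
{
	restore_path();
	return 0;
}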
Diffstat (limited to 'arch/powerpc/kernel/entry_64.S')
-rw-r--r--  arch/powerpc/kernel/entry_64.S   53
1 file changed, 29 insertions(+), 24 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 2d802e97097c..55445f1dba8a 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -512,31 +512,12 @@ _GLOBAL(ret_from_except_lite)
 #endif
 
 restore:
-	ld	r5,SOFTE(r1)
-#ifdef CONFIG_PPC_ISERIES
 BEGIN_FW_FTR_SECTION
-	cmpdi	0,r5,0
-	beq	4f
-	/* Check for pending interrupts (iSeries) */
-	ld	r3,PACALPPACAPTR(r13)
-	ld	r3,LPPACAANYINT(r3)
-	cmpdi	r3,0
-	beq+	4f		/* skip do_IRQ if no interrupts */
-
-	li	r3,0
-	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
-#ifdef CONFIG_TRACE_IRQFLAGS
-	bl	.trace_hardirqs_off
-	mfmsr	r10
-#endif
-	ori	r10,r10,MSR_EE
-	mtmsrd	r10			/* hard-enable again */
-	addi	r3,r1,STACK_FRAME_OVERHEAD
-	bl	.do_IRQ
-	b	.ret_from_except_lite		/* loop back and handle more */
-4:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif
+	ld	r5,SOFTE(r1)
+FW_FTR_SECTION_ELSE
+	b	iseries_check_pending_irqs
+ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
+2:
 	TRACE_AND_RESTORE_IRQ(r5);
 
 	/* extract EE bit and use it to restore paca->hard_enabled */
@@ -592,6 +573,30 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
 	rfid
 	b	.	/* prevent speculative execution */
 
+iseries_check_pending_irqs:
+#ifdef CONFIG_PPC_ISERIES
+	ld	r5,SOFTE(r1)
+	cmpdi	0,r5,0
+	beq	2b
+	/* Check for pending interrupts (iSeries) */
+	ld	r3,PACALPPACAPTR(r13)
+	ld	r3,LPPACAANYINT(r3)
+	cmpdi	r3,0
+	beq+	2b		/* skip do_IRQ if no interrupts */
+
+	li	r3,0
+	stb	r3,PACASOFTIRQEN(r13)	/* ensure we are soft-disabled */
+#ifdef CONFIG_TRACE_IRQFLAGS
+	bl	.trace_hardirqs_off
+	mfmsr	r10
+#endif
+	ori	r10,r10,MSR_EE
+	mtmsrd	r10			/* hard-enable again */
+	addi	r3,r1,STACK_FRAME_OVERHEAD
+	bl	.do_IRQ
+	b	.ret_from_except_lite		/* loop back and handle more */
+#endif
+
 do_work:
 #ifdef CONFIG_PREEMPT
 	andi.	r0,r3,MSR_PR	/* Returning to user mode? */