Diffstat (limited to 'arch/powerpc/kernel/entry_64.S')
-rw-r--r--   arch/powerpc/kernel/entry_64.S   250
1 files changed, 147 insertions, 103 deletions
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 866462cbe2d8..f8a7a1a1a9f4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,6 +32,7 @@
 #include <asm/ptrace.h>
 #include <asm/irqflags.h>
 #include <asm/ftrace.h>
+#include <asm/hw_irq.h>
 
 /*
  * System calls.
@@ -115,39 +116,33 @@ BEGIN_FW_FTR_SECTION
 END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 #endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
 
-#ifdef CONFIG_TRACE_IRQFLAGS
-bl .trace_hardirqs_on
-REST_GPR(0,r1)
-REST_4GPRS(3,r1)
-REST_2GPRS(7,r1)
-addi r9,r1,STACK_FRAME_OVERHEAD
-ld r12,_MSR(r1)
-#endif /* CONFIG_TRACE_IRQFLAGS */
-li r10,1
-stb r10,PACASOFTIRQEN(r13)
-stb r10,PACAHARDIRQEN(r13)
-std r10,SOFTE(r1)
-#ifdef CONFIG_PPC_ISERIES
-BEGIN_FW_FTR_SECTION
-/* Hack for handling interrupts when soft-enabling on iSeries */
-cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
-andi. r10,r12,MSR_PR /* from kernel */
-crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
-bne 2f
-b hardware_interrupt_entry
-2:
-END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
-#endif /* CONFIG_PPC_ISERIES */
+/*
+ * A syscall should always be called with interrupts enabled
+ * so we just unconditionally hard-enable here. When some kind
+ * of irq tracing is used, we additionally check that condition
+ * is correct
+ */
+#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
+lbz r10,PACASOFTIRQEN(r13)
+xori r10,r10,1
+1: tdnei r10,0
+EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
+#endif
 
-/* Hard enable interrupts */
 #ifdef CONFIG_PPC_BOOK3E
 wrteei 1
 #else
-mfmsr r11
+ld r11,PACAKMSR(r13)
 ori r11,r11,MSR_EE
 mtmsrd r11,1
 #endif /* CONFIG_PPC_BOOK3E */
 
+/* We do need to set SOFTE in the stack frame or the return
+ * from interrupt will be painful
+ */
+li r10,1
+std r10,SOFTE(r1)
+
 #ifdef SHOW_SYSCALLS
 bl .do_show_syscall
 REST_GPR(0,r1)
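The rewritten entry code above assumes every syscall arrives with interrupts soft-enabled and only verifies that when both CONFIG_TRACE_IRQFLAGS and CONFIG_BUG are set: the tdnei/EMIT_BUG_ENTRY pair is a conditional trap that turns into a warning. A rough C equivalent of that check (illustrative only; soft_enabled is the paca_struct byte behind the PACASOFTIRQEN offset) would be:

    /* Illustrative C rendering of the tdnei/EMIT_BUG_ENTRY check above:
     * warn if a syscall was entered with interrupts soft-disabled.
     */
    #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
            WARN_ON(local_paca->soft_enabled != 1);
    #endif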
@@ -198,16 +193,14 @@ syscall_exit:
 andi. r10,r8,MSR_RI
 beq- unrecov_restore
 #endif
-
-/* Disable interrupts so current_thread_info()->flags can't change,
+/*
+ * Disable interrupts so current_thread_info()->flags can't change,
  * and so that we don't get interrupted after loading SRR0/1.
  */
 #ifdef CONFIG_PPC_BOOK3E
 wrteei 0
 #else
-mfmsr r10
-rldicl r10,r10,48,1
-rotldi r10,r10,16
+ld r10,PACAKMSR(r13)
 mtmsrd r10,1
 #endif /* CONFIG_PPC_BOOK3E */
 
@@ -319,7 +312,7 @@ syscall_exit_work:
 #ifdef CONFIG_PPC_BOOK3E
 wrteei 1
 #else
-mfmsr r10
+ld r10,PACAKMSR(r13)
 ori r10,r10,MSR_EE
 mtmsrd r10,1
 #endif /* CONFIG_PPC_BOOK3E */
@@ -565,10 +558,8 @@ _GLOBAL(ret_from_except_lite)
 #ifdef CONFIG_PPC_BOOK3E
 wrteei 0
 #else
-mfmsr r10 /* Get current interrupt state */
-rldicl r9,r10,48,1 /* clear MSR_EE */
-rotldi r9,r9,16
-mtmsrd r9,1 /* Update machine state */
+ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
+mtmsrd r10,1 /* Update machine state */
 #endif /* CONFIG_PPC_BOOK3E */
 
 #ifdef CONFIG_PREEMPT
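Each of the mfmsr/rldicl/rotldi/mtmsrd sequences above is replaced by a single load from PACAKMSR plus mtmsrd: the PACA now caches a ready-made kernel MSR value with EE clear, so hard-disabling (or re-enabling, by or-ing MSR_EE back in) no longer has to read and rotate the live MSR. A hedged C sketch of the idea, assuming the cached field is the kernel_msr member that the PACAKMSR offset points at and that it is initialised elsewhere in this series:

    /* Sketch only: what the 'ld rX,PACAKMSR(r13); mtmsrd rX,1' pattern does. */
    unsigned long kmsr = local_paca->kernel_msr;    /* kernel MSR, EE clear */

    __mtmsrd(kmsr, 1);              /* hard-disable interrupts */
    __mtmsrd(kmsr | MSR_EE, 1);     /* hard-enable interrupts  */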
@@ -591,25 +582,74 @@ _GLOBAL(ret_from_except_lite)
 ld r4,TI_FLAGS(r9)
 andi. r0,r4,_TIF_USER_WORK_MASK
 bne do_work
-#endif
+#endif /* !CONFIG_PREEMPT */
 
+.globl fast_exc_return_irq
+fast_exc_return_irq:
 restore:
-BEGIN_FW_FTR_SECTION
+/*
+ * This is the main kernel exit path, we first check if we
+ * have to change our interrupt state.
+ */
 ld r5,SOFTE(r1)
-FW_FTR_SECTION_ELSE
-b .Liseries_check_pending_irqs
-ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
-2:
-TRACE_AND_RESTORE_IRQ(r5);
+lbz r6,PACASOFTIRQEN(r13)
+cmpwi cr1,r5,0
+cmpw cr0,r5,r6
+beq cr0,4f
+
+/* We do, handle disable first, which is easy */
+bne cr1,3f;
+li r0,0
+stb r0,PACASOFTIRQEN(r13);
+TRACE_DISABLE_INTS
+b 4f
 
-/* extract EE bit and use it to restore paca->hard_enabled */
-ld r3,_MSR(r1)
-rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */
-stb r4,PACAHARDIRQEN(r13)
+3: /*
+ * We are about to soft-enable interrupts (we are hard disabled
+ * at this point). We check if there's anything that needs to
+ * be replayed first.
+ */
+lbz r0,PACAIRQHAPPENED(r13)
+cmpwi cr0,r0,0
+bne- restore_check_irq_replay
 
+/*
+ * Get here when nothing happened while soft-disabled, just
+ * soft-enable and move-on. We will hard-enable as a side
+ * effect of rfi
+ */
+restore_no_replay:
+TRACE_ENABLE_INTS
+li r0,1
+stb r0,PACASOFTIRQEN(r13);
+
+/*
+ * Final return path. BookE is handled in a different file
+ */
+4:
 #ifdef CONFIG_PPC_BOOK3E
 b .exception_return_book3e
 #else
+/*
+ * Clear the reservation. If we know the CPU tracks the address of
+ * the reservation then we can potentially save some cycles and use
+ * a larx. On POWER6 and POWER7 this is significantly faster.
+ */
+BEGIN_FTR_SECTION
+stdcx. r0,0,r1 /* to clear the reservation */
+FTR_SECTION_ELSE
+ldarx r4,0,r1
+ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
+
+/*
+ * Some code path such as load_up_fpu or altivec return directly
+ * here. They run entirely hard disabled and do not alter the
+ * interrupt state. They also don't use lwarx/stwcx. and thus
+ * are known not to leave dangling reservations.
+ */
+.globl fast_exception_return
+fast_exception_return:
+ld r3,_MSR(r1)
 ld r4,_CTR(r1)
 ld r0,_LINK(r1)
 mtctr r4
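The new restore: path compares the soft-enable state requested by the interrupted context (SOFTE in the stack frame) with the current paca->soft_enabled. Going to soft-disabled is trivial; going to soft-enabled first checks paca->irq_happened for an interrupt that was latched while soft-disabled and replays it if so. A control-flow sketch in C, with replay_pending_interrupt() a hypothetical stand-in for the restore_check_irq_replay path further down:

    /* Sketch of the decision made at restore: above (not kernel code). */
    static void restore_irq_state(struct paca_struct *paca, int wanted_softe)
    {
            if (wanted_softe == paca->soft_enabled)
                    return;                         /* nothing to change */
            if (!wanted_softe) {
                    paca->soft_enabled = 0;         /* disabling is easy */
                    return;
            }
            if (paca->irq_happened)
                    replay_pending_interrupt();     /* hypothetical stand-in */
            else
                    paca->soft_enabled = 1;         /* rfi will hard-enable */
    }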
@@ -623,28 +663,18 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
 beq- unrecov_restore
 
 /*
- * Clear the reservation. If we know the CPU tracks the address of
- * the reservation then we can potentially save some cycles and use
- * a larx. On POWER6 and POWER7 this is significantly faster.
- */
-BEGIN_FTR_SECTION
-stdcx. r0,0,r1 /* to clear the reservation */
-FTR_SECTION_ELSE
-ldarx r4,0,r1
-ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
-
-/*
  * Clear RI before restoring r13. If we are returning to
  * userspace and we take an exception after restoring r13,
  * we end up corrupting the userspace r13 value.
  */
-mfmsr r4
+ld r4,PACAKMSR(r13) /* Get kernel MSR without EE */
 andc r4,r4,r0 /* r0 contains MSR_RI here */
 mtmsrd r4,1
 
 /*
  * r13 is our per cpu area, only restore it if we are returning to
- * userspace
+ * userspace the value stored in the stack frame may belong to
+ * another CPU.
  */
 andi. r0,r3,MSR_PR
 beq 1f
@@ -669,30 +699,55 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 
 #endif /* CONFIG_PPC_BOOK3E */
 
-.Liseries_check_pending_irqs:
-#ifdef CONFIG_PPC_ISERIES
-ld r5,SOFTE(r1)
-cmpdi 0,r5,0
-beq 2b
-/* Check for pending interrupts (iSeries) */
-ld r3,PACALPPACAPTR(r13)
-ld r3,LPPACAANYINT(r3)
-cmpdi r3,0
-beq+ 2b /* skip do_IRQ if no interrupts */
-
-li r3,0
-stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */
-#ifdef CONFIG_TRACE_IRQFLAGS
-bl .trace_hardirqs_off
-mfmsr r10
-#endif
-ori r10,r10,MSR_EE
-mtmsrd r10 /* hard-enable again */
-addi r3,r1,STACK_FRAME_OVERHEAD
-bl .do_IRQ
-b .ret_from_except_lite /* loop back and handle more */
-#endif
+/*
+ * Something did happen, check if a re-emit is needed
+ * (this also clears paca->irq_happened)
+ */
+restore_check_irq_replay:
+/* XXX: We could implement a fast path here where we check
+ * for irq_happened being just 0x01, in which case we can
+ * clear it and return. That means that we would potentially
+ * miss a decrementer having wrapped all the way around.
+ *
+ * Still, this might be useful for things like hash_page
+ */
+bl .__check_irq_replay
+cmpwi cr0,r3,0
+beq restore_no_replay
+
+/*
+ * We need to re-emit an interrupt. We do so by re-using our
+ * existing exception frame. We first change the trap value,
+ * but we need to ensure we preserve the low nibble of it
+ */
+ld r4,_TRAP(r1)
+clrldi r4,r4,60
+or r4,r4,r3
+std r4,_TRAP(r1)
 
+/*
+ * Then find the right handler and call it. Interrupts are
+ * still soft-disabled and we keep them that way.
+ */
+cmpwi cr0,r3,0x500
+bne 1f
+addi r3,r1,STACK_FRAME_OVERHEAD;
+bl .do_IRQ
+b .ret_from_except
+1: cmpwi cr0,r3,0x900
+bne 1f
+addi r3,r1,STACK_FRAME_OVERHEAD;
+bl .timer_interrupt
+b .ret_from_except
+#ifdef CONFIG_PPC_BOOK3E
+1: cmpwi cr0,r3,0x280
+bne 1f
+addi r3,r1,STACK_FRAME_OVERHEAD;
+bl .doorbell_exception
+b .ret_from_except
+#endif /* CONFIG_PPC_BOOK3E */
+1: b .ret_from_except /* What else to do here ? */
+
 do_work:
 #ifdef CONFIG_PREEMPT
 andi. r0,r3,MSR_PR /* Returning to user mode? */
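restore_check_irq_replay asks __check_irq_replay (a C helper added elsewhere in this series, which also clears paca->irq_happened) which interrupt, if any, was swallowed while soft-disabled; it returns a trap vector or 0. The assembly then rewrites _TRAP(r1), keeping its low nibble, and branches to the matching handler. The cmpwi chain is roughly equivalent to this hedged C dispatch (regs assumed to be the existing exception frame):

    /* Sketch of the replay dispatch performed by the cmpwi chain above. */
    unsigned int vec = __check_irq_replay();
    switch (vec) {
    case 0x500:                             /* external interrupt */
            do_IRQ(regs);
            break;
    case 0x900:                             /* decrementer */
            timer_interrupt(regs);
            break;
    #ifdef CONFIG_PPC_BOOK3E
    case 0x280:                             /* doorbell */
            doorbell_exception(regs);
            break;
    #endif
    default:
            break;                          /* nothing to replay */
    }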
@@ -705,31 +760,22 @@ do_work:
 crandc eq,cr1*4+eq,eq
 bne restore
 
-/* Here we are preempting the current task.
- *
- * Ensure interrupts are soft-disabled. We also properly mark
- * the PACA to reflect the fact that they are hard-disabled
- * and trace the change
+/*
+ * Here we are preempting the current task. We want to make
+ * sure we are soft-disabled first
  */
-li r0,0
-stb r0,PACASOFTIRQEN(r13)
-stb r0,PACAHARDIRQEN(r13)
-TRACE_DISABLE_INTS
-
-/* Call the scheduler with soft IRQs off */
+SOFT_DISABLE_INTS(r3,r4)
 1: bl .preempt_schedule_irq
 
 /* Hard-disable interrupts again (and update PACA) */
 #ifdef CONFIG_PPC_BOOK3E
 wrteei 0
 #else
-mfmsr r10
-rldicl r10,r10,48,1
-rotldi r10,r10,16
+ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
 mtmsrd r10,1
 #endif /* CONFIG_PPC_BOOK3E */
-li r0,0
-stb r0,PACAHARDIRQEN(r13)
+li r0,PACA_IRQ_HARD_DIS
+stb r0,PACAIRQHAPPENED(r13)
 
 /* Re-test flags and eventually loop */
 clrrdi r9,r1,THREAD_SHIFT
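After preempt_schedule_irq the path hard-disables again, and where the old code cleared PACAHARDIRQEN it now records the hard-disable in paca->irq_happened. In C terms the bookkeeping amounts to something like the sketch below (PACA_IRQ_HARD_DIS is the flag this series introduces in asm/hw_irq.h):

    /* Sketch: hard-disable and remember it, so a later soft-enable knows
     * the hardware EE bit still has to be turned back on. */
    __hard_irq_disable();
    local_paca->irq_happened = PACA_IRQ_HARD_DIS;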
@@ -751,14 +797,12 @@ user_work:
 
 andi. r0,r4,_TIF_NEED_RESCHED
 beq 1f
-li r5,1
-TRACE_AND_RESTORE_IRQ(r5);
+bl .restore_interrupts
 bl .schedule
 b .ret_from_except_lite
 
 1: bl .save_nvgprs
-li r5,1
-TRACE_AND_RESTORE_IRQ(r5);
+bl .restore_interrupts
 addi r3,r1,STACK_FRAME_OVERHEAD
 bl .do_notify_resume
 b .ret_from_except
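Both user_work branches now call restore_interrupts, another C helper from this series, instead of open-coding TRACE_AND_RESTORE_IRQ. The intent is to properly soft- and hard-enable before calling schedule() or do_notify_resume(); a sketch consistent with how it is used here (not necessarily the exact implementation in irq.c):

    /* Sketch of what restore_interrupts() is expected to do. */
    void restore_interrupts(void)
    {
            if (irqs_disabled()) {
                    /* note EE may still be off, then soft+hard enable */
                    local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
                    local_irq_enable();
            } else {
                    __hard_irq_enable();
            }
    }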