author	Kumar Gala <galak@kernel.crashing.org>	2009-08-18 15:08:33 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2009-08-28 00:24:14 -0400
commit	bb1af71ecbfdbecbe9f7e43f703da5840b76c2e4 (patch)
tree	91167c249090c0636d56316afb08ca56f51bdfc8 /arch/powerpc
parent	4b98d9e713a03bd79ced8800e24a56359f9effbf (diff)
powerpc/book3e-64: Add support to initial_tlb_book3e for non-HES TLB
We now search through TLBnCFG looking for the first array that has IPROT support (we assume that there is only one). If that TLB has hardware entry select (HES) support we use the existing code with the proper TLB select (the HES code still needs to clean up bolted entries from firmware). The non-HES code is pretty similar to the 32-bit FSL Book-E code but makes some new assumptions (such as that we have tlbilx) and simplifies things a bit.

Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
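
To make the selection logic concrete, here is a small, self-contained C sketch of the walk the new assembly performs. It is not part of the patch: read_tlbncfg() and the mask values are illustrative stand-ins (mfspr takes an immediate SPR number, which is why the real code unrolls the loop across SPRN_TLB0CFG..SPRN_TLB3CFG).

	#include <stdio.h>

	/* Illustrative placeholder masks; the real definitions live in the
	 * kernel headers, these just need to be distinct bits for the demo. */
	#define TLBnCFG_IPROT	0x00008000u
	#define TLBnCFG_HES	0x00002000u

	/* Simulated SPRN_TLBnCFG reads; pretend TLB1 is the IPROT array. */
	static unsigned int read_tlbncfg(int n)
	{
		static const unsigned int cfg[4] = {
			0, TLBnCFG_IPROT | TLBnCFG_HES, 0, 0
		};
		return cfg[n];
	}

	/* Walk TLB0CFG..TLB2CFG for the first array with IPROT; if none is
	 * found, fall through to TLB3 exactly as the assembly does. */
	static int find_iprot_tlb(unsigned int *cfg)
	{
		int i;

		for (i = 0; i < 3; i++) {
			*cfg = read_tlbncfg(i);
			if (*cfg & TLBnCFG_IPROT)
				return i;	/* becomes MAS0_TLBSEL(i) */
		}
		*cfg = read_tlbncfg(3);
		return 3;
	}

	int main(void)
	{
		unsigned int cfg;
		int sel = find_iprot_tlb(&cfg);

		printf("TLBSEL=%d, taking the %s path\n", sel,
		       (cfg & TLBnCFG_HES) ? "have_hes" : "non-HES");
		return 0;
	}

The ternary at the end mirrors the found_iprot label in the patch, where the andi. against TLBnCFG_HES decides between the existing bolted-entry path (have_hes) and the new software entry-select path.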
Diffstat (limited to 'arch/powerpc')
 arch/powerpc/include/asm/reg_booke.h |   2 +
 arch/powerpc/kernel/exceptions-64e.S | 204 ++++++++++++++++++++++++++++++++-
 2 files changed, 202 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 9bb81d99b765..3bf783505528 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -108,6 +108,8 @@
 #define SPRN_PID2	0x27A	/* Process ID Register 2 */
 #define SPRN_TLB0CFG	0x2B0	/* TLB 0 Config Register */
 #define SPRN_TLB1CFG	0x2B1	/* TLB 1 Config Register */
+#define SPRN_TLB2CFG	0x2B2	/* TLB 2 Config Register */
+#define SPRN_TLB3CFG	0x2B3	/* TLB 3 Config Register */
 #define SPRN_EPR	0x2BE	/* External Proxy Register */
 #define SPRN_CCR1	0x378	/* Core Configuration Register 1 */
 #define SPRN_ZPR	0x3B0	/* Zone Protection Register (40x) */
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 662236c72244..9048f96237f6 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -616,18 +616,214 @@ bad_stack_book3e:
  * Setup the initial TLB for a core. This current implementation
  * assume that whatever we are running off will not conflict with
  * the new mapping at PAGE_OFFSET.
- * We also make various assumptions about the processor we run on,
- * this might have to be made more flexible based on the content
- * of MMUCFG and friends.
  */
 _GLOBAL(initial_tlb_book3e)
 
+	/* Look for the first TLB with IPROT set */
+	mfspr	r4,SPRN_TLB0CFG
+	andi.	r3,r4,TLBnCFG_IPROT
+	lis	r3,MAS0_TLBSEL(0)@h
+	bne	found_iprot
+
+	mfspr	r4,SPRN_TLB1CFG
+	andi.	r3,r4,TLBnCFG_IPROT
+	lis	r3,MAS0_TLBSEL(1)@h
+	bne	found_iprot
+
+	mfspr	r4,SPRN_TLB2CFG
+	andi.	r3,r4,TLBnCFG_IPROT
+	lis	r3,MAS0_TLBSEL(2)@h
+	bne	found_iprot
+
+	lis	r3,MAS0_TLBSEL(3)@h
+	mfspr	r4,SPRN_TLB3CFG
+	/* fall through */
+
+found_iprot:
+	andi.	r5,r4,TLBnCFG_HES
+	bne	have_hes
+
+	mflr	r8		/* save LR */
+/* 1. Find the index of the entry we're executing in
+ *
+ * r3 = MAS0_TLBSEL (for the iprot array)
+ * r4 = SPRN_TLBnCFG
+ */
+	bl	invstr				/* Find our address */
+invstr:	mflr	r6				/* Make it accessible */
+	mfmsr	r7
+	rlwinm	r5,r7,27,31,31			/* extract MSR[IS] */
+	mfspr	r7,SPRN_PID
+	slwi	r7,r7,16
+	or	r7,r7,r5
+	mtspr	SPRN_MAS6,r7
+	tlbsx	0,r6				/* search MSR[IS], SPID=PID */
+
+	mfspr	r3,SPRN_MAS0
+	rlwinm	r5,r3,16,20,31			/* Extract MAS0(Entry) */
+
+	mfspr	r7,SPRN_MAS1			/* Insure IPROT set */
+	oris	r7,r7,MAS1_IPROT@h
+	mtspr	SPRN_MAS1,r7
+	tlbwe
+
+/* 2. Invalidate all entries except the entry we're executing in
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
+ * r4 = SPRN_TLBnCFG
+ * r5 = ESEL of entry we are running in
+ */
+	andi.	r4,r4,TLBnCFG_N_ENTRY		/* Extract # entries */
+	li	r6,0				/* Set Entry counter to 0 */
+1:	mr	r7,r3				/* Set MAS0(TLBSEL) */
+	rlwimi	r7,r6,16,4,15			/* Setup MAS0 = TLBSEL | ESEL(r6) */
+	mtspr	SPRN_MAS0,r7
+	tlbre
+	mfspr	r7,SPRN_MAS1
+	rlwinm	r7,r7,0,2,31			/* Clear MAS1 Valid and IPROT */
+	cmpw	r5,r6
+	beq	skpinv				/* Dont update the current execution TLB */
+	mtspr	SPRN_MAS1,r7
+	tlbwe
+	isync
+skpinv:	addi	r6,r6,1				/* Increment */
+	cmpw	r6,r4				/* Are we done? */
+	bne	1b				/* If not, repeat */
+
+	/* Invalidate all TLBs */
+	PPC_TLBILX_ALL(0,0)
+	sync
+	isync
+
+/* 3. Setup a temp mapping and jump to it
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we are running in
+ * r5 = ESEL of entry we are running in
+ */
+	andi.	r7,r5,0x1	/* Find an entry not used and is non-zero */
+	addi	r7,r7,0x1
+	mr	r4,r3		/* Set MAS0(TLBSEL) = 1 */
+	mtspr	SPRN_MAS0,r4
+	tlbre
+
+	rlwimi	r4,r7,16,4,15	/* Setup MAS0 = TLBSEL | ESEL(r7) */
+	mtspr	SPRN_MAS0,r4
+
+	mfspr	r7,SPRN_MAS1
+	xori	r6,r7,MAS1_TS		/* Setup TMP mapping in the other Address space */
+	mtspr	SPRN_MAS1,r6
+
+	tlbwe
+
+	mfmsr	r6
+	xori	r6,r6,MSR_IS
+	mtspr	SPRN_SRR1,r6
+	bl	1f		/* Find our address */
+1:	mflr	r6
+	addi	r6,r6,(2f - 1b)
+	mtspr	SPRN_SRR0,r6
+	rfi
+2:
+
+/* 4. Clear out PIDs & Search info
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
+ * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
+ * r5 = MAS3
+ */
+	li	r6,0
+	mtspr	SPRN_MAS6,r6
+	mtspr	SPRN_PID,r6
+
+/* 5. Invalidate mapping we started in
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
+ * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
+ * r5 = MAS3
+ */
+	mtspr	SPRN_MAS0,r3
+	tlbre
+	mfspr	r6,SPRN_MAS1
+	rlwinm	r6,r6,0,2,0	/* clear IPROT */
+	mtspr	SPRN_MAS1,r6
+	tlbwe
+
+	/* Invalidate TLB1 */
+	PPC_TLBILX_ALL(0,0)
+	sync
+	isync
+
+/* The mapping only needs to be cache-coherent on SMP */
+#ifdef CONFIG_SMP
+#define M_IF_SMP	MAS2_M
+#else
+#define M_IF_SMP	0
+#endif
+
+/* 6. Setup KERNELBASE mapping in TLB[0]
+ *
+ * r3 = MAS0 w/TLBSEL & ESEL for the entry we started in
+ * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
+ * r5 = MAS3
+ */
+	rlwinm	r3,r3,0,16,3	/* clear ESEL */
+	mtspr	SPRN_MAS0,r3
+	lis	r6,(MAS1_VALID|MAS1_IPROT)@h
+	ori	r6,r6,(MAS1_TSIZE(BOOK3E_PAGESZ_1GB))@l
+	mtspr	SPRN_MAS1,r6
+
+	LOAD_REG_IMMEDIATE(r6, PAGE_OFFSET | M_IF_SMP)
+	mtspr	SPRN_MAS2,r6
+
+	rlwinm	r5,r5,0,0,25
+	ori	r5,r5,MAS3_SR | MAS3_SW | MAS3_SX
+	mtspr	SPRN_MAS3,r5
+	li	r5,-1
+	rlwinm	r5,r5,0,0,25
+
+	tlbwe
+
+/* 7. Jump to KERNELBASE mapping
+ *
+ * r4 = MAS0 w/TLBSEL & ESEL for the temp mapping
+ */
+	/* Now we branch the new virtual address mapped by this entry */
+	LOAD_REG_IMMEDIATE(r6,2f)
+	lis	r7,MSR_KERNEL@h
+	ori	r7,r7,MSR_KERNEL@l
+	mtspr	SPRN_SRR0,r6
+	mtspr	SPRN_SRR1,r7
+	rfi		/* start execution out of TLB1[0] entry */
+2:
+
+/* 8. Clear out the temp mapping
+ *
+ * r4 = MAS0 w/TLBSEL & ESEL for the entry we are running in
+ */
+	mtspr	SPRN_MAS0,r4
+	tlbre
+	mfspr	r5,SPRN_MAS1
+	rlwinm	r5,r5,0,2,0	/* clear IPROT */
+	mtspr	SPRN_MAS1,r5
+	tlbwe
+
+	/* Invalidate TLB1 */
+	PPC_TLBILX_ALL(0,0)
+	sync
+	isync
+
+	/* We translate LR and return */
+	tovirt(r8,r8)
+	mtlr	r8
+	blr
+
+have_hes:
 	/* Setup MAS 0,1,2,3 and 7 for tlbwe of a 1G entry that maps the
 	 * kernel linear mapping. We also set MAS8 once for all here though
 	 * that will have to be made dependent on whether we are running under
 	 * a hypervisor I suppose.
 	 */
-	li	r3,MAS0_HES | MAS0_WQ_ALLWAYS
+	ori	r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS
 	mtspr	SPRN_MAS0,r3
 	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
 	ori	r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT
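
A side note on the MAS0 bit twiddling in steps 2 and 3 of the patch: the rlwimi instructions insert the entry-select counter into bits 4..15 (IBM numbering) of MAS0, i.e. an ESEL field at mask 0x0fff0000. A minimal, self-contained C equivalent follows; the field position is read off the rotate/mask arguments in the patch rather than taken from kernel headers, and mas0_set_esel() is an illustrative helper, not kernel code.

	#include <stdio.h>

	/* rlwimi rD,rS,16,4,15 is equivalent to:
	 * rD = (rD & ~0x0fff0000) | ((rS << 16) & 0x0fff0000) */
	static unsigned int mas0_set_esel(unsigned int mas0, unsigned int esel)
	{
		return (mas0 & ~0x0fff0000u) | ((esel << 16) & 0x0fff0000u);
	}

	int main(void)
	{
		unsigned int mas0 = 1u << 28;	/* MAS0_TLBSEL(1) */

		printf("MAS0 for ESEL 5: 0x%08x\n", mas0_set_esel(mas0, 5));
		return 0;
	}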