author    Christophe Leroy <christophe.leroy@c-s.fr>    2016-05-17 03:02:54 -0400
committer Scott Wood <oss@buserror.net>                 2016-07-09 03:02:48 -0400
commit    4ad274502f66614eec3093aaa0cdeb4b70697ddf
tree      0ca9e2c55cd25aba07b0218483e54aad0e35dfc2
parent    bb7f380849f8c8722ea383ec5867a79d365d4574
powerpc/8xx: Rework CONFIG_PIN_TLB handling
On recent kernels, with some debug options such as CONFIG_LOCKDEP, the BSS requires more than 8M of memory, although the kernel code fits in the first 8M. Today, it is necessary to activate CONFIG_PIN_TLB to get more than 8M at startup, although pinning the TLB is not necessary for that.

We could have unconditionally mapped 16 or 24 Mbytes at startup, but some old hardware only has 8M, and mapping non-existing RAM would be an issue due to speculative accesses.

With the preceding patch, however, the TLB entries are populated on demand. By setting up the TLB miss handler to handle up to 24M until the handler is patched for the entire memory space, it is possible to allow access to more memory without mapping non-existing RAM.

It is therefore no longer needed to map memory data at all at startup; it will be handled by the TLB miss handler.

One might still want to pin the IMMR and the first 24M of RAM. It is now possible to do it in the C memory initialisation functions. In addition, we now know how much memory we have when we do it, so we are able to adapt the pinning to the real amount of memory available. So boards with less than 24M can now also benefit from PIN_TLB.

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Scott Wood <oss@buserror.net>
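Purely as an illustration of the adaptive pinning described above, here is a minimal standalone sketch (not part of the patch; the helper name pinned_8m_entries and the board sizes are made up) of how the loop added to MMU_init_hw() in the diff below scales the number of pinned 8M DTLB entries (indices 29 to 31) to the lowmem actually present:

/* Standalone illustration (not kernel code): how many 8M DTLB entries
 * would be pinned for a given amount of lowmem, mirroring the loop added
 * to MMU_init_hw(), which walks entries 29..31 while memory remains.
 */
#include <stdio.h>

#define LARGE_PAGE_SIZE_8M (1UL << 23)

static unsigned int pinned_8m_entries(unsigned long total_lowmem)
{
	unsigned int n = 0;
	unsigned long mem = total_lowmem;
	int i;

	/* Same bounds as the patch: at most TLB entries 29, 30 and 31 */
	for (i = 29; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
		n++;
		mem -= LARGE_PAGE_SIZE_8M;
	}
	return n;
}

int main(void)
{
	printf("8MB board : %u pinned entries\n", pinned_8m_entries(8UL << 20));
	printf("16MB board: %u pinned entries\n", pinned_8m_entries(16UL << 20));
	printf("32MB board: %u pinned entries\n", pinned_8m_entries(32UL << 20));
	return 0;
}

Run as-is this reports 1, 2 and 3 pinned entries for 8M, 16M and 32M of lowmem respectively, which is why boards with less than 24M can now also benefit from PIN_TLB.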
-rw-r--r--  arch/powerpc/kernel/head_8xx.S  44
-rw-r--r--  arch/powerpc/mm/8xx_mmu.c       27
2 files changed, 23 insertions, 48 deletions
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 3de7d02c36ce..00cc9df7d322 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -491,7 +491,7 @@ _ENTRY(DTLBMiss_jmp)
 
 4:
 _ENTRY(DTLBMiss_cmp)
-	cmpli	cr0, r11, PAGE_OFFSET@h
+	cmpli	cr0, r11, (PAGE_OFFSET + 0x1800000)@h
 	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 	bge-	3b
 
@@ -586,7 +586,7 @@ FixupDAR:/* Entry point for dcbx workaround. */
 	BRANCH_UNLESS_KERNEL(3f)
 	rlwinm	r11, r10, 16, 0xfff8
 _ENTRY(FixupDAR_cmp)
-	cmpli	cr7, r11, PAGE_OFFSET@h
+	cmpli	cr7, r11, (PAGE_OFFSET + 0x1800000)@h
 	blt-	cr7, 200f
 	lis	r11, (swapper_pg_dir-PAGE_OFFSET)@ha
 	/* Insert level 1 index */
@@ -823,23 +823,16 @@ initial_mmu:
 	mtspr	SPRN_MD_CTR, r10	/* Set data TLB control */
 #endif
 
-	/* Now map the lower 8 Meg into the TLBs. For this quick hack,
-	 * we can load the instruction and data TLB registers with the
-	 * same values.
-	 */
+	/* Now map the lower 8 Meg into the ITLB. */
 	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
 	ori	r8, r8, MI_EVALID	/* Mark it valid */
 	mtspr	SPRN_MI_EPN, r8
-	mtspr	SPRN_MD_EPN, r8
 	li	r8, MI_PS8MEG | (2 << 5)	/* Set 8M byte page, APG 2 */
 	ori	r8, r8, MI_SVALID	/* Make it valid */
 	mtspr	SPRN_MI_TWC, r8
-	li	r8, MI_PS8MEG		/* Set 8M byte page, APG 0 */
-	ori	r8, r8, MI_SVALID	/* Make it valid */
-	mtspr	SPRN_MD_TWC, r8
 	li	r8, MI_BOOTINIT		/* Create RPN for address 0 */
 	mtspr	SPRN_MI_RPN, r8		/* Store TLB entry */
-	mtspr	SPRN_MD_RPN, r8
+
 	lis	r8, MI_APG_INIT@h	/* Set protection modes */
 	ori	r8, r8, MI_APG_INIT@l
 	mtspr	SPRN_MI_AP, r8
@@ -851,9 +844,6 @@ initial_mmu:
 	 * internal registers (among other things).
 	 */
 #ifdef CONFIG_PIN_TLB
-	addi	r10, r10, 0x0100
-	mtspr	SPRN_MD_CTR, r10
-#endif
 	mfspr	r9, 638			/* Get current IMMR */
 	andis.	r9, r9, 0xfff8		/* Get 512 kbytes boundary */
 
@@ -866,32 +856,6 @@ initial_mmu:
 	mr	r8, r9			/* Create paddr for TLB */
 	ori	r8, r8, MI_BOOTINIT|0x2	/* Inhibit cache -- Cort */
 	mtspr	SPRN_MD_RPN, r8
-
-#ifdef CONFIG_PIN_TLB
-	/* Map two more 8M kernel data pages.
-	 */
-	addi	r10, r10, 0x0100
-	mtspr	SPRN_MD_CTR, r10
-
-	lis	r8, KERNELBASE@h	/* Create vaddr for TLB */
-	addis	r8, r8, 0x0080		/* Add 8M */
-	ori	r8, r8, MI_EVALID	/* Mark it valid */
-	mtspr	SPRN_MD_EPN, r8
-	li	r9, MI_PS8MEG		/* Set 8M byte page */
-	ori	r9, r9, MI_SVALID	/* Make it valid */
-	mtspr	SPRN_MD_TWC, r9
-	li	r11, MI_BOOTINIT	/* Create RPN for address 0 */
-	addis	r11, r11, 0x0080	/* Add 8M */
-	mtspr	SPRN_MD_RPN, r11
-
-	addi	r10, r10, 0x0100
-	mtspr	SPRN_MD_CTR, r10
-
-	addis	r8, r8, 0x0080		/* Add 8M */
-	mtspr	SPRN_MD_EPN, r8
-	mtspr	SPRN_MD_TWC, r9
-	addis	r11, r11, 0x0080	/* Add 8M */
-	mtspr	SPRN_MD_RPN, r11
 #endif
 
 	/* Since the cache is enabled according to the information we
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 996dfaa352e0..0f0a83ed7a20 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -50,16 +50,32 @@ unsigned long p_block_mapped(phys_addr_t pa)
 	return 0;
 }
 
+#define LARGE_PAGE_SIZE_8M	(1<<23)
+
 /*
  * MMU_init_hw does the chip-specific initialization of the MMU hardware.
  */
 void __init MMU_init_hw(void)
 {
-	/* Nothing to do for the time being but keep it similar to other PPC */
+	/* PIN up to the 3 first 8Mb after IMMR in DTLB table */
+#ifdef CONFIG_PIN_TLB
+	unsigned long ctr = mfspr(SPRN_MD_CTR) & 0xfe000000;
+	unsigned long flags = 0xf0 | MD_SPS16K | _PAGE_SHARED | _PAGE_DIRTY;
+	int i;
+	unsigned long addr = 0;
+	unsigned long mem = total_lowmem;
+
+	for (i = 29; i < 32 && mem >= LARGE_PAGE_SIZE_8M; i++) {
+		mtspr(SPRN_MD_CTR, ctr | (i << 8));
+		mtspr(SPRN_MD_EPN, (unsigned long)__va(addr) | MD_EVALID);
+		mtspr(SPRN_MD_TWC, MD_PS8MEG | MD_SVALID);
+		mtspr(SPRN_MD_RPN, addr | flags | _PAGE_PRESENT);
+		addr += LARGE_PAGE_SIZE_8M;
+		mem -= LARGE_PAGE_SIZE_8M;
+	}
+#endif
 }
 
-#define LARGE_PAGE_SIZE_8M	(1<<23)
-
 static void mmu_mapin_immr(void)
 {
 	unsigned long p = PHYS_IMMR_BASE;
@@ -124,13 +140,8 @@ void setup_initial_memory_limit(phys_addr_t first_memblock_base,
 	 */
 	BUG_ON(first_memblock_base != 0);
 
-#ifdef CONFIG_PIN_TLB
 	/* 8xx can only access 24MB at the moment */
 	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01800000));
-#else
-	/* 8xx can only access 8MB at the moment */
-	memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000));
-#endif
 }
 
 /*