aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm
diff options
context:
space:
mode:
authorChristophe Leroy <christophe.leroy@c-s.fr>2016-05-17 03:02:51 -0400
committerScott Wood <oss@buserror.net>2016-07-09 03:02:48 -0400
commitbb7f380849f8c8722ea383ec5867a79d365d4574 (patch)
treea0b502202aa6aff0e7f4706af09a26cadb0f4aad /arch/powerpc/mm
parent6264dbb98ff762d71c65e04ae3b2e632d28a5b84 (diff)
powerpc/8xx: Don't use page table for linear memory space
Instead of using the first level page table to define mappings for the linear memory space, we can use direct mapping from the TLB handling routines. This has several advantages: * No need to read the tables at each TLB miss * No issue in 16k pages mode where the 1st level table maps 64 Mbytes The size of the available linear space is known at system startup. In order to avoid data access at each TLB miss to know the memory size, the TLB routine is patched at startup with the proper size. This patch provides a 10%-15% improvement of TLB miss handling for kernel addresses Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr> Signed-off-by: Scott Wood <oss@buserror.net>
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--arch/powerpc/mm/8xx_mmu.c56
1 files changed, 18 insertions, 38 deletions
diff --git a/arch/powerpc/mm/8xx_mmu.c b/arch/powerpc/mm/8xx_mmu.c
index 220772579113..996dfaa352e0 100644
--- a/arch/powerpc/mm/8xx_mmu.c
+++ b/arch/powerpc/mm/8xx_mmu.c
@@ -58,9 +58,7 @@ void __init MMU_init_hw(void)
58 /* Nothing to do for the time being but keep it similar to other PPC */ 58 /* Nothing to do for the time being but keep it similar to other PPC */
59} 59}
60 60
61#define LARGE_PAGE_SIZE_4M (1<<22)
62#define LARGE_PAGE_SIZE_8M (1<<23) 61#define LARGE_PAGE_SIZE_8M (1<<23)
63#define LARGE_PAGE_SIZE_64M (1<<26)
64 62
65static void mmu_mapin_immr(void) 63static void mmu_mapin_immr(void)
66{ 64{
@@ -77,52 +75,33 @@ static void mmu_mapin_immr(void)
77#ifndef CONFIG_PIN_TLB 75#ifndef CONFIG_PIN_TLB
78extern unsigned int DTLBMiss_jmp; 76extern unsigned int DTLBMiss_jmp;
79#endif 77#endif
78extern unsigned int DTLBMiss_cmp, FixupDAR_cmp;
80 79
81unsigned long __init mmu_mapin_ram(unsigned long top) 80void mmu_patch_cmp_limit(unsigned int *addr, unsigned long mapped)
82{ 81{
83 unsigned long v, s, mapped; 82 unsigned int instr = *addr;
84 phys_addr_t p;
85 83
86 v = KERNELBASE; 84 instr &= 0xffff0000;
87 p = 0; 85 instr |= (unsigned long)__va(mapped) >> 16;
88 s = top; 86 patch_instruction(addr, instr);
87}
88
89unsigned long __init mmu_mapin_ram(unsigned long top)
90{
91 unsigned long mapped;
89 92
90 if (__map_without_ltlbs) { 93 if (__map_without_ltlbs) {
94 mapped = 0;
91 mmu_mapin_immr(); 95 mmu_mapin_immr();
92#ifndef CONFIG_PIN_TLB 96#ifndef CONFIG_PIN_TLB
93 patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP); 97 patch_instruction(&DTLBMiss_jmp, PPC_INST_NOP);
94#endif 98#endif
95 return 0; 99 } else {
96 } 100 mapped = top & ~(LARGE_PAGE_SIZE_8M - 1);
97
98#ifdef CONFIG_PPC_4K_PAGES
99 while (s >= LARGE_PAGE_SIZE_8M) {
100 pmd_t *pmdp;
101 unsigned long val = p | MD_PS8MEG;
102
103 pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
104 *pmdp++ = __pmd(val);
105 *pmdp++ = __pmd(val + LARGE_PAGE_SIZE_4M);
106
107 v += LARGE_PAGE_SIZE_8M;
108 p += LARGE_PAGE_SIZE_8M;
109 s -= LARGE_PAGE_SIZE_8M;
110 } 101 }
111#else /* CONFIG_PPC_16K_PAGES */
112 while (s >= LARGE_PAGE_SIZE_64M) {
113 pmd_t *pmdp;
114 unsigned long val = p | MD_PS8MEG;
115
116 pmdp = pmd_offset(pud_offset(pgd_offset_k(v), v), v);
117 *pmdp++ = __pmd(val);
118
119 v += LARGE_PAGE_SIZE_64M;
120 p += LARGE_PAGE_SIZE_64M;
121 s -= LARGE_PAGE_SIZE_64M;
122 }
123#endif
124 102
125 mapped = top - s; 103 mmu_patch_cmp_limit(&DTLBMiss_cmp, mapped);
104 mmu_patch_cmp_limit(&FixupDAR_cmp, mapped);
126 105
127 /* If the size of RAM is not an exact power of two, we may not 106 /* If the size of RAM is not an exact power of two, we may not
128 * have covered RAM in its entirety with 8 MiB 107 * have covered RAM in its entirety with 8 MiB
@@ -131,7 +110,8 @@ unsigned long __init mmu_mapin_ram(unsigned long top)
131 * coverage with normal-sized pages (or other reasons) do not 110 * coverage with normal-sized pages (or other reasons) do not
132 * attempt to allocate outside the allowed range. 111 * attempt to allocate outside the allowed range.
133 */ 112 */
134 memblock_set_current_limit(mapped); 113 if (mapped)
114 memblock_set_current_limit(mapped);
135 115
136 return mapped; 116 return mapped;
137} 117}