Diffstat (limited to 'arch/powerpc/mm/slb.c')
 arch/powerpc/mm/slb.c | 29 ++++++++++++++---------------
 1 file changed, 14 insertions(+), 15 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 2cc61736feee..6a8bf6c6000e 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -60,19 +60,19 @@ static inline void create_slbe(unsigned long ea, unsigned long flags,
 		     : "memory" );
 }
 
-static void slb_flush_and_rebolt(void)
+void slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
 	 * appropriately too. */
-	unsigned long linear_llp, virtual_llp, lflags, vflags;
+	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data;
 
 	WARN_ON(!irqs_disabled());
 
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | virtual_llp;
+	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
 	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
@@ -164,11 +164,10 @@ static inline void patch_slb_encoding(unsigned int *insn_addr,
 
 void slb_initialize(void)
 {
-	unsigned long linear_llp, virtual_llp;
+	unsigned long linear_llp, vmalloc_llp, io_llp;
 	static int slb_encoding_inited;
 	extern unsigned int *slb_miss_kernel_load_linear;
-	extern unsigned int *slb_miss_kernel_load_virtual;
-	extern unsigned int *slb_miss_user_load_normal;
+	extern unsigned int *slb_miss_kernel_load_io;
 #ifdef CONFIG_HUGETLB_PAGE
 	extern unsigned int *slb_miss_user_load_huge;
 	unsigned long huge_llp;
@@ -178,18 +177,19 @@ void slb_initialize(void)
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
-	virtual_llp = mmu_psize_defs[mmu_virtual_psize].sllp;
+	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
+	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
+
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
 				   SLB_VSID_KERNEL | linear_llp);
-		patch_slb_encoding(slb_miss_kernel_load_virtual,
-				   SLB_VSID_KERNEL | virtual_llp);
-		patch_slb_encoding(slb_miss_user_load_normal,
-				   SLB_VSID_USER | virtual_llp);
+		patch_slb_encoding(slb_miss_kernel_load_io,
+				   SLB_VSID_KERNEL | io_llp);
 
 		DBG("SLB: linear LLP = %04x\n", linear_llp);
-		DBG("SLB: virtual LLP = %04x\n", virtual_llp);
+		DBG("SLB: io LLP = %04x\n", io_llp);
 #ifdef CONFIG_HUGETLB_PAGE
 		patch_slb_encoding(slb_miss_user_load_huge,
 				   SLB_VSID_USER | huge_llp);
@@ -204,7 +204,7 @@ void slb_initialize(void)
 	unsigned long lflags, vflags;
 
 	lflags = SLB_VSID_KERNEL | linear_llp;
-	vflags = SLB_VSID_KERNEL | virtual_llp;
+	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
 	asm volatile("isync":::"memory");
@@ -212,7 +212,6 @@ void slb_initialize(void)
 	asm volatile("isync; slbia; isync":::"memory");
 	create_slbe(PAGE_OFFSET, lflags, 0);
 
-	/* VMALLOC space has 4K pages always for now */
 	create_slbe(VMALLOC_START, vflags, 1);
 
 	/* We don't bolt the stack for the time being - we're in boot,