Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--	arch/powerpc/mm/slb.c	43
1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 906daeda59a8..89497fb04280 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -28,9 +28,9 @@
 #include <asm/udbg.h>
 
 #ifdef DEBUG
-#define DBG(fmt...) udbg_printf(fmt)
+#define DBG(fmt...) printk(fmt)
 #else
-#define DBG(fmt...)
+#define DBG pr_debug
 #endif
 
 extern void slb_allocate_realmode(unsigned long ea);
@@ -44,13 +44,13 @@ static void slb_allocate(unsigned long ea)
 	slb_allocate_realmode(ea);
 }
 
+#define slb_esid_mask(ssize)	\
+	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
+
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
 					 unsigned long slot)
 {
-	unsigned long mask;
-
-	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
-	return (ea & mask) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
 #define slb_vsid_shift(ssize)	\
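
For illustration (not part of the patch): a minimal standalone sketch of what the reworked mk_esid_data() computes. The mask and valid-bit values below are assumed from the era's asm/mmu-hash64.h; only the masking logic is the point.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

#define ESID_MASK	0xfffffffff0000000ULL	/* 256MB segments: keep bits 63..28 (assumed value) */
#define ESID_MASK_1T	0xffffff0000000000ULL	/* 1TB segments: keep bits 63..40 (assumed value) */
#define SLB_ESID_V	0x0000000008000000ULL	/* SLB entry valid bit (assumed value) */

/* Same shape as the patched mk_esid_data(): the high ESID bits of the
 * effective address, the valid bit, and the bolted slot index. */
static uint64_t mk_esid_data(uint64_t ea, int is_1T, uint64_t slot)
{
	uint64_t mask = is_1T ? ESID_MASK_1T : ESID_MASK;

	return (ea & mask) | SLB_ESID_V | slot;
}

int main(void)
{
	/* hypothetical kernel address in the linear mapping */
	uint64_t ea = 0xc000000004321000ULL;

	/* 256MB segment, bolted slot 2 -> c000000008000002 */
	printf("%016" PRIx64 "\n", mk_esid_data(ea, 0, 2));
	return 0;
}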
@@ -263,13 +263,19 @@ void slb_initialize(void)
 	extern unsigned int *slb_miss_kernel_load_linear;
 	extern unsigned int *slb_miss_kernel_load_io;
 	extern unsigned int *slb_compare_rr_to_size;
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	extern unsigned int *slb_miss_kernel_load_vmemmap;
+	unsigned long vmemmap_llp;
+#endif
 
 	/* Prepare our SLB miss handler based on our page size */
 	linear_llp = mmu_psize_defs[mmu_linear_psize].sllp;
 	io_llp = mmu_psize_defs[mmu_io_psize].sllp;
 	vmalloc_llp = mmu_psize_defs[mmu_vmalloc_psize].sllp;
 	get_paca()->vmalloc_sllp = SLB_VSID_KERNEL | vmalloc_llp;
-
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+	vmemmap_llp = mmu_psize_defs[mmu_vmemmap_psize].sllp;
+#endif
 	if (!slb_encoding_inited) {
 		slb_encoding_inited = 1;
 		patch_slb_encoding(slb_miss_kernel_load_linear,
@@ -279,8 +285,14 @@ void slb_initialize(void)
 		patch_slb_encoding(slb_compare_rr_to_size,
 				   mmu_slb_size);
 
-		DBG("SLB: linear  LLP = %04x\n", linear_llp);
-		DBG("SLB: io      LLP = %04x\n", io_llp);
+		DBG("SLB: linear  LLP = %04lx\n", linear_llp);
+		DBG("SLB: io      LLP = %04lx\n", io_llp);
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+		patch_slb_encoding(slb_miss_kernel_load_vmemmap,
+				   SLB_VSID_KERNEL | vmemmap_llp);
+		DBG("SLB: vmemmap LLP = %04lx\n", vmemmap_llp);
+#endif
 	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
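
The patch_slb_encoding() calls above rewrite the 16-bit immediate operand of a fixed instruction in the SLB miss handler at boot, so the handler carries the right LLP/VSID flags without a runtime memory load. A hedged sketch of that code-patching idea follows; the body mirrors slb.c's helper as far as can be reconstructed, and flush_icache_range() is the kernel's own primitive.

/* Sketch of the runtime instruction patching used by slb_initialize().
 * Keep the opcode and register fields (high 16 bits), substitute the
 * immediate (low 16 bits), then flush the icache so the CPU refetches
 * the modified instruction.
 */
static void patch_slb_encoding(unsigned int *insn_addr, unsigned int immed)
{
	*insn_addr = (*insn_addr & 0xffff0000) | (immed & 0xffff);
	flush_icache_range((unsigned long)insn_addr,
			   (unsigned long)insn_addr + 4);
}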
@@ -301,11 +313,16 @@ void slb_initialize(void)
 
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
+	/* For the boot cpu, we're running on the stack in init_thread_union,
+	 * which is in the first segment of the linear mapping, and also
+	 * get_paca()->kstack hasn't been initialized yet.
+	 * For secondary cpus, we need to bolt the kernel stack entry now.
+	 */
 	slb_shadow_clear(2);
+	if (raw_smp_processor_id() != boot_cpuid &&
+	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
+		create_shadowed_slbe(get_paca()->kstack,
+				     mmu_kernel_ssize, lflags, 2);
 
-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment.  By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
 	asm volatile("isync":::"memory");
 }
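
The new bolting logic only creates SLB entry 2 when the cpu is not the boot cpu and its kernel stack sits outside the first segment of the linear mapping, which slb_initialize() has already bolted as entry 0. A standalone sketch of that segment test, with PAGE_OFFSET and a 256MB ESID mask as assumed values:

#include <stdio.h>
#include <stdint.h>

#define PAGE_OFFSET	0xc000000000000000ULL	/* start of the linear mapping (assumed value) */
#define ESID_MASK	0xfffffffff0000000ULL	/* 256MB segment mask (assumed value) */

/* Returns nonzero when a kernel stack needs its own bolted SLB entry:
 * its segment base is above PAGE_OFFSET, i.e. not the first linear
 * segment that is already covered by bolted entry 0.
 */
static int stack_needs_bolting(uint64_t kstack)
{
	return (kstack & ESID_MASK) > PAGE_OFFSET;
}

int main(void)
{
	/* stack in the first 256MB segment: already covered -> 0 */
	printf("%d\n", stack_needs_bolting(0xc000000000123000ULL));
	/* stack in a later segment: needs its own entry -> 1 */
	printf("%d\n", stack_needs_bolting(0xc000000034567000ULL));
	return 0;
}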