Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--  arch/powerpc/mm/slb.c  27
1 files changed, 16 insertions, 11 deletions
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 906daeda59a8..cf8705e32d60 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -30,7 +30,7 @@
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
 #else
-#define DBG(fmt...)
+#define DBG pr_debug
 #endif
 
 extern void slb_allocate_realmode(unsigned long ea);
@@ -44,13 +44,13 @@ static void slb_allocate(unsigned long ea)
 	slb_allocate_realmode(ea);
 }
 
+#define slb_esid_mask(ssize)	\
+	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
+
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
 					 unsigned long slot)
 {
-	unsigned long mask;
-
-	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
-	return (ea & mask) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
 #define slb_vsid_shift(ssize)	\
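(Aside, not part of the patch: a rough standalone sketch of what the new slb_esid_mask()/mk_esid_data() pair computes. The constant values below are stand-ins modelled on the powerpc mmu-hash64 header and the main() driver is hypothetical; treat this as an illustration under those assumptions, not kernel code.)

/*
 * Sketch: how an SLB ESID word is composed from an effective address,
 * a segment size and a slot number.  ESID_MASK keeps the bits above
 * the 256MB segment boundary, ESID_MASK_1T the bits above the 1TB
 * boundary, and SLB_ESID_V marks the entry valid.
 */
#include <stdio.h>

#define MMU_SEGSIZE_256M	0
#define MMU_SEGSIZE_1T		1

#define ESID_MASK	0xfffffffff0000000UL	/* assumed: drop low 28 bits (256MB) */
#define ESID_MASK_1T	0xffffff0000000000UL	/* assumed: drop low 40 bits (1TB)   */
#define SLB_ESID_V	0x0000000008000000UL	/* assumed: "valid" bit              */

#define slb_esid_mask(ssize) \
	(((ssize) == MMU_SEGSIZE_256M) ? ESID_MASK : ESID_MASK_1T)

static unsigned long mk_esid_data(unsigned long ea, int ssize, unsigned long slot)
{
	/* segment base | valid bit | SLB slot index */
	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
}

int main(void)
{
	unsigned long ea = 0xc000000012345678UL;

	printf("256M segment: %016lx\n", mk_esid_data(ea, MMU_SEGSIZE_256M, 2));
	printf("1T segment:   %016lx\n", mk_esid_data(ea, MMU_SEGSIZE_1T, 2));
	return 0;
}

With a 256MB segment size the low 28 bits of the effective address are masked off, so every address within one segment yields the same ESID word apart from the slot index; this is also what lets the patch reuse slb_esid_mask() further down to compare the kernel stack's segment against PAGE_OFFSET.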
@@ -279,8 +279,8 @@ void slb_initialize(void)
 		patch_slb_encoding(slb_compare_rr_to_size,
 				   mmu_slb_size);
 
-		DBG("SLB: linear LLP = %04x\n", linear_llp);
-		DBG("SLB: io LLP = %04x\n", io_llp);
+		DBG("SLB: linear LLP = %04lx\n", linear_llp);
+		DBG("SLB: io LLP = %04lx\n", io_llp);
 	}
 
 	get_paca()->stab_rr = SLB_NUM_BOLTED;
@@ -301,11 +301,16 @@ void slb_initialize(void)
 
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
+	/* For the boot cpu, we're running on the stack in init_thread_union,
+	 * which is in the first segment of the linear mapping, and also
+	 * get_paca()->kstack hasn't been initialized yet.
+	 * For secondary cpus, we need to bolt the kernel stack entry now.
+	 */
 	slb_shadow_clear(2);
+	if (raw_smp_processor_id() != boot_cpuid &&
+	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
+		create_shadowed_slbe(get_paca()->kstack,
+				     mmu_kernel_ssize, lflags, 2);
 
-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment.  By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
 	asm volatile("isync":::"memory");
 }
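(Aside: the last hunk replaces the old "stack is in the bolted segment" comment with logic that bolts SLB entry 2 for the kernel stack on secondary cpus. A minimal sketch of the condition being tested follows, assuming placeholder values for PAGE_OFFSET and the 256MB ESID mask; need_bolted_stack_entry() is a hypothetical helper used only for illustration.)

/*
 * Sketch: should slb_initialize() bolt an SLB entry for this cpu's
 * kernel stack?  The boot cpu is still running on init_thread_union,
 * which sits in the first (already bolted) segment of the linear
 * mapping, and its paca kstack is not set up yet, so only secondary
 * cpus whose stack lives in a different segment need entry 2.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_OFFSET	0xc000000000000000UL	/* assumed start of the linear mapping */
#define ESID_MASK	0xfffffffff0000000UL	/* assumed 256MB segment mask          */

static bool need_bolted_stack_entry(unsigned long kstack, bool is_boot_cpu)
{
	if (is_boot_cpu)
		return false;	/* stack is in the bolted first segment */

	/* bolt only if the stack's segment is not the one at PAGE_OFFSET */
	return (kstack & ESID_MASK) > PAGE_OFFSET;
}

int main(void)
{
	/* stack inside the first linear segment: no extra entry needed */
	printf("%d\n", need_bolted_stack_entry(0xc000000000007000UL, false));
	/* stack in a later segment: bolt it */
	printf("%d\n", need_bolted_stack_entry(0xc000000045678000UL, false));
	return 0;
}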