author    Paul Mackerras <paulus@samba.org>  2008-05-02 00:29:12 -0400
committer Paul Mackerras <paulus@samba.org>  2008-05-02 01:00:45 -0400
commit    3b5750644b2ffa2a76fdfe7b4e00e4af2ecf3539 (patch)
tree      491ea9a2d4c091abadc1d39f694fe13e70390d63 /arch/powerpc
parent    d9f2f3f537acb8aa04280509b2eed50c855fd3ef (diff)
[POWERPC] Bolt in SLB entry for kernel stack on secondary cpus
This fixes a regression reported by Kamalesh Bulabel where a POWER4
machine would crash because of an SLB miss at a point where the SLB
miss exception was unrecoverable.  This regression is tracked at:

http://bugzilla.kernel.org/show_bug.cgi?id=10082

SLB misses at such points shouldn't happen because the kernel stack is
the only memory accessed other than things in the first segment of the
linear mapping (which is mapped at all times by entry 0 of the SLB).
The context switch code ensures that SLB entry 2 covers the kernel
stack, if it is not already covered by entry 0.  None of entries 0 to 2
are ever replaced by the SLB miss handler.

Where this went wrong is that the context switch code assumes it
doesn't have to write to SLB entry 2 if the new kernel stack is in the
same segment as the old kernel stack, since entry 2 should already be
correct.  However, when we start up a secondary cpu, it calls
slb_initialize, which doesn't set up entry 2.  This is correct for the
boot cpu, where we will be using a stack in the kernel BSS at this
point (i.e. init_thread_union), but not necessarily for secondary cpus,
whose initial stack can be allocated anywhere.  This doesn't cause any
immediate problem since the SLB miss handler will just create an SLB
entry somewhere else to cover the initial stack.

In fact it's possible for the cpu to go quite a long time without SLB
entry 2 being valid.  Eventually, though, the entry created by the SLB
miss handler will get overwritten by some other entry, and if the next
access to the stack is at an unrecoverable point, we get the crash.

This fixes the problem by making slb_initialize create a suitable entry
for the kernel stack, if we are on a secondary cpu and the stack isn't
covered by SLB entry 0.  This requires initializing the
get_paca()->kstack field earlier, so I do that in smp_create_idle where
the current field is initialized.  This also abstracts a bit of the
computation that mk_esid_data in slb.c does so that it can be used in
slb_initialize.

Signed-off-by: Paul Mackerras <paulus@samba.org>
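As a minimal illustration of the "same segment" test the context switch
code relies on, here is a user-space sketch; the constants and stack
addresses below are illustrative stand-ins, not the kernel's actual
definitions (the kernel uses ESID_MASK / ESID_MASK_1T from its headers):

/*
 * Sketch (not kernel code): a 256MB segment spans 2^28 bytes, so
 * masking off the low 28 bits of an effective address leaves its
 * segment identity.
 */
#include <stdio.h>

#define SEGMENT_SHIFT_256M 28
/* Illustrative stand-in for ESID_MASK: clears the 28 offset bits. */
#define ESID_MASK_DEMO (~((1ULL << SEGMENT_SHIFT_256M) - 1))

/* Two stacks can share an SLB entry iff their masked addresses match. */
static int same_segment(unsigned long long a, unsigned long long b)
{
	return (a & ESID_MASK_DEMO) == (b & ESID_MASK_DEMO);
}

int main(void)
{
	/* Hypothetical kernel stacks in the ppc64 linear mapping. */
	unsigned long long boot_stack = 0xc000000000f10000ULL; /* 1st segment */
	unsigned long long sec_stack  = 0xc000000043a20000ULL; /* elsewhere */

	/* The context switch skips rewriting SLB entry 2 when this
	 * returns 1, which is why entry 2 must already be valid when a
	 * secondary cpu first runs. */
	printf("same segment: %d\n", same_segment(boot_stack, sec_stack));
	return 0;
}

The slb_esid_mask() macro added by the patch below plays the role of
ESID_MASK_DEMO here: two addresses fall in the same segment exactly
when their masked values are equal.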
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/kernel/smp.c |  2 ++
-rw-r--r--  arch/powerpc/mm/slb.c     | 21 +++++++++++++--------
2 files changed, 15 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index be35ffae10f0..1457aa0a08f1 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -386,6 +386,8 @@ static void __init smp_create_idle(unsigned int cpu)
 		panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p));
 #ifdef CONFIG_PPC64
 	paca[cpu].__current = p;
+	paca[cpu].kstack = (unsigned long) task_thread_info(p)
+		+ THREAD_SIZE - STACK_FRAME_OVERHEAD;
 #endif
 	current_set[cpu] = task_thread_info(p);
 	task_thread_info(p)->cpu = cpu;
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 497ec059bc82..cf8705e32d60 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -44,13 +44,13 @@ static void slb_allocate(unsigned long ea)
 		slb_allocate_realmode(ea);
 }
 
+#define slb_esid_mask(ssize)	\
+	(((ssize) == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T)
+
 static inline unsigned long mk_esid_data(unsigned long ea, int ssize,
 					 unsigned long slot)
 {
-	unsigned long mask;
-
-	mask = (ssize == MMU_SEGSIZE_256M)? ESID_MASK: ESID_MASK_1T;
-	return (ea & mask) | SLB_ESID_V | slot;
+	return (ea & slb_esid_mask(ssize)) | SLB_ESID_V | slot;
 }
 
 #define slb_vsid_shift(ssize)	\
@@ -301,11 +301,16 @@ void slb_initialize(void)
 
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
+	/* For the boot cpu, we're running on the stack in init_thread_union,
+	 * which is in the first segment of the linear mapping, and also
+	 * get_paca()->kstack hasn't been initialized yet.
+	 * For secondary cpus, we need to bolt the kernel stack entry now.
+	 */
 	slb_shadow_clear(2);
+	if (raw_smp_processor_id() != boot_cpuid &&
+	    (get_paca()->kstack & slb_esid_mask(mmu_kernel_ssize)) > PAGE_OFFSET)
+		create_shadowed_slbe(get_paca()->kstack,
+				     mmu_kernel_ssize, lflags, 2);
 
-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment. By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
 	asm volatile("isync":::"memory");
 }
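A note on the new guard: slb_esid_mask(mmu_kernel_ssize) keeps only the
segment bits of get_paca()->kstack, so the masked value equals
PAGE_OFFSET (0xc000000000000000 on 64-bit powerpc) precisely when the
stack sits in the first segment of the linear mapping, which SLB entry
0 already bolts.  The "> PAGE_OFFSET" comparison is, in effect, the
"stack isn't covered by SLB entry 0" test from the commit message: a
hypothetical secondary-cpu stack at 0xc000000043a20000, for instance,
masks to 0xc000000040000000 with 256MB segments and so gets a bolted
entry 2.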