author    Paul Mackerras <paulus@samba.org>    2007-08-24 23:14:28 -0400
committer Paul Mackerras <paulus@samba.org>    2007-08-25 02:58:43 -0400
commit    175587cca7447daf5a13e4a53d32360ed8cba804 (patch)
tree      9594f43322c74d84e83469916dcd933859a915bb
parent    e120e8d03a263cf75f2abc0f8b3a03a65cfd3b88 (diff)
[POWERPC] Fix SLB initialization at boot time
This partially reverts edd0622bd2e8f755c960827e15aa6908c3c5aa94.

It turns out that the part of that commit that aimed to ensure that we
created an SLB entry for the kernel stack on secondary CPUs when
starting the CPU didn't achieve its aim, and in fact caused a
regression, because get_paca()->kstack is not initialized at the point
where slb_initialize is called.

This therefore just reverts that part of that commit, while keeping the
change to slb_flush_and_rebolt, which is correct and necessary.

Signed-off-by: Paul Mackerras <paulus@samba.org>
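To make the failure mode concrete, here is a minimal userspace model of the
ordering bug the message describes: a function running early on a secondary
CPU reads get_paca()->kstack before CPU bringup has filled it in. Everything
below except the get_paca()->kstack access pattern is a simplified,
illustrative stand-in, not the kernel's real definitions.

/* Simplified model: slb_initialize() runs before bringup sets kstack,
 * so any SLB entry "bolted" for the stack at that point would map
 * garbage.  All names and values here are illustrative stand-ins. */
#include <stdio.h>

struct paca_struct {
	unsigned long kstack;		/* kernel stack pointer, 0 until set */
};

static struct paca_struct paca;		/* this CPU's paca, zero-initialized */

static struct paca_struct *get_paca(void)
{
	return &paca;
}

static void slb_initialize(void)
{
	/* The reverted code would have consumed kstack here. */
	printf("slb_initialize: kstack = %#lx (still unset)\n",
	       get_paca()->kstack);
}

int main(void)
{
	slb_initialize();			/* runs first on a secondary CPU */
	get_paca()->kstack = 0xc000000000f00000UL; /* set later by bringup (made-up value) */
	printf("after bringup:  kstack = %#lx\n", get_paca()->kstack);
	return 0;
}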
-rw-r--r--	arch/powerpc/mm/slb.c	| 36 ++++++++++++++++++++++++++++--------
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a73d2d700973..ff1811ac6c81 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -74,6 +74,22 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
+static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+					unsigned long entry)
+{
+	/*
+	 * Updating the shadow buffer before writing the SLB ensures
+	 * we don't get a stale entry here if we get preempted by PHYP
+	 * between these two statements.
+	 */
+	slb_shadow_update(ea, flags, entry);
+
+	asm volatile("slbmte  %0,%1" :
+		     : "r" (mk_vsid_data(ea, flags)),
+		       "r" (mk_esid_data(ea, entry))
+		     : "memory" );
+}
+
 void slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
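For orientation: the slbmte instruction in the new helper takes a VSID dword
and an ESID dword, composed by mk_vsid_data() and mk_esid_data() in the same
file. The standalone sketch below shows how the ESID half is packed; the
constant values are written from memory as assumptions and should be checked
against the kernel headers rather than trusted.

/* Standalone sketch of how the ESID dword handed to slbmte is
 * composed: effective segment bits, a valid bit, and the bolted slot
 * index.  The constants mirror the 64-bit PowerPC layout of this era
 * but are local assumptions, not taken from kernel headers. */
#include <stdio.h>

#define ESID_MASK	0xfffffffff0000000UL	/* top bits: effective segment ID */
#define SLB_ESID_V	0x0000000008000000UL	/* entry-valid bit */

static unsigned long mk_esid_data(unsigned long ea, unsigned long slot)
{
	return (ea & ESID_MASK) | SLB_ESID_V | slot;
}

int main(void)
{
	unsigned long page_offset = 0xc000000000000000UL; /* kernel linear-map base */

	/* Bolted entry 0, as created by create_shadowed_slbe(PAGE_OFFSET, ...). */
	printf("ESID dword for slot 0: %#lx\n", mk_esid_data(page_offset, 0));
	return 0;
}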
@@ -226,12 +242,16 @@ void slb_initialize(void)
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	slb_shadow_update(PAGE_OFFSET, lflags, 0);
-	asm volatile("isync; slbia; sync; slbmte  %0,%1; isync" ::
-		     "r" (get_slb_shadow()->save_area[0].vsid),
-		     "r" (get_slb_shadow()->save_area[0].esid) : "memory");
+	asm volatile("isync":::"memory");
+	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
+	asm volatile("isync; slbia; isync":::"memory");
+	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
 
-	slb_shadow_update(VMALLOC_START, vflags, 1);
+	create_shadowed_slbe(VMALLOC_START, vflags, 1);
 
-	slb_flush_and_rebolt();
+	/* We don't bolt the stack for the time being - we're in boot,
+	 * so the stack is in the bolted segment.  By the time it goes
+	 * elsewhere, we'll call _switch() which will bolt in the new
+	 * one. */
+	asm volatile("isync":::"memory");
 }
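The closing comment's claim, that the boot stack needs no SLB entry of its
own because it already lives in the bolted kernel segment (and _switch()
will bolt the new stack once execution moves off it), can be sanity-checked
with segment arithmetic. In the sketch below the stack address is
hypothetical; the 256MB (2^28) segment size matches the era's layout.

/* Two effective addresses are covered by the same SLB entry iff they
 * fall in the same 256MB segment, i.e. agree in all bits above
 * SID_SHIFT.  The stack address is a made-up example, not a real
 * kernel value. */
#include <stdio.h>

#define SID_SHIFT	28	/* 256MB segments */

static int same_segment(unsigned long a, unsigned long b)
{
	return (a >> SID_SHIFT) == (b >> SID_SHIFT);
}

int main(void)
{
	unsigned long page_offset = 0xc000000000000000UL; /* bolted segment base */
	unsigned long boot_stack  = 0xc000000000b00000UL; /* hypothetical early stack */

	printf("boot stack covered by the bolted segment: %s\n",
	       same_segment(page_offset, boot_stack) ? "yes" : "no");
	return 0;
}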