author    Paul Mackerras <paulus@samba.org>  2007-08-28 01:56:11 -0400
committer Paul Mackerras <paulus@samba.org>  2007-08-28 01:56:11 -0400
commit    35438c4327df18dbf5e7f597b69299119f4a14de (patch)
tree      a4589d731015db93f2eba8f84ffb1f48a8084020 /arch/powerpc/mm
parent    2f6c9d961081dc7b109eb19166244bcb2a5dfc28 (diff)
parent    b07d68b5ca4d55a16fab223d63d5fb36f89ff42f (diff)
Merge branch 'linux-2.6' into for-2.6.24
Diffstat (limited to 'arch/powerpc/mm')
-rw-r--r--   arch/powerpc/mm/slb.c   36
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a73d2d700973..ff1811ac6c81 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -74,6 +74,22 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
+static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+					unsigned long entry)
+{
+	/*
+	 * Updating the shadow buffer before writing the SLB ensures
+	 * we don't get a stale entry here if we get preempted by PHYP
+	 * between these two statements.
+	 */
+	slb_shadow_update(ea, flags, entry);
+
+	asm volatile("slbmte  %0,%1" :
+		     : "r" (mk_vsid_data(ea, flags)),
+		       "r" (mk_esid_data(ea, entry))
+		     : "memory" );
+}
+
 void slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
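The helper added above writes the PHYP shadow save area before it issues slbmte, so a hypervisor preemption between the two operations cannot leave the shadow stale relative to the hardware SLB. As a rough sketch only (not part of the patch), this is how a caller in slb.c might bolt the kernel linear mapping with it; the lflags expression and the function name are assumptions modelled on the surrounding slb_initialize() code.

/* Illustrative sketch, not from the patch: bolt the kernel linear
 * mapping into SLB slot 0 via the new helper.  The lflags expression
 * is an assumption based on how slb_initialize() builds it. */
static void __init bolt_linear_mapping_sketch(void)
{
	unsigned long lflags = SLB_VSID_KERNEL |
			       mmu_psize_defs[mmu_linear_psize].sllp;

	/* Shadow save area is updated first, the hardware SLB second -
	 * the ordering the comment in create_shadowed_slbe() relies on. */
	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
}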
@@ -226,12 +242,16 @@ void slb_initialize(void)
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	slb_shadow_update(PAGE_OFFSET, lflags, 0);
-	asm volatile("isync; slbia; sync; slbmte  %0,%1; isync" ::
-		     "r" (get_slb_shadow()->save_area[0].vsid),
-		     "r" (get_slb_shadow()->save_area[0].esid) : "memory");
-
-	slb_shadow_update(VMALLOC_START, vflags, 1);
-
-	slb_flush_and_rebolt();
+	asm volatile("isync":::"memory");
+	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
+	asm volatile("isync; slbia; isync":::"memory");
+	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
+
+	create_shadowed_slbe(VMALLOC_START, vflags, 1);
+
+	/* We don't bolt the stack for the time being - we're in boot,
+	 * so the stack is in the bolted segment.  By the time it goes
+	 * elsewhere, we'll call _switch() which will bolt in the new
+	 * one. */
+	asm volatile("isync":::"memory");
 }
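For context, slb_initialize() no longer calls slb_flush_and_rebolt() (which reads the shadow area that previously had to be populated first); it now flushes the SLB directly and then recreates the bolted entries through create_shadowed_slbe(). Below is a condensed sketch of that flush step with the apparent reasoning spelled out in comments; the helper name is hypothetical and not something the patch adds.

/* Hypothetical helper, for illustration only: the direct flush sequence
 * that slb_initialize() now open-codes before re-creating the bolted
 * entries. */
static inline void slb_flush_all_sketch(void)
{
	/* Order against any earlier SLB updates. */
	asm volatile("isync" : : : "memory");
	/* slbia does not invalidate SLB entry 0, so slot 0 is cleared
	 * explicitly with an all-zero (invalid) entry - hence the
	 * "even slot 0" wording in the comment above. */
	asm volatile("slbmte  %0,%0" : : "r" (0) : "memory");
	/* Invalidate the remaining SLB entries and the ERATs, then
	 * resynchronize before the bolted entries are rewritten. */
	asm volatile("isync; slbia; isync" : : : "memory");
}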