-rw-r--r--  arch/powerpc/kernel/entry_64.S      3
-rw-r--r--  arch/powerpc/mm/hash_utils_64.c     2
-rw-r--r--  arch/powerpc/mm/slb.c              28
-rw-r--r--  include/asm-powerpc/mmu-hash64.h    1
4 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9ef28da2c7fe..952eba6701f4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -389,8 +389,11 @@ BEGIN_FTR_SECTION
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9)	/* Clear ESID */
+	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9)	/* Save VSID */
+	eieio
 	std	r0,SLBSHADOW_STACKESID(r9)	/* Save ESID */
+	eieio
 
 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
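
Note: the three eieio instructions added here are the assembly-side half of the slb.c change below. The SLB shadow buffer is read asynchronously by the hypervisor, so the clear-ESID, write-VSID, rewrite-ESID stores must become visible in exactly that order, and eieio provides the store-store barrier between them. A rough C rendering of the sequence above (a sketch only; the save_area index comes from slb.c's "third entry (stack)" comment, and smp_wmb() is assumed to expand to eieio on 64-bit PowerPC of this vintage):

	/* Sketch of the asm sequence for the bolted stack entry (entry 2) */
	get_slb_shadow()->save_area[2].esid = 0;     /* clear ESID (r12) */
	smp_wmb();                                   /* eieio */
	get_slb_shadow()->save_area[2].vsid = vsid;  /* save VSID (r7)   */
	smp_wmb();                                   /* eieio */
	get_slb_shadow()->save_area[2].esid = esid;  /* save ESID (r0)   */
	smp_wmb();                                   /* eieio */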
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bc7b0cedae5e..f1789578747a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -759,7 +759,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		    mmu_psize_defs[mmu_vmalloc_psize].sllp) {
 			get_paca()->vmalloc_sllp =
 				mmu_psize_defs[mmu_vmalloc_psize].sllp;
-			slb_flush_and_rebolt();
+			slb_vmalloc_update();
 		}
 #endif /* CONFIG_PPC_64K_PAGES */
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 304375a73574..b0697017d0e8 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
@@ -61,11 +62,11 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
 	 * updating it.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	barrier();
-	get_slb_shadow()->save_area[entry].vsid = vsid;
-	barrier();
-	get_slb_shadow()->save_area[entry].esid = esid;
-
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	smp_wmb();
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
@@ -76,8 +77,7 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-			  entry);
+	slb_shadow_update(ea, flags, entry);
 
 	asm volatile("slbmte %0,%1" :
 		     : "r" (mk_vsid_data(ea, flags)),
@@ -104,8 +104,7 @@ void slb_flush_and_rebolt(void)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(ksp_esid_data,
-			  mk_vsid_data(ksp_esid_data, lflags), 2);
+	slb_shadow_update(get_paca()->kstack, lflags, 2);
 
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
@@ -123,6 +122,15 @@ void slb_flush_and_rebolt(void)
123 : "memory"); 122 : "memory");
124} 123}
125 124
125void slb_vmalloc_update(void)
126{
127 unsigned long vflags;
128
129 vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
130 slb_shadow_update(VMALLOC_START, vflags, 1);
131 slb_flush_and_rebolt();
132}
133
126/* Flush all user entries from the segment table of the current processor. */ 134/* Flush all user entries from the segment table of the current processor. */
127void switch_slb(struct task_struct *tsk, struct mm_struct *mm) 135void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
128{ 136{
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 695962f02059..3112ad14ad95 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -262,6 +262,7 @@ extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);
 extern void stab_initialize(unsigned long stab);
 
+extern void slb_vmalloc_update(void);
 #endif /* __ASSEMBLY__ */
 
 /*