author	Michael Neuling <mikey@neuling.org>	2007-08-02 21:55:39 -0400
committer	Paul Mackerras <paulus@samba.org>	2007-08-03 05:36:01 -0400
commit	67439b76f29cb278bb3412fc873b980fc65110c9 (patch)
tree	d70c627453d429f63c777769f5596184c001fe39 /arch
parent	5628244059976009151d41c2798855290753d8d5 (diff)
[POWERPC] Fixes for the SLB shadow buffer code
On a machine with hardware 64kB pages and a kernel configured for a 64kB
base page size, we need to change the vmalloc segment from 64kB pages to
4kB pages if some driver creates a non-cacheable mapping in the vmalloc
area. However, we never updated the SLB shadow buffer to match. This
fixes it. Thanks to paulus for finding this.

Also add some write barriers to ensure the shadow buffer contents are
always consistent.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
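Why the barriers matter: the shadow buffer is read asynchronously by the
hypervisor, so an entry must never be observable with a valid ESID paired
with a half-written VSID. The update protocol is therefore: invalidate the
entry, write the new VSID, then republish a valid ESID, with a write
barrier after each store. Below is a minimal standalone C11 sketch of that
protocol, not kernel code; the names shadow_entry and shadow_update are
invented for illustration, and C11 release fences stand in for the
kernel's smp_wmb().

/* sketch.c - illustrative only. Mirrors the invalidate -> write
 * payload -> revalidate ordering that slb_shadow_update() below
 * enforces with smp_wmb(). */
#include <stdatomic.h>
#include <stdint.h>

struct shadow_entry {
	_Atomic uint64_t esid;		/* nonzero means "valid" */
	_Atomic uint64_t vsid;
};

static void shadow_update(struct shadow_entry *e,
			  uint64_t new_esid, uint64_t new_vsid)
{
	/* 1. Invalidate, so a concurrent reader skips the entry. */
	atomic_store_explicit(&e->esid, 0, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */

	/* 2. Update the payload while the entry is invalid. */
	atomic_store_explicit(&e->vsid, new_vsid, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */

	/* 3. Republish: a reader pairing these fences with acquire
	 * ordering sees the new VSID whenever it sees this ESID. */
	atomic_store_explicit(&e->esid, new_esid, memory_order_relaxed);
	atomic_thread_fence(memory_order_release);	/* ~smp_wmb() */
}

The trailing fence plays the same role as the final eieio added to
entry_64.S below: the completed shadow entry is made visible before any
subsequent SLB manipulation.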
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/kernel/entry_64.S	3
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	2
-rw-r--r--	arch/powerpc/mm/slb.c	28
3 files changed, 22 insertions(+), 11 deletions(-)
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 9ef28da2c7fe..952eba6701f4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -389,8 +389,11 @@ BEGIN_FTR_SECTION
 	ld	r9,PACA_SLBSHADOWPTR(r13)
 	li	r12,0
 	std	r12,SLBSHADOW_STACKESID(r9) /* Clear ESID */
+	eieio
 	std	r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */
+	eieio
 	std	r0,SLBSHADOW_STACKESID(r9) /* Save ESID */
+	eieio
 
 	slbie	r6
 	slbie	r6		/* Workaround POWER5 < DD2.1 issue */
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index bc7b0cedae5e..f1789578747a 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -759,7 +759,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 		    mmu_psize_defs[mmu_vmalloc_psize].sllp) {
 			get_paca()->vmalloc_sllp =
 				mmu_psize_defs[mmu_vmalloc_psize].sllp;
-			slb_flush_and_rebolt();
+			slb_vmalloc_update();
 		}
 #endif /* CONFIG_PPC_64K_PAGES */
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 304375a73574..b0697017d0e8 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -53,7 +53,8 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
 	return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+static inline void slb_shadow_update(unsigned long ea,
+				     unsigned long flags,
 				     unsigned long entry)
 {
 	/*
@@ -61,11 +62,11 @@ static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
 	 * updating it.
 	 */
 	get_slb_shadow()->save_area[entry].esid = 0;
-	barrier();
-	get_slb_shadow()->save_area[entry].vsid = vsid;
-	barrier();
-	get_slb_shadow()->save_area[entry].esid = esid;
-
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].vsid = mk_vsid_data(ea, flags);
+	smp_wmb();
+	get_slb_shadow()->save_area[entry].esid = mk_esid_data(ea, entry);
+	smp_wmb();
 }
 
 static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
@@ -76,8 +77,7 @@ static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
 	 * we don't get a stale entry here if we get preempted by PHYP
 	 * between these two statements.
 	 */
-	slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
-			  entry);
+	slb_shadow_update(ea, flags, entry);
 
 	asm volatile("slbmte  %0,%1" :
 		     : "r" (mk_vsid_data(ea, flags)),
@@ -104,8 +104,7 @@ void slb_flush_and_rebolt(void)
 		ksp_esid_data &= ~SLB_ESID_V;
 
 	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(ksp_esid_data,
-			  mk_vsid_data(ksp_esid_data, lflags), 2);
+	slb_shadow_update(get_paca()->kstack, lflags, 2);
 
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
@@ -123,6 +122,15 @@ void slb_flush_and_rebolt(void)
 		     : "memory");
 }
 
+void slb_vmalloc_update(void)
+{
+	unsigned long vflags;
+
+	vflags = SLB_VSID_KERNEL | mmu_psize_defs[mmu_vmalloc_psize].sllp;
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+	slb_flush_and_rebolt();
+}
+
 /* Flush all user entries from the segment table of the current processor. */
 void switch_slb(struct task_struct *tsk, struct mm_struct *mm)
 {