commit edd0622bd2e8f755c960827e15aa6908c3c5aa94 (patch)
tree   a53acb20f1d326ff736d45ecda66a848604a65b2
parent ac07860264bd2b18834d3fa3be47032115524cea (diff)
author    Paul Mackerras <paulus@samba.org>  2007-08-10 07:04:07 -0400
committer Paul Mackerras <paulus@samba.org>  2007-08-10 07:04:07 -0400
[POWERPC] Fix potential duplicate entry in SLB shadow buffer
We were getting a duplicate entry in the SLB shadow buffer in
slb_flush_and_rebolt() if the kernel stack was in the same segment
as PAGE_OFFSET, which on POWER6 causes the hypervisor to terminate
the partition with an error.  This fixes it.

Also we were not creating an SLB entry (or an SLB shadow buffer
entry) for the kernel stack on secondary CPUs when starting the
CPU.  This isn't a major problem, since an appropriate entry will
be created on demand, but this fixes that also for consistency.

Signed-off-by: Paul Mackerras <paulus@samba.org>
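To make the overlap condition concrete, here is a minimal standalone sketch
(not part of the patch; the constants are the usual 64-bit values of the era,
and the stack address is hypothetical) of how masking an address with
ESID_MASK reveals whether the kernel stack shares PAGE_OFFSET's 256MB segment:

#include <stdint.h>
#include <stdio.h>

/* 256MB segments: the top 36 bits of an address select the segment. */
#define ESID_MASK   0xfffffffff0000000UL
#define PAGE_OFFSET 0xc000000000000000UL  /* base of the kernel linear mapping */

int main(void)
{
	/* Hypothetical early kernel stack, within 256MB of PAGE_OFFSET. */
	uint64_t kstack = PAGE_OFFSET + 0x8000;

	/* Slot 0 is already bolted for PAGE_OFFSET; bolting the stack
	 * into slot 2 as well would give the hypervisor two valid SLB
	 * entries for the same ESID. */
	if ((kstack & ESID_MASK) == PAGE_OFFSET)
		printf("stack shares the PAGE_OFFSET segment; slot 2 must be skipped\n");
	return 0;
}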
-rw-r--r--  arch/powerpc/mm/slb.c  45
1 file changed, 16 insertions(+), 29 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index b0697017d0e8..a73d2d700973 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -69,20 +69,9 @@ static inline void slb_shadow_update(unsigned long ea,
 	smp_wmb();
 }
 
-static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
-					unsigned long entry)
+static inline void slb_shadow_clear(unsigned long entry)
 {
-	/*
-	 * Updating the shadow buffer before writing the SLB ensures
-	 * we don't get a stale entry here if we get preempted by PHYP
-	 * between these two statements.
-	 */
-	slb_shadow_update(ea, flags, entry);
-
-	asm volatile("slbmte  %0,%1" :
-		     : "r" (mk_vsid_data(ea, flags)),
-		       "r" (mk_esid_data(ea, entry))
-		     : "memory" );
+	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
 void slb_flush_and_rebolt(void)
@@ -100,11 +89,13 @@ void slb_flush_and_rebolt(void)
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	ksp_esid_data = mk_esid_data(get_paca()->kstack, 2);
-	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
+	if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) {
 		ksp_esid_data &= ~SLB_ESID_V;
-
-	/* Only third entry (stack) may change here so only resave that */
-	slb_shadow_update(get_paca()->kstack, lflags, 2);
+		slb_shadow_clear(2);
+	} else {
+		/* Update stack entry; others don't change */
+		slb_shadow_update(get_paca()->kstack, lflags, 2);
+	}
 
 	/* We need to do this all in asm, so we're sure we don't touch
 	 * the stack between the slbia and rebolting it. */
@@ -235,16 +226,12 @@ void slb_initialize(void)
 	vflags = SLB_VSID_KERNEL | vmalloc_llp;
 
 	/* Invalidate the entire SLB (even slot 0) & all the ERATS */
-	asm volatile("isync":::"memory");
-	asm volatile("slbmte  %0,%0"::"r" (0) : "memory");
-	asm volatile("isync; slbia; isync":::"memory");
-	create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
-
-	create_shadowed_slbe(VMALLOC_START, vflags, 1);
-
-	/* We don't bolt the stack for the time being - we're in boot,
-	 * so the stack is in the bolted segment.  By the time it goes
-	 * elsewhere, we'll call _switch() which will bolt in the new
-	 * one. */
-	asm volatile("isync":::"memory");
+	slb_shadow_update(PAGE_OFFSET, lflags, 0);
+	asm volatile("isync; slbia; sync; slbmte  %0,%1; isync" ::
+		     "r" (get_slb_shadow()->save_area[0].vsid),
+		     "r" (get_slb_shadow()->save_area[0].esid) : "memory");
+
+	slb_shadow_update(VMALLOC_START, vflags, 1);
+
+	slb_flush_and_rebolt();
 }
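Why clearing just the esid doubleword suffices in the new slb_shadow_clear():
the SLB valid bit (SLB_ESID_V) lives in the esid word, so zeroing it leaves
the slot invalid from the hypervisor's point of view regardless of the stale
vsid left behind. A standalone sketch with a stand-in for the shadow save
area (types and values hypothetical, not the kernel's):

#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V 0x0000000008000000UL	/* SLB valid bit, in the esid word */

/* Stand-in for the slots of the paca's SLB shadow save area. */
struct shadow_entry { uint64_t esid; uint64_t vsid; };
static struct shadow_entry save_area[3];

static void shadow_clear(unsigned long entry)
{
	/* Zeroing the esid also clears SLB_ESID_V, so the hypervisor
	 * treats the slot as empty and ignores the stale vsid. */
	save_area[entry].esid = 0;
}

int main(void)
{
	save_area[2].esid = 0xc000000000000000UL | SLB_ESID_V | 2;
	save_area[2].vsid = 0x123;
	shadow_clear(2);
	printf("slot 2 valid: %s\n",
	       (save_area[2].esid & SLB_ESID_V) ? "yes" : "no");
	return 0;
}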