aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPaul Mackerras <paulus@samba.org>2008-01-15 01:29:33 -0500
committerPaul Mackerras <paulus@samba.org>2008-01-15 01:30:58 -0500
commitdfbe0d3b6be52596b5694b1bb75b19562e769021 (patch)
tree75e3ef05b6dd8cc7065fe77a05a9cd1b3495619a
parentd262c32a4bcc3e5fda0325a64e53c25fe1e999d7 (diff)
[POWERPC] Fix boot failure on POWER6
Commit 473980a99316c0e788bca50996375a2815124ce1 added a call to clear the SLB shadow buffer before registering it. Unfortunately this means that we clear out the entries that slb_initialize has previously set in there. On POWER6, the hypervisor uses the SLB shadow buffer when doing partition switches, and that means that after the next partition switch, each non-boot CPU has no SLB entries to map the kernel text and data, which causes it to crash. This fixes it by reverting most of 473980a9 and instead clearing the 3rd entry explicitly in slb_initialize. This fixes the problem that 473980a9 was trying to solve, but without breaking POWER6. Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--arch/powerpc/mm/slb.c10
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c1
-rw-r--r--include/asm-powerpc/mmu-hash64.h1
3 files changed, 2 insertions, 10 deletions
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index a282bc212e80..50d7372bc2ce 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -82,14 +82,6 @@ static inline void slb_shadow_clear(unsigned long entry)
 	get_slb_shadow()->save_area[entry].esid = 0;
 }
 
-void slb_shadow_clear_all(void)
-{
-	int i;
-
-	for (i = 0; i < SLB_NUM_BOLTED; i++)
-		slb_shadow_clear(i);
-}
-
 static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 					unsigned long flags,
 					unsigned long entry)
@@ -300,6 +292,8 @@ void slb_initialize(void)
 
 	create_shadowed_slbe(VMALLOC_START, mmu_kernel_ssize, vflags, 1);
 
+	slb_shadow_clear(2);
+
 	/* We don't bolt the stack for the time being - we're in boot,
 	 * so the stack is in the bolted segment.  By the time it goes
 	 * elsewhere, we'll call _switch() which will bolt in the new
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 34317aa148a8..9a455d46379d 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -272,7 +272,6 @@ void vpa_init(int cpu)
 	 */
 	addr = __pa(&slb_shadow[cpu]);
 	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
-		slb_shadow_clear_all();
 		ret = register_slb_shadow(hwcpu, addr);
 		if (ret)
 			printk(KERN_ERR
diff --git a/include/asm-powerpc/mmu-hash64.h b/include/asm-powerpc/mmu-hash64.h
index 951e2487aa69..82328dec2b52 100644
--- a/include/asm-powerpc/mmu-hash64.h
+++ b/include/asm-powerpc/mmu-hash64.h
@@ -286,7 +286,6 @@ extern void hpte_init_iSeries(void);
 extern void hpte_init_beat(void);
 extern void hpte_init_beat_v3(void);
 
-extern void slb_shadow_clear_all(void);
 extern void stabs_alloc(void);
 extern void slb_initialize(void);
 extern void slb_flush_and_rebolt(void);