aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/entry_64.S13
-rw-r--r--arch/powerpc/kernel/paca.c15
-rw-r--r--arch/powerpc/mm/slb.c37
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c24
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h10
-rw-r--r--arch/powerpc/platforms/pseries/setup.c12
7 files changed, 102 insertions, 11 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ac0631958b20..2ef7ea860379 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -135,11 +135,13 @@ int main(void)
135 DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); 135 DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
136 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); 136 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
137 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 137 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
138 DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
138 139
139 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); 140 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
140 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); 141 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
141 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); 142 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
142 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); 143 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
144 DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
143#endif /* CONFIG_PPC64 */ 145#endif /* CONFIG_PPC64 */
144 146
145 /* RTAS */ 147 /* RTAS */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 54d9f5cdaab4..5baea498ea64 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -323,6 +323,11 @@ _GLOBAL(ret_from_fork)
323 * The code which creates the new task context is in 'copy_thread' 323 * The code which creates the new task context is in 'copy_thread'
324 * in arch/powerpc/kernel/process.c 324 * in arch/powerpc/kernel/process.c
325 */ 325 */
326#define SHADOW_SLB_BOLTED_STACK_ESID \
327 (SLBSHADOW_SAVEAREA + 0x10*(SLB_NUM_BOLTED-1))
328#define SHADOW_SLB_BOLTED_STACK_VSID \
329 (SLBSHADOW_SAVEAREA + 0x10*(SLB_NUM_BOLTED-1) + 8)
330
326 .align 7 331 .align 7
327_GLOBAL(_switch) 332_GLOBAL(_switch)
328 mflr r0 333 mflr r0
@@ -375,6 +380,14 @@ BEGIN_FTR_SECTION
375 ld r7,KSP_VSID(r4) /* Get new stack's VSID */ 380 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
376 oris r0,r6,(SLB_ESID_V)@h 381 oris r0,r6,(SLB_ESID_V)@h
377 ori r0,r0,(SLB_NUM_BOLTED-1)@l 382 ori r0,r0,(SLB_NUM_BOLTED-1)@l
383
384 /* Update the last bolted SLB */
385 ld r9,PACA_SLBSHADOWPTR(r13)
386 li r12,0
387 std r12,SHADOW_SLB_BOLTED_STACK_ESID(r9) /* Clear ESID */
388 std r7,SHADOW_SLB_BOLTED_STACK_VSID(r9) /* Save VSID */
389 std r0,SHADOW_SLB_BOLTED_STACK_ESID(r9) /* Save ESID */
390
378 slbie r6 391 slbie r6
379 slbie r6 /* Workaround POWER5 < DD2.1 issue */ 392 slbie r6 /* Workaround POWER5 < DD2.1 issue */
380 slbmte r7,r0 393 slbmte r7,r0
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index c68741fed14b..55f1a25085cd 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -17,6 +17,7 @@
17#include <asm/lppaca.h> 17#include <asm/lppaca.h>
18#include <asm/iseries/it_lp_reg_save.h> 18#include <asm/iseries/it_lp_reg_save.h>
19#include <asm/paca.h> 19#include <asm/paca.h>
20#include <asm/mmu.h>
20 21
21 22
22/* This symbol is provided by the linker - let it fill in the paca 23/* This symbol is provided by the linker - let it fill in the paca
@@ -45,6 +46,17 @@ struct lppaca lppaca[] = {
45 }, 46 },
46}; 47};
47 48
49/*
50 * 3 persistent SLBs are registered here. The buffer will be zero
51 * initially, hence will all be invalid until we actually write them.
52 */
53struct slb_shadow slb_shadow[] __cacheline_aligned = {
54 [0 ... (NR_CPUS-1)] = {
55 .persistent = SLB_NUM_BOLTED,
56 .buffer_length = sizeof(struct slb_shadow),
57 },
58};
59
48/* The Paca is an array with one entry per processor. Each contains an 60/* The Paca is an array with one entry per processor. Each contains an
49 * lppaca, which contains the information shared between the 61 * lppaca, which contains the information shared between the
50 * hypervisor and Linux. 62 * hypervisor and Linux.
@@ -59,7 +71,8 @@ struct lppaca lppaca[] = {
59 .lock_token = 0x8000, \ 71 .lock_token = 0x8000, \
60 .paca_index = (number), /* Paca Index */ \ 72 .paca_index = (number), /* Paca Index */ \
61 .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ 73 .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \
62 .hw_cpu_id = 0xffff, 74 .hw_cpu_id = 0xffff, \
75 .slb_shadow_ptr = &slb_shadow[number],
63 76
64#ifdef CONFIG_PPC_ISERIES 77#ifdef CONFIG_PPC_ISERIES
65#define PACA_INIT_ISERIES(number) \ 78#define PACA_INIT_ISERIES(number) \
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index de0c8842415c..d3733912adb4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -22,6 +22,8 @@
22#include <asm/paca.h> 22#include <asm/paca.h>
23#include <asm/cputable.h> 23#include <asm/cputable.h>
24#include <asm/cacheflush.h> 24#include <asm/cacheflush.h>
25#include <asm/smp.h>
26#include <linux/compiler.h>
25 27
26#ifdef DEBUG 28#ifdef DEBUG
27#define DBG(fmt...) udbg_printf(fmt) 29#define DBG(fmt...) udbg_printf(fmt)
@@ -50,9 +52,32 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
50 return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags; 52 return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
51} 53}
52 54
53static inline void create_slbe(unsigned long ea, unsigned long flags, 55static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
54 unsigned long entry) 56 unsigned long entry)
55{ 57{
58 /*
59 * Clear the ESID first so the entry is not valid while we are
60 * updating it.
61 */
62 get_slb_shadow()->save_area[entry].esid = 0;
63 barrier();
64 get_slb_shadow()->save_area[entry].vsid = vsid;
65 barrier();
66 get_slb_shadow()->save_area[entry].esid = esid;
67
68}
69
70static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
71 unsigned long entry)
72{
73 /*
74 * Updating the shadow buffer before writing the SLB ensures
75 * we don't get a stale entry here if we get preempted by PHYP
76 * between these two statements.
77 */
78 slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
79 entry);
80
56 asm volatile("slbmte %0,%1" : 81 asm volatile("slbmte %0,%1" :
57 : "r" (mk_vsid_data(ea, flags)), 82 : "r" (mk_vsid_data(ea, flags)),
58 "r" (mk_esid_data(ea, entry)) 83 "r" (mk_esid_data(ea, entry))
@@ -77,6 +102,10 @@ void slb_flush_and_rebolt(void)
77 if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) 102 if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
78 ksp_esid_data &= ~SLB_ESID_V; 103 ksp_esid_data &= ~SLB_ESID_V;
79 104
105 /* Only third entry (stack) may change here so only resave that */
106 slb_shadow_update(ksp_esid_data,
107 mk_vsid_data(ksp_esid_data, lflags), 2);
108
80 /* We need to do this all in asm, so we're sure we don't touch 109 /* We need to do this all in asm, so we're sure we don't touch
81 * the stack between the slbia and rebolting it. */ 110 * the stack between the slbia and rebolting it. */
82 asm volatile("isync\n" 111 asm volatile("isync\n"
@@ -209,9 +238,9 @@ void slb_initialize(void)
209 asm volatile("isync":::"memory"); 238 asm volatile("isync":::"memory");
210 asm volatile("slbmte %0,%0"::"r" (0) : "memory"); 239 asm volatile("slbmte %0,%0"::"r" (0) : "memory");
211 asm volatile("isync; slbia; isync":::"memory"); 240 asm volatile("isync; slbia; isync":::"memory");
212 create_slbe(PAGE_OFFSET, lflags, 0); 241 create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
213 242
214 create_slbe(VMALLOC_START, vflags, 1); 243 create_shadowed_slbe(VMALLOC_START, vflags, 1);
215 244
216 /* We don't bolt the stack for the time being - we're in boot, 245 /* We don't bolt the stack for the time being - we're in boot,
217 * so the stack is in the bolted segment. By the time it goes 246 * so the stack is in the bolted segment. By the time it goes
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 6cbf14266d5e..1820a0b0a8c6 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -252,18 +252,34 @@ out:
252void vpa_init(int cpu) 252void vpa_init(int cpu)
253{ 253{
254 int hwcpu = get_hard_smp_processor_id(cpu); 254 int hwcpu = get_hard_smp_processor_id(cpu);
255 unsigned long vpa = __pa(&lppaca[cpu]); 255 unsigned long addr;
256 long ret; 256 long ret;
257 257
258 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 258 if (cpu_has_feature(CPU_FTR_ALTIVEC))
259 lppaca[cpu].vmxregs_in_use = 1; 259 lppaca[cpu].vmxregs_in_use = 1;
260 260
261 ret = register_vpa(hwcpu, vpa); 261 addr = __pa(&lppaca[cpu]);
262 ret = register_vpa(hwcpu, addr);
262 263
263 if (ret) 264 if (ret) {
264 printk(KERN_ERR "WARNING: vpa_init: VPA registration for " 265 printk(KERN_ERR "WARNING: vpa_init: VPA registration for "
265 "cpu %d (hw %d) of area %lx returns %ld\n", 266 "cpu %d (hw %d) of area %lx returns %ld\n",
266 cpu, hwcpu, vpa, ret); 267 cpu, hwcpu, addr, ret);
268 return;
269 }
270 /*
271 * PAPR says this feature is SLB-Buffer but firmware never
272 * reports that. All SPLPAR support SLB shadow buffer.
273 */
274 addr = __pa(&slb_shadow[cpu]);
275 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
276 ret = register_slb_shadow(hwcpu, addr);
277 if (ret)
278 printk(KERN_ERR
279 "WARNING: vpa_init: SLB shadow buffer "
280 "registration for cpu %d (hw %d) of area %lx "
281 "returns %ld\n", cpu, hwcpu, addr, ret);
282 }
267} 283}
268 284
269long pSeries_lpar_hpte_insert(unsigned long hpte_group, 285long pSeries_lpar_hpte_insert(unsigned long hpte_group,
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index ebd15de7597e..3eb7b294d92f 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -37,6 +37,16 @@ static inline long register_vpa(unsigned long cpu, unsigned long vpa)
37 return vpa_call(0x1, cpu, vpa); 37 return vpa_call(0x1, cpu, vpa);
38} 38}
39 39
40static inline long unregister_slb_shadow(unsigned long cpu, unsigned long vpa)
41{
42 return vpa_call(0x7, cpu, vpa);
43}
44
45static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
46{
47 return vpa_call(0x3, cpu, vpa);
48}
49
40extern void vpa_init(int cpu); 50extern void vpa_init(int cpu);
41 51
42static inline long plpar_pte_enter(unsigned long flags, 52static inline long plpar_pte_enter(unsigned long flags,
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index de214d86ff44..6ebeecfd6bcb 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -234,9 +234,17 @@ static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary)
234{ 234{
235 /* Don't risk a hypervisor call if we're crashing */ 235 /* Don't risk a hypervisor call if we're crashing */
236 if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { 236 if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
237 unsigned long vpa = __pa(get_lppaca()); 237 unsigned long addr;
238 238
239 if (unregister_vpa(hard_smp_processor_id(), vpa)) { 239 addr = __pa(get_slb_shadow());
240 if (unregister_slb_shadow(hard_smp_processor_id(), addr))
241 printk("SLB shadow buffer deregistration of "
242 "cpu %u (hw_cpu_id %d) failed\n",
243 smp_processor_id(),
244 hard_smp_processor_id());
245
246 addr = __pa(get_lppaca());
247 if (unregister_vpa(hard_smp_processor_id(), addr)) {
240 printk("VPA deregistration of cpu %u (hw_cpu_id %d) " 248 printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
241 "failed\n", smp_processor_id(), 249 "failed\n", smp_processor_id(),
242 hard_smp_processor_id()); 250 hard_smp_processor_id());