aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMichael Neuling <mikey@neuling.org>2006-08-07 02:19:19 -0400
committerPaul Mackerras <paulus@samba.org>2006-08-08 03:08:56 -0400
commit2f6093c84730b4bad65bcd0f2f904a5769b1dfc5 (patch)
treeab4e64a0520e944062f418e91706ff968e23a6ea
parent452b5e21216011f2f068e80443568f5f3f3f4d63 (diff)
[POWERPC] Implement SLB shadow buffer
This adds a shadow buffer for the SLBs and registers it with PHYP. Only the bolted SLB entries (top 3) are shadowed. The SLB shadow buffer tells the hypervisor what the kernel needs to have in the SLB for the kernel to be able to function. The hypervisor can use this information to speed up partition context switches. Signed-off-by: Michael Neuling <mikey@neuling.org> Signed-off-by: Paul Mackerras <paulus@samba.org>
-rw-r--r--arch/powerpc/kernel/asm-offsets.c2
-rw-r--r--arch/powerpc/kernel/entry_64.S13
-rw-r--r--arch/powerpc/kernel/paca.c15
-rw-r--r--arch/powerpc/mm/slb.c37
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c24
-rw-r--r--arch/powerpc/platforms/pseries/plpar_wrappers.h10
-rw-r--r--arch/powerpc/platforms/pseries/setup.c12
-rw-r--r--include/asm-powerpc/lppaca.h19
-rw-r--r--include/asm-powerpc/paca.h3
9 files changed, 124 insertions, 11 deletions
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index ac0631958b20..2ef7ea860379 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -135,11 +135,13 @@ int main(void)
135 DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); 135 DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr));
136 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); 136 DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time));
137 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); 137 DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time));
138 DEFINE(PACA_SLBSHADOWPTR, offsetof(struct paca_struct, slb_shadow_ptr));
138 139
139 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); 140 DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
140 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); 141 DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
141 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); 142 DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
142 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); 143 DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));
144 DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area));
143#endif /* CONFIG_PPC64 */ 145#endif /* CONFIG_PPC64 */
144 146
145 /* RTAS */ 147 /* RTAS */
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 54d9f5cdaab4..5baea498ea64 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -323,6 +323,11 @@ _GLOBAL(ret_from_fork)
323 * The code which creates the new task context is in 'copy_thread' 323 * The code which creates the new task context is in 'copy_thread'
324 * in arch/powerpc/kernel/process.c 324 * in arch/powerpc/kernel/process.c
325 */ 325 */
326#define SHADOW_SLB_BOLTED_STACK_ESID \
327 (SLBSHADOW_SAVEAREA + 0x10*(SLB_NUM_BOLTED-1))
328#define SHADOW_SLB_BOLTED_STACK_VSID \
329 (SLBSHADOW_SAVEAREA + 0x10*(SLB_NUM_BOLTED-1) + 8)
330
326 .align 7 331 .align 7
327_GLOBAL(_switch) 332_GLOBAL(_switch)
328 mflr r0 333 mflr r0
@@ -375,6 +380,14 @@ BEGIN_FTR_SECTION
375 ld r7,KSP_VSID(r4) /* Get new stack's VSID */ 380 ld r7,KSP_VSID(r4) /* Get new stack's VSID */
376 oris r0,r6,(SLB_ESID_V)@h 381 oris r0,r6,(SLB_ESID_V)@h
377 ori r0,r0,(SLB_NUM_BOLTED-1)@l 382 ori r0,r0,(SLB_NUM_BOLTED-1)@l
383
384 /* Update the last bolted SLB */
385 ld r9,PACA_SLBSHADOWPTR(r13)
386 li r12,0
387 std r12,SHADOW_SLB_BOLTED_STACK_ESID(r9) /* Clear ESID */
388 std r7,SHADOW_SLB_BOLTED_STACK_VSID(r9) /* Save VSID */
389 std r0,SHADOW_SLB_BOLTED_STACK_ESID(r9) /* Save ESID */
390
378 slbie r6 391 slbie r6
379 slbie r6 /* Workaround POWER5 < DD2.1 issue */ 392 slbie r6 /* Workaround POWER5 < DD2.1 issue */
380 slbmte r7,r0 393 slbmte r7,r0
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index c68741fed14b..55f1a25085cd 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -17,6 +17,7 @@
17#include <asm/lppaca.h> 17#include <asm/lppaca.h>
18#include <asm/iseries/it_lp_reg_save.h> 18#include <asm/iseries/it_lp_reg_save.h>
19#include <asm/paca.h> 19#include <asm/paca.h>
20#include <asm/mmu.h>
20 21
21 22
22/* This symbol is provided by the linker - let it fill in the paca 23/* This symbol is provided by the linker - let it fill in the paca
@@ -45,6 +46,17 @@ struct lppaca lppaca[] = {
45 }, 46 },
46}; 47};
47 48
49/*
50 * 3 persistent SLBs are registered here. The buffer will be zero
51 * initially, hence will all be invalid until we actually write them.
52 */
53struct slb_shadow slb_shadow[] __cacheline_aligned = {
54 [0 ... (NR_CPUS-1)] = {
55 .persistent = SLB_NUM_BOLTED,
56 .buffer_length = sizeof(struct slb_shadow),
57 },
58};
59
48/* The Paca is an array with one entry per processor. Each contains an 60/* The Paca is an array with one entry per processor. Each contains an
49 * lppaca, which contains the information shared between the 61 * lppaca, which contains the information shared between the
50 * hypervisor and Linux. 62 * hypervisor and Linux.
@@ -59,7 +71,8 @@ struct lppaca lppaca[] = {
59 .lock_token = 0x8000, \ 71 .lock_token = 0x8000, \
60 .paca_index = (number), /* Paca Index */ \ 72 .paca_index = (number), /* Paca Index */ \
61 .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ 73 .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \
62 .hw_cpu_id = 0xffff, 74 .hw_cpu_id = 0xffff, \
75 .slb_shadow_ptr = &slb_shadow[number],
63 76
64#ifdef CONFIG_PPC_ISERIES 77#ifdef CONFIG_PPC_ISERIES
65#define PACA_INIT_ISERIES(number) \ 78#define PACA_INIT_ISERIES(number) \
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index de0c8842415c..d3733912adb4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -22,6 +22,8 @@
22#include <asm/paca.h> 22#include <asm/paca.h>
23#include <asm/cputable.h> 23#include <asm/cputable.h>
24#include <asm/cacheflush.h> 24#include <asm/cacheflush.h>
25#include <asm/smp.h>
26#include <linux/compiler.h>
25 27
26#ifdef DEBUG 28#ifdef DEBUG
27#define DBG(fmt...) udbg_printf(fmt) 29#define DBG(fmt...) udbg_printf(fmt)
@@ -50,9 +52,32 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
50 return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags; 52 return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
51} 53}
52 54
53static inline void create_slbe(unsigned long ea, unsigned long flags, 55static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
54 unsigned long entry) 56 unsigned long entry)
55{ 57{
58 /*
59 * Clear the ESID first so the entry is not valid while we are
60 * updating it.
61 */
62 get_slb_shadow()->save_area[entry].esid = 0;
63 barrier();
64 get_slb_shadow()->save_area[entry].vsid = vsid;
65 barrier();
66 get_slb_shadow()->save_area[entry].esid = esid;
67
68}
69
70static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
71 unsigned long entry)
72{
73 /*
74 * Updating the shadow buffer before writing the SLB ensures
75 * we don't get a stale entry here if we get preempted by PHYP
76 * between these two statements.
77 */
78 slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
79 entry);
80
56 asm volatile("slbmte %0,%1" : 81 asm volatile("slbmte %0,%1" :
57 : "r" (mk_vsid_data(ea, flags)), 82 : "r" (mk_vsid_data(ea, flags)),
58 "r" (mk_esid_data(ea, entry)) 83 "r" (mk_esid_data(ea, entry))
@@ -77,6 +102,10 @@ void slb_flush_and_rebolt(void)
77 if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) 102 if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
78 ksp_esid_data &= ~SLB_ESID_V; 103 ksp_esid_data &= ~SLB_ESID_V;
79 104
105 /* Only third entry (stack) may change here so only resave that */
106 slb_shadow_update(ksp_esid_data,
107 mk_vsid_data(ksp_esid_data, lflags), 2);
108
80 /* We need to do this all in asm, so we're sure we don't touch 109 /* We need to do this all in asm, so we're sure we don't touch
81 * the stack between the slbia and rebolting it. */ 110 * the stack between the slbia and rebolting it. */
82 asm volatile("isync\n" 111 asm volatile("isync\n"
@@ -209,9 +238,9 @@ void slb_initialize(void)
209 asm volatile("isync":::"memory"); 238 asm volatile("isync":::"memory");
210 asm volatile("slbmte %0,%0"::"r" (0) : "memory"); 239 asm volatile("slbmte %0,%0"::"r" (0) : "memory");
211 asm volatile("isync; slbia; isync":::"memory"); 240 asm volatile("isync; slbia; isync":::"memory");
212 create_slbe(PAGE_OFFSET, lflags, 0); 241 create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
213 242
214 create_slbe(VMALLOC_START, vflags, 1); 243 create_shadowed_slbe(VMALLOC_START, vflags, 1);
215 244
216 /* We don't bolt the stack for the time being - we're in boot, 245 /* We don't bolt the stack for the time being - we're in boot,
217 * so the stack is in the bolted segment. By the time it goes 246 * so the stack is in the bolted segment. By the time it goes
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 6cbf14266d5e..1820a0b0a8c6 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -252,18 +252,34 @@ out:
252void vpa_init(int cpu) 252void vpa_init(int cpu)
253{ 253{
254 int hwcpu = get_hard_smp_processor_id(cpu); 254 int hwcpu = get_hard_smp_processor_id(cpu);
255 unsigned long vpa = __pa(&lppaca[cpu]); 255 unsigned long addr;
256 long ret; 256 long ret;
257 257
258 if (cpu_has_feature(CPU_FTR_ALTIVEC)) 258 if (cpu_has_feature(CPU_FTR_ALTIVEC))
259 lppaca[cpu].vmxregs_in_use = 1; 259 lppaca[cpu].vmxregs_in_use = 1;
260 260
261 ret = register_vpa(hwcpu, vpa); 261 addr = __pa(&lppaca[cpu]);
262 ret = register_vpa(hwcpu, addr);
262 263
263 if (ret) 264 if (ret) {
264 printk(KERN_ERR "WARNING: vpa_init: VPA registration for " 265 printk(KERN_ERR "WARNING: vpa_init: VPA registration for "
265 "cpu %d (hw %d) of area %lx returns %ld\n", 266 "cpu %d (hw %d) of area %lx returns %ld\n",
266 cpu, hwcpu, vpa, ret); 267 cpu, hwcpu, addr, ret);
268 return;
269 }
270 /*
271 * PAPR says this feature is SLB-Buffer but firmware never
272 * reports that. All SPLPAR support SLB shadow buffer.
273 */
274 addr = __pa(&slb_shadow[cpu]);
275 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
276 ret = register_slb_shadow(hwcpu, addr);
277 if (ret)
278 printk(KERN_ERR
279 "WARNING: vpa_init: SLB shadow buffer "
280 "registration for cpu %d (hw %d) of area %lx "
281 "returns %ld\n", cpu, hwcpu, addr, ret);
282 }
267} 283}
268 284
269long pSeries_lpar_hpte_insert(unsigned long hpte_group, 285long pSeries_lpar_hpte_insert(unsigned long hpte_group,
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h
index ebd15de7597e..3eb7b294d92f 100644
--- a/arch/powerpc/platforms/pseries/plpar_wrappers.h
+++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h
@@ -37,6 +37,16 @@ static inline long register_vpa(unsigned long cpu, unsigned long vpa)
37 return vpa_call(0x1, cpu, vpa); 37 return vpa_call(0x1, cpu, vpa);
38} 38}
39 39
40static inline long unregister_slb_shadow(unsigned long cpu, unsigned long vpa)
41{
42 return vpa_call(0x7, cpu, vpa);
43}
44
45static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
46{
47 return vpa_call(0x3, cpu, vpa);
48}
49
40extern void vpa_init(int cpu); 50extern void vpa_init(int cpu);
41 51
42static inline long plpar_pte_enter(unsigned long flags, 52static inline long plpar_pte_enter(unsigned long flags,
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index de214d86ff44..6ebeecfd6bcb 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -234,9 +234,17 @@ static void pseries_kexec_cpu_down_xics(int crash_shutdown, int secondary)
234{ 234{
235 /* Don't risk a hypervisor call if we're crashing */ 235 /* Don't risk a hypervisor call if we're crashing */
236 if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) { 236 if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
237 unsigned long vpa = __pa(get_lppaca()); 237 unsigned long addr;
238 238
239 if (unregister_vpa(hard_smp_processor_id(), vpa)) { 239 addr = __pa(get_slb_shadow());
240 if (unregister_slb_shadow(hard_smp_processor_id(), addr))
241 printk("SLB shadow buffer deregistration of "
242 "cpu %u (hw_cpu_id %d) failed\n",
243 smp_processor_id(),
244 hard_smp_processor_id());
245
246 addr = __pa(get_lppaca());
247 if (unregister_vpa(hard_smp_processor_id(), addr)) {
240 printk("VPA deregistration of cpu %u (hw_cpu_id %d) " 248 printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
241 "failed\n", smp_processor_id(), 249 "failed\n", smp_processor_id(),
242 hard_smp_processor_id()); 250 hard_smp_processor_id());
diff --git a/include/asm-powerpc/lppaca.h b/include/asm-powerpc/lppaca.h
index 4dc514aabfe7..942bb450baff 100644
--- a/include/asm-powerpc/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -27,7 +27,9 @@
27// 27//
28// 28//
29//---------------------------------------------------------------------------- 29//----------------------------------------------------------------------------
30#include <linux/cache.h>
30#include <asm/types.h> 31#include <asm/types.h>
32#include <asm/mmu.h>
31 33
32/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k 34/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
33 * alignment is sufficient to prevent this */ 35 * alignment is sufficient to prevent this */
@@ -133,5 +135,22 @@ struct lppaca {
133 135
134extern struct lppaca lppaca[]; 136extern struct lppaca lppaca[];
135 137
138/*
139 * SLB shadow buffer structure as defined in the PAPR. The save_area
140 * contains adjacent ESID and VSID pairs for each shadowed SLB. The
141 * ESID is stored in the lower 64bits, then the VSID.
142 */
143struct slb_shadow {
144 u32 persistent; // Number of persistent SLBs x00-x03
145 u32 buffer_length; // Total shadow buffer length x04-x07
146 u64 reserved; // Alignment x08-x0f
147 struct {
148 u64 esid;
149 u64 vsid;
150 } save_area[SLB_NUM_BOLTED]; // x10-x40
151} ____cacheline_aligned;
152
153extern struct slb_shadow slb_shadow[];
154
136#endif /* __KERNEL__ */ 155#endif /* __KERNEL__ */
137#endif /* _ASM_POWERPC_LPPACA_H */ 156#endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/include/asm-powerpc/paca.h b/include/asm-powerpc/paca.h
index 2d4585f06209..7ffa2512524e 100644
--- a/include/asm-powerpc/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -23,6 +23,7 @@
23register struct paca_struct *local_paca asm("r13"); 23register struct paca_struct *local_paca asm("r13");
24#define get_paca() local_paca 24#define get_paca() local_paca
25#define get_lppaca() (get_paca()->lppaca_ptr) 25#define get_lppaca() (get_paca()->lppaca_ptr)
26#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
26 27
27struct task_struct; 28struct task_struct;
28 29
@@ -98,6 +99,8 @@ struct paca_struct {
98 u64 user_time; /* accumulated usermode TB ticks */ 99 u64 user_time; /* accumulated usermode TB ticks */
99 u64 system_time; /* accumulated system TB ticks */ 100 u64 system_time; /* accumulated system TB ticks */
100 u64 startpurr; /* PURR/TB value snapshot */ 101 u64 startpurr; /* PURR/TB value snapshot */
102
103 struct slb_shadow *slb_shadow_ptr;
101}; 104};
102 105
103extern struct paca_struct paca[]; 106extern struct paca_struct paca[];