author     Michael Neuling <mikey@neuling.org>    2006-08-07 02:19:19 -0400
committer  Paul Mackerras <paulus@samba.org>      2006-08-08 03:08:56 -0400
commit     2f6093c84730b4bad65bcd0f2f904a5769b1dfc5 (patch)
tree       ab4e64a0520e944062f418e91706ff968e23a6ea /arch/powerpc/mm/slb.c
parent     452b5e21216011f2f068e80443568f5f3f3f4d63 (diff)
[POWERPC] Implement SLB shadow buffer
This adds a shadow buffer for the SLBs and registers it with PHYP. Only the bolted SLB entries (top 3) are shadowed.

The SLB shadow buffer tells the hypervisor what the kernel needs to have in the SLB for the kernel to be able to function. The hypervisor can use this information to speed up partition context switches.

Signed-off-by: Michael Neuling <mikey@neuling.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
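For readers unfamiliar with the buffer being shadowed: the code below writes esid/vsid pairs into a per-cpu save_area that the hypervisor reads. A minimal sketch of that layout follows, assuming the slb_shadow structure introduced elsewhere in this patch series; the field names, types, and the three-slot save_area are stated here as assumptions, not taken from this diff.

/* Sketch only: approximate per-cpu SLB shadow buffer layout that is
 * registered with the hypervisor.  The authoritative definition lives
 * in the lppaca header, not in this file; details here are assumptions.
 */
struct slb_shadow {
        u32     persistent;             /* number of persistent (bolted) entries */
        u32     buffer_length;          /* total length of this buffer */
        u64     reserved;
        struct {
                u64     esid;           /* effective segment id, incl. valid bit */
                u64     vsid;           /* virtual segment id and flags */
        } save_area[3];                 /* slots 0-2: the bolted entries */
};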
Diffstat (limited to 'arch/powerpc/mm/slb.c')
-rw-r--r--    arch/powerpc/mm/slb.c    37
1 file changed, 33 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index de0c8842415c..d3733912adb4 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -22,6 +22,8 @@
 #include <asm/paca.h>
 #include <asm/cputable.h>
 #include <asm/cacheflush.h>
+#include <asm/smp.h>
+#include <linux/compiler.h>
 
 #ifdef DEBUG
 #define DBG(fmt...) udbg_printf(fmt)
@@ -50,9 +52,32 @@ static inline unsigned long mk_vsid_data(unsigned long ea, unsigned long flags)
         return (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | flags;
 }
 
-static inline void create_slbe(unsigned long ea, unsigned long flags,
-                               unsigned long entry)
+static inline void slb_shadow_update(unsigned long esid, unsigned long vsid,
+                                     unsigned long entry)
 {
+        /*
+         * Clear the ESID first so the entry is not valid while we are
+         * updating it.
+         */
+        get_slb_shadow()->save_area[entry].esid = 0;
+        barrier();
+        get_slb_shadow()->save_area[entry].vsid = vsid;
+        barrier();
+        get_slb_shadow()->save_area[entry].esid = esid;
+
+}
+
+static inline void create_shadowed_slbe(unsigned long ea, unsigned long flags,
+                                        unsigned long entry)
+{
+        /*
+         * Updating the shadow buffer before writing the SLB ensures
+         * we don't get a stale entry here if we get preempted by PHYP
+         * between these two statements.
+         */
+        slb_shadow_update(mk_esid_data(ea, entry), mk_vsid_data(ea, flags),
+                          entry);
+
         asm volatile("slbmte %0,%1" :
                      : "r" (mk_vsid_data(ea, flags)),
                        "r" (mk_esid_data(ea, entry))
@@ -77,6 +102,10 @@ void slb_flush_and_rebolt(void)
         if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET)
                 ksp_esid_data &= ~SLB_ESID_V;
 
+        /* Only third entry (stack) may change here so only resave that */
+        slb_shadow_update(ksp_esid_data,
+                          mk_vsid_data(ksp_esid_data, lflags), 2);
+
         /* We need to do this all in asm, so we're sure we don't touch
          * the stack between the slbia and rebolting it. */
         asm volatile("isync\n"
@@ -209,9 +238,9 @@ void slb_initialize(void)
         asm volatile("isync":::"memory");
         asm volatile("slbmte %0,%0"::"r" (0) : "memory");
         asm volatile("isync; slbia; isync":::"memory");
-        create_slbe(PAGE_OFFSET, lflags, 0);
+        create_shadowed_slbe(PAGE_OFFSET, lflags, 0);
 
-        create_slbe(VMALLOC_START, vflags, 1);
+        create_shadowed_slbe(VMALLOC_START, vflags, 1);
 
         /* We don't bolt the stack for the time being - we're in boot,
          * so the stack is in the bolted segment. By the time it goes
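To see the ordering that slb_shadow_update() enforces in isolation, here is a small self-contained user-space sketch of the same protocol. The types, the SLB_ESID_V value and the barrier() definition are stand-ins for illustration, not the kernel's; the point is only that the esid is cleared before the vsid is rewritten, with compiler barriers between the stores, so the hypervisor never observes a valid esid paired with a stale vsid.

/* Stand-alone illustration of the update ordering used by
 * slb_shadow_update(); a simulation with hypothetical types,
 * not the kernel implementation.
 */
#include <stdint.h>
#include <stdio.h>

#define SLB_ESID_V   0x08000000ULL                    /* valid bit; value assumed for this sketch */
#define barrier()    asm volatile("" ::: "memory")    /* compiler barrier, as in the kernel */

struct shadow_entry {
        uint64_t esid;
        uint64_t vsid;
};

static struct shadow_entry save_area[3];              /* one slot per bolted SLB entry */

static void shadow_update(uint64_t esid, uint64_t vsid, int entry)
{
        /* Invalidate first so a reader never sees a valid esid
         * paired with a stale vsid. */
        save_area[entry].esid = 0;
        barrier();
        save_area[entry].vsid = vsid;
        barrier();
        save_area[entry].esid = esid;
}

int main(void)
{
        /* Resave the third (stack) entry, as slb_flush_and_rebolt() does. */
        shadow_update(0xc000000000000000ULL | SLB_ESID_V, 0x1234ULL, 2);
        printf("entry 2: esid=%#llx vsid=%#llx\n",
               (unsigned long long)save_area[2].esid,
               (unsigned long long)save_area[2].vsid);
        return 0;
}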