author		Alexander Graf <agraf@suse.de>	2014-05-15 08:38:03 -0400
committer	Alexander Graf <agraf@suse.de>	2014-05-30 08:26:30 -0400
commit		d8d164a9850d486cc48081c18831680254688d0f
tree		6c8d77919d3f2c0ce26f448c148be55cbf2c0bfa
parent		207438d4e21e05728a8a58b5e25b0f6553260068
KVM: PPC: Book3S PR: Rework SLB switching code
On LPAR guest systems Linux enables the shadow SLB to indicate to the
hypervisor a number of SLB entries that always have to be available.

Today we go through this shadow SLB and disable all ESIDs' valid bits.
However, pHyp doesn't like this approach very much and honors us with
fancy machine checks.

Fortunately, the shadow SLB descriptor also has a field that indicates
the number of valid entries following. During the lifetime of a guest
we can just swap that value to 0 and don't have to worry about the SLB
restoration magic.

While we're touching the code, let's also make it more readable (get
rid of rldicl), allow it to deal with a dynamic number of bolted SLB
entries, and only do shadow SLB swizzling on LPAR systems.

Signed-off-by: Alexander Graf <agraf@suse.de>
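[Editor's note] For context on the trick: the shadow SLB descriptor the new
code pokes is struct slb_shadow from arch/powerpc/include/asm/lppaca.h, whose
first field is a big-endian 32-bit count of valid entries. That is why a
single stb r8, 3(r11) (a store to the count's least significant byte) is
enough to declare the whole shadow empty or fully bolted. Below is a minimal
standalone C sketch of that layout and the entry/exit swizzle; the struct
mirrors the kernel header, but the helper functions and main() are
illustrative inventions, not kernel code:

	#include <assert.h>
	#include <endian.h>   /* htobe32, be32toh (glibc) */
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define SLB_NUM_BOLTED 3   /* the kernel's bolted-entry count */

	/* Mirrors struct slb_shadow in arch/powerpc/include/asm/lppaca.h;
	 * all fields are big endian as seen by the hypervisor. */
	struct slb_shadow {
		uint32_t persistent;     /* number of valid entries following */
		uint32_t buffer_length;  /* total shadow buffer length */
		uint64_t reserved;
		struct {
			uint64_t esid;
			uint64_t vsid;
		} save_area[SLB_NUM_BOLTED];
	};

	/* Guest entry: instead of clearing each ESID's valid bit (which
	 * pHyp answers with machine checks), declare that zero entries
	 * are valid. The asm does this with "stb r8, 3(r11)"; byte 3 is
	 * the least significant byte of the big-endian 32-bit count. */
	static void shadow_slb_declare_empty(struct slb_shadow *s)
	{
		((uint8_t *)&s->persistent)[3] = 0;
	}

	/* Host re-entry: restore the count; the real code then reloads
	 * the bolted entries from save_area[] with slbmte (the .rept
	 * loop in the patch below). */
	static void shadow_slb_declare_bolted(struct slb_shadow *s)
	{
		((uint8_t *)&s->persistent)[3] = SLB_NUM_BOLTED;
	}

	int main(void)
	{
		struct slb_shadow s = { .persistent = htobe32(SLB_NUM_BOLTED) };

		/* Offsets the assembly relies on: the save area starts at
		 * 0x10 (SLBSHADOW_SAVEAREA) and one ESID/VSID pair is 0x10
		 * bytes long (SHADOW_SLB_ENTRY_LEN). */
		assert(offsetof(struct slb_shadow, save_area) == 0x10);
		assert(sizeof(s.save_area[0]) == 0x10);

		shadow_slb_declare_empty(&s);
		printf("valid entries: %u\n", be32toh(s.persistent)); /* 0 */
		shadow_slb_declare_bolted(&s);
		printf("valid entries: %u\n", be32toh(s.persistent)); /* 3 */
		return 0;
	}

The two asserts correspond to the constants the assembly uses: entry n lives
at SLBSHADOW_SAVEAREA + n * SHADOW_SLB_ENTRY_LEN, which is what the .rept
loop in the patch walks.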
 arch/powerpc/kernel/paca.c       |  3 +++
 arch/powerpc/kvm/book3s_64_slb.S | 83 +++++++++++++++++------------------
 arch/powerpc/mm/slb.c            |  2 +-
 3 files changed, 42 insertions(+), 46 deletions(-)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index ad302f845e5d..d6e195e8cd4c 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -98,6 +98,9 @@ static inline void free_lppacas(void) { }
 /*
  * 3 persistent SLBs are registered here. The buffer will be zero
  * initially, hence will all be invaild until we actually write them.
+ *
+ * If you make the number of persistent SLB entries dynamic, please also
+ * update PR KVM to flush and restore them accordingly.
  */
 static struct slb_shadow *slb_shadow;
 
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 84c52c6b5837..3589c4e3d49b 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -17,29 +17,9 @@
  * Authors: Alexander Graf <agraf@suse.de>
  */
 
-#define SHADOW_SLB_ESID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10))
-#define SHADOW_SLB_VSID(num)	(SLBSHADOW_SAVEAREA + (num * 0x10) + 0x8)
-#define UNBOLT_SLB_ENTRY(num) \
-	li	r11, SHADOW_SLB_ESID(num);	\
-	LDX_BE	r9, r12, r11;			\
-	/* Invalid? Skip. */;			\
-	rldicl.	r0, r9, 37, 63;			\
-	beq	slb_entry_skip_ ## num;		\
-	xoris	r9, r9, SLB_ESID_V@h;		\
-	STDX_BE	r9, r12, r11;			\
-slb_entry_skip_ ## num:
-
-#define REBOLT_SLB_ENTRY(num) \
-	li	r8, SHADOW_SLB_ESID(num);	\
-	li	r7, SHADOW_SLB_VSID(num);	\
-	LDX_BE	r10, r11, r8;			\
-	cmpdi	r10, 0;				\
-	beq	slb_exit_skip_ ## num;		\
-	oris	r10, r10, SLB_ESID_V@h;		\
-	LDX_BE	r9, r11, r7;			\
-	slbmte	r9, r10;			\
-	STDX_BE	r10, r11, r8;			\
-slb_exit_skip_ ## num:
+#define SHADOW_SLB_ENTRY_LEN	0x10
+#define OFFSET_ESID(x)		(SHADOW_SLB_ENTRY_LEN * x)
+#define OFFSET_VSID(x)		((SHADOW_SLB_ENTRY_LEN * x) + 8)
 
 /******************************************************************************
  *                                                                            *
@@ -63,20 +43,15 @@ slb_exit_skip_ ## num:
  * SVCPU[LR]  = guest LR
  */
 
-	/* Remove LPAR shadow entries */
+BEGIN_FW_FTR_SECTION
 
-#if SLB_NUM_BOLTED == 3
+	/* Declare SLB shadow as 0 entries big */
 
-	ld	r12, PACA_SLBSHADOWPTR(r13)
+	ld	r11, PACA_SLBSHADOWPTR(r13)
+	li	r8, 0
+	stb	r8, 3(r11)
 
-	/* Remove bolted entries */
-	UNBOLT_SLB_ENTRY(0)
-	UNBOLT_SLB_ENTRY(1)
-	UNBOLT_SLB_ENTRY(2)
-
-#else
-#error unknown number of bolted entries
-#endif
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
 
 	/* Flush SLB */
 
@@ -99,7 +74,7 @@ slb_loop_enter:
 
 	ld	r10, 0(r11)
 
-	rldicl.	r0, r10, 37, 63
+	andis.	r9, r10, SLB_ESID_V@h
 	beq	slb_loop_enter_skip
 
 	ld	r9, 8(r11)
@@ -136,24 +111,42 @@ slb_do_enter:
  *
  */
 
-	/* Restore bolted entries from the shadow and fix it along the way */
+	/* Remove all SLB entries that are in use. */
 
 	li	r0, r0
 	slbmte	r0, r0
 	slbia
-	isync
 
-#if SLB_NUM_BOLTED == 3
+	/* Restore bolted entries from the shadow */
 
 	ld	r11, PACA_SLBSHADOWPTR(r13)
 
-	REBOLT_SLB_ENTRY(0)
-	REBOLT_SLB_ENTRY(1)
-	REBOLT_SLB_ENTRY(2)
+BEGIN_FW_FTR_SECTION
+
+	/* Declare SLB shadow as SLB_NUM_BOLTED entries big */
 
-#else
-#error unknown number of bolted entries
-#endif
+	li	r8, SLB_NUM_BOLTED
+	stb	r8, 3(r11)
+
+END_FW_FTR_SECTION_IFSET(FW_FEATURE_LPAR)
+
+	/* Manually load all entries from shadow SLB */
+
+	li	r8, SLBSHADOW_SAVEAREA
+	li	r7, SLBSHADOW_SAVEAREA + 8
+
+	.rept	SLB_NUM_BOLTED
+	LDX_BE	r10, r11, r8
+	cmpdi	r10, 0
+	beq	1f
+	LDX_BE	r9, r11, r7
+	slbmte	r9, r10
+1:	addi	r7, r7, SHADOW_SLB_ENTRY_LEN
+	addi	r8, r8, SHADOW_SLB_ENTRY_LEN
+	.endr
+
+	isync
+	sync
 
 slb_do_exit:
 
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index 9d1d33cd2be5..964a5f61488a 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -97,7 +97,7 @@ static inline void create_shadowed_slbe(unsigned long ea, int ssize,
 static void __slb_flush_and_rebolt(void)
 {
 	/* If you change this make sure you change SLB_NUM_BOLTED
-	 * appropriately too. */
+	 * and PR KVM appropriately too. */
 	unsigned long linear_llp, vmalloc_llp, lflags, vflags;
 	unsigned long ksp_esid_data, ksp_vsid_data;
 