path: root/arch/powerpc/kvm/book3s_64_slb.S
author    Alexander Graf <agraf@suse.de>  2010-04-15 18:11:48 -0400
committer Avi Kivity <avi@redhat.com>  2010-05-17 05:18:38 -0400
commit    53e5b8bbbd0d0305234b2cfeae400183db98f993 (patch)
tree      aab01d2962a4b30666edba8e71191240bdda0c83 /arch/powerpc/kvm/book3s_64_slb.S
parent    b79fcdf67e9e03773fb032679675d8008d5cc2dc (diff)
KVM: PPC: Make SLB switching code the new segment framework
We just introduced generic segment switching code that only needs to call
small macros to do the actual switching, but keeps most of the entry / exit
code generic.

So let's move the SLB switching code over to use this new mechanism.

Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
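For orientation, here is a minimal sketch of how the two macros defined in this
diff are meant to be consumed. The generic segment switching framework referred
to above lives outside this file; the label names and the surrounding steps in
the comments below are illustrative assumptions based on the code this file
previously carried, not part of this patch.

	/* Illustrative sketch only -- not taken from this patch. The generic
	 * trampoline is assumed to expand the per-MMU macros roughly like this:
	 */
	.global kvmppc_handler_trampoline_enter
	kvmppc_handler_trampoline_enter:
		/* ... generic entry code: set up guest SRR0/SRR1, flag guest mode ... */
		LOAD_GUEST_SEGMENTS		/* SLB flavor, defined in book3s_64_slb.S */
		/* ... restore guest GPRs from the shadow vcpu and RFI into the guest ... */

	.global kvmppc_handler_trampoline_exit
	kvmppc_handler_trampoline_exit:
		/* ... generic exit code: stash guest GPRs/CR/XER into the shadow vcpu ... */
		LOAD_HOST_SEGMENTS		/* SLB flavor, defined in book3s_64_slb.S */
		/* ... re-enable the host MMU and RFI to the highmem exit handler ... */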
Diffstat (limited to 'arch/powerpc/kvm/book3s_64_slb.S')
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S | 183
1 file changed, 24 insertions, 159 deletions
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 09196790795..04e7d3bbfe8 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -44,8 +44,7 @@ slb_exit_skip_ ## num:
  *                                                                            *
  *****************************************************************************/
 
-.global kvmppc_handler_trampoline_enter
-kvmppc_handler_trampoline_enter:
+.macro LOAD_GUEST_SEGMENTS
 
 	/* Required state:
 	 *
@@ -53,20 +52,14 @@ kvmppc_handler_trampoline_enter:
 	 * R13 = PACA
 	 * R1 = host R1
 	 * R2 = host R2
-	 * R9 = guest IP
-	 * R10 = guest MSR
-	 * all other GPRS = free
-	 * PACA[KVM_CR] = guest CR
-	 * PACA[KVM_XER] = guest XER
+	 * R3 = shadow vcpu
+	 * all other volatile GPRS = free
+	 * SVCPU[CR] = guest CR
+	 * SVCPU[XER] = guest XER
+	 * SVCPU[CTR] = guest CTR
+	 * SVCPU[LR] = guest LR
 	 */
 
-	mtsrr0	r9
-	mtsrr1	r10
-
-	/* Activate guest mode, so faults get handled by KVM */
-	li	r11, KVM_GUEST_MODE_GUEST
-	stb	r11, PACA_KVM_IN_GUEST(r13)
-
 	/* Remove LPAR shadow entries */
 
 #if SLB_NUM_BOLTED == 3
@@ -101,14 +94,14 @@ kvmppc_handler_trampoline_enter:
 
 	/* Fill SLB with our shadow */
 
-	lbz	r12, PACA_KVM_SLB_MAX(r13)
+	lbz	r12, SVCPU_SLB_MAX(r3)
 	mulli	r12, r12, 16
-	addi	r12, r12, PACA_KVM_SLB
-	add	r12, r12, r13
+	addi	r12, r12, SVCPU_SLB
+	add	r12, r12, r3
 
 	/* for (r11 = kvm_slb; r11 < kvm_slb + kvm_slb_size; r11+=slb_entry) */
-	li	r11, PACA_KVM_SLB
-	add	r11, r11, r13
+	li	r11, SVCPU_SLB
+	add	r11, r11, r3
 
 slb_loop_enter:
 
@@ -127,34 +120,7 @@ slb_loop_enter_skip:
 
 slb_do_enter:
 
-	/* Enter guest */
-
-	ld	r0, (PACA_KVM_R0)(r13)
-	ld	r1, (PACA_KVM_R1)(r13)
-	ld	r2, (PACA_KVM_R2)(r13)
-	ld	r3, (PACA_KVM_R3)(r13)
-	ld	r4, (PACA_KVM_R4)(r13)
-	ld	r5, (PACA_KVM_R5)(r13)
-	ld	r6, (PACA_KVM_R6)(r13)
-	ld	r7, (PACA_KVM_R7)(r13)
-	ld	r8, (PACA_KVM_R8)(r13)
-	ld	r9, (PACA_KVM_R9)(r13)
-	ld	r10, (PACA_KVM_R10)(r13)
-	ld	r12, (PACA_KVM_R12)(r13)
-
-	lwz	r11, (PACA_KVM_CR)(r13)
-	mtcr	r11
-
-	lwz	r11, (PACA_KVM_XER)(r13)
-	mtxer	r11
-
-	ld	r11, (PACA_KVM_R11)(r13)
-	ld	r13, (PACA_KVM_R13)(r13)
-
-	RFI
-kvmppc_handler_trampoline_enter_end:
-
-
+.endm
 
 /******************************************************************************
  *                                                                            *
@@ -162,99 +128,22 @@ kvmppc_handler_trampoline_enter_end:
  *                                                                            *
  *****************************************************************************/
 
-.global kvmppc_handler_trampoline_exit
-kvmppc_handler_trampoline_exit:
+.macro LOAD_HOST_SEGMENTS
 
 	/* Register usage at this point:
 	 *
-	 * SPRG_SCRATCH0 = guest R13
-	 * R12 = exit handler id
-	 * R13 = PACA
-	 * PACA.KVM.SCRATCH0 = guest R12
-	 * PACA.KVM.SCRATCH1 = guest CR
+	 * R1 = host R1
+	 * R2 = host R2
+	 * R12 = exit handler id
+	 * R13 = shadow vcpu - SHADOW_VCPU_OFF [=PACA on PPC64]
+	 * SVCPU.* = guest *
+	 * SVCPU[CR] = guest CR
+	 * SVCPU[XER] = guest XER
+	 * SVCPU[CTR] = guest CTR
+	 * SVCPU[LR] = guest LR
 	 *
 	 */
 
-	/* Save registers */
-
-	std	r0, PACA_KVM_R0(r13)
-	std	r1, PACA_KVM_R1(r13)
-	std	r2, PACA_KVM_R2(r13)
-	std	r3, PACA_KVM_R3(r13)
-	std	r4, PACA_KVM_R4(r13)
-	std	r5, PACA_KVM_R5(r13)
-	std	r6, PACA_KVM_R6(r13)
-	std	r7, PACA_KVM_R7(r13)
-	std	r8, PACA_KVM_R8(r13)
-	std	r9, PACA_KVM_R9(r13)
-	std	r10, PACA_KVM_R10(r13)
-	std	r11, PACA_KVM_R11(r13)
-
-	/* Restore R1/R2 so we can handle faults */
-	ld	r1, PACA_KVM_HOST_R1(r13)
-	ld	r2, PACA_KVM_HOST_R2(r13)
-
-	/* Save guest PC and MSR in GPRs */
-	mfsrr0	r3
-	mfsrr1	r4
-
-	/* Get scratch'ed off registers */
-	mfspr	r9, SPRN_SPRG_SCRATCH0
-	std	r9, PACA_KVM_R13(r13)
-
-	ld	r8, PACA_KVM_SCRATCH0(r13)
-	std	r8, PACA_KVM_R12(r13)
-
-	lwz	r7, PACA_KVM_SCRATCH1(r13)
-	stw	r7, PACA_KVM_CR(r13)
-
-	/* Save more register state */
-
-	mfxer	r6
-	stw	r6, PACA_KVM_XER(r13)
-
-	mfdar	r5
-	mfdsisr	r6
-
-	/*
-	 * In order for us to easily get the last instruction,
-	 * we got the #vmexit at, we exploit the fact that the
-	 * virtual layout is still the same here, so we can just
-	 * ld from the guest's PC address
-	 */
-
-	/* We only load the last instruction when it's safe */
-	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
-	beq	ld_last_inst
-	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
-	beq	ld_last_inst
-
-	b	no_ld_last_inst
-
-ld_last_inst:
-	/* Save off the guest instruction we're at */
-
-	/* Set guest mode to 'jump over instruction' so if lwz faults
-	 * we'll just continue at the next IP. */
-	li	r9, KVM_GUEST_MODE_SKIP
-	stb	r9, PACA_KVM_IN_GUEST(r13)
-
-	/* 1) enable paging for data */
-	mfmsr	r9
-	ori	r11, r9, MSR_DR			/* Enable paging for data */
-	mtmsr	r11
-	/* 2) fetch the instruction */
-	li	r0, KVM_INST_FETCH_FAILED	/* In case lwz faults */
-	lwz	r0, 0(r3)
-	/* 3) disable paging again */
-	mtmsr	r9
-
-no_ld_last_inst:
-
-	/* Unset guest mode */
-	li	r9, KVM_GUEST_MODE_NONE
-	stb	r9, PACA_KVM_IN_GUEST(r13)
-
 	/* Restore bolted entries from the shadow and fix it along the way */
 
 	/* We don't store anything in entry 0, so we don't need to take care of it */
@@ -275,28 +164,4 @@ no_ld_last_inst:
 
 slb_do_exit:
 
-	/* Register usage at this point:
-	 *
-	 * R0 = guest last inst
-	 * R1 = host R1
-	 * R2 = host R2
-	 * R3 = guest PC
-	 * R4 = guest MSR
-	 * R5 = guest DAR
-	 * R6 = guest DSISR
-	 * R12 = exit handler id
-	 * R13 = PACA
-	 * PACA.KVM.* = guest *
-	 *
-	 */
-
-	/* RFI into the highmem handler */
-	mfmsr	r7
-	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI	/* Enable paging */
-	mtsrr1	r7
-	ld	r8, PACA_KVM_VMHANDLER(r13)	/* Highmem handler address */
-	mtsrr0	r8
-
-	RFI
-kvmppc_handler_trampoline_exit_end:
-
+.endm