Diffstat (limited to 'arch/powerpc/kvm/book3s_64_interrupts.S')

-rw-r--r--  arch/powerpc/kvm/book3s_64_interrupts.S | 216
1 file changed, 73 insertions(+), 143 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_64_interrupts.S b/arch/powerpc/kvm/book3s_64_interrupts.S
index d95d0d967d56..66e3b1179b32 100644
--- a/arch/powerpc/kvm/book3s_64_interrupts.S
+++ b/arch/powerpc/kvm/book3s_64_interrupts.S
@@ -28,11 +28,6 @@
 #define ULONG_SIZE 8
 #define VCPU_GPR(n) (VCPU_GPRS + (n * ULONG_SIZE))
 
-.macro mfpaca tmp_reg, src_reg, offset, vcpu_reg
-	ld	\tmp_reg, (PACA_EXMC+\offset)(r13)
-	std	\tmp_reg, VCPU_GPR(\src_reg)(\vcpu_reg)
-.endm
-
 .macro DISABLE_INTERRUPTS
 	mfmsr	r0
 	rldicl	r0,r0,48,1
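
The deleted mfpaca helper was the old exit path's way of moving one guest GPR from the machine-check exception scratch area (PACA_EXMC) into the vcpu's register file. In C it amounts to a one-line copy; the sketch below models the two areas with invented layouts (the real ones come from asm-offsets.c and the kernel's paca/kvm_host headers, not from these definitions):

#include <stdint.h>

/* Invented stand-ins for the areas the asm offsets point into. */
struct paca_exmc_scratch {
	uint64_t slot[16];        /* EX_SRR0, EX_R9..EX_R13, ...  */
};

struct kvm_vcpu_regs {
	uint64_t gpr[32];         /* VCPU_GPRS + n * ULONG_SIZE   */
};

/* What "mfpaca tmp, src, offset, vcpu" expanded to:
 *   ld  tmp, (PACA_EXMC + offset)(r13)   -- fetch the scratch slot
 *   std tmp, VCPU_GPR(src)(vcpu)         -- store it into the vcpu
 */
static inline void mfpaca(struct kvm_vcpu_regs *vcpu, int src_gpr,
			  const struct paca_exmc_scratch *exmc, int slot)
{
	vcpu->gpr[src_gpr] = exmc->slot[slot];
}
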
@@ -92,37 +87,30 @@ kvm_start_entry:
 	/* Load non-volatile guest state from the vcpu */
 	VCPU_LOAD_NVGPRS(r4)
 
-kvm_start_lightweight:
-
-	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
-	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
-
-	DISABLE_INTERRUPTS
-
 	/* Save R1/R2 in the PACA */
-	std	r1, PACAR1(r13)
-	std	r2, (PACA_EXMC+EX_SRR0)(r13)
+	std	r1, PACA_KVM_HOST_R1(r13)
+	std	r2, PACA_KVM_HOST_R2(r13)
+
+	/* XXX swap in/out on load? */
 	ld	r3, VCPU_HIGHMEM_HANDLER(r4)
-	std	r3, PACASAVEDMSR(r13)
+	std	r3, PACA_KVM_VMHANDLER(r13)
 
 	ld	r3, VCPU_TRAMPOLINE_ENTER(r4)
-	mtsrr0	r3
+	std	r3, PACA_KVM_RMHANDLER(r13)
 
-	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r3
+kvm_start_lightweight:
 
-	/* Load guest state in the respective registers */
-	lwz	r3, VCPU_CR(r4)		/* r3 = vcpu->arch.cr */
-	stw	r3, (PACA_EXMC + EX_CCR)(r13)
+	ld	r9, VCPU_PC(r4)			/* r9 = vcpu->arch.pc */
+	ld	r10, VCPU_SHADOW_MSR(r4)	/* r10 = vcpu->arch.shadow_msr */
 
+	/* Load some guest state in the respective registers */
 	ld	r3, VCPU_CTR(r4)	/* r3 = vcpu->arch.ctr */
 	mtctr	r3			/* CTR = r3 */
 
 	ld	r3, VCPU_LR(r4)		/* r3 = vcpu->arch.lr */
 	mtlr	r3			/* LR = r3 */
 
-	ld	r3, VCPU_XER(r4)	/* r3 = vcpu->arch.xer */
-	std	r3, (PACA_EXMC + EX_R3)(r13)
+	DISABLE_INTERRUPTS
 
 	/* Some guests may need to have dcbz set to 32 byte length.
 	 *
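
The entry path now stages everything the real-mode code will need in dedicated KVM slots of the PACA before the kvm_start_lightweight label, instead of borrowing PACA_EXMC scratch space and programming SRR0/SRR1 this early. A rough C model of the slots the new offsets imply (field names are guesses from the PACA_KVM_* macros; the authoritative struct lives in the kernel's PACA definition):

#include <stdint.h>

/* Guessed per-CPU layout behind the new PACA_KVM_* offsets. */
struct paca_kvm_area {
	uint64_t host_r1;     /* PACA_KVM_HOST_R1: host stack pointer  */
	uint64_t host_r2;     /* PACA_KVM_HOST_R2: host TOC pointer    */
	uint64_t vmhandler;   /* PACA_KVM_VMHANDLER: highmem exit path */
	uint64_t rmhandler;   /* PACA_KVM_RMHANDLER: real-mode entry   */
	uint8_t  in_guest;    /* PACA_KVM_IN_GUEST: trampoline flag    */
};

/* The staging this hunk performs: park everything in the PACA so the
 * later, interrupt-unsafe stretch never has to touch vcpu memory. */
static void stage_guest_entry(struct paca_kvm_area *paca,
			      uint64_t host_r1, uint64_t host_r2,
			      uint64_t highmem_handler,
			      uint64_t trampoline_enter)
{
	paca->host_r1   = host_r1;          /* std r1, PACA_KVM_HOST_R1(r13)   */
	paca->host_r2   = host_r2;          /* std r2, PACA_KVM_HOST_R2(r13)   */
	paca->vmhandler = highmem_handler;  /* std r3, PACA_KVM_VMHANDLER(r13) */
	paca->rmhandler = trampoline_enter; /* std r3, PACA_KVM_RMHANDLER(r13) */
}
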
@@ -142,34 +130,21 @@ kvm_start_lightweight:
 	mtspr	SPRN_HID5,r3
 
 no_dcbz32_on:
-	/* Load guest GPRs */
-
-	ld	r3, VCPU_GPR(r9)(r4)
-	std	r3, (PACA_EXMC + EX_R9)(r13)
-	ld	r3, VCPU_GPR(r10)(r4)
-	std	r3, (PACA_EXMC + EX_R10)(r13)
-	ld	r3, VCPU_GPR(r11)(r4)
-	std	r3, (PACA_EXMC + EX_R11)(r13)
-	ld	r3, VCPU_GPR(r12)(r4)
-	std	r3, (PACA_EXMC + EX_R12)(r13)
-	ld	r3, VCPU_GPR(r13)(r4)
-	std	r3, (PACA_EXMC + EX_R13)(r13)
-
-	ld	r0, VCPU_GPR(r0)(r4)
-	ld	r1, VCPU_GPR(r1)(r4)
-	ld	r2, VCPU_GPR(r2)(r4)
-	ld	r3, VCPU_GPR(r3)(r4)
-	ld	r5, VCPU_GPR(r5)(r4)
-	ld	r6, VCPU_GPR(r6)(r4)
-	ld	r7, VCPU_GPR(r7)(r4)
-	ld	r8, VCPU_GPR(r8)(r4)
-	ld	r4, VCPU_GPR(r4)(r4)
 
 	/* This sets the Magic value for the trampoline */
 
+	/* XXX this needs to move into a safe function, so we can
+	   be sure we don't get any interrupts */
+
 	li	r11, 1
 	stb	r11, PACA_KVM_IN_GUEST(r13)
 
+	ld	r3, PACA_KVM_RMHANDLER(r13)
+	mtsrr0	r3
+
+	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+	mtsrr1	r3
+
 	/* Jump to SLB patching handlder and into our guest */
 	RFI
 
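
The SRR0/SRR1 setup has moved to just before the RFI: SRR0 takes the real-mode handler stashed earlier in PACA_KVM_RMHANDLER, and SRR1 takes the kernel MSR with both relocation bits cleared, so the rfi lands in the SLB patching trampoline with the MMU off. A small sketch of that MSR arithmetic (the bit positions follow the architected MSR; the MSR_KERNEL value below is only a stand-in, the kernel's real definitions are in asm/reg.h):

#include <stdint.h>
#include <stdio.h>

#define MSR_DR (1ULL << 4)   /* data address translation        */
#define MSR_IR (1ULL << 5)   /* instruction address translation */

int main(void)
{
	uint64_t msr_kernel = 0x8000000000001032ULL; /* stand-in value */

	/* LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR)):
	 * the kernel MSR with IR/DR off, so rfi enters real mode. */
	uint64_t srr1 = msr_kernel & ~(MSR_IR | MSR_DR);

	printf("SRR1 = 0x%016llx (IR/DR cleared)\n",
	       (unsigned long long)srr1);
	return 0;
}
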
@@ -185,60 +160,31 @@ kvmppc_handler_highmem:
 	/*
 	 * Register usage at this point:
 	 *
-	 * R00 = guest R13
-	 * R01 = host R1
-	 * R02 = host R2
-	 * R10 = guest PC
-	 * R11 = guest MSR
-	 * R12 = exit handler id
-	 * R13 = PACA
-	 * PACA.exmc.R9 = guest R1
-	 * PACA.exmc.R10 = guest R10
-	 * PACA.exmc.R11 = guest R11
-	 * PACA.exmc.R12 = guest R12
-	 * PACA.exmc.R13 = guest R2
-	 * PACA.exmc.DAR = guest DAR
-	 * PACA.exmc.DSISR = guest DSISR
-	 * PACA.exmc.LR = guest instruction
-	 * PACA.exmc.CCR = guest CR
-	 * PACA.exmc.SRR0 = guest R0
+	 * R0 = guest last inst
+	 * R1 = host R1
+	 * R2 = host R2
+	 * R3 = guest PC
+	 * R4 = guest MSR
+	 * R5 = guest DAR
+	 * R6 = guest DSISR
+	 * R13 = PACA
+	 * PACA.KVM.* = guest *
 	 *
 	 */
 
-	std	r3, (PACA_EXMC+EX_R3)(r13)
+	/* R7 = vcpu */
+	ld	r7, GPR4(r1)
 
-	/* save the exit id in R3 */
-	mr	r3, r12
+	/* Now save the guest state */
 
-	/* R12 = vcpu */
-	ld	r12, GPR4(r1)
+	stw	r0, VCPU_LAST_INST(r7)
 
-	/* Now save the guest state */
+	std	r3, VCPU_PC(r7)
+	std	r4, VCPU_SHADOW_MSR(r7)
+	std	r5, VCPU_FAULT_DEAR(r7)
+	std	r6, VCPU_FAULT_DSISR(r7)
 
-	std	r0, VCPU_GPR(r13)(r12)
-	std	r4, VCPU_GPR(r4)(r12)
-	std	r5, VCPU_GPR(r5)(r12)
-	std	r6, VCPU_GPR(r6)(r12)
-	std	r7, VCPU_GPR(r7)(r12)
-	std	r8, VCPU_GPR(r8)(r12)
-	std	r9, VCPU_GPR(r9)(r12)
-
-	/* get registers from PACA */
-	mfpaca	r5, r0, EX_SRR0, r12
-	mfpaca	r5, r3, EX_R3, r12
-	mfpaca	r5, r1, EX_R9, r12
-	mfpaca	r5, r10, EX_R10, r12
-	mfpaca	r5, r11, EX_R11, r12
-	mfpaca	r5, r12, EX_R12, r12
-	mfpaca	r5, r2, EX_R13, r12
-
-	lwz	r5, (PACA_EXMC+EX_LR)(r13)
-	stw	r5, VCPU_LAST_INST(r12)
-
-	lwz	r5, (PACA_EXMC+EX_CCR)(r13)
-	stw	r5, VCPU_CR(r12)
-
-	ld	r5, VCPU_HFLAGS(r12)
+	ld	r5, VCPU_HFLAGS(r7)
 	rldicl.	r5, r5, 0, 63		/* CR = ((r5 & 1) == 0) */
 	beq	no_dcbz32_off
 
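
On the exit side, the shadow interrupt code now hands guest state over in fixed GPRs (r0 = last instruction, r3 = PC, r4 = shadow MSR, r5 = DAR, r6 = DSISR, r12 = exit reason) rather than through PACA.exmc slots, so the highmem handler can store straight into the vcpu. The same handoff in C, with the register contract made explicit as parameters (struct layout invented; the real fields sit behind the VCPU_* asm offsets):

#include <stdint.h>

/* Sketch of the vcpu fields the handler stores into. */
struct kvm_vcpu_arch {
	uint32_t last_inst;    /* VCPU_LAST_INST   */
	uint64_t pc;           /* VCPU_PC          */
	uint64_t shadow_msr;   /* VCPU_SHADOW_MSR  */
	uint64_t fault_dear;   /* VCPU_FAULT_DEAR  */
	uint64_t fault_dsisr;  /* VCPU_FAULT_DSISR */
};

static void save_exit_state(struct kvm_vcpu_arch *vcpu,
			    uint32_t r0_last_inst, uint64_t r3_pc,
			    uint64_t r4_msr, uint64_t r5_dar,
			    uint64_t r6_dsisr)
{
	vcpu->last_inst   = r0_last_inst; /* stw r0, VCPU_LAST_INST(r7)   */
	vcpu->pc          = r3_pc;        /* std r3, VCPU_PC(r7)          */
	vcpu->shadow_msr  = r4_msr;       /* std r4, VCPU_SHADOW_MSR(r7)  */
	vcpu->fault_dear  = r5_dar;       /* std r5, VCPU_FAULT_DEAR(r7)  */
	vcpu->fault_dsisr = r6_dsisr;     /* std r6, VCPU_FAULT_DSISR(r7) */
}
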
@@ -248,58 +194,42 @@ kvmppc_handler_highmem:
 
 no_dcbz32_off:
 
-	std	r14, VCPU_GPR(r14)(r12)
-	std	r15, VCPU_GPR(r15)(r12)
-	std	r16, VCPU_GPR(r16)(r12)
-	std	r17, VCPU_GPR(r17)(r12)
-	std	r18, VCPU_GPR(r18)(r12)
-	std	r19, VCPU_GPR(r19)(r12)
-	std	r20, VCPU_GPR(r20)(r12)
-	std	r21, VCPU_GPR(r21)(r12)
-	std	r22, VCPU_GPR(r22)(r12)
-	std	r23, VCPU_GPR(r23)(r12)
-	std	r24, VCPU_GPR(r24)(r12)
-	std	r25, VCPU_GPR(r25)(r12)
-	std	r26, VCPU_GPR(r26)(r12)
-	std	r27, VCPU_GPR(r27)(r12)
-	std	r28, VCPU_GPR(r28)(r12)
-	std	r29, VCPU_GPR(r29)(r12)
-	std	r30, VCPU_GPR(r30)(r12)
-	std	r31, VCPU_GPR(r31)(r12)
+	std	r14, VCPU_GPR(r14)(r7)
+	std	r15, VCPU_GPR(r15)(r7)
+	std	r16, VCPU_GPR(r16)(r7)
+	std	r17, VCPU_GPR(r17)(r7)
+	std	r18, VCPU_GPR(r18)(r7)
+	std	r19, VCPU_GPR(r19)(r7)
+	std	r20, VCPU_GPR(r20)(r7)
+	std	r21, VCPU_GPR(r21)(r7)
+	std	r22, VCPU_GPR(r22)(r7)
+	std	r23, VCPU_GPR(r23)(r7)
+	std	r24, VCPU_GPR(r24)(r7)
+	std	r25, VCPU_GPR(r25)(r7)
+	std	r26, VCPU_GPR(r26)(r7)
+	std	r27, VCPU_GPR(r27)(r7)
+	std	r28, VCPU_GPR(r28)(r7)
+	std	r29, VCPU_GPR(r29)(r7)
+	std	r30, VCPU_GPR(r30)(r7)
+	std	r31, VCPU_GPR(r31)(r7)
 
-	/* Save guest PC (R10) */
-	std	r10, VCPU_PC(r12)
-
-	/* Save guest msr (R11) */
-	std	r11, VCPU_SHADOW_MSR(r12)
-
-	/* Save guest CTR (in R12) */
+	/* Save guest CTR */
 	mfctr	r5
-	std	r5, VCPU_CTR(r12)
+	std	r5, VCPU_CTR(r7)
 
 	/* Save guest LR */
 	mflr	r5
-	std	r5, VCPU_LR(r12)
-
-	/* Save guest XER */
-	mfxer	r5
-	std	r5, VCPU_XER(r12)
+	std	r5, VCPU_LR(r7)
 
-	/* Save guest DAR */
-	ld	r5, (PACA_EXMC+EX_DAR)(r13)
-	std	r5, VCPU_FAULT_DEAR(r12)
-
-	/* Save guest DSISR */
-	lwz	r5, (PACA_EXMC+EX_DSISR)(r13)
-	std	r5, VCPU_FAULT_DSISR(r12)
+	/* XXX convert to safe function call */
 
 	/* Restore host msr -> SRR1 */
-	ld	r7, VCPU_HOST_MSR(r12)
-	mtsrr1	r7
+	ld	r6, VCPU_HOST_MSR(r7)
+	mtsrr1	r6
 
 	/* Restore host IP -> SRR0 */
-	ld	r6, VCPU_HOST_RETIP(r12)
-	mtsrr0	r6
+	ld	r5, VCPU_HOST_RETIP(r7)
+	mtsrr0	r5
 
 	/*
 	 * For some interrupts, we need to call the real Linux
@@ -311,9 +241,9 @@ no_dcbz32_off:
 	 * r3 = address of interrupt handler (exit reason)
 	 */
 
-	cmpwi	r3, BOOK3S_INTERRUPT_EXTERNAL
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
 	beq	call_linux_handler
-	cmpwi	r3, BOOK3S_INTERRUPT_DECREMENTER
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
 	beq	call_linux_handler
 
 	/* Back to Interruptable Mode! (goto kvm_return_point) */
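
Only external and decrementer interrupts take the detour through the real Linux handler; everything else falls straight through to kvm_return_point. Note the exit reason now arrives in r12 instead of r3, which frees r3 for the C calling convention. The same dispatch in C (vector numbers as I believe they are defined for Book3S in the kernel's kvm_asm.h):

/* Book3S interrupt vectors used by the dispatch above. */
enum {
	BOOK3S_INTERRUPT_EXTERNAL    = 0x500,
	BOOK3S_INTERRUPT_DECREMENTER = 0x900,
};

/* cmpwi r12, ... / beq call_linux_handler, as a predicate. */
static int needs_linux_handler(unsigned int exit_nr /* r12 */)
{
	return exit_nr == BOOK3S_INTERRUPT_EXTERNAL ||
	       exit_nr == BOOK3S_INTERRUPT_DECREMENTER;
}
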
@@ -334,12 +264,12 @@ call_linux_handler:
 	 * R7 VCPU_HOST_MSR
 	 */
 
-	mtlr	r3
+	mtlr	r12
 
-	ld	r5, VCPU_TRAMPOLINE_LOWMEM(r12)
-	mtsrr0	r5
-	LOAD_REG_IMMEDIATE(r5, MSR_KERNEL & ~(MSR_IR | MSR_DR))
-	mtsrr1	r5
+	ld	r4, VCPU_TRAMPOLINE_LOWMEM(r7)
+	mtsrr0	r4
+	LOAD_REG_IMMEDIATE(r3, MSR_KERNEL & ~(MSR_IR | MSR_DR))
+	mtsrr1	r3
 
 	RFI
 
@@ -350,7 +280,7 @@ kvm_return_point:
 	/* go back into the guest */
 
 	/* Pass the exit number as 3rd argument to kvmppc_handle_exit */
-	mr	r5, r3
+	mr	r5, r12
 
 	/* Restore r3 (kvm_run) and r4 (vcpu) */
 	REST_2GPRS(3, r1)
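
This tail is argument marshalling for the C exit handler: r3 and r4 are re-loaded from the host stack via REST_2GPRS, and the exit number is copied from r12 into r5 so it arrives as the third parameter. The prototype being fed, as declared in the Book3S code of this era:

struct kvm_run;
struct kvm_vcpu;

/* r3 = run, r4 = vcpu, r5 = exit_nr on entry to the C handler. */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr);
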