Diffstat (limited to 'arch/powerpc/kvm/book3s_hv_rmhandlers.S')
-rw-r--r--   arch/powerpc/kvm/book3s_hv_rmhandlers.S   806
1 file changed, 806 insertions, 0 deletions
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
new file mode 100644
index 000000000000..9af264840b98
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -0,0 +1,806 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

#define SHADOW_VCPU_OFF		PACA_KVM_SVCPU

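/*
 * These stubs return from an interrupt one instruction past where it
 * occurred: r13 was saved in SPRN_SCRATCH0 by the first-level handler,
 * so advance (H)SRR0 by 4, restore r13, and rfid/hrfid back.
 */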
.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

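/* Offset of guest GPR n within the vcpu struct (VCPU_GPRS comes from asm-offsets) */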
#define ULONG_SIZE		8
#define VCPU_GPR(n)		(VCPU_GPRS + (n * ULONG_SIZE))

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

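	/* Restore the guest's non-volatile GPRs */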
	ld	r14, VCPU_GPR(r14)(r4)
	ld	r15, VCPU_GPR(r15)(r4)
	ld	r16, VCPU_GPR(r16)(r4)
	ld	r17, VCPU_GPR(r17)(r4)
	ld	r18, VCPU_GPR(r18)(r4)
	ld	r19, VCPU_GPR(r19)(r4)
	ld	r20, VCPU_GPR(r20)(r4)
	ld	r21, VCPU_GPR(r21)(r4)
	ld	r22, VCPU_GPR(r22)(r4)
	ld	r23, VCPU_GPR(r23)(r4)
	ld	r24, VCPU_GPR(r24)(r4)
	ld	r25, VCPU_GPR(r25)(r4)
	ld	r26, VCPU_GPR(r26)(r4)
	ld	r27, VCPU_GPR(r27)(r4)
	ld	r28, VCPU_GPR(r28)(r4)
	ld	r29, VCPU_GPR(r29)(r4)
	ld	r30, VCPU_GPR(r30)(r4)
	ld	r31, VCPU_GPR(r31)(r4)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Set partition DABR */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/* Switch to guest partition. */
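	/*
	 * We change SDR1 while running under the reserved LPID so that
	 * the guest's real LPID is never paired with the old partition
	 * page table while the switch is in progress.
	 */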
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	ld	r8,VCPU_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
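	/* (fewer than 10 timebase ticks left: don't bother entering the guest) */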
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/*
	 * Invalidate the TLB if we could possibly have stale TLB
	 * entries for this partition on this core due to the use
	 * of tlbiel.
	 */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r5,VCPU_VCPUID(r4)
	lhz	r6,PACAPACAINDEX(r13)
	lhz	r8,VCPU_LAST_CPU(r4)
	sldi	r7,r6,1			/* see if this is the same vcpu */
	add	r7,r7,r9		/* as last ran on this pcpu */
	lhz	r0,KVM_LAST_VCPU(r7)
	cmpw	r6,r8			/* on the same cpu core as last time? */
	bne	3f
	cmpw	r0,r5			/* same vcpu as this core last ran? */
	beq	1f
3:	sth	r6,VCPU_LAST_CPU(r4)	/* if not, invalidate partition TLB */
	sth	r5,KVM_LAST_VCPU(r7)
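	/* flush all 128 TLB sets, one tlbiel per congruence class */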
	li	r6,128
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
2:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	2b
	ptesync
1:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

	/* Move SRR0 and SRR1 into the respective regs */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	ld	r10, VCPU_PC(r4)

	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
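	/* the rldicl/rotldi pair clears only the HV bit; force ME back on */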

fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(r0)(r4)
	ld	r1, VCPU_GPR(r1)(r4)
	ld	r2, VCPU_GPR(r2)(r4)
	ld	r3, VCPU_GPR(r3)(r4)
	ld	r5, VCPU_GPR(r5)(r4)
	ld	r6, VCPU_GPR(r6)(r4)
	ld	r7, VCPU_GPR(r7)(r4)
	ld	r8, VCPU_GPR(r8)(r4)
	ld	r9, VCPU_GPR(r9)(r4)
	ld	r10, VCPU_GPR(r10)(r4)
	ld	r11, VCPU_GPR(r11)(r4)
	ld	r12, VCPU_GPR(r12)(r4)
	ld	r13, VCPU_GPR(r13)(r4)

	ld	r4, VCPU_GPR(r4)(r4)

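	/* hrfid loads PC/MSR from HSRR0/HSRR1, dropping us into the guest */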
	hrfid
	b	.

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(r0)(r9)
	std	r1, VCPU_GPR(r1)(r9)
	std	r2, VCPU_GPR(r2)(r9)
	std	r3, VCPU_GPR(r3)(r9)
	std	r4, VCPU_GPR(r4)(r9)
	std	r5, VCPU_GPR(r5)(r9)
	std	r6, VCPU_GPR(r6)(r9)
	std	r7, VCPU_GPR(r7)(r9)
	std	r8, VCPU_GPR(r8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(r9)(r9)
	std	r10, VCPU_GPR(r10)(r9)
	std	r11, VCPU_GPR(r11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(r12)(r9)
	stw	r4, VCPU_CR(r9)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
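	/*
	 * Interrupt vectors that use HSRR0/1 are flagged by bit 1 of
	 * the vector number in r12; clear it again once they are read.
	 */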
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(r13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* See if this is a leftover HDEC interrupt */
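	/* (HDEC non-negative again means the pending interrupt is stale) */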
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:

	/* Check for mediated interrupts (could be done earlier really ...) */
	cmpwi	r12,BOOK3S_INTERRUPT_EXTERNAL
	bne+	1f
	ld	r5,VCPU_LPCR(r9)
	andi.	r0,r11,MSR_EE
	beq	1f
	andi.	r0,r5,LPCR_MER
	bne	bounce_ext_interrupt
1:

	/* Save DEC */
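	/* convert DEC to an absolute expiry time in timebase ticks */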
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,-1
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_LAST_INST(r9)

	/* Save more register state */
	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8

	stw	r5, VCPU_XER(r9)
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	std	r8, VCPU_CTR(r9)
	/* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
7:	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:
	/* Switch back to host partition */
	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8
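	/* (HDEC is now about 2^31 ticks away and won't fire again soon) */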

	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Save guest DSCR and switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(r14)(r9)
	std	r15, VCPU_GPR(r15)(r9)
	std	r16, VCPU_GPR(r16)(r9)
	std	r17, VCPU_GPR(r17)(r9)
	std	r18, VCPU_GPR(r18)(r9)
	std	r19, VCPU_GPR(r19)(r9)
	std	r20, VCPU_GPR(r20)(r9)
	std	r21, VCPU_GPR(r21)(r9)
	std	r22, VCPU_GPR(r22)(r9)
	std	r23, VCPU_GPR(r23)(r9)
	std	r24, VCPU_GPR(r24)(r9)
	std	r25, VCPU_GPR(r25)(r9)
	std	r26, VCPU_GPR(r26)(r9)
	std	r27, VCPU_GPR(r27)(r9)
	std	r28, VCPU_GPR(r28)(r9)
	std	r29, VCPU_GPR(r29)(r9)
	std	r30, VCPU_GPR(r30)(r9)
	std	r31, VCPU_GPR(r31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* Save PMU registers */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	mfspr	r5, SPRN_MMCR1
	mfspr	r6, SPRN_MMCRA
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
22:
	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to the interrupt vector address
	 * which we have in r12.  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	mtctr	r12
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqctr
	RFI

11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

6:	mfspr	r6,SPRN_HDAR
	mfspr	r7,SPRN_HDSISR
	b	7b

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	LOAD_REG_IMMEDIATE(r11,MSR_SF | MSR_ME);
	b	fast_guest_return

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
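	/* FP (plus VMX/VSX where available) is now enabled in the MSR */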
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	stxvd2x	reg,r6,r3
reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r9
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	lxvd2x	reg,r7,r4
reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr