path: root/arch/powerpc/kvm/book3s_segment.S
author	Paul Mackerras <paulus@samba.org>	2011-07-23 03:41:44 -0400
committer	Avi Kivity <avi@redhat.com>	2011-09-25 12:52:29 -0400
commit	02143947603fe90237a0423d34dd8943de229f78 (patch)
tree	afcc617d3dd3378a1820de979ae6f59b16df7003 /arch/powerpc/kvm/book3s_segment.S
parent	177339d7f7c99a25ecfdb6baeea6a2508fb2349f (diff)
KVM: PPC: book3s_pr: Simplify transitions between virtual and real mode
This simplifies the way that the book3s_pr makes the transition to real mode when entering the guest. We now call kvmppc_entry_trampoline (renamed from kvmppc_rmcall) in the base kernel using a normal function call instead of doing an indirect call through a pointer in the vcpu. If kvm is a module, the module loader takes care of generating a trampoline as it does for other calls to functions outside the module. kvmppc_entry_trampoline then disables interrupts and jumps to kvmppc_handler_trampoline_enter in real mode using an rfi[d]. That then uses the link register as the address to return to (potentially in module space) when the guest exits.

This also simplifies the way that we call the Linux interrupt handler when we exit the guest due to an external, decrementer or performance monitor interrupt. Instead of turning on the MMU, then deciding that we need to call the Linux handler and turning the MMU back off again, we now go straight to the handler at the point where we would turn the MMU on. The handler will then return to the virtual-mode code (potentially in the module).

Along the way, this moves the setting and clearing of the HID5 DCBZ32 bit into real-mode interrupts-off code, and also makes sure that we clear the MSR[RI] bit before loading values into SRR0/1.

The net result is that we no longer need any code addresses to be stored in vcpu->arch.

Signed-off-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Alexander Graf <agraf@suse.de>
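The call sequence described above can be sketched in C. This is only a model, not kernel code: the real kvmppc_entry_trampoline and kvmppc_handler_trampoline_enter are PowerPC assembly, and the stub bodies below are invented for illustration.

    #include <stdio.h>

    /* Illustrative stubs; in reality these steps are assembly. */
    static void disable_interrupts(void) { printf("interrupts off\n"); }
    static void run_guest(void)          { printf("guest runs\n"); }

    /*
     * Real-mode entry, reached via rfi[d] in the real code. When the
     * guest exits, control goes back to the address that was in LR,
     * which may be in module space.
     */
    static void kvmppc_handler_trampoline_enter(void)
    {
        run_guest();
    }

    /*
     * Called from the base kernel as a normal function; if KVM is a
     * module, the module loader generates the linkage trampoline, so
     * no code address needs to live in vcpu->arch.
     */
    void kvmppc_entry_trampoline(void)
    {
        disable_interrupts();
        kvmppc_handler_trampoline_enter();  /* rfi[d] in reality */
    }

    int main(void)
    {
        kvmppc_entry_trampoline();
        return 0;
    }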
Diffstat (limited to 'arch/powerpc/kvm/book3s_segment.S')
-rw-r--r--	arch/powerpc/kvm/book3s_segment.S	112
1 file changed, 92 insertions(+), 20 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 678b6be31693..0676ae249b9f 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -23,6 +23,7 @@
 
 #define GET_SHADOW_VCPU(reg)	\
 	mr	reg, r13
+#define MTMSR_EERI(reg)	mtmsrd	(reg),1
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -30,6 +31,7 @@
 	tophys(reg, r2);	\
 	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
 	tophys(reg, reg)
+#define MTMSR_EERI(reg)	mtmsr	(reg)
 
 #endif
 
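A note on the two MTMSR_EERI variants just added: on 64-bit Book3S, mtmsrd with L=1 updates only MSR[EE] and MSR[RI] and leaves every other MSR bit (notably IR/DR) alone, which is exactly what the real-mode code needs; 32-bit has no L=1 form, so a plain mtmsr is used. A minimal C model of the L=1 behaviour, assuming the standard MSR_EE/MSR_RI bit positions (the helper name is made up):

    #include <stdint.h>

    #define MSR_EE (1UL << 15)  /* external interrupt enable */
    #define MSR_RI (1UL << 1)   /* recoverable interrupt */

    /* mtmsrd reg,1: take only EE and RI from the new value,
     * preserve all other MSR bits (IR, DR, ...). */
    static inline uint64_t mtmsrd_l1(uint64_t cur_msr, uint64_t new_msr)
    {
        uint64_t mask = MSR_EE | MSR_RI;
        return (cur_msr & ~mask) | (new_msr & mask);
    }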
@@ -57,10 +59,12 @@ kvmppc_handler_trampoline_enter:
 	/* Required state:
 	 *
 	 * MSR = ~IR|DR
-	 * R13 = PACA
 	 * R1 = host R1
 	 * R2 = host R2
-	 * R10 = guest MSR
+	 * R4 = guest shadow MSR
+	 * R5 = normal host MSR
+	 * R6 = current host MSR (EE, IR, DR off)
+	 * LR = highmem guest exit code
 	 * all other volatile GPRS = free
 	 * SVCPU[CR] = guest CR
 	 * SVCPU[XER] = guest XER
@@ -71,15 +75,15 @@ kvmppc_handler_trampoline_enter:
 	/* r3 = shadow vcpu */
 	GET_SHADOW_VCPU(r3)
 
+	/* Save guest exit handler address and MSR */
+	mflr	r0
+	PPC_STL	r0, HSTATE_VMHANDLER(r3)
+	PPC_STL	r5, HSTATE_HOST_MSR(r3)
+
 	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) */
 	PPC_STL	r1, HSTATE_HOST_R1(r3)
 	PPC_STL	r2, HSTATE_HOST_R2(r3)
 
-	/* Move SRR0 and SRR1 into the respective regs */
-	PPC_LL	r9, SVCPU_PC(r3)
-	mtsrr0	r9
-	mtsrr1	r10
-
 	/* Activate guest mode, so faults get handled by KVM */
 	li	r11, KVM_GUEST_MODE_GUEST
 	stb	r11, HSTATE_IN_GUEST(r3)
@@ -87,17 +91,46 @@ kvmppc_handler_trampoline_enter:
 	/* Switch to guest segment. This is subarch specific. */
 	LOAD_GUEST_SEGMENTS
 
+#ifdef CONFIG_PPC_BOOK3S_64
+	/* Some guests may need to have dcbz set to 32 byte length.
+	 *
+	 * Usually we ensure that by patching the guest's instructions
+	 * to trap on dcbz and emulate it in the hypervisor.
+	 *
+	 * If we can, we should tell the CPU to use 32 byte dcbz though,
+	 * because that's a lot faster.
+	 */
+	lbz	r0, HSTATE_RESTORE_HID5(r3)
+	cmpwi	r0, 0
+	beq	no_dcbz32_on
+
+	mfspr	r0,SPRN_HID5
+	ori	r0, r0, 0x80		/* XXX HID5_dcbz32 = 0x80 */
+	mtspr	SPRN_HID5,r0
+no_dcbz32_on:
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
 	/* Enter guest */
 
-	PPC_LL	r4, SVCPU_CTR(r3)
-	PPC_LL	r5, SVCPU_LR(r3)
-	lwz	r6, SVCPU_CR(r3)
-	lwz	r7, SVCPU_XER(r3)
+	PPC_LL	r8, SVCPU_CTR(r3)
+	PPC_LL	r9, SVCPU_LR(r3)
+	lwz	r10, SVCPU_CR(r3)
+	lwz	r11, SVCPU_XER(r3)
+
+	mtctr	r8
+	mtlr	r9
+	mtcr	r10
+	mtxer	r11
 
-	mtctr	r4
-	mtlr	r5
-	mtcr	r6
-	mtxer	r7
+	/* Move SRR0 and SRR1 into the respective regs */
+	PPC_LL	r9, SVCPU_PC(r3)
+	/* First clear RI in our current MSR value */
+	li	r0, MSR_RI
+	andc	r6, r6, r0
+	MTMSR_EERI(r6)
+	mtsrr0	r9
+	mtsrr1	r4
 
 	PPC_LL	r0, SVCPU_R0(r3)
 	PPC_LL	r1, SVCPU_R1(r3)
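The li/andc/MTMSR_EERI triple in this hunk is the MSR[RI] clearing the commit message mentions: once SRR0/SRR1 hold guest values, any interrupt taken before the rfi[d] would clobber them, so the window is first marked non-recoverable. In C terms (MSR_RI as in the earlier sketch; the helper is illustrative):

    #include <stdint.h>

    #define MSR_RI (1UL << 1)

    /* andc r6, r6, r0 with r0 = MSR_RI: clear RI so that SRR0/SRR1
     * can be loaded safely before the rfi[d]. */
    static inline uint64_t clear_ri(uint64_t msr)
    {
        return msr & ~MSR_RI;
    }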
@@ -259,6 +292,43 @@ no_ld_last_inst:
 	/* Switch back to host MMU */
 	LOAD_HOST_SEGMENTS
 
+#ifdef CONFIG_PPC_BOOK3S_64
+
+	lbz	r5, HSTATE_RESTORE_HID5(r13)
+	cmpwi	r5, 0
+	beq	no_dcbz32_off
+
+	li	r4, 0
+	mfspr	r5,SPRN_HID5
+	rldimi	r5,r4,6,56
+	mtspr	SPRN_HID5,r5
+
+no_dcbz32_off:
+
+#endif /* CONFIG_PPC_BOOK3S_64 */
+
+	/*
+	 * For some interrupts, we need to call the real Linux
+	 * handler, so it can do work for us. This has to happen
+	 * as if the interrupt arrived from the kernel though,
+	 * so let's fake it here where most state is restored.
+	 *
+	 * Having set up SRR0/1 with the address where we want
+	 * to continue with relocation on (potentially in module
+	 * space), we either just go straight there with rfi[d],
+	 * or we jump to an interrupt handler with bctr if there
+	 * is an interrupt to be handled first.  In the latter
+	 * case, the rfi[d] at the end of the interrupt handler
+	 * will get us back to where we want to continue.
+	 */
+
+	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
+	beq	1f
+	cmpwi	r12, BOOK3S_INTERRUPT_DECREMENTER
+	beq	1f
+	cmpwi	r12, BOOK3S_INTERRUPT_PERFMON
+1:	mtctr	r12
+
 	/* Register usage at this point:
 	 *
 	 * R1 = host R1
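On the dcbz32-off path above, `rldimi r5,r4,6,56` with r4 = 0 inserts zeroes under a mask running from IBM bit 56 through bit 63-6 = 57, i.e. 0xC0, clearing HID5_dcbz32 (0x80) together with its neighbour bit. If that reading of the rotate-and-insert semantics is right, the C equivalent is simply:

    #include <stdint.h>

    /* rldimi r5,r4,6,56 with r4 == 0: zero HID5 bits 0xC0,
     * which includes the dcbz32 enable bit (0x80). */
    static inline uint64_t clear_dcbz32(uint64_t hid5)
    {
        return hid5 & ~0xC0UL;
    }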
@@ -269,13 +339,15 @@ no_ld_last_inst:
 	 *
 	 */
 
-	/* RFI into the highmem handler */
-	mfmsr	r7
-	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME	/* Enable paging */
-	mtsrr1	r7
-	/* Load highmem handler address */
+	PPC_LL	r6, HSTATE_HOST_MSR(r13)
 	PPC_LL	r8, HSTATE_VMHANDLER(r13)
+
+	/* Restore host msr -> SRR1 */
+	mtsrr1	r6
+	/* Load highmem handler address */
 	mtsrr0	r8
 
+	/* RFI into the highmem handler, or jump to interrupt handler */
+	beqctr
 	RFI
 kvmppc_handler_trampoline_exit_end:
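The exit-path dispatch deserves a word. The cmpwi/beq chain in the previous hunk leaves CR0[EQ] set exactly when the exit reason is an external, decrementer or performance monitor interrupt, and `mtctr r12` puts the exit-reason value into CTR; since the Book3S exit-reason constants equal the exception vector offsets, beqctr (branch to CTR if EQ) lands directly on the Linux first-level handler, while every other exit falls through to the RFI into the highmem handler. A C model of the predicate, assuming the usual Book3S vector values (the function itself is an illustration, not kernel code):

    #include <stdint.h>

    #define BOOK3S_INTERRUPT_EXTERNAL    0x500
    #define BOOK3S_INTERRUPT_DECREMENTER 0x900
    #define BOOK3S_INTERRUPT_PERFMON     0xf00

    /* True when the beqctr above would branch to the Linux handler
     * instead of falling through to the RFI. */
    static int needs_linux_handler(uint32_t exit_reason)
    {
        return exit_reason == BOOK3S_INTERRUPT_EXTERNAL ||
               exit_reason == BOOK3S_INTERRUPT_DECREMENTER ||
               exit_reason == BOOK3S_INTERRUPT_PERFMON;
    }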