author     Alexander Graf <agraf@suse.de>           2010-01-07 20:58:04 -0500
committer  Marcelo Tosatti <mtosatti@redhat.com>    2010-03-01 10:35:48 -0500
commit     b4433a7cceed59714b0778e1ace624befdd15ded (patch)
tree       745f0477d7368155cddc363fed6e8f3dc28f83cd /arch/powerpc
parent     7e57cba06074da84d7c24d8c3f44040d2d8c88ac (diff)
KVM: PPC: Implement 'skip instruction' mode
To fetch the last instruction we were interrupted on, we enable DR in the early
exit code, where we are still in a very transitional phase between guest and
host state.

Most of the time this seemed to work, but another CPU can easily flush our TLB
and HTAB, which sends us into the Linux page fault handler. That handler breaks
completely, because we are still using the guest's SLB entries.

To work around this, let's introduce a second KVM guest mode: whenever we get a
trap in this mode, we neither call the Linux handler nor go into the KVM exit
code, but simply jump over the faulting instruction.

That way a potentially bad lwz doesn't trigger any faults, and we can later
interpret the invalid instruction we fetched as "fetch didn't work".
Signed-off-by: Alexander Graf <agraf@suse.de>
Signed-off-by: Avi Kivity <avi@redhat.com>
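
The flow the patch sets up can be sketched in plain C. This is only an
illustration: the constants are the ones added to kvm_asm.h, but the sketch_*
helpers are invented names, EMULATE_DONE is given a placeholder value here, and
the "a faulting load gets skipped" behaviour comes from the real-mode trampoline
in the patch, not from C semantics.

/* Constants added by this patch (arch/powerpc/include/asm/kvm_asm.h). */
#define KVM_GUEST_MODE_NONE	0
#define KVM_GUEST_MODE_GUEST	1
#define KVM_GUEST_MODE_SKIP	2
#define KVM_INST_FETCH_FAILED	-1

#define EMULATE_DONE		0	/* placeholder for the real enum value */

/*
 * Trampoline decision (done in real-mode asm in the patch): in SKIP mode a
 * trap neither enters the Linux handler nor the KVM exit path; it only
 * advances SRR0 past the faulting instruction and returns to the guest code.
 */
static void sketch_trampoline(unsigned long *srr0, unsigned char in_guest)
{
	if (in_guest == KVM_GUEST_MODE_NONE)
		return;			/* plain Linux handler takes over */

	if (in_guest == KVM_GUEST_MODE_SKIP) {
		*srr0 += 4;		/* every PPC instruction is 4 bytes */
		return;			/* rfi straight back into the code */
	}

	/* KVM_GUEST_MODE_GUEST: the normal KVM exit path follows. */
}

/*
 * Last-instruction fetch (book3s_64_slb.S): preload the failure marker, arm
 * SKIP mode, try the load, disarm again.  If the load faults, the trampoline
 * above skips it and the marker survives in 'inst'.
 */
static unsigned int sketch_fetch_inst(const unsigned int *guest_ip,
				      unsigned char *in_guest)
{
	unsigned int inst = KVM_INST_FETCH_FAILED;

	*in_guest = KVM_GUEST_MODE_SKIP;
	inst = *guest_ip;		/* the potentially bad lwz */
	*in_guest = KVM_GUEST_MODE_NONE;

	return inst;
}

/* Consumer side (emulate.c): a failed fetch just means "try again". */
static int sketch_emulate(unsigned int inst)
{
	if (inst == KVM_INST_FETCH_FAILED)
		return EMULATE_DONE;	/* re-enter the guest and retry */

	/* ... decode and emulate inst ... */
	return EMULATE_DONE;
}

The important ordering is that r0 already holds KVM_INST_FETCH_FAILED when the
lwz is attempted, so a skipped load leaves exactly that marker for the emulator,
which returns EMULATE_DONE and lets the guest retry once the mapping is back in
place.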
Diffstat (limited to 'arch/powerpc')
-rw-r--r--  arch/powerpc/include/asm/kvm_asm.h       |  6
-rw-r--r--  arch/powerpc/kvm/book3s_64_rmhandlers.S  | 39
-rw-r--r--  arch/powerpc/kvm/book3s_64_slb.S         | 16
-rw-r--r--  arch/powerpc/kvm/emulate.c               |  4
4 files changed, 59 insertions, 6 deletions
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index af2abe74f544..aadf2dd6f84e 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -97,4 +97,10 @@
 #define RESUME_HOST             RESUME_FLAG_HOST
 #define RESUME_HOST_NV          (RESUME_FLAG_HOST|RESUME_FLAG_NV)
 
+#define KVM_GUEST_MODE_NONE	0
+#define KVM_GUEST_MODE_GUEST	1
+#define KVM_GUEST_MODE_SKIP	2
+
+#define KVM_INST_FETCH_FAILED	-1
+
 #endif /* __POWERPC_KVM_ASM_H__ */
diff --git a/arch/powerpc/kvm/book3s_64_rmhandlers.S b/arch/powerpc/kvm/book3s_64_rmhandlers.S
index cd9f0b609e48..9ad1c2645d6f 100644
--- a/arch/powerpc/kvm/book3s_64_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_64_rmhandlers.S
@@ -49,7 +49,7 @@ kvmppc_trampoline_\intno:
 	mfcr	r12
 	stw	r12, PACA_KVM_SCRATCH1(r13)
 	lbz	r12, PACA_KVM_IN_GUEST(r13)
-	cmpwi	r12, 0
+	cmpwi	r12, KVM_GUEST_MODE_NONE
 	bne	..kvmppc_handler_hasmagic_\intno
 	/* No KVM guest? Then jump back to the Linux handler! */
 	lwz	r12, PACA_KVM_SCRATCH1(r13)
@@ -60,6 +60,11 @@ kvmppc_trampoline_\intno:
 
 	/* Now we know we're handling a KVM guest */
 ..kvmppc_handler_hasmagic_\intno:
+
+	/* Should we just skip the faulting instruction? */
+	cmpwi	r12, KVM_GUEST_MODE_SKIP
+	beq	kvmppc_handler_skip_ins
+
 	/* Let's store which interrupt we're handling */
 	li	r12, \intno
 
@@ -86,6 +91,38 @@ INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_ALTIVEC
 INTERRUPT_TRAMPOLINE	BOOK3S_INTERRUPT_VSX
 
 /*
+ * Bring us back to the faulting code, but skip the
+ * faulting instruction.
+ *
+ * This is a generic exit path from the interrupt
+ * trampolines above.
+ *
+ * Input Registers:
+ *
+ * R12               = free
+ * R13               = PACA
+ * PACA.KVM.SCRATCH0 = guest R12
+ * PACA.KVM.SCRATCH1 = guest CR
+ * SPRG_SCRATCH0     = guest R13
+ *
+ */
+kvmppc_handler_skip_ins:
+
+	/* Patch the IP to the next instruction */
+	mfsrr0	r12
+	addi	r12, r12, 4
+	mtsrr0	r12
+
+	/* Clean up all state */
+	lwz	r12, PACA_KVM_SCRATCH1(r13)
+	mtcr	r12
+	ld	r12, PACA_KVM_SCRATCH0(r13)
+	mfspr	r13, SPRN_SPRG_SCRATCH0
+
+	/* And get back into the code */
+	RFI
+
+/*
  * This trampoline brings us back to a real mode handler
  *
  * Input Registers:
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index 7188c11ed7d1..d07b88617b2c 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -212,10 +212,6 @@ kvmppc_handler_trampoline_exit:
 	mfdar	r5
 	mfdsisr	r6
 
-	/* Unset guest state */
-	li	r9, 0
-	stb	r9, PACA_KVM_IN_GUEST(r13)
-
 	/*
 	 * In order for us to easily get the last instruction,
 	 * we got the #vmexit at, we exploit the fact that the
@@ -233,18 +229,28 @@ kvmppc_handler_trampoline_exit:
 
 ld_last_inst:
 	/* Save off the guest instruction we're at */
+
+	/* Set guest mode to 'jump over instruction' so if lwz faults
+	 * we'll just continue at the next IP. */
+	li	r9, KVM_GUEST_MODE_SKIP
+	stb	r9, PACA_KVM_IN_GUEST(r13)
+
 	/* 1) enable paging for data */
 	mfmsr	r9
 	ori	r11, r9, MSR_DR			/* Enable paging for data */
 	mtmsr	r11
 	/* 2) fetch the instruction */
-	/* XXX implement PACA_KVM_IN_GUEST=2 path to safely jump over this */
+	li	r0, KVM_INST_FETCH_FAILED	/* In case lwz faults */
 	lwz	r0, 0(r3)
 	/* 3) disable paging again */
 	mtmsr	r9
 
no_ld_last_inst:
 
+	/* Unset guest mode */
+	li	r9, KVM_GUEST_MODE_NONE
+	stb	r9, PACA_KVM_IN_GUEST(r13)
+
 	/* Restore bolted entries from the shadow and fix it along the way */
 
 	/* We don't store anything in entry 0, so we don't need to take care of it */
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 38219af0cd0e..04e317c1bbee 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -143,6 +143,10 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
 
 	pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));
 
+	/* Try again next time */
+	if (inst == KVM_INST_FETCH_FAILED)
+		return EMULATE_DONE;
+
 	switch (get_op(inst)) {
 	case OP_TRAP:
 #ifdef CONFIG_PPC64