author    Joerg Roedel <joerg.roedel@amd.com>  2010-09-02 11:29:46 -0400
committer Avi Kivity <avi@redhat.com>          2010-10-24 04:52:24 -0400
commit    cda0008299a06f0d7218c6037c3c02d7a865e954 (patch)
tree      43ce57af80bf3b963f04a057864ce766dbf263a6 /arch/x86
parent    f87f928882d080eaec8b0d76aecff003d664697d (diff)
KVM: SVM: Restore correct registers after sel_cr0 intercept emulation
This patch implements restoring of the correct rip, rsp, and rax after the
svm emulation in KVM injected a selective_cr0 write intercept into the guest
hypervisor. The problem was that the vmexit is emulated during instruction
emulation, which commits the registers right after the write-cr0 instruction.
So the l1 guest would continue to run with the l2 rip, rsp and rax, resulting
in unpredictable behavior.

This patch is not the final word; it is just an easy patch to fix the issue.
The real fix will be done when the instruction emulator is made aware of
nested virtualization. Until this is done, this patch fixes the issue and
provides an easy way to fix this in -stable too.

Cc: stable@kernel.org
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
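To make the mechanism concrete, below is a minimal stand-alone C sketch of the
save-then-restore pattern the patch introduces. It is not the kernel code: the
vmexit_rip/vmexit_rsp/vmexit_rax names mirror the diff below, while the regs
and nested_state structs and the two helpers are simplified stand-ins for
illustration only.

#include <stdio.h>

struct regs { unsigned long rip, rsp, rax; };

struct nested_state {
	unsigned long vmexit_rip;
	unsigned long vmexit_rsp;
	unsigned long vmexit_rax;
};

/* Snapshot the l1 registers when the selective cr0 write becomes a nested vmexit. */
static void snapshot_l1_regs(struct nested_state *n, const struct regs *r)
{
	n->vmexit_rip = r->rip;
	n->vmexit_rsp = r->rsp;
	n->vmexit_rax = r->rax;
}

/* After the emulator has committed its (l2) registers, put the l1 state back. */
static void restore_l1_regs(struct nested_state *n, struct regs *r)
{
	if (!n->vmexit_rip)
		return;
	r->rip = n->vmexit_rip;
	r->rsp = n->vmexit_rsp;
	r->rax = n->vmexit_rax;
	n->vmexit_rip = 0;		/* one-shot: clear the saved state */
}

int main(void)
{
	struct nested_state n = { 0 };
	struct regs l1 = { .rip = 0x1000, .rsp = 0x2000, .rax = 1 };

	snapshot_l1_regs(&n, &l1);	/* vmexit injected during emulation */
	l1.rip = 0xdead;		/* emulator commits the l2 values ... */
	restore_l1_regs(&n, &l1);	/* ... which the fix undoes for l1 */

	printf("rip restored to %#lx\n", l1.rip);
	return 0;
}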
Diffstat (limited to 'arch/x86')
 -rw-r--r--  arch/x86/kvm/svm.c  |  33
 1 file changed, 31 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a1a83b955ed7..07655345f50b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -88,6 +88,14 @@ struct nested_state {
 	/* A VMEXIT is required but not yet emulated */
 	bool exit_required;
 
+	/*
+	 * If we vmexit during an instruction emulation we need this to restore
+	 * the l1 guest rip after the emulation
+	 */
+	unsigned long vmexit_rip;
+	unsigned long vmexit_rsp;
+	unsigned long vmexit_rax;
+
 	/* cache for intercepts of the guest */
 	u16 intercept_cr_read;
 	u16 intercept_cr_write;
@@ -1213,8 +1221,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		if (old == new) {
 			/* cr0 write with ts and mp unchanged */
 			svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
-			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
+				svm->nested.vmexit_rip = kvm_rip_read(vcpu);
+				svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+				svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
 				return;
+			}
 		}
 	}
 
@@ -2430,6 +2442,23 @@ static int emulate_on_interception(struct vcpu_svm *svm)
 	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
 }
 
+static int cr0_write_interception(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	int r;
+
+	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+
+	if (svm->nested.vmexit_rip) {
+		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
+		kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
+		kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
+		svm->nested.vmexit_rip = 0;
+	}
+
+	return r == EMULATE_DONE;
+}
+
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
@@ -2692,7 +2721,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
 	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
-	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
 	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
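The last hunk only swaps one entry in svm_exit_handlers, an array of handler
function pointers indexed by the hardware exit code. A minimal sketch of that
dispatch pattern follows; the exit codes, vcpu struct, and handler names here
are made up purely for illustration and are not the KVM ones.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical exit codes standing in for the SVM_EXIT_* constants. */
enum { EXIT_WRITE_CR0, EXIT_WRITE_CR8, EXIT_MAX };

struct vcpu { unsigned long cr0, cr8; };

static int generic_emulate(struct vcpu *v)   { (void)v; return 1; }
static int cr0_write_handler(struct vcpu *v) { v->cr0 |= 1; return 1; }

/* One handler per exit code; a specialised handler simply replaces the generic one. */
static int (*exit_handlers[EXIT_MAX])(struct vcpu *) = {
	[EXIT_WRITE_CR0] = cr0_write_handler,
	[EXIT_WRITE_CR8] = generic_emulate,
};

static bool handle_exit(struct vcpu *v, unsigned int exit_code)
{
	if (exit_code >= EXIT_MAX || !exit_handlers[exit_code])
		return false;
	return exit_handlers[exit_code](v) == 1;
}

int main(void)
{
	struct vcpu v = { 0 };
	printf("handled=%d cr0=%lu\n", handle_exit(&v, EXIT_WRITE_CR0), v.cr0);
	return 0;
}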