aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/svm.c
diff options
context:
space:
mode:
authorJoerg Roedel <joerg.roedel@amd.com>2009-10-09 10:08:26 -0400
committerAvi Kivity <avi@redhat.com>2009-12-03 02:32:15 -0500
commitcd3ff653ae0b45bac7a19208e9c75034fcacc85f (patch)
tree636bee346e23154216babd9d588ff7715ac11859 /arch/x86/kvm/svm.c
parent8d23c4662427507f432c96ac4fa3b76f0a8360cd (diff)
KVM: SVM: Move INTR vmexit out of atomic code
The nested SVM code emulates a #vmexit caused by a request to open the irq window right in the request function. This is a bug because the request function runs with preemption and interrupts disabled but the #vmexit emulation might sleep. This can cause a schedule()-while-atomic bug and is fixed with this patch. Signed-off-by: Joerg Roedel <joerg.roedel@amd.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--arch/x86/kvm/svm.c26
1 file changed, 25 insertions, 1 deletion
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e37285446cb7..884bffc70c7f 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -85,6 +85,9 @@ struct nested_state {
85 /* gpa pointers to the real vectors */ 85 /* gpa pointers to the real vectors */
86 u64 vmcb_msrpm; 86 u64 vmcb_msrpm;
87 87
88 /* A VMEXIT is required but not yet emulated */
89 bool exit_required;
90
88 /* cache for intercepts of the guest */ 91 /* cache for intercepts of the guest */
89 u16 intercept_cr_read; 92 u16 intercept_cr_read;
90 u16 intercept_cr_write; 93 u16 intercept_cr_write;
@@ -1379,7 +1382,14 @@ static inline int nested_svm_intr(struct vcpu_svm *svm)
1379 1382
1380 svm->vmcb->control.exit_code = SVM_EXIT_INTR; 1383 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
1381 1384
1382 if (nested_svm_exit_handled(svm)) { 1385 if (svm->nested.intercept & 1ULL) {
1386 /*
1387 * The #vmexit can't be emulated here directly because this
1388	 * code path runs with irqs and preemption disabled. A
1389 * #vmexit emulation might sleep. Only signal request for
1390 * the #vmexit here.
1391 */
1392 svm->nested.exit_required = true;
1383 nsvm_printk("VMexit -> INTR\n"); 1393 nsvm_printk("VMexit -> INTR\n");
1384 return 1; 1394 return 1;
1385 } 1395 }
@@ -2340,6 +2350,13 @@ static int handle_exit(struct kvm_vcpu *vcpu)
2340 2350
2341 trace_kvm_exit(exit_code, svm->vmcb->save.rip); 2351 trace_kvm_exit(exit_code, svm->vmcb->save.rip);
2342 2352
2353 if (unlikely(svm->nested.exit_required)) {
2354 nested_svm_vmexit(svm);
2355 svm->nested.exit_required = false;
2356
2357 return 1;
2358 }
2359
2343 if (is_nested(svm)) { 2360 if (is_nested(svm)) {
2344 int vmexit; 2361 int vmexit;
2345 2362
@@ -2615,6 +2632,13 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
2615 u16 gs_selector; 2632 u16 gs_selector;
2616 u16 ldt_selector; 2633 u16 ldt_selector;
2617 2634
2635 /*
2636 * A vmexit emulation is required before the vcpu can be executed
2637 * again.
2638 */
2639 if (unlikely(svm->nested.exit_required))
2640 return;
2641
2618 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX]; 2642 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
2619 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP]; 2643 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
2620 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP]; 2644 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];