diff options
author | Mohammed Gamal <m.gamal005@gmail.com> | 2009-09-01 06:48:18 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-12-03 02:32:09 -0500 |
commit | 80ced186d1761d2a66163d9eeb468ddb1f7e0697 (patch) | |
tree | 6dd263059ef5a8f5ea82da1ea0a6880f941f7915 /arch/x86/kvm | |
parent | abcf14b560a4ba62c659e6f5aafc8f9934d8c130 (diff) |
KVM: VMX: Enhance invalid guest state emulation
- Change handle_invalid_guest_state() to return relevant exit codes
- Move triggering the emulation from vmx_vcpu_run() to vmx_handle_exit()
- Return to userspace instead of repeatedly trying to emulate instructions that have already failed
Signed-off-by: Mohammed Gamal <m.gamal005@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r-- | arch/x86/kvm/vmx.c | 44 |
1 files changed, 20 insertions, 24 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 4635298d000..73cb5dd960c 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -107,7 +107,6 @@ struct vcpu_vmx { | |||
107 | } rmode; | 107 | } rmode; |
108 | int vpid; | 108 | int vpid; |
109 | bool emulation_required; | 109 | bool emulation_required; |
110 | enum emulation_result invalid_state_emulation_result; | ||
111 | 110 | ||
112 | /* Support for vnmi-less CPUs */ | 111 | /* Support for vnmi-less CPUs */ |
113 | int soft_vnmi_blocked; | 112 | int soft_vnmi_blocked; |
@@ -3322,35 +3321,37 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu) | |||
3322 | return 1; | 3321 | return 1; |
3323 | } | 3322 | } |
3324 | 3323 | ||
3325 | static void handle_invalid_guest_state(struct kvm_vcpu *vcpu) | 3324 | static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) |
3326 | { | 3325 | { |
3327 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 3326 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
3328 | enum emulation_result err = EMULATE_DONE; | 3327 | enum emulation_result err = EMULATE_DONE; |
3329 | 3328 | int ret = 1; | |
3330 | local_irq_enable(); | ||
3331 | preempt_enable(); | ||
3332 | 3329 | ||
3333 | while (!guest_state_valid(vcpu)) { | 3330 | while (!guest_state_valid(vcpu)) { |
3334 | err = emulate_instruction(vcpu, 0, 0, 0); | 3331 | err = emulate_instruction(vcpu, 0, 0, 0); |
3335 | 3332 | ||
3336 | if (err == EMULATE_DO_MMIO) | 3333 | if (err == EMULATE_DO_MMIO) { |
3337 | break; | 3334 | ret = 0; |
3335 | goto out; | ||
3336 | } | ||
3338 | 3337 | ||
3339 | if (err != EMULATE_DONE) { | 3338 | if (err != EMULATE_DONE) { |
3340 | kvm_report_emulation_failure(vcpu, "emulation failure"); | 3339 | kvm_report_emulation_failure(vcpu, "emulation failure"); |
3341 | break; | 3340 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; |
3341 | vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; | ||
3342 | ret = 0; | ||
3343 | goto out; | ||
3342 | } | 3344 | } |
3343 | 3345 | ||
3344 | if (signal_pending(current)) | 3346 | if (signal_pending(current)) |
3345 | break; | 3347 | goto out; |
3346 | if (need_resched()) | 3348 | if (need_resched()) |
3347 | schedule(); | 3349 | schedule(); |
3348 | } | 3350 | } |
3349 | 3351 | ||
3350 | preempt_disable(); | 3352 | vmx->emulation_required = 0; |
3351 | local_irq_disable(); | 3353 | out: |
3352 | 3354 | return ret; | |
3353 | vmx->invalid_state_emulation_result = err; | ||
3354 | } | 3355 | } |
3355 | 3356 | ||
3356 | /* | 3357 | /* |
@@ -3406,13 +3407,9 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu) | |||
3406 | 3407 | ||
3407 | trace_kvm_exit(exit_reason, kvm_rip_read(vcpu)); | 3408 | trace_kvm_exit(exit_reason, kvm_rip_read(vcpu)); |
3408 | 3409 | ||
3409 | /* If we need to emulate an MMIO from handle_invalid_guest_state | 3410 | /* If guest state is invalid, start emulating */ |
3410 | * we just return 0 */ | 3411 | if (vmx->emulation_required && emulate_invalid_guest_state) |
3411 | if (vmx->emulation_required && emulate_invalid_guest_state) { | 3412 | return handle_invalid_guest_state(vcpu); |
3412 | if (guest_state_valid(vcpu)) | ||
3413 | vmx->emulation_required = 0; | ||
3414 | return vmx->invalid_state_emulation_result != EMULATE_DO_MMIO; | ||
3415 | } | ||
3416 | 3413 | ||
3417 | /* Access CR3 don't cause VMExit in paging mode, so we need | 3414 | /* Access CR3 don't cause VMExit in paging mode, so we need |
3418 | * to sync with guest real CR3. */ | 3415 | * to sync with guest real CR3. */ |
@@ -3607,11 +3604,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu) | |||
3607 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) | 3604 | if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked)) |
3608 | vmx->entry_time = ktime_get(); | 3605 | vmx->entry_time = ktime_get(); |
3609 | 3606 | ||
3610 | /* Handle invalid guest state instead of entering VMX */ | 3607 | /* Don't enter VMX if guest state is invalid, let the exit handler |
3611 | if (vmx->emulation_required && emulate_invalid_guest_state) { | 3608 | start emulation until we arrive back to a valid state */ |
3612 | handle_invalid_guest_state(vcpu); | 3609 | if (vmx->emulation_required && emulate_invalid_guest_state) |
3613 | return; | 3610 | return; |
3614 | } | ||
3615 | 3611 | ||
3616 | if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) | 3612 | if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty)) |
3617 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); | 3613 | vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]); |