diff options
author | Jan Kiszka <jan.kiszka@siemens.com> | 2014-03-07 14:03:15 -0500 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2014-03-11 03:41:47 -0400 |
commit | c9a7953f09bbe2b66050ebf97e0532eaeefbc9f3 (patch) | |
tree | 8a4387796e3f9946cea1725566e584dff525c3f8 /arch/x86/kvm/svm.c | |
parent | 220c56729766444f3dd823f740a147ca6d82c4c6 (diff) |
KVM: x86: Remove return code from enable_irq/nmi_window
It's no longer possible to enter enable_irq_window in guest mode when
L1 intercepts external interrupts and we are entering L2. This is now
caught in vcpu_enter_guest. So we can remove the check from the VMX
version of enable_irq_window, thus removing the need to return an error code
from both enable_irq_window and enable_nmi_window.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r-- | arch/x86/kvm/svm.c | 8 |
1 file changed, 3 insertions, 5 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 64d9bb9590e3..1e8616e304a7 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -3650,7 +3650,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu) | |||
3650 | return ret; | 3650 | return ret; |
3651 | } | 3651 | } |
3652 | 3652 | ||
3653 | static int enable_irq_window(struct kvm_vcpu *vcpu) | 3653 | static void enable_irq_window(struct kvm_vcpu *vcpu) |
3654 | { | 3654 | { |
3655 | struct vcpu_svm *svm = to_svm(vcpu); | 3655 | struct vcpu_svm *svm = to_svm(vcpu); |
3656 | 3656 | ||
@@ -3664,16 +3664,15 @@ static int enable_irq_window(struct kvm_vcpu *vcpu) | |||
3664 | svm_set_vintr(svm); | 3664 | svm_set_vintr(svm); |
3665 | svm_inject_irq(svm, 0x0); | 3665 | svm_inject_irq(svm, 0x0); |
3666 | } | 3666 | } |
3667 | return 0; | ||
3668 | } | 3667 | } |
3669 | 3668 | ||
3670 | static int enable_nmi_window(struct kvm_vcpu *vcpu) | 3669 | static void enable_nmi_window(struct kvm_vcpu *vcpu) |
3671 | { | 3670 | { |
3672 | struct vcpu_svm *svm = to_svm(vcpu); | 3671 | struct vcpu_svm *svm = to_svm(vcpu); |
3673 | 3672 | ||
3674 | if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) | 3673 | if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK)) |
3675 | == HF_NMI_MASK) | 3674 | == HF_NMI_MASK) |
3676 | return 0; /* IRET will cause a vm exit */ | 3675 | return; /* IRET will cause a vm exit */ |
3677 | 3676 | ||
3678 | /* | 3677 | /* |
3679 | * Something prevents NMI from been injected. Single step over possible | 3678 | * Something prevents NMI from been injected. Single step over possible |
@@ -3682,7 +3681,6 @@ static int enable_nmi_window(struct kvm_vcpu *vcpu) | |||
3682 | svm->nmi_singlestep = true; | 3681 | svm->nmi_singlestep = true; |
3683 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); | 3682 | svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF); |
3684 | update_db_bp_intercept(vcpu); | 3683 | update_db_bp_intercept(vcpu); |
3685 | return 0; | ||
3686 | } | 3684 | } |
3687 | 3685 | ||
3688 | static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) | 3686 | static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr) |