diff options

 arch/x86/kvm/emulate.c | 12 ++++++------
 arch/x86/kvm/svm.c     | 12 ++++--------
 arch/x86/kvm/vmx/vmx.c |  2 --
 3 files changed, 10 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a6b282853253..f526acee2eed 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -2571,6 +2571,12 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 	if (ret != X86EMUL_CONTINUE)
 		return X86EMUL_UNHANDLEABLE;
 
+	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
+		ctxt->ops->set_nmi_mask(ctxt, false);
+
+	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
+		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
+
 	/*
 	 * Get back to real mode, to prepare a safe state in which to load
 	 * CR0/CR3/CR4/EFER. It's all a bit more complicated if the vCPU
@@ -2624,12 +2630,6 @@ static int em_rsm(struct x86_emulate_ctxt *ctxt)
 		return X86EMUL_UNHANDLEABLE;
 	}
 
-	if ((ctxt->ops->get_hflags(ctxt) & X86EMUL_SMM_INSIDE_NMI_MASK) == 0)
-		ctxt->ops->set_nmi_mask(ctxt, false);
-
-	ctxt->ops->set_hflags(ctxt, ctxt->ops->get_hflags(ctxt) &
-		~(X86EMUL_SMM_INSIDE_NMI_MASK | X86EMUL_SMM_MASK));
-
 	ctxt->ops->post_leave_smm(ctxt);
 
 	return X86EMUL_CONTINUE;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6b1cd73e4053..406b558abfef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -6239,21 +6239,17 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	struct page *page;
 	u64 guest;
 	u64 vmcb;
-	int ret;
 
 	guest = GET_SMSTATE(u64, smstate, 0x7ed8);
 	vmcb = GET_SMSTATE(u64, smstate, 0x7ee0);
 
 	if (guest) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		nested_vmcb = nested_svm_map(svm, vmcb, &page);
-		if (nested_vmcb)
-			enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
-		else
-			ret = 1;
-		vcpu->arch.hflags |= HF_SMM_MASK;
+		if (!nested_vmcb)
+			return 1;
+		enter_svm_guest_mode(svm, vmcb, nested_vmcb, page);
 	}
-	return ret;
+	return 0;
 }
 
 static int enable_smi_window(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 14ea25eadde8..b4e7d645275a 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7409,9 +7409,7 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 	}
 
 	if (vmx->nested.smm.guest_mode) {
-		vcpu->arch.hflags &= ~HF_SMM_MASK;
 		ret = nested_vmx_enter_non_root_mode(vcpu, false);
-		vcpu->arch.hflags |= HF_SMM_MASK;
 		if (ret)
 			return ret;
 