author		Ladi Prosek <lprosek@redhat.com>	2017-10-11 10:54:45 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2017-10-12 08:01:56 -0400
commit		05cade71cf3b925042569c3e8dc1fa68a2b26995 (patch)
tree		d306a16402886eb9b2d813087dc57baf40f9d14e
parent		c26340651b75d649bea585eba45e32b871188e6e (diff)
KVM: nSVM: fix SMI injection in guest mode
Entering SMM while running in guest mode wasn't working very well because
several pieces of the vcpu state were left set up for nested operation.
Some of the issues observed:

* L1 was getting unexpected VM exits (using L1 interception controls but
  running in SMM execution environment)
* MMU was confused (walk_mmu was still set to nested_mmu)
* INTERCEPT_SMI was not emulated for L1 (KVM never injected SVM_EXIT_SMI)

Intel SDM actually prescribes the logical processor to "leave VMX
operation" upon entering SMM in 34.14.1 Default Treatment of SMI Delivery.
AMD doesn't seem to document this but they provide fields in the SMM
state-save area to stash the current state of SVM. What we need to do is
basically get out of guest mode for the duration of SMM. All of this is
completely transparent to L1, i.e. L1 is not given control and no L1
observable state changes.

To avoid code duplication this commit takes advantage of the existing
nested vmexit and run functionality, perhaps at the cost of efficiency. To
get out of guest mode, nested_svm_vmexit is called, unchanged. Re-entering
is performed using enter_svm_guest_mode.

This commit fixes running Windows Server 2016 with Hyper-V enabled in a VM
with OVMF firmware (OVMF_CODE-need-smm.fd).

Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
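The patch stashes the nested state in two SMM state-save fields at the
SMBASE-relative offsets FED8h and FEE0h. As a minimal illustrative sketch
of the layout it assumes (the struct and field names here are ours, not
kernel code; the offsets come from the comments in svm_pre_enter_smm()
below):

#include <stdint.h>

/* Illustrative view of the SVM fields in the SMM state-save area.
 * Offsets are relative to SMBASE; the two u64 fields are contiguous,
 * matching the anonymous struct read back in svm_pre_leave_smm(). */
struct svm_smram_fields {
	uint64_t svm_guest;	/* FED8h: nonzero => SMI arrived in guest mode */
	uint64_t svm_vmcb_pa;	/* FEE0h: guest-physical address of the nested VMCB */
};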
-rw-r--r--	arch/x86/include/asm/kvm_host.h	3
-rw-r--r--	arch/x86/kvm/svm.c	58
-rw-r--r--	arch/x86/kvm/x86.c	3
3 files changed, 58 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 411ddbbaeabf..8700b845f780 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1430,4 +1430,7 @@ static inline int kvm_cpu_get_apicid(int mps_cpu)
 #endif
 }
 
+#define put_smstate(type, buf, offset, val) \
+	*(type *)((buf) + (offset) - 0x7e00) = val
+
 #endif /* _ASM_X86_KVM_HOST_H */
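For context on the offset arithmetic in put_smstate(): KVM builds the SMM
state-save image in a 512-byte buffer that is later written to guest
memory starting at smbase + 0xfe00 (an assumption based on the common
enter_smm path, not shown in this diff), so an architectural offset such
as FED8h is passed as 0x7ed8 and lands 0xd8 bytes into the buffer. A
minimal user-space sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define put_smstate(type, buf, offset, val) \
	*(type *)((buf) + (offset) - 0x7e00) = val

int main(void)
{
	char buf[512] = { 0 };	/* stand-in for the state-save image */

	/* FED8h - SVM Guest flag, as written by svm_pre_enter_smm() */
	put_smstate(uint64_t, buf, 0x7ed8, 1);

	/* The value lands 0xd8 bytes into the buffer. */
	uint64_t v;
	memcpy(&v, buf + 0x7ed8 - 0x7e00, sizeof(v));
	printf("stored at buf[0x%x]: %llu\n",
	       0x7ed8 - 0x7e00, (unsigned long long)v);
	return 0;
}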
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6edefabd5a82..ff94552f85d0 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -5409,19 +5409,71 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 
 static int svm_smi_allowed(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	/* Per APM Vol.2 15.22.2 "Response to SMI" */
+	if (!gif_set(svm))
+		return 0;
+
+	if (is_guest_mode(&svm->vcpu) &&
+	    svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
+		/* TODO: Might need to set exit_info_1 and exit_info_2 here */
+		svm->vmcb->control.exit_code = SVM_EXIT_SMI;
+		svm->nested.exit_required = true;
+		return 0;
+	}
+
 	return 1;
 }
 
 static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
 {
-	/* TODO: Implement */
+	struct vcpu_svm *svm = to_svm(vcpu);
+	int ret;
+
+	if (is_guest_mode(vcpu)) {
+		/* FED8h - SVM Guest */
+		put_smstate(u64, smstate, 0x7ed8, 1);
+		/* FEE0h - SVM Guest VMCB Physical Address */
+		put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
+
+		svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+		svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+		svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+
+		ret = nested_svm_vmexit(svm);
+		if (ret)
+			return ret;
+	}
 	return 0;
 }
 
 static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 {
-	/* TODO: Implement */
-	return 0;
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *nested_vmcb;
+	struct page *page;
+	struct {
+		u64 guest;
+		u64 vmcb;
+	} svm_state_save;
+	int ret;
+
+	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
+				  sizeof(svm_state_save));
+	if (ret)
+		return ret;
+
+	if (svm_state_save.guest) {
+		vcpu->arch.hflags &= ~HF_SMM_MASK;
+		nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
+		if (nested_vmcb)
+			enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
+		else
+			ret = 1;
+		vcpu->arch.hflags |= HF_SMM_MASK;
+	}
+	return ret;
 }
 
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
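Taken together, the two hooks form a save/restore pair across the SMM
round trip: svm_pre_enter_smm() records the nested state and forces a
nested vmexit before SMM state is saved, and svm_pre_leave_smm() re-enters
guest mode on RSM if the saved flag says the SMI interrupted L2. A hedged
user-space sketch of that flow (the function names, buffer offsets, and
guest_mode flag are simplified stand-ins, not kernel code; the real
callers are KVM's common SMM entry and RSM emulation paths):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static char smram[512];		/* stand-in for the 512-byte state-save image */
static int guest_mode = 1;	/* pretend the SMI arrived while L2 was running */

/* Mirrors svm_pre_enter_smm(): stash nested state, then leave guest mode. */
static void pre_enter_smm(char *smstate)
{
	if (guest_mode) {
		uint64_t guest = 1, vmcb_pa = 0x12345000;
		memcpy(smstate + 0xd8, &guest, sizeof(guest));		/* FED8h */
		memcpy(smstate + 0xe0, &vmcb_pa, sizeof(vmcb_pa));	/* FEE0h */
		guest_mode = 0;		/* stands in for nested_svm_vmexit() */
	}
}

/* Mirrors svm_pre_leave_smm(): re-enter guest mode if the flag was set. */
static void pre_leave_smm(const char *smstate)
{
	uint64_t guest;
	memcpy(&guest, smstate + 0xd8, sizeof(guest));
	if (guest)
		guest_mode = 1;		/* stands in for enter_svm_guest_mode() */
}

int main(void)
{
	pre_enter_smm(smram);	/* SMI delivery */
	printf("inside SMM, guest_mode=%d\n", guest_mode);
	pre_leave_smm(smram);	/* RSM */
	printf("after RSM, guest_mode=%d\n", guest_mode);
	return 0;
}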
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 693bf8d01128..5669af09b732 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6485,9 +6485,6 @@ static void process_nmi(struct kvm_vcpu *vcpu)
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
-#define put_smstate(type, buf, offset, val) \
-	*(type *)((buf) + (offset) - 0x7e00) = val
-
 static u32 enter_smm_get_segment_flags(struct kvm_segment *seg)
 {
 	u32 flags = 0;