author    Ladi Prosek <lprosek@redhat.com>    2017-10-17 10:02:39 -0400
committer Radim Krčmář <rkrcmar@redhat.com>   2017-10-18 15:21:22 -0400
commit    cc3d967f7e32ceeb9b78dc962126ebcf1a2b24b2 (patch)
tree      b5a6a7dc1f6c9c181be3bab24d05b5066b423ce0
parent    9b8ebbdb74b5ad76b9dfd8b101af17839174b126 (diff)
KVM: SVM: detect opening of SMI window using STGI intercept
Commit 05cade71cf3b ("KVM: nSVM: fix SMI injection in guest mode") made
KVM mask SMI if GIF=0 but it didn't do anything to unmask it when GIF is
enabled.

The issue manifests for me as a significantly longer boot time of
Windows guests when running with SMM-enabled OVMF.

This commit fixes it by intercepting STGI instead of requesting
immediate exit if the reason why SMM was masked is GIF.

Fixes: 05cade71cf3b ("KVM: nSVM: fix SMI injection in guest mode")
Signed-off-by: Ladi Prosek <lprosek@redhat.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
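For illustration, the window-open protocol the patch introduces can be
modeled in isolation. The following is a minimal stand-alone C sketch,
not kernel code: struct vcpu and its fields are hypothetical stand-ins
for KVM's vcpu state, and the two functions mirror enable_smi_window()
and stgi_interception() from the svm.c diff below (the vGIF-only
intercept condition of the real code is elided).

    /* smi_window_model.c - hypothetical model of STGI-based SMI
     * window detection; build with: cc smi_window_model.c */
    #include <stdbool.h>
    #include <stdio.h>

    struct vcpu {
            bool gif;            /* Global Interrupt Flag; SMI masked while 0 */
            bool smi_pending;    /* an SMI is waiting to be injected */
            bool stgi_intercept; /* guest STGI causes a vmexit while set */
    };

    /* Mirrors enable_smi_window(): returns nonzero when the window is
     * closed and the STGI intercept will signal its opening. */
    static int enable_smi_window(struct vcpu *v)
    {
            if (!v->gif) {
                    v->stgi_intercept = true; /* STGI will cause a vm exit */
                    return 1;
            }
            return 0;
    }

    /* Mirrors stgi_interception(): guest ran STGI, GIF opens, and the
     * intercept added only to detect the window is removed. */
    static void stgi_interception(struct vcpu *v)
    {
            v->gif = true;
            v->stgi_intercept = false;
    }

    int main(void)
    {
            struct vcpu v = { .gif = false, .smi_pending = true };

            /* vcpu_enter_guest(): SMI pending but masked by GIF=0; before
             * the patch this path unconditionally requested an immediate
             * exit even though the SMI could not yet be injected. */
            if (enable_smi_window(&v))
                    printf("GIF=0: SMI stays pending, waiting for STGI vmexit\n");

            stgi_interception(&v); /* guest executes STGI -> vmexit */

            if (!enable_smi_window(&v)) {
                    printf("GIF=1: window open, injecting the pending SMI\n");
                    v.smi_pending = false;
            }
            return 0;
    }

On the Intel side, the vmx.c stub always returns 0, so x86.c keeps the
pre-patch behavior of requesting an immediate exit there.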
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/svm.c              | 16 +++++++++++++++-
 arch/x86/kvm/vmx.c              |  6 ++++++
 arch/x86/kvm/x86.c              | 22 ++++++++++++++--------
 4 files changed, 36 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 8700b845f780..7233445a20bd 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1065,6 +1065,7 @@ struct kvm_x86_ops {
 	int (*smi_allowed)(struct kvm_vcpu *vcpu);
 	int (*pre_enter_smm)(struct kvm_vcpu *vcpu, char *smstate);
 	int (*pre_leave_smm)(struct kvm_vcpu *vcpu, u64 smbase);
+	int (*enable_smi_window)(struct kvm_vcpu *vcpu);
 };
 
 struct kvm_arch_async_pf {
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ff94552f85d0..b71daed3cca2 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3187,7 +3187,7 @@ static int stgi_interception(struct vcpu_svm *svm)
 
 	/*
 	 * If VGIF is enabled, the STGI intercept is only added to
-	 * detect the opening of the NMI window; remove it now.
+	 * detect the opening of the SMI/NMI window; remove it now.
 	 */
 	if (vgif_enabled(svm))
 		clr_intercept(svm, INTERCEPT_STGI);
@@ -5476,6 +5476,19 @@ static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 	return ret;
 }
 
+static int enable_smi_window(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!gif_set(svm)) {
+		if (vgif_enabled(svm))
+			set_intercept(svm, INTERCEPT_STGI);
+		/* STGI will cause a vm exit */
+		return 1;
+	}
+	return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
@@ -5590,6 +5603,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.smi_allowed = svm_smi_allowed,
 	.pre_enter_smm = svm_pre_enter_smm,
 	.pre_leave_smm = svm_pre_leave_smm,
+	.enable_smi_window = enable_smi_window,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c460b0b439d3..69d45734091f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -11973,6 +11973,11 @@ static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
 	return 0;
 }
 
+static int enable_smi_window(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
@@ -12102,6 +12107,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.smi_allowed = vmx_smi_allowed,
 	.pre_enter_smm = vmx_pre_enter_smm,
 	.pre_leave_smm = vmx_pre_leave_smm,
+	.enable_smi_window = enable_smi_window,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5669af09b732..3b51c8659741 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6892,17 +6892,23 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		if (inject_pending_event(vcpu, req_int_win) != 0)
 			req_immediate_exit = true;
 		else {
-			/* Enable NMI/IRQ window open exits if needed.
+			/* Enable SMI/NMI/IRQ window open exits if needed.
 			 *
-			 * SMIs have two cases: 1) they can be nested, and
-			 * then there is nothing to do here because RSM will
-			 * cause a vmexit anyway; 2) or the SMI can be pending
-			 * because inject_pending_event has completed the
-			 * injection of an IRQ or NMI from the previous vmexit,
-			 * and then we request an immediate exit to inject the SMI.
+			 * SMIs have three cases:
+			 * 1) They can be nested, and then there is nothing to
+			 * do here because RSM will cause a vmexit anyway.
+			 * 2) There is an ISA-specific reason why SMI cannot be
+			 * injected, and the moment when this changes can be
+			 * intercepted.
+			 * 3) Or the SMI can be pending because
+			 * inject_pending_event has completed the injection
+			 * of an IRQ or NMI from the previous vmexit, and
+			 * then we request an immediate exit to inject the
+			 * SMI.
 			 */
 			if (vcpu->arch.smi_pending && !is_smm(vcpu))
-				req_immediate_exit = true;
+				if (!kvm_x86_ops->enable_smi_window(vcpu))
+					req_immediate_exit = true;
 			if (vcpu->arch.nmi_pending)
 				kvm_x86_ops->enable_nmi_window(vcpu);
 			if (kvm_cpu_has_injectable_intr(vcpu) || req_int_win)