aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--	arch/x86/kvm/vmx.c	31 +++++++++++++++++++++----------
1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bfee7f4ac1dd..714a0673ec3c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -70,6 +70,9 @@ MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
 static bool __read_mostly enable_vpid = 1;
 module_param_named(vpid, enable_vpid, bool, 0444);
 
+static bool __read_mostly enable_vnmi = 1;
+module_param_named(vnmi, enable_vnmi, bool, S_IRUGO);
+
 static bool __read_mostly flexpriority_enabled = 1;
 module_param_named(flexpriority, flexpriority_enabled, bool, S_IRUGO);
 
@@ -5236,6 +5239,10 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
 
 	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
 		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
+
+	if (!enable_vnmi)
+		pin_based_exec_ctrl &= ~PIN_BASED_VIRTUAL_NMIS;
+
 	/* Enable the preemption timer dynamically */
 	pin_based_exec_ctrl &= ~PIN_BASED_VMX_PREEMPTION_TIMER;
 	return pin_based_exec_ctrl;
@@ -5670,7 +5677,7 @@ static void enable_irq_window(struct kvm_vcpu *vcpu)
 
 static void enable_nmi_window(struct kvm_vcpu *vcpu)
 {
-	if (!cpu_has_virtual_nmis() ||
+	if (!enable_vnmi ||
 	    vmcs_read32(GUEST_INTERRUPTIBILITY_INFO) & GUEST_INTR_STATE_STI) {
 		enable_irq_window(vcpu);
 		return;
@@ -5711,7 +5718,7 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!cpu_has_virtual_nmis()) {
+	if (!enable_vnmi) {
 		/*
 		 * Tracking the NMI-blocked state in software is built upon
 		 * finding the next open IRQ window. This, in turn, depends on
@@ -5742,7 +5749,7 @@ static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	bool masked;
 
-	if (!cpu_has_virtual_nmis())
+	if (!enable_vnmi)
 		return vmx->loaded_vmcs->soft_vnmi_blocked;
 	if (vmx->loaded_vmcs->nmi_known_unmasked)
 		return false;
@@ -5755,7 +5762,7 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-	if (!cpu_has_virtual_nmis()) {
+	if (!enable_vnmi) {
 		if (vmx->loaded_vmcs->soft_vnmi_blocked != masked) {
 			vmx->loaded_vmcs->soft_vnmi_blocked = masked;
 			vmx->loaded_vmcs->vnmi_blocked_time = 0;
@@ -5776,7 +5783,7 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
 	if (to_vmx(vcpu)->nested.nested_run_pending)
 		return 0;
 
-	if (!cpu_has_virtual_nmis() &&
+	if (!enable_vnmi &&
 	    to_vmx(vcpu)->loaded_vmcs->soft_vnmi_blocked)
 		return 0;
 
@@ -6507,7 +6514,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu)
 	 * AAK134, BY25.
 	 */
 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-			cpu_has_virtual_nmis() &&
+			enable_vnmi &&
 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO, GUEST_INTR_STATE_NMI);
 
@@ -6567,6 +6574,7 @@ static int handle_ept_misconfig(struct kvm_vcpu *vcpu)
 
 static int handle_nmi_window(struct kvm_vcpu *vcpu)
 {
+	WARN_ON_ONCE(!enable_vnmi);
 	vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
 			CPU_BASED_VIRTUAL_NMI_PENDING);
 	++vcpu->stat.nmi_window_exits;
@@ -6790,6 +6798,9 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_flexpriority())
 		flexpriority_enabled = 0;
 
+	if (!cpu_has_virtual_nmis())
+		enable_vnmi = 0;
+
 	/*
 	 * set_apic_access_page_addr() is used to reload apic access
 	 * page upon invalidation. No need to do anything if not
@@ -8011,7 +8022,7 @@ static int handle_pml_full(struct kvm_vcpu *vcpu)
 	 * "blocked by NMI" bit has to be set before next VM entry.
 	 */
 	if (!(to_vmx(vcpu)->idt_vectoring_info & VECTORING_INFO_VALID_MASK) &&
-			cpu_has_virtual_nmis() &&
+			enable_vnmi &&
 			(exit_qualification & INTR_INFO_UNBLOCK_NMI))
 		vmcs_set_bits(GUEST_INTERRUPTIBILITY_INFO,
 				GUEST_INTR_STATE_NMI);
@@ -8856,7 +8867,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 		return 0;
 	}
 
-	if (unlikely(!cpu_has_virtual_nmis() &&
+	if (unlikely(!enable_vnmi &&
 		     vmx->loaded_vmcs->soft_vnmi_blocked)) {
 		if (vmx_interrupt_allowed(vcpu)) {
 			vmx->loaded_vmcs->soft_vnmi_blocked = 0;
@@ -9157,7 +9168,7 @@ static void vmx_recover_nmi_blocking(struct vcpu_vmx *vmx)
 
 	idtv_info_valid = vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
-	if (cpu_has_virtual_nmis()) {
+	if (enable_vnmi) {
 		if (vmx->loaded_vmcs->nmi_known_unmasked)
 			return;
 		/*
@@ -9306,7 +9317,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	unsigned long debugctlmsr, cr3, cr4;
 
 	/* Record the guest's net vcpu time for enforced NMI injections. */
-	if (unlikely(!cpu_has_virtual_nmis() &&
+	if (unlikely(!enable_vnmi &&
 		     vmx->loaded_vmcs->soft_vnmi_blocked))
 		vmx->loaded_vmcs->entry_time = ktime_get();
 