Diffstat (limited to 'arch/x86/kvm/vmx.c')

 arch/x86/kvm/vmx.c | 208
 1 file changed, 131 insertions(+), 77 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a6f4f095f8f4..7c3522a989d0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -486,6 +486,14 @@ struct nested_vmx {
 	u64 nested_vmx_cr4_fixed1;
 	u64 nested_vmx_vmcs_enum;
 	u64 nested_vmx_vmfunc_controls;
+
+	/* SMM related state */
+	struct {
+		/* in VMX operation on SMM entry? */
+		bool vmxon;
+		/* in guest mode on SMM entry? */
+		bool guest_mode;
+	} smm;
 };
 
 #define POSTED_INTR_ON		0
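
Note: the new nested.smm pair records whether the vCPU was in VMX operation and whether it was running L2 at the moment an SMI was taken; vmx_pre_enter_smm() and vmx_pre_leave_smm(), added near the end of this patch, save and restore that state around SMM emulation.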
@@ -900,16 +908,13 @@ static bool nested_ept_ad_enabled(struct kvm_vcpu *vcpu);
 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
 static u64 construct_eptp(struct kvm_vcpu *vcpu, unsigned long root_hpa);
 static bool vmx_xsaves_supported(void);
-static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 static void vmx_get_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 static bool guest_state_valid(struct kvm_vcpu *vcpu);
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
-static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
-static int alloc_identity_pagetable(struct kvm *kvm);
 static bool vmx_get_nmi_mask(struct kvm_vcpu *vcpu);
 static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked);
 static bool nested_vmx_is_page_fault_vmexit(struct vmcs12 *vmcs12,
@@ -1598,18 +1603,15 @@ static inline void vpid_sync_context(int vpid)
 
 static inline void ept_sync_global(void)
 {
-	if (cpu_has_vmx_invept_global())
-		__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
+	__invept(VMX_EPT_EXTENT_GLOBAL, 0, 0);
 }
 
 static inline void ept_sync_context(u64 eptp)
 {
-	if (enable_ept) {
-		if (cpu_has_vmx_invept_context())
-			__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
-		else
-			ept_sync_global();
-	}
+	if (cpu_has_vmx_invept_context())
+		__invept(VMX_EPT_EXTENT_CONTEXT, eptp, 0);
+	else
+		ept_sync_global();
 }
 
 static __always_inline void vmcs_check16(unsigned long field)
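
Note: the cpu_has_vmx_invept_global() check disappears because hardware_setup() (the @@ -6747 hunk below) now clears enable_ept whenever global INVEPT is unsupported, so ept_sync_global() can assume the extent is available. The enable_ept guard likewise moves out of ept_sync_context() and into the callers; hardware_enable() below gains the corresponding check.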
@@ -2831,8 +2833,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 				SECONDARY_EXEC_ENABLE_PML;
 			vmx->nested.nested_vmx_ept_caps |= VMX_EPT_AD_BIT;
 		}
-	} else
-		vmx->nested.nested_vmx_ept_caps = 0;
+	}
 
 	if (cpu_has_vmx_vmfunc()) {
 		vmx->nested.nested_vmx_secondary_ctls_high |=
@@ -2841,8 +2842,9 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 		 * Advertise EPTP switching unconditionally
 		 * since we emulate it
 		 */
-		vmx->nested.nested_vmx_vmfunc_controls =
-			VMX_VMFUNC_EPTP_SWITCHING;
+		if (enable_ept)
+			vmx->nested.nested_vmx_vmfunc_controls =
+				VMX_VMFUNC_EPTP_SWITCHING;
 	}
 
 	/*
@@ -2856,8 +2858,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 			SECONDARY_EXEC_ENABLE_VPID;
 		vmx->nested.nested_vmx_vpid_caps = VMX_VPID_INVVPID_BIT |
 			VMX_VPID_EXTENT_SUPPORTED_MASK;
-	} else
-		vmx->nested.nested_vmx_vpid_caps = 0;
+	}
 
 	if (enable_unrestricted_guest)
 		vmx->nested.nested_vmx_secondary_ctls_high |=
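
Note: the explicit zeroing of nested_vmx_ept_caps and nested_vmx_vpid_caps in the else branches is dropped, presumably safe because struct vcpu_vmx is zero-allocated, so the fields start out clear. VMX_VMFUNC_EPTP_SWITCHING is now advertised only when enable_ept is set, since the emulated EPTP-switching VM function is meaningless without EPT.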
@@ -3544,7 +3545,8 @@ static int hardware_enable(void)
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
 	}
 	kvm_cpu_vmxon(phys_addr);
-	ept_sync_global();
+	if (enable_ept)
+		ept_sync_global();
 
 	return 0;
 }
@@ -3657,8 +3659,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 			SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY |
 			SECONDARY_EXEC_SHADOW_VMCS |
 			SECONDARY_EXEC_XSAVES |
-			SECONDARY_EXEC_RDSEED |
-			SECONDARY_EXEC_RDRAND |
+			SECONDARY_EXEC_RDSEED_EXITING |
+			SECONDARY_EXEC_RDRAND_EXITING |
 			SECONDARY_EXEC_ENABLE_PML |
 			SECONDARY_EXEC_TSC_SCALING |
 			SECONDARY_EXEC_ENABLE_VMFUNC;
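
Note: SECONDARY_EXEC_RDSEED and SECONDARY_EXEC_RDRAND are renamed with an _EXITING suffix throughout this patch, matching the SDM's names for these secondary processor-based controls ("RDRAND exiting", "RDSEED exiting"); only the names change, not the bit values. For reference, the definitions as assumed in arch/x86/include/asm/vmx.h (not part of this diff):

	#define SECONDARY_EXEC_RDRAND_EXITING	0x00000800
	#define SECONDARY_EXEC_RDSEED_EXITING	0x00010000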
@@ -3679,14 +3681,25 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
 				SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
 
+	rdmsr_safe(MSR_IA32_VMX_EPT_VPID_CAP,
+		&vmx_capability.ept, &vmx_capability.vpid);
+
 	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
 		/* CR3 accesses and invlpg don't need to cause VM Exits when EPT
 		   enabled */
 		_cpu_based_exec_control &= ~(CPU_BASED_CR3_LOAD_EXITING |
 					     CPU_BASED_CR3_STORE_EXITING |
 					     CPU_BASED_INVLPG_EXITING);
-		rdmsr(MSR_IA32_VMX_EPT_VPID_CAP,
-		      vmx_capability.ept, vmx_capability.vpid);
+	} else if (vmx_capability.ept) {
+		vmx_capability.ept = 0;
+		pr_warn_once("EPT CAP should not exist if not support "
+				"1-setting enable EPT VM-execution control\n");
+	}
+	if (!(_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_VPID) &&
+		vmx_capability.vpid) {
+		vmx_capability.vpid = 0;
+		pr_warn_once("VPID CAP should not exist if not support "
+				"1-setting enable VPID VM-execution control\n");
 	}
 
 	min = VM_EXIT_SAVE_DEBUG_CONTROLS | VM_EXIT_ACK_INTR_ON_EXIT;
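
Note: MSR_IA32_VMX_EPT_VPID_CAP is now read unconditionally, via rdmsr_safe() so a missing MSR simply leaves the capability words at zero, and the EPT/VPID words are then cleared (with a one-time warning) if the corresponding enable control cannot be set to 1. That keeps the cpu_has_vmx_*() helpers honest on inconsistent hardware; they consume the capability words roughly like this (unchanged code elsewhere in this file, reproduced from memory for context):

	static inline bool cpu_has_vmx_invept_context(void)
	{
		return vmx_capability.ept & VMX_EPT_EXTENT_CONTEXT_BIT;
	}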
@@ -4781,18 +4794,18 @@ static int init_rmode_identity_map(struct kvm *kvm)
 	kvm_pfn_t identity_map_pfn;
 	u32 tmp;
 
-	if (!enable_ept)
-		return 0;
-
 	/* Protect kvm->arch.ept_identity_pagetable_done. */
 	mutex_lock(&kvm->slots_lock);
 
 	if (likely(kvm->arch.ept_identity_pagetable_done))
 		goto out2;
 
+	if (!kvm->arch.ept_identity_map_addr)
+		kvm->arch.ept_identity_map_addr = VMX_EPT_IDENTITY_PAGETABLE_ADDR;
 	identity_map_pfn = kvm->arch.ept_identity_map_addr >> PAGE_SHIFT;
 
-	r = alloc_identity_pagetable(kvm);
+	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
+				    kvm->arch.ept_identity_map_addr, PAGE_SIZE);
 	if (r < 0)
 		goto out2;
 
@@ -4864,20 +4877,6 @@ out:
 	return r;
 }
 
-static int alloc_identity_pagetable(struct kvm *kvm)
-{
-	/* Called with kvm->slots_lock held. */
-
-	int r = 0;
-
-	BUG_ON(kvm->arch.ept_identity_pagetable_done);
-
-	r = __x86_set_memory_region(kvm, IDENTITY_PAGETABLE_PRIVATE_MEMSLOT,
-			kvm->arch.ept_identity_map_addr, PAGE_SIZE);
-
-	return r;
-}
-
 static int allocate_vpid(void)
 {
 	int vpid;
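
Note: alloc_identity_pagetable() was a single-caller wrapper around __x86_set_memory_region(), so it is folded into init_rmode_identity_map() above; the defaulting of kvm->arch.ept_identity_map_addr to VMX_EPT_IDENTITY_PAGETABLE_ADDR also moves there from vmx_create_vcpu() (the @@ -9568 hunk below), placing the whole setup under kvm->slots_lock. The !enable_ept early return is dropped because the only caller already checks enable_ept.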
@@ -5282,13 +5281,13 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 static bool vmx_rdrand_supported(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_RDRAND;
+		SECONDARY_EXEC_RDRAND_EXITING;
 }
 
 static bool vmx_rdseed_supported(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
-		SECONDARY_EXEC_RDSEED;
+		SECONDARY_EXEC_RDSEED_EXITING;
 }
 
 static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
@@ -5382,30 +5381,30 @@ static void vmx_compute_secondary_exec_control(struct vcpu_vmx *vmx)
 	if (vmx_rdrand_supported()) {
 		bool rdrand_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDRAND);
 		if (rdrand_enabled)
-			exec_control &= ~SECONDARY_EXEC_RDRAND;
+			exec_control &= ~SECONDARY_EXEC_RDRAND_EXITING;
 
 		if (nested) {
 			if (rdrand_enabled)
 				vmx->nested.nested_vmx_secondary_ctls_high |=
-					SECONDARY_EXEC_RDRAND;
+					SECONDARY_EXEC_RDRAND_EXITING;
 			else
 				vmx->nested.nested_vmx_secondary_ctls_high &=
-					~SECONDARY_EXEC_RDRAND;
+					~SECONDARY_EXEC_RDRAND_EXITING;
 		}
 	}
 
 	if (vmx_rdseed_supported()) {
 		bool rdseed_enabled = guest_cpuid_has(vcpu, X86_FEATURE_RDSEED);
 		if (rdseed_enabled)
-			exec_control &= ~SECONDARY_EXEC_RDSEED;
+			exec_control &= ~SECONDARY_EXEC_RDSEED_EXITING;
 
 		if (nested) {
 			if (rdseed_enabled)
 				vmx->nested.nested_vmx_secondary_ctls_high |=
-					SECONDARY_EXEC_RDSEED;
+					SECONDARY_EXEC_RDSEED_EXITING;
 			else
 				vmx->nested.nested_vmx_secondary_ctls_high &=
-					~SECONDARY_EXEC_RDSEED;
+					~SECONDARY_EXEC_RDSEED_EXITING;
 		}
 	}
 
@@ -5426,7 +5425,7 @@ static void ept_set_mmio_spte_mask(void)
 /*
  * Sets up the vmcs for emulated real mode.
  */
-static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
+static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
 {
 #ifdef CONFIG_X86_64
 	unsigned long a;
@@ -5539,8 +5538,6 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
 		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
 	}
-
-	return 0;
 }
 
 static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
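
Note: vmx_vcpu_setup() could only ever return 0, so it becomes void; the @@ -9556 hunk below removes the dead error handling from its only caller, vmx_create_vcpu().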
@@ -5604,6 +5601,8 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 	vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
 	vmcs_write32(GUEST_INTERRUPTIBILITY_INFO, 0);
 	vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS, 0);
+	if (kvm_mpx_supported())
+		vmcs_write64(GUEST_BNDCFGS, 0);
 
 	setup_msrs(vmx);
 
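
Note: clearing GUEST_BNDCFGS on vCPU reset (when MPX is supported) follows the SDM, which has INIT/reset zero IA32_BNDCFGS; without this write a stale value could survive a reset.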
@@ -5912,8 +5911,7 @@ static int handle_exception(struct kvm_vcpu *vcpu)
 		cr2 = vmcs_readl(EXIT_QUALIFICATION);
 		/* EPT won't cause page fault directly */
 		WARN_ON_ONCE(!vcpu->arch.apf.host_apf_reason && enable_ept);
-		return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0,
-				true);
+		return kvm_handle_page_fault(vcpu, error_code, cr2, NULL, 0);
 	}
 
 	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
@@ -6747,16 +6745,14 @@ static __init int hardware_setup(void)
 
 	if (!cpu_has_vmx_ept() ||
 	    !cpu_has_vmx_ept_4levels() ||
-	    !cpu_has_vmx_ept_mt_wb()) {
+	    !cpu_has_vmx_ept_mt_wb() ||
+	    !cpu_has_vmx_invept_global())
 		enable_ept = 0;
-		enable_unrestricted_guest = 0;
-		enable_ept_ad_bits = 0;
-	}
 
 	if (!cpu_has_vmx_ept_ad_bits() || !enable_ept)
 		enable_ept_ad_bits = 0;
 
-	if (!cpu_has_vmx_unrestricted_guest())
+	if (!cpu_has_vmx_unrestricted_guest() || !enable_ept)
 		enable_unrestricted_guest = 0;
 
 	if (!cpu_has_vmx_flexpriority())
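
Note: enable_ept now additionally requires global INVEPT support, backing the unconditional __invept() in ept_sync_global() above. The knock-on disables are no longer done inside the EPT block: enable_ept_ad_bits already re-checked !enable_ept, and the unrestricted-guest check gains its own || !enable_ept, so the result is the same with less nesting.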
@@ -6776,8 +6772,13 @@ static __init int hardware_setup(void)
 	if (enable_ept && !cpu_has_vmx_ept_2m_page())
 		kvm_disable_largepages();
 
-	if (!cpu_has_vmx_ple())
+	if (!cpu_has_vmx_ple()) {
 		ple_gap = 0;
+		ple_window = 0;
+		ple_window_grow = 0;
+		ple_window_max = 0;
+		ple_window_shrink = 0;
+	}
 
 	if (!cpu_has_vmx_apicv()) {
 		enable_apicv = 0;
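
Note: when PLE is unsupported, all five pause-loop-exiting module parameters are now zeroed rather than just ple_gap, presumably so that the values user space reads back are consistent with the feature being off.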
@@ -8415,9 +8416,9 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
 	case EXIT_REASON_RDPMC:
 		return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
 	case EXIT_REASON_RDRAND:
-		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND);
+		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDRAND_EXITING);
 	case EXIT_REASON_RDSEED:
-		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED);
+		return nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDSEED_EXITING);
 	case EXIT_REASON_RDTSC: case EXIT_REASON_RDTSCP:
 		return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
 	case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
@@ -9475,7 +9476,6 @@ static void vmx_switch_vmcs(struct kvm_vcpu *vcpu, struct loaded_vmcs *vmcs)
 	vmx->loaded_vmcs = vmcs;
 	vmx_vcpu_put(vcpu);
 	vmx_vcpu_load(vcpu, cpu);
-	vcpu->cpu = cpu;
 	put_cpu();
 }
 
@@ -9556,11 +9556,9 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	cpu = get_cpu();
 	vmx_vcpu_load(&vmx->vcpu, cpu);
 	vmx->vcpu.cpu = cpu;
-	err = vmx_vcpu_setup(vmx);
+	vmx_vcpu_setup(vmx);
 	vmx_vcpu_put(&vmx->vcpu);
 	put_cpu();
-	if (err)
-		goto free_vmcs;
 	if (cpu_need_virtualize_apic_accesses(&vmx->vcpu)) {
 		err = alloc_apic_access_page(kvm);
 		if (err)
@@ -9568,9 +9566,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	}
 
 	if (enable_ept) {
-		if (!kvm->arch.ept_identity_map_addr)
-			kvm->arch.ept_identity_map_addr =
-				VMX_EPT_IDENTITY_PAGETABLE_ADDR;
 		err = init_rmode_identity_map(kvm);
 		if (err)
 			goto free_vmcs;
@@ -11325,6 +11320,8 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
 	vmcs_writel(GUEST_SYSENTER_EIP, vmcs12->host_ia32_sysenter_eip);
 	vmcs_writel(GUEST_IDTR_BASE, vmcs12->host_idtr_base);
 	vmcs_writel(GUEST_GDTR_BASE, vmcs12->host_gdtr_base);
+	vmcs_write32(GUEST_IDTR_LIMIT, 0xFFFF);
+	vmcs_write32(GUEST_GDTR_LIMIT, 0xFFFF);
 
 	/* If not VM_EXIT_CLEAR_BNDCFGS, the L2 value propagates to L1. */
 	if (vmcs12->vm_exit_controls & VM_EXIT_CLEAR_BNDCFGS)
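
Note: on a hardware VM exit the processor sets the GDTR and IDTR limits to 0xFFFF (SDM vol. 3, loading of host state); the emulated vmexit now does the same when restoring L1 host state, instead of leaking whatever limits L2 had loaded.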
@@ -11421,8 +11418,11 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	leave_guest_mode(vcpu);
 
 	if (likely(!vmx->fail)) {
-		prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
-			       exit_qualification);
+		if (exit_reason == -1)
+			sync_vmcs12(vcpu, vmcs12);
+		else
+			prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
+				       exit_qualification);
 
 		if (nested_vmx_store_msr(vcpu, vmcs12->vm_exit_msr_store_addr,
 					 vmcs12->vm_exit_msr_store_count))
@@ -11486,7 +11486,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 	 */
 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
-	if (enable_shadow_vmcs)
+	if (enable_shadow_vmcs && exit_reason != -1)
 		vmx->nested.sync_shadow_vmcs = true;
 
 	/* in case we halted in L2 */
@@ -11510,12 +11510,13 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
 			INTR_INFO_VALID_MASK | INTR_TYPE_EXT_INTR;
 	}
 
-	trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
-				       vmcs12->exit_qualification,
-				       vmcs12->idt_vectoring_info_field,
-				       vmcs12->vm_exit_intr_info,
-				       vmcs12->vm_exit_intr_error_code,
-				       KVM_ISA_VMX);
+	if (exit_reason != -1)
+		trace_kvm_nested_vmexit_inject(vmcs12->vm_exit_reason,
+					       vmcs12->exit_qualification,
+					       vmcs12->idt_vectoring_info_field,
+					       vmcs12->vm_exit_intr_info,
+					       vmcs12->vm_exit_intr_error_code,
+					       KVM_ISA_VMX);
 
 	load_vmcs12_host_state(vcpu, vmcs12);
 
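
Note: exit_reason == -1 is a new internal convention meaning "leave guest mode without a real VM exit", used by vmx_pre_enter_smm() below. On that path vmcs12 is refreshed via sync_vmcs12(), but no exit reason, interrupt info, or qualification is recorded, the nested-vmexit tracepoint is suppressed (this hunk), and the shadow VMCS is not marked for resync (previous hunk).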
@@ -11938,6 +11939,54 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
 			~FEATURE_CONTROL_LMCE;
 }
 
+static int vmx_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	/* we need a nested vmexit to enter SMM, postpone if run is pending */
+	if (to_vmx(vcpu)->nested.nested_run_pending)
+		return 0;
+	return 1;
+}
+
+static int vmx_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	vmx->nested.smm.guest_mode = is_guest_mode(vcpu);
+	if (vmx->nested.smm.guest_mode)
+		nested_vmx_vmexit(vcpu, -1, 0, 0);
+
+	vmx->nested.smm.vmxon = vmx->nested.vmxon;
+	vmx->nested.vmxon = false;
+	return 0;
+}
+
+static int vmx_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int ret;
+
+	if (vmx->nested.smm.vmxon) {
+		vmx->nested.vmxon = true;
+		vmx->nested.smm.vmxon = false;
+	}
+
+	if (vmx->nested.smm.guest_mode) {
+		vcpu->arch.hflags &= ~HF_SMM_MASK;
+		ret = enter_vmx_non_root_mode(vcpu, false);
+		vcpu->arch.hflags |= HF_SMM_MASK;
+		if (ret)
+			return ret;
+
+		vmx->nested.smm.guest_mode = false;
+	}
+	return 0;
+}
+
+static int enable_smi_window(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
 static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = cpu_has_kvm_support,
 	.disabled_by_bios = vmx_disabled_by_bios,
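
Note: the four new hooks tie VMX into KVM's generic SMM emulation. An SMI is deferred while a nested VM entry is pending; entering SMM forces a synthetic L2->L1 exit (the exit_reason == -1 path above) and temporarily hides VMX operation; RSM undoes both. HF_SMM_MASK is dropped around enter_vmx_non_root_mode() so the VM-entry emulation does not see the vCPU as being in SMM, and enable_smi_window() trivially returns 0 because VMX, unlike SVM with its GIF, needs no extra work to make an SMI deliverable. A rough sketch of the ordering expected from the generic x86 side (illustrative only; the kvm/x86.c call sites are not part of this diff):

	/* Illustrative pseudo-caller, not actual kvm/x86.c code. */
	if (kvm_x86_ops->smi_allowed(vcpu)) {
		/* forces the L2->L1 exit and stashes vmxon/guest_mode */
		kvm_x86_ops->pre_enter_smm(vcpu, smstate);
		/* ... save state to SMRAM, set HF_SMM_MASK, enter handler ... */
	}
	/* later, while emulating RSM: */
	kvm_x86_ops->pre_leave_smm(vcpu, smbase);
	/* ... restore state from SMRAM, clear HF_SMM_MASK ... */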
@@ -12063,6 +12112,11 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
 #endif
 
 	.setup_mce = vmx_setup_mce,
+
+	.smi_allowed = vmx_smi_allowed,
+	.pre_enter_smm = vmx_pre_enter_smm,
+	.pre_leave_smm = vmx_pre_leave_smm,
+	.enable_smi_window = enable_smi_window,
 };
 
 static int __init vmx_init(void)