Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c | 53 ++++++++++++++++++++++++-------------------------------
 1 file changed, 22 insertions(+), 31 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 246490f643b6..280751c84724 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -70,9 +70,6 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 static bool __read_mostly vmm_exclusive = 1;
 module_param(vmm_exclusive, bool, S_IRUGO);
 
-static bool __read_mostly yield_on_hlt = 1;
-module_param(yield_on_hlt, bool, S_IRUGO);
-
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
@@ -1655,17 +1652,6 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	vmx_set_interrupt_shadow(vcpu, 0);
 }
 
-static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
-{
-	/* Ensure that we clear the HLT state in the VMCS. We don't need to
-	 * explicitly skip the instruction because if the HLT state is set, then
-	 * the instruction is already executing and RIP has already been
-	 * advanced. */
-	if (!yield_on_hlt &&
-	    vmcs_read32(GUEST_ACTIVITY_STATE) == GUEST_ACTIVITY_HLT)
-		vmcs_write32(GUEST_ACTIVITY_STATE, GUEST_ACTIVITY_ACTIVE);
-}
-
 /*
  * KVM wants to inject page-faults which it got to the guest. This function
  * checks whether in a nested guest, we need to inject them to L1 or L2.
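
With yield_on_hlt gone, CPU_BASED_HLT_EXITING is always set (see the setup_vmcs_config() hunks below), so a halted vCPU exits to the host and blocks in the kernel instead of sitting in the VMCS HLT activity state. Event injection can therefore no longer observe GUEST_ACTIVITY_HLT, which is why vmx_clear_hlt() and all three of its call sites can be deleted. For orientation, the exit path looks roughly like the following (a sketch of the pre-existing handler, not part of this patch):

	/* With HLT exiting mandatory, every guest HLT lands here. */
	static int handle_halt(struct kvm_vcpu *vcpu)
	{
		skip_emulated_instruction(vcpu);  /* advance RIP past HLT */
		return kvm_emulate_halt(vcpu);    /* block in the host; activity state stays ACTIVE */
	}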
@@ -1678,7 +1664,7 @@ static int nested_pf_handled(struct kvm_vcpu *vcpu)
 	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
 
 	/* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
-	if (!(vmcs12->exception_bitmap & PF_VECTOR))
+	if (!(vmcs12->exception_bitmap & (1u << PF_VECTOR)))
 		return 0;
 
 	nested_vmx_vmexit(vcpu);
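
The nested_pf_handled() fix above corrects a classic bit-test bug: the exception bitmap holds one bit per vector, but the old code and-ed the bitmap with the vector number itself. PF_VECTOR is 14 (binary 1110), so "bitmap & PF_VECTOR" tested bits 1 through 3 rather than bit 14, misrouting nested page faults whenever those low bits disagreed with the real #PF bit. A minimal standalone illustration (not kernel code):

	#include <stdio.h>

	#define PF_VECTOR 14	/* x86 page-fault vector */

	int main(void)
	{
		unsigned int bitmap = 1u << PF_VECTOR;	/* only the #PF bit set */

		/* Buggy test: "& 14" checks bits 1-3, which are clear here. */
		printf("buggy: %u\n", !!(bitmap & PF_VECTOR));          /* prints 0 */
		/* Fixed test: checks bit 14, which is set. */
		printf("fixed: %u\n", !!(bitmap & (1u << PF_VECTOR)));  /* prints 1 */
		return 0;
	}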
@@ -1718,7 +1704,6 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 		intr_info |= INTR_TYPE_HARD_EXCEPTION;
 
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr_info);
-	vmx_clear_hlt(vcpu);
 }
 
 static bool vmx_rdtscp_supported(void)
@@ -1817,13 +1802,19 @@ u64 vmx_read_l1_tsc(struct kvm_vcpu *vcpu)
 }
 
 /*
- * Empty call-back. Needs to be implemented when VMX enables the SET_TSC_KHZ
- * ioctl. In this case the call-back should update internal vmx state to make
- * the changes effective.
+ * Engage any workarounds for mis-matched TSC rates. Currently limited to
+ * software catchup for faster rates on slower CPUs.
  */
-static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
+static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 {
-	/* Nothing to do here */
+	if (!scale)
+		return;
+
+	if (user_tsc_khz > tsc_khz) {
+		vcpu->arch.tsc_catchup = 1;
+		vcpu->arch.tsc_always_catchup = 1;
+	} else
+		WARN(1, "user requested TSC rate below hardware speed\n");
 }
 
 /*
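
Note that the new vmx_set_tsc_khz() never slows the TSC below the host rate: without hardware TSC scaling, VMX can only add an offset, not change the frequency, so a slower requested rate draws a WARN. For a faster rate, the generic catchup machinery periodically advances the TSC offset so the guest observes the requested frequency on average. The arithmetic is roughly the following (helper name and variables are illustrative, not from the kernel):

	#include <stdint.h>

	/*
	 * Sketch: the offset bump needed after 'elapsed_host' host cycles so
	 * a guest asking for user_tsc_khz sees that rate on a tsc_khz host.
	 */
	static uint64_t catchup_adjustment(uint64_t elapsed_host,
					   uint32_t user_tsc_khz, uint32_t tsc_khz)
	{
		/* Guest cycles that should have elapsed at the requested rate. */
		uint64_t elapsed_guest = elapsed_host * user_tsc_khz / tsc_khz;

		return elapsed_guest - elapsed_host;	/* added to TSC_OFFSET */
	}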
@@ -1850,7 +1841,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	}
 }
 
-static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 {
 	u64 offset = vmcs_read64(TSC_OFFSET);
 	vmcs_write64(TSC_OFFSET, offset + adjustment);
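
The new 'host' flag on vmx_adjust_tsc_offset() distinguishes host-initiated adjustments (e.g. compensating for time elapsed across suspend or migration) from guest-initiated ones. VMX applies the raw adjustment either way, so the flag is unused here; it exists so the callback signature can also serve backends where the two cases must be scaled differently. The generic code presumably wraps the two cases along these lines (a sketch, assuming this shape of the wrappers):

	#define adjust_tsc_offset_guest(vcpu, adjustment) \
		kvm_x86_ops->adjust_tsc_offset((vcpu), (adjustment), false)
	#define adjust_tsc_offset_host(vcpu, adjustment) \
		kvm_x86_ops->adjust_tsc_offset((vcpu), (adjustment), true)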
@@ -2219,6 +2210,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 		msr = find_msr_entry(vmx, msr_index);
 		if (msr) {
 			msr->data = data;
+			if (msr - vmx->guest_msrs < vmx->save_nmsrs)
+				kvm_set_shared_msr(msr->index, msr->data,
+						   msr->mask);
 			break;
 		}
 		ret = kvm_set_msr_common(vcpu, msr_index, data);
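
In the vmx_set_msr() hunk, find_msr_entry() returns a pointer into the vmx->guest_msrs array, so "msr - vmx->guest_msrs" is simply the entry's index; entries below save_nmsrs participate in the shared-MSR mechanism and may already be loaded into hardware. Updating only msr->data would leave the CPU running with the stale value until the next reload, hence the immediate kvm_set_shared_msr() call. The index test is plain pointer arithmetic, as in this standalone illustration:

	#include <stddef.h>
	#include <stdio.h>

	struct shared_msr_entry { unsigned int index; unsigned long long data; };

	int main(void)
	{
		struct shared_msr_entry guest_msrs[8];
		struct shared_msr_entry *msr = &guest_msrs[3];

		/* Pointer difference gives the element index within the array. */
		ptrdiff_t idx = msr - guest_msrs;
		printf("index = %td\n", idx);	/* prints 3 */
		return 0;
	}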
@@ -2399,7 +2393,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 				&_pin_based_exec_control) < 0)
 		return -EIO;
 
-	min =
+	min = CPU_BASED_HLT_EXITING |
 #ifdef CONFIG_X86_64
 	      CPU_BASED_CR8_LOAD_EXITING |
 	      CPU_BASED_CR8_STORE_EXITING |
@@ -2414,9 +2408,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	      CPU_BASED_INVLPG_EXITING |
 	      CPU_BASED_RDPMC_EXITING;
 
-	if (yield_on_hlt)
-		min |= CPU_BASED_HLT_EXITING;
-
 	opt = CPU_BASED_TPR_SHADOW |
 	      CPU_BASED_USE_MSR_BITMAPS |
 	      CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
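
In setup_vmcs_config(), 'min' holds must-have controls (hardware that cannot set them fails probing with -EIO) while 'opt' holds best-effort ones, so folding CPU_BASED_HLT_EXITING into 'min' turns HLT exiting from a yield_on_hlt-controlled option into a hard requirement. The filtering against the capability MSR works roughly like this (a simplified sketch of adjust_vmx_controls, assuming this shape):

	static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
					      u32 msr, u32 *result)
	{
		u32 vmx_msr_low, vmx_msr_high;
		u32 ctl = ctl_min | ctl_opt;

		rdmsr(msr, vmx_msr_low, vmx_msr_high);

		ctl &= vmx_msr_high;	/* bit == 0 in high word ==> must be zero */
		ctl |= vmx_msr_low;	/* bit == 1 in low word  ==> must be one  */

		/* All required (min) controls must have survived. */
		if (ctl_min & ~ctl)
			return -EIO;

		*result = ctl;
		return 0;
	}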
@@ -4003,7 +3994,6 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
 	} else
 		intr |= INTR_TYPE_EXT_INTR;
 	vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, intr);
-	vmx_clear_hlt(vcpu);
 }
 
 static void vmx_inject_nmi(struct kvm_vcpu *vcpu)
4009 | static void vmx_inject_nmi(struct kvm_vcpu *vcpu) | 3999 | static void vmx_inject_nmi(struct kvm_vcpu *vcpu) |
@@ -4035,7 +4025,6 @@ static void vmx_inject_nmi(struct kvm_vcpu *vcpu) | |||
4035 | } | 4025 | } |
4036 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, | 4026 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, |
4037 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); | 4027 | INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK | NMI_VECTOR); |
4038 | vmx_clear_hlt(vcpu); | ||
4039 | } | 4028 | } |
4040 | 4029 | ||
4041 | static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) | 4030 | static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) |
@@ -4672,9 +4661,10 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 	bool has_error_code = false;
 	u32 error_code = 0;
 	u16 tss_selector;
-	int reason, type, idt_v;
+	int reason, type, idt_v, idt_index;
 
 	idt_v = (vmx->idt_vectoring_info & VECTORING_INFO_VALID_MASK);
+	idt_index = (vmx->idt_vectoring_info & VECTORING_INFO_VECTOR_MASK);
 	type = (vmx->idt_vectoring_info & VECTORING_INFO_TYPE_MASK);
 
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
@@ -4712,8 +4702,9 @@ static int handle_task_switch(struct kvm_vcpu *vcpu)
 		       type != INTR_TYPE_NMI_INTR))
 		skip_emulated_instruction(vcpu);
 
-	if (kvm_task_switch(vcpu, tss_selector, reason,
-			    has_error_code, error_code) == EMULATE_FAIL) {
+	if (kvm_task_switch(vcpu, tss_selector,
+			    type == INTR_TYPE_SOFT_INTR ? idt_index : -1, reason,
+			    has_error_code, error_code) == EMULATE_FAIL) {
 		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 		vcpu->run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
 		vcpu->run->internal.ndata = 0;
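
A task switch triggered by a software interrupt ("int n") must honour the DPL of the IDT gate it came through, and the emulator cannot perform that privilege check without knowing the vector. The IDT-vectoring info field of the VM-exit carries the vector in its low byte, which is what the new idt_index extraction recovers; -1 is passed for hardware-initiated sources, where no gate check applies. Conceptually (field layout per the SDM; this helper is illustrative, not from the patch):

	#include <stdint.h>

	#define VECTORING_INFO_VECTOR_MASK 0xffu	/* bits 7:0 - vector */
	#define VECTORING_INFO_TYPE_MASK   0x700u	/* bits 10:8 - event type */
	#define INTR_TYPE_SOFT_INTR        (4u << 8)	/* software interrupt */

	/* Sketch: choose the IDT index handed to the task-switch emulator. */
	static int task_switch_idt_index(uint32_t idt_vectoring_info)
	{
		uint32_t type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;

		if (type == INTR_TYPE_SOFT_INTR)
			return idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
		return -1;	/* no DPL check needed */
	}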