Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c  45
1 file changed, 30 insertions, 15 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 579a0b51696a..906a7e84200f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -18,6 +18,7 @@
 
 #include "irq.h"
 #include "mmu.h"
+#include "cpuid.h"
 
 #include <linux/kvm_host.h>
 #include <linux/module.h>
@@ -1747,7 +1748,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
        int save_nmsrs, index;
        unsigned long *msr_bitmap;
 
-       vmx_load_host_state(vmx);
        save_nmsrs = 0;
 #ifdef CONFIG_X86_64
        if (is_long_mode(&vmx->vcpu)) {
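[Annotation] The vmx_load_host_state() calls dropped here and in the vmx_get_msr()/vmx_set_msr() hunks below guarded accesses to MSR values that live in ordinary memory, not in hardware registers, so no host-state reload is needed around them. A minimal sketch of the lookup those paths use, modeled on the shared_msr_entry/find_msr_entry code in this file (treat the exact field layout as an assumption):

        /* Each entry is a plain struct in vmx->guest_msrs[]; reading or
         * writing msr->data touches memory only -- no VMCS or CPU MSR access. */
        struct shared_msr_entry {
                unsigned index;
                u64 data;
                u64 mask;
        };

        static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
        {
                int i;

                for (i = 0; i < vmx->nmsrs; ++i)        /* small linear scan */
                        if (vmx->guest_msrs[i].index == msr)
                                return &vmx->guest_msrs[i];
                return NULL;
        }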
@@ -1956,6 +1956,7 @@ static __init void nested_vmx_setup_ctls_msrs(void)
 #endif
                CPU_BASED_MOV_DR_EXITING | CPU_BASED_UNCOND_IO_EXITING |
                CPU_BASED_USE_IO_BITMAPS | CPU_BASED_MONITOR_EXITING |
+               CPU_BASED_RDPMC_EXITING |
                CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
        /*
         * We can allow some features even when not supported by the
@@ -2142,12 +2143,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
                return 1;
                /* Otherwise falls through */
        default:
-               vmx_load_host_state(to_vmx(vcpu));
                if (vmx_get_vmx_msr(vcpu, msr_index, pdata))
                        return 0;
                msr = find_msr_entry(to_vmx(vcpu), msr_index);
                if (msr) {
-                       vmx_load_host_state(to_vmx(vcpu));
                        data = msr->data;
                        break;
                }
@@ -2171,7 +2170,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 
        switch (msr_index) {
        case MSR_EFER:
-               vmx_load_host_state(vmx);
                ret = kvm_set_msr_common(vcpu, msr_index, data);
                break;
 #ifdef CONFIG_X86_64
@@ -2220,7 +2218,6 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
                        break;
                msr = find_msr_entry(vmx, msr_index);
                if (msr) {
-                       vmx_load_host_state(vmx);
                        msr->data = data;
                        break;
                }
@@ -2414,7 +2411,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
              CPU_BASED_USE_TSC_OFFSETING |
              CPU_BASED_MWAIT_EXITING |
              CPU_BASED_MONITOR_EXITING |
-             CPU_BASED_INVLPG_EXITING;
+             CPU_BASED_INVLPG_EXITING |
+             CPU_BASED_RDPMC_EXITING;
 
        if (yield_on_hlt)
                min |= CPU_BASED_HLT_EXITING;
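[Annotation] The new control is bit 11 of the primary processor-based VM-execution controls (value per the SDM; the define lives in arch/x86/include/asm/vmx.h):

        #define CPU_BASED_RDPMC_EXITING         0x00000800

Because it is added to the required ("min") set, setup_vmcs_config() will refuse hardware that cannot intercept RDPMC, and every guest RDPMC then traps to the handle_rdpmc() handler added further down.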
@@ -2716,11 +2714,13 @@ static gva_t rmode_tss_base(struct kvm *kvm)
 {
        if (!kvm->arch.tss_addr) {
                struct kvm_memslots *slots;
+               struct kvm_memory_slot *slot;
                gfn_t base_gfn;
 
                slots = kvm_memslots(kvm);
-               base_gfn = slots->memslots[0].base_gfn +
-                        kvm->memslots->memslots[0].npages - 3;
+               slot = id_to_memslot(slots, 0);
+               base_gfn = slot->base_gfn + slot->npages - 3;
+
                return base_gfn << PAGE_SHIFT;
        }
        return kvm->arch.tss_addr;
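[Annotation] The old code indexed slots->memslots[0] directly (and, inconsistently, dereferenced kvm->memslots a second time); the rewrite goes through the id_to_memslot() accessor instead. A minimal sketch of that accessor's contract, assuming the simple array-backed implementation of the time (the real helper is declared in include/linux/kvm_host.h and later grew a genuine lookup over sorted slots):

        static inline struct kvm_memory_slot *
        id_to_memslot(struct kvm_memslots *slots, int id)
        {
                return &slots->memslots[id];    /* assumption: direct array index */
        }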
@@ -3945,12 +3945,15 @@ static bool nested_exit_on_intr(struct kvm_vcpu *vcpu)
 static void enable_irq_window(struct kvm_vcpu *vcpu)
 {
        u32 cpu_based_vm_exec_control;
-       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu))
-               /* We can get here when nested_run_pending caused
-                * vmx_interrupt_allowed() to return false. In this case, do
-                * nothing - the interrupt will be injected later.
+       if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
+               /*
+                * We get here if vmx_interrupt_allowed() said we can't
+                * inject to L1 now because L2 must run. Ask L2 to exit
+                * right after entry, so we can inject to L1 more promptly.
                 */
+               kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
                return;
+       }
 
        cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
        cpu_based_vm_exec_control |= CPU_BASED_VIRTUAL_INTR_PENDING;
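[Annotation] Instead of silently doing nothing, enable_irq_window() now raises KVM_REQ_IMMEDIATE_EXIT so the pending L1 interrupt is delivered as soon as L2 has completed its mandatory entry. A sketch of how the request is consumed on the entry path, modeled on vcpu_enter_guest() in arch/x86/kvm/x86.c from the companion change (condensed; surrounding code omitted):

        /* Latch the request before interrupts are disabled for VM entry ... */
        req_immediate_exit = kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
        ...
        /* ... then pend a self-IPI so the CPU exits right after VM entry. */
        if (req_immediate_exit)
                smp_send_reschedule(vcpu->cpu);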
@@ -4077,11 +4080,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
 static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu) && nested_exit_on_intr(vcpu)) {
-               struct vmcs12 *vmcs12;
-               if (to_vmx(vcpu)->nested.nested_run_pending)
+               struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+               if (to_vmx(vcpu)->nested.nested_run_pending ||
+                   (vmcs12->idt_vectoring_info_field &
+                       VECTORING_INFO_VALID_MASK))
                        return 0;
                nested_vmx_vmexit(vcpu);
-               vmcs12 = get_vmcs12(vcpu);
                vmcs12->vm_exit_reason = EXIT_REASON_EXTERNAL_INTERRUPT;
                vmcs12->vm_exit_intr_info = 0;
                /* fall through to normal code, but now in L1, not L2 */
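[Annotation] The extra condition keeps L1 injection from racing with an event being re-injected into L2: while vmcs12's IDT-vectoring info is still marked valid, exiting to L1 now would lose that event, so the interrupt stays blocked. The masks involved (from arch/x86/include/asm/vmx.h; bit 31 is the architectural "valid" bit):

        #define INTR_INFO_VALID_MASK            0x80000000      /* bit 31 */
        #define VECTORING_INFO_VALID_MASK       INTR_INFO_VALID_MASK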
@@ -4611,6 +4615,16 @@ static int handle_invlpg(struct kvm_vcpu *vcpu)
        return 1;
 }
 
+static int handle_rdpmc(struct kvm_vcpu *vcpu)
+{
+       int err;
+
+       err = kvm_rdpmc(vcpu);
+       kvm_complete_insn_gp(vcpu, err);
+
+       return 1;
+}
+
 static int handle_wbinvd(struct kvm_vcpu *vcpu)
 {
        skip_emulated_instruction(vcpu);
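[Annotation] handle_rdpmc() follows the usual exit-handler pattern: do the work through a common helper, then complete the instruction (advance RIP, or inject #GP on error) via kvm_complete_insn_gp(). For context, a sketch of the helper it calls, modeled on kvm_rdpmc() from the PMU series in arch/x86/kvm/x86.c (details, especially kvm_pmu_read_pmc()'s exact signature, are assumptions):

        int kvm_rdpmc(struct kvm_vcpu *vcpu)
        {
                unsigned ecx = kvm_register_read(vcpu, VCPU_REGS_RCX);
                u64 data;
                int err;

                /* Read the PMC selected by guest ECX; nonzero err becomes #GP. */
                err = kvm_pmu_read_pmc(vcpu, ecx, &data);
                if (err)
                        return err;
                /* RDPMC returns the counter value split across EDX:EAX. */
                kvm_register_write(vcpu, VCPU_REGS_RAX, (u32)data);
                kvm_register_write(vcpu, VCPU_REGS_RDX, data >> 32);
                return err;
        }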
@@ -5561,6 +5575,7 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
        [EXIT_REASON_HLT]                     = handle_halt,
        [EXIT_REASON_INVD]                    = handle_invd,
        [EXIT_REASON_INVLPG]                  = handle_invlpg,
+       [EXIT_REASON_RDPMC]                   = handle_rdpmc,
        [EXIT_REASON_VMCALL]                  = handle_vmcall,
        [EXIT_REASON_VMCLEAR]                 = handle_vmclear,
        [EXIT_REASON_VMLAUNCH]                = handle_vmlaunch,
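[Annotation] With the table entry in place, dispatch needs no further changes; condensed from vmx_handle_exit() in this file (surrounding sanity checks omitted):

        /* The VMCS exit reason indexes straight into the handler table. */
        if (exit_reason < kvm_vmx_max_exit_handlers
            && kvm_vmx_exit_handlers[exit_reason])
                return kvm_vmx_exit_handlers[exit_reason](vcpu);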