diff options
author | Nadav Har'El <nyh@math.technion.ac.il> | 2013-07-08 07:12:35 -0400 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2013-07-18 06:29:29 -0400 |
commit | b3897a49e22fc173efa77527a447c714f753f681 (patch) | |
tree | 71387ace3ec9f606f98413719dcaffbeedd48266 /arch | |
parent | 6b61edf76551c4ee3ad2e6e377bc4c23c42cedf5 (diff) |
KVM: nVMX: Fix read/write to MSR_IA32_FEATURE_CONTROL
Fix read/write to IA32_FEATURE_CONTROL MSR in nested environment.
This patch simulates this MSR in nested_vmx; the default value is
0x0. BIOS should set it to 0x5 before VMXON. After setting the lock
bit, write to it will cause #GP(0).
Another QEMU patch is also needed to handle emulation of reset
and migration. Resetting the vCPU should clear this MSR, and migration
should preserve its value.
This patch is based on Nadav's previous commit.
http://permalink.gmane.org/gmane.comp.emulators.kvm.devel/88478
Signed-off-by: Nadav Har'El <nyh@math.technion.ac.il>
Signed-off-by: Arthur Chunqi Li <yzt356@gmail.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r-- | arch/x86/kvm/vmx.c | 35 | ||||
-rw-r--r-- | arch/x86/kvm/x86.c | 3 |
2 files changed, 31 insertions, 7 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 183dc72b2523..e37b2a33fd24 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -373,6 +373,7 @@ struct nested_vmx { | |||
373 | * we must keep them pinned while L2 runs. | 373 | * we must keep them pinned while L2 runs. |
374 | */ | 374 | */ |
375 | struct page *apic_access_page; | 375 | struct page *apic_access_page; |
376 | u64 msr_ia32_feature_control; | ||
376 | }; | 377 | }; |
377 | 378 | ||
378 | #define POSTED_INTR_ON 0 | 379 | #define POSTED_INTR_ON 0 |
@@ -2282,8 +2283,11 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
2282 | 2283 | ||
2283 | switch (msr_index) { | 2284 | switch (msr_index) { |
2284 | case MSR_IA32_FEATURE_CONTROL: | 2285 | case MSR_IA32_FEATURE_CONTROL: |
2285 | *pdata = 0; | 2286 | if (nested_vmx_allowed(vcpu)) { |
2286 | break; | 2287 | *pdata = to_vmx(vcpu)->nested.msr_ia32_feature_control; |
2288 | break; | ||
2289 | } | ||
2290 | return 0; | ||
2287 | case MSR_IA32_VMX_BASIC: | 2291 | case MSR_IA32_VMX_BASIC: |
2288 | /* | 2292 | /* |
2289 | * This MSR reports some information about VMX support. We | 2293 | * This MSR reports some information about VMX support. We |
@@ -2356,14 +2360,24 @@ static int vmx_get_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata) | |||
2356 | return 1; | 2360 | return 1; |
2357 | } | 2361 | } |
2358 | 2362 | ||
2359 | static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data) | 2363 | static int vmx_set_vmx_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
2360 | { | 2364 | { |
2365 | u32 msr_index = msr_info->index; | ||
2366 | u64 data = msr_info->data; | ||
2367 | bool host_initialized = msr_info->host_initiated; | ||
2368 | |||
2361 | if (!nested_vmx_allowed(vcpu)) | 2369 | if (!nested_vmx_allowed(vcpu)) |
2362 | return 0; | 2370 | return 0; |
2363 | 2371 | ||
2364 | if (msr_index == MSR_IA32_FEATURE_CONTROL) | 2372 | if (msr_index == MSR_IA32_FEATURE_CONTROL) { |
2365 | /* TODO: the right thing. */ | 2373 | if (!host_initialized && |
2374 | to_vmx(vcpu)->nested.msr_ia32_feature_control | ||
2375 | & FEATURE_CONTROL_LOCKED) | ||
2376 | return 0; | ||
2377 | to_vmx(vcpu)->nested.msr_ia32_feature_control = data; | ||
2366 | return 1; | 2378 | return 1; |
2379 | } | ||
2380 | |||
2367 | /* | 2381 | /* |
2368 | * No need to treat VMX capability MSRs specially: If we don't handle | 2382 | * No need to treat VMX capability MSRs specially: If we don't handle |
2369 | * them, handle_wrmsr will #GP(0), which is correct (they are readonly) | 2383 | * them, handle_wrmsr will #GP(0), which is correct (they are readonly) |
@@ -2494,7 +2508,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) | |||
2494 | return 1; | 2508 | return 1; |
2495 | /* Otherwise falls through */ | 2509 | /* Otherwise falls through */ |
2496 | default: | 2510 | default: |
2497 | if (vmx_set_vmx_msr(vcpu, msr_index, data)) | 2511 | if (vmx_set_vmx_msr(vcpu, msr_info)) |
2498 | break; | 2512 | break; |
2499 | msr = find_msr_entry(vmx, msr_index); | 2513 | msr = find_msr_entry(vmx, msr_index); |
2500 | if (msr) { | 2514 | if (msr) { |
@@ -5622,6 +5636,8 @@ static int handle_vmon(struct kvm_vcpu *vcpu) | |||
5622 | struct kvm_segment cs; | 5636 | struct kvm_segment cs; |
5623 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 5637 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
5624 | struct vmcs *shadow_vmcs; | 5638 | struct vmcs *shadow_vmcs; |
5639 | const u64 VMXON_NEEDED_FEATURES = FEATURE_CONTROL_LOCKED | ||
5640 | | FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX; | ||
5625 | 5641 | ||
5626 | /* The Intel VMX Instruction Reference lists a bunch of bits that | 5642 | /* The Intel VMX Instruction Reference lists a bunch of bits that |
5627 | * are prerequisite to running VMXON, most notably cr4.VMXE must be | 5643 | * are prerequisite to running VMXON, most notably cr4.VMXE must be |
@@ -5650,6 +5666,13 @@ static int handle_vmon(struct kvm_vcpu *vcpu) | |||
5650 | skip_emulated_instruction(vcpu); | 5666 | skip_emulated_instruction(vcpu); |
5651 | return 1; | 5667 | return 1; |
5652 | } | 5668 | } |
5669 | |||
5670 | if ((vmx->nested.msr_ia32_feature_control & VMXON_NEEDED_FEATURES) | ||
5671 | != VMXON_NEEDED_FEATURES) { | ||
5672 | kvm_inject_gp(vcpu, 0); | ||
5673 | return 1; | ||
5674 | } | ||
5675 | |||
5653 | if (enable_shadow_vmcs) { | 5676 | if (enable_shadow_vmcs) { |
5654 | shadow_vmcs = alloc_vmcs(); | 5677 | shadow_vmcs = alloc_vmcs(); |
5655 | if (!shadow_vmcs) | 5678 | if (!shadow_vmcs) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index abbcaa7f6e8f..d2caeb9e592f 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -850,7 +850,8 @@ static u32 msrs_to_save[] = { | |||
850 | #ifdef CONFIG_X86_64 | 850 | #ifdef CONFIG_X86_64 |
851 | MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, | 851 | MSR_CSTAR, MSR_KERNEL_GS_BASE, MSR_SYSCALL_MASK, MSR_LSTAR, |
852 | #endif | 852 | #endif |
853 | MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA | 853 | MSR_IA32_TSC, MSR_IA32_CR_PAT, MSR_VM_HSAVE_PA, |
854 | MSR_IA32_FEATURE_CONTROL | ||
854 | }; | 855 | }; |
855 | 856 | ||
856 | static unsigned num_msrs_to_save; | 857 | static unsigned num_msrs_to_save; |