author     David Hildenbrand <david@redhat.com>    2017-03-10 06:47:13 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>     2017-04-21 05:42:49 -0400
commit     fe0e80befd4d3a62d40f24b98b17483ea00ef2dd (patch)
tree       99e342939c4fdfc0ac845782625befc721b5563f
parent     332518706195007f9fbafa69652aa5b3cf72df24 (diff)
KVM: VMX: drop vmm_exclusive module parameter
vmm_exclusive=0 leads to KVM always setting X86_CR4_VMXE and calling
VMXON only while a vcpu is loaded. cpu_emergency_vmxoff() (called on
kdump) uses X86_CR4_VMXE as the indication of whether VMXOFF has to be
called, which obviously does not hold when the two are managed
independently. Calling VMXOFF without a preceding VMXON results in an
exception.
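
For reference, the kdump path gates VMXOFF on exactly this bit. A
paraphrased sketch of the relevant helpers (recalled from the x86
virtext code of that era, not part of this patch; the helper names and
the __read_cr4() accessor are assumptions):

	/* VMX is considered enabled iff CR4.VMXE is set on this CPU. */
	static inline int cpu_vmx_enabled(void)
	{
		return __read_cr4() & X86_CR4_VMXE;
	}

	/* kdump path: only execute VMXOFF if CR4.VMXE says VMX is on. */
	static inline void cpu_emergency_vmxoff(void)
	{
		if (cpu_vmx_enabled())
			cpu_vmxoff();	/* VMXOFF, then clear X86_CR4_VMXE */
	}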
In addition, hardware_enable() uses X86_CR4_VMXE as a means to test
whether VMX is already in use by another VMM, so there can't really be
co-existence. If the other VMM is prepared for co-existence and does a
similar check, only one VMM can run anyway. If the other VMM is not
prepared and blindly sets/clears X86_CR4_VMXE, we end up with an
inconsistent X86_CR4_VMXE.
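
The check referred to above sits at the top of hardware_enable(); an
abridged, illustrative sketch follows (the cr4_read_shadow() accessor
and the -EBUSY return value are recalled from the surrounding vmx.c,
not shown in the hunks below):

	static int hardware_enable(void)
	{
		/* CR4.VMXE already set: some other VMM owns VMX on this CPU. */
		if (cr4_read_shadow() & X86_CR4_VMXE)
			return -EBUSY;

		/* ... FEATURE_CONTROL checks and VMXON follow ... */
		return 0;
	}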
As we also had bug reports related to VMCS clearing with
vmm_exclusive=0, this path seems to be pretty much untested, so let's
just drop it.
While at it, move the setting/clearing of X86_CR4_VMXE directly into
kvm_cpu_vmxon()/kvm_cpu_vmxoff().
Signed-off-by: David Hildenbrand <david@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/kvm/vmx.c | 38 +++++++-------------------------------
 1 file changed, 7 insertions(+), 31 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 837f6dd1ae9c..c1a12b94e1fd 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -84,9 +84,6 @@ module_param_named(eptad, enable_ept_ad_bits, bool, S_IRUGO);
 static bool __read_mostly emulate_invalid_guest_state = true;
 module_param(emulate_invalid_guest_state, bool, S_IRUGO);
 
-static bool __read_mostly vmm_exclusive = 1;
-module_param(vmm_exclusive, bool, S_IRUGO);
-
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
@@ -910,8 +907,6 @@ static void nested_release_page_clean(struct page *page)
 
 static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
 static u64 construct_eptp(unsigned long root_hpa);
-static void kvm_cpu_vmxon(u64 addr);
-static void kvm_cpu_vmxoff(void);
 static bool vmx_xsaves_supported(void);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -2231,15 +2226,10 @@ static void decache_tsc_multiplier(struct vcpu_vmx *vmx)
 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
 	bool already_loaded = vmx->loaded_vmcs->cpu == cpu;
 
-	if (!vmm_exclusive)
-		kvm_cpu_vmxon(phys_addr);
-	else if (!already_loaded)
-		loaded_vmcs_clear(vmx->loaded_vmcs);
-
 	if (!already_loaded) {
+		loaded_vmcs_clear(vmx->loaded_vmcs);
 		local_irq_disable();
 		crash_disable_local_vmclear(cpu);
 
@@ -2317,11 +2307,6 @@ static void vmx_vcpu_put(struct kvm_vcpu *vcpu)
 	vmx_vcpu_pi_put(vcpu);
 
 	__vmx_load_host_state(to_vmx(vcpu));
-	if (!vmm_exclusive) {
-		__loaded_vmcs_clear(to_vmx(vcpu)->loaded_vmcs);
-		vcpu->cpu = -1;
-		kvm_cpu_vmxoff();
-	}
 }
 
 static void vmx_decache_cr0_guest_bits(struct kvm_vcpu *vcpu);
@@ -3416,6 +3401,7 @@ static __init int vmx_disabled_by_bios(void)
 
 static void kvm_cpu_vmxon(u64 addr)
 {
+	cr4_set_bits(X86_CR4_VMXE);
 	intel_pt_handle_vmx(1);
 
 	asm volatile (ASM_VMX_VMXON_RAX
@@ -3458,12 +3444,8 @@ static int hardware_enable(void)
 		/* enable and lock */
 		wrmsrl(MSR_IA32_FEATURE_CONTROL, old | test_bits);
 	}
-	cr4_set_bits(X86_CR4_VMXE);
-
-	if (vmm_exclusive) {
-		kvm_cpu_vmxon(phys_addr);
-		ept_sync_global();
-	}
+	kvm_cpu_vmxon(phys_addr);
+	ept_sync_global();
 
 	native_store_gdt(this_cpu_ptr(&host_gdt));
 
@@ -3489,15 +3471,13 @@ static void kvm_cpu_vmxoff(void)
 	asm volatile (__ex(ASM_VMX_VMXOFF) : : : "cc");
 
 	intel_pt_handle_vmx(0);
+	cr4_clear_bits(X86_CR4_VMXE);
 }
 
 static void hardware_disable(void)
 {
-	if (vmm_exclusive) {
-		vmclear_local_loaded_vmcss();
-		kvm_cpu_vmxoff();
-	}
-	cr4_clear_bits(X86_CR4_VMXE);
+	vmclear_local_loaded_vmcss();
+	kvm_cpu_vmxoff();
 }
 
 static __init int adjust_vmx_controls(u32 ctl_min, u32 ctl_opt,
@@ -9170,11 +9150,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	vmx->loaded_vmcs->shadow_vmcs = NULL;
 	if (!vmx->loaded_vmcs->vmcs)
 		goto free_msrs;
-	if (!vmm_exclusive)
-		kvm_cpu_vmxon(__pa(per_cpu(vmxarea, raw_smp_processor_id())));
 	loaded_vmcs_init(vmx->loaded_vmcs);
-	if (!vmm_exclusive)
-		kvm_cpu_vmxoff();
 
 	cpu = get_cpu();
 	vmx_vcpu_load(&vmx->vcpu, cpu);