aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSean Christopherson <sean.j.christopherson@intel.com>2019-05-07 15:18:00 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2019-06-18 05:47:44 -0400
commit3af80fec6e7fe2e89aa131a0ebdb90be780668f8 (patch)
tree52d676e074b5f84b7dec508fe626ea134e518172
parentae81d08993cbc515e3181ee6bebce5cd878133f2 (diff)
KVM: VMX: Explicitly initialize controls shadow at VMCS allocation
Or: Don't re-initialize vmcs02's controls on every nested VM-Entry. VMWRITEs to the major VMCS controls are deceptively expensive. Intel CPUs with VMCS caching (Westmere and later) also optimize away consistency checks on VM-Entry, i.e. skip consistency checks if the relevant fields have not changed since the last successful VM-Entry (of the cached VMCS). Because uops are a precious commodity, uCode's dirty VMCS field tracking isn't as precise as software would prefer. Notably, writing any of the major VMCS fields effectively marks the entire VMCS dirty, i.e. causes the next VM-Entry to perform all consistency checks, which consumes several hundred cycles. Zero out the controls' shadow copies during VMCS allocation and use the optimized setter when "initializing" controls. While this technically affects both non-nested and nested virtualization, nested virtualization is the primary beneficiary, as avoiding VMWRITEs when preparing vmcs02 allows hardware to optimize away consistency checks. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/vmx/nested.c10
-rw-r--r--arch/x86/kvm/vmx/vmx.c12
-rw-r--r--arch/x86/kvm/vmx/vmx.h5
3 files changed, 12 insertions, 15 deletions
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index d4f529a2e194..3f76a1f3fe3c 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -2024,7 +2024,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2024 } else { 2024 } else {
2025 exec_control &= ~PIN_BASED_POSTED_INTR; 2025 exec_control &= ~PIN_BASED_POSTED_INTR;
2026 } 2026 }
2027 pin_controls_init(vmx, exec_control); 2027 pin_controls_set(vmx, exec_control);
2028 2028
2029 /* 2029 /*
2030 * EXEC CONTROLS 2030 * EXEC CONTROLS
@@ -2049,7 +2049,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2049 */ 2049 */
2050 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 2050 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
2051 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 2051 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
2052 exec_controls_init(vmx, exec_control); 2052 exec_controls_set(vmx, exec_control);
2053 2053
2054 /* 2054 /*
2055 * SECONDARY EXEC CONTROLS 2055 * SECONDARY EXEC CONTROLS
@@ -2079,7 +2079,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2079 vmcs_write16(GUEST_INTR_STATUS, 2079 vmcs_write16(GUEST_INTR_STATUS,
2080 vmcs12->guest_intr_status); 2080 vmcs12->guest_intr_status);
2081 2081
2082 secondary_exec_controls_init(vmx, exec_control); 2082 secondary_exec_controls_set(vmx, exec_control);
2083 } 2083 }
2084 2084
2085 /* 2085 /*
@@ -2098,7 +2098,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2098 if (guest_efer != host_efer) 2098 if (guest_efer != host_efer)
2099 exec_control |= VM_ENTRY_LOAD_IA32_EFER; 2099 exec_control |= VM_ENTRY_LOAD_IA32_EFER;
2100 } 2100 }
2101 vm_entry_controls_init(vmx, exec_control); 2101 vm_entry_controls_set(vmx, exec_control);
2102 2102
2103 /* 2103 /*
2104 * EXIT CONTROLS 2104 * EXIT CONTROLS
@@ -2110,7 +2110,7 @@ static void prepare_vmcs02_early(struct vcpu_vmx *vmx, struct vmcs12 *vmcs12)
2110 exec_control = vmx_vmexit_ctrl(); 2110 exec_control = vmx_vmexit_ctrl();
2111 if (cpu_has_load_ia32_efer() && guest_efer != host_efer) 2111 if (cpu_has_load_ia32_efer() && guest_efer != host_efer)
2112 exec_control |= VM_EXIT_LOAD_IA32_EFER; 2112 exec_control |= VM_EXIT_LOAD_IA32_EFER;
2113 vm_exit_controls_init(vmx, exec_control); 2113 vm_exit_controls_set(vmx, exec_control);
2114 2114
2115 /* 2115 /*
2116 * Interrupt/Exception Fields 2116 * Interrupt/Exception Fields
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 8c1cbc19af97..bae376bf9c20 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -2485,6 +2485,8 @@ int alloc_loaded_vmcs(struct loaded_vmcs *loaded_vmcs)
2485 } 2485 }
2486 2486
2487 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state)); 2487 memset(&loaded_vmcs->host_state, 0, sizeof(struct vmcs_host_state));
2488 memset(&loaded_vmcs->controls_shadow, 0,
2489 sizeof(struct vmcs_controls_shadow));
2488 2490
2489 return 0; 2491 return 0;
2490 2492
@@ -4040,14 +4042,14 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
4040 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ 4042 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
4041 4043
4042 /* Control */ 4044 /* Control */
4043 pin_controls_init(vmx, vmx_pin_based_exec_ctrl(vmx)); 4045 pin_controls_set(vmx, vmx_pin_based_exec_ctrl(vmx));
4044 vmx->hv_deadline_tsc = -1; 4046 vmx->hv_deadline_tsc = -1;
4045 4047
4046 exec_controls_init(vmx, vmx_exec_control(vmx)); 4048 exec_controls_set(vmx, vmx_exec_control(vmx));
4047 4049
4048 if (cpu_has_secondary_exec_ctrls()) { 4050 if (cpu_has_secondary_exec_ctrls()) {
4049 vmx_compute_secondary_exec_control(vmx); 4051 vmx_compute_secondary_exec_control(vmx);
4050 secondary_exec_controls_init(vmx, vmx->secondary_exec_control); 4052 secondary_exec_controls_set(vmx, vmx->secondary_exec_control);
4051 } 4053 }
4052 4054
4053 if (kvm_vcpu_apicv_active(&vmx->vcpu)) { 4055 if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
@@ -4105,10 +4107,10 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
4105 ++vmx->nmsrs; 4107 ++vmx->nmsrs;
4106 } 4108 }
4107 4109
4108 vm_exit_controls_init(vmx, vmx_vmexit_ctrl()); 4110 vm_exit_controls_set(vmx, vmx_vmexit_ctrl());
4109 4111
4110 /* 22.2.1, 20.8.1 */ 4112 /* 22.2.1, 20.8.1 */
4111 vm_entry_controls_init(vmx, vmx_vmentry_ctrl()); 4113 vm_entry_controls_set(vmx, vmx_vmentry_ctrl());
4112 4114
4113 vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS; 4115 vmx->vcpu.arch.cr0_guest_owned_bits = X86_CR0_TS;
4114 vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS); 4116 vmcs_writel(CR0_GUEST_HOST_MASK, ~X86_CR0_TS);
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 52d7bc90d9ef..82d0bc3a4d52 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -388,11 +388,6 @@ static inline u8 vmx_get_rvi(void)
388} 388}
389 389
390#define BUILD_CONTROLS_SHADOW(lname, uname) \ 390#define BUILD_CONTROLS_SHADOW(lname, uname) \
391static inline void lname##_controls_init(struct vcpu_vmx *vmx, u32 val) \
392{ \
393 vmcs_write32(uname, val); \
394 vmx->loaded_vmcs->controls_shadow.lname = val; \
395} \
396static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \ 391static inline void lname##_controls_set(struct vcpu_vmx *vmx, u32 val) \
397{ \ 392{ \
398 if (vmx->loaded_vmcs->controls_shadow.lname != val) { \ 393 if (vmx->loaded_vmcs->controls_shadow.lname != val) { \