author	Peter Feiner <pfeiner@google.com>	2016-07-07 17:49:58 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-07-14 13:11:19 -0400
commit	4e59516a12a6ef6dcb660cb3a3f70c64bd60cfec
tree	679def54f0735826312d63b690d811f4022e68df
parent	9770404a0061ec46dec6e15c4b07731ce2e2d7bb
kvm: vmx: ensure VMCS is current while enabling PML
Between loading the new VMCS and enabling PML, the CPU was unpinned. If the vCPU thread were migrated to another CPU in the interim (e.g., due to preemption or a sleeping alloc_page), then the VMWRITEs to enable PML would target the wrong VMCS -- or no VMCS at all:

[ 2087.266950] vmwrite error: reg 200e value 3fe1d52000 (err -506126336)
[ 2087.267062] vmwrite error: reg 812 value 1ff (err 511)
[ 2087.267125] vmwrite error: reg 401e value 12229c00 (err 304258048)

This patch ensures that the VMCS remains current while enabling PML by doing the VMWRITEs while the CPU is pinned. Allocation of the PML buffer is hoisted out of the critical section.

Signed-off-by: Peter Feiner <pfeiner@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
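For illustration, a minimal sketch of the ordering bug and the fix. struct vcpu_sketch, load_vmcs() and vmwrite() are stand-ins invented for this sketch (the real code uses struct vcpu_vmx, vmx_vcpu_load()/vmcs_load() and vmcs_write64()/vmcs_write16()), so treat this as the shape of the problem, not kernel source:

/* Buggy shape: the VMWRITEs sit after a possible migration point. */
static int pml_setup_buggy(struct vcpu_sketch *v)
{
	load_vmcs(v);				/* VMCS made current on CPU A */
	v->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!v->pml_pg)				/* alloc_page() may sleep, so the */
		return -ENOMEM;			/* thread can migrate to CPU B here */
	vmwrite(PML_ADDRESS, page_to_phys(v->pml_pg));	/* on CPU B these hit */
	vmwrite(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);	/* the wrong (or no) VMCS */
	return 0;
}

/* Fixed shape: allocate first, then write while the VMCS is known current. */
static int pml_setup_fixed(struct vcpu_sketch *v)
{
	v->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);	/* hoisted out of */
	if (!v->pml_pg)						/* the critical section */
		return -ENOMEM;
	load_vmcs(v);				/* no sleeping between here ... */
	vmwrite(PML_ADDRESS, page_to_phys(v->pml_pg));
	vmwrite(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);	/* ... and the last write */
	return 0;
}

In the patch itself the two halves land in different functions: the allocation moves into vmx_create_vcpu(), and the VMWRITEs move into vmx_vcpu_setup(), which runs while the vCPU's VMCS is loaded and preemption is disabled.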
-rw-r--r--	arch/x86/kvm/vmx.c	56
1 file changed, 24 insertions(+), 32 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 64a79f271276..e34965b37a88 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4979,6 +4979,12 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	if (vmx_xsaves_supported())
 		vmcs_write64(XSS_EXIT_BITMAP, VMX_XSS_EXIT_BITMAP);
 
+	if (enable_pml) {
+		ASSERT(vmx->pml_pg);
+		vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
+		vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
+	}
+
 	return 0;
 }
 
@@ -7937,22 +7943,6 @@ static void vmx_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
 	*info2 = vmcs_read32(VM_EXIT_INTR_INFO);
 }
 
-static int vmx_create_pml_buffer(struct vcpu_vmx *vmx)
-{
-	struct page *pml_pg;
-
-	pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
-	if (!pml_pg)
-		return -ENOMEM;
-
-	vmx->pml_pg = pml_pg;
-
-	vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
-	vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
-
-	return 0;
-}
-
 static void vmx_destroy_pml_buffer(struct vcpu_vmx *vmx)
 {
 	if (vmx->pml_pg) {
@@ -8885,14 +8875,26 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_vcpu;
 
+	err = -ENOMEM;
+
+	/*
+	 * If PML is turned on, failure on enabling PML just results in failure
+	 * of creating the vcpu, therefore we can simplify PML logic (by
+	 * avoiding dealing with cases, such as enabling PML partially on vcpus
+	 * for the guest, etc.
+	 */
+	if (enable_pml) {
+		vmx->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);
+		if (!vmx->pml_pg)
+			goto uninit_vcpu;
+	}
+
 	vmx->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	BUILD_BUG_ON(ARRAY_SIZE(vmx_msr_index) * sizeof(vmx->guest_msrs[0])
 		     > PAGE_SIZE);
 
-	err = -ENOMEM;
-	if (!vmx->guest_msrs) {
-		goto uninit_vcpu;
-	}
+	if (!vmx->guest_msrs)
+		goto free_pml;
 
 	vmx->loaded_vmcs = &vmx->vmcs01;
 	vmx->loaded_vmcs->vmcs = alloc_vmcs();
@@ -8936,18 +8938,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 	vmx->nested.current_vmptr = -1ull;
 	vmx->nested.current_vmcs12 = NULL;
 
-	/*
-	 * If PML is turned on, failure on enabling PML just results in failure
-	 * of creating the vcpu, therefore we can simplify PML logic (by
-	 * avoiding dealing with cases, such as enabling PML partially on vcpus
-	 * for the guest, etc.
-	 */
-	if (enable_pml) {
-		err = vmx_create_pml_buffer(vmx);
-		if (err)
-			goto free_vmcs;
-	}
-
 	return &vmx->vcpu;
 
 free_vmcs:
@@ -8955,6 +8945,8 @@ free_vmcs:
 	free_loaded_vmcs(vmx->loaded_vmcs);
 free_msrs:
 	kfree(vmx->guest_msrs);
+free_pml:
+	vmx_destroy_pml_buffer(vmx);
 uninit_vcpu:
 	kvm_vcpu_uninit(&vmx->vcpu);
 free_vcpu:
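A side note on the error paths: the new free_pml label slots into the usual kernel unwind ladder, where labels run in reverse order of acquisition, and vmx_destroy_pml_buffer() checks for a NULL page (visible in the hunk above), so falling through free_pml with PML disabled is a harmless no-op. A hedged sketch of the pattern, with struct vcpu_sketch and the function name invented for illustration:

static int create_sketch(struct vcpu_sketch *v)
{
	int err = -ENOMEM;

	v->pml_pg = alloc_page(GFP_KERNEL | __GFP_ZERO);	/* acquired first */
	if (!v->pml_pg)
		goto out;
	v->guest_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);		/* acquired second */
	if (!v->guest_msrs)
		goto free_pml;
	return 0;

free_pml:				/* released in reverse order */
	__free_page(v->pml_pg);
out:
	return err;
}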