aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorWincy Van <fanwenyi0529@gmail.com>2015-02-03 10:49:31 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2015-02-03 11:02:32 -0500
commit3af18d9c5fe95a6b377dca7b9ff9c6d3ab7f0969 (patch)
treee963341b8a466dbf2a7130201facf5bb1d213064
parent2e6d015799d523dcce11c7d1465e6feb7b69fab1 (diff)
KVM: nVMX: Prepare for using hardware MSR bitmap
Currently, if L1 enables MSR_BITMAP, we emulate this feature: all of L2's MSR accesses are intercepted by L0. Features like "virtualize x2apic mode" require that the MSR bitmap is enabled, or the hardware will exit and, for example, not virtualize the x2apic MSRs. In order to let L1 use these features, we need to build a merged bitmap that causes a VMEXIT only if 1) L1 requires one, or 2) the intercept is required by the processor for APIC virtualization. For now the guests still run with the MSR bitmap disabled, but this patch already introduces nested_vmx_merge_msr_bitmap for future use. Signed-off-by: Wincy Van <fanwenyi0529@gmail.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/vmx.c77
1 files changed, 66 insertions, 11 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 15b78936c101..6d1d26f017c1 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -800,6 +800,7 @@ static unsigned long *vmx_msr_bitmap_legacy;
800static unsigned long *vmx_msr_bitmap_longmode; 800static unsigned long *vmx_msr_bitmap_longmode;
801static unsigned long *vmx_msr_bitmap_legacy_x2apic; 801static unsigned long *vmx_msr_bitmap_legacy_x2apic;
802static unsigned long *vmx_msr_bitmap_longmode_x2apic; 802static unsigned long *vmx_msr_bitmap_longmode_x2apic;
803static unsigned long *vmx_msr_bitmap_nested;
803static unsigned long *vmx_vmread_bitmap; 804static unsigned long *vmx_vmread_bitmap;
804static unsigned long *vmx_vmwrite_bitmap; 805static unsigned long *vmx_vmwrite_bitmap;
805 806
@@ -5823,13 +5824,21 @@ static __init int hardware_setup(void)
5823 (unsigned long *)__get_free_page(GFP_KERNEL); 5824 (unsigned long *)__get_free_page(GFP_KERNEL);
5824 if (!vmx_msr_bitmap_longmode_x2apic) 5825 if (!vmx_msr_bitmap_longmode_x2apic)
5825 goto out4; 5826 goto out4;
5827
5828 if (nested) {
5829 vmx_msr_bitmap_nested =
5830 (unsigned long *)__get_free_page(GFP_KERNEL);
5831 if (!vmx_msr_bitmap_nested)
5832 goto out5;
5833 }
5834
5826 vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); 5835 vmx_vmread_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
5827 if (!vmx_vmread_bitmap) 5836 if (!vmx_vmread_bitmap)
5828 goto out5; 5837 goto out6;
5829 5838
5830 vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); 5839 vmx_vmwrite_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL);
5831 if (!vmx_vmwrite_bitmap) 5840 if (!vmx_vmwrite_bitmap)
5832 goto out6; 5841 goto out7;
5833 5842
5834 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE); 5843 memset(vmx_vmread_bitmap, 0xff, PAGE_SIZE);
5835 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE); 5844 memset(vmx_vmwrite_bitmap, 0xff, PAGE_SIZE);
@@ -5845,10 +5854,12 @@ static __init int hardware_setup(void)
5845 5854
5846 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE); 5855 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
5847 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE); 5856 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
5857 if (nested)
5858 memset(vmx_msr_bitmap_nested, 0xff, PAGE_SIZE);
5848 5859
5849 if (setup_vmcs_config(&vmcs_config) < 0) { 5860 if (setup_vmcs_config(&vmcs_config) < 0) {
5850 r = -EIO; 5861 r = -EIO;
5851 goto out7; 5862 goto out8;
5852 } 5863 }
5853 5864
5854 if (boot_cpu_has(X86_FEATURE_NX)) 5865 if (boot_cpu_has(X86_FEATURE_NX))
@@ -5968,10 +5979,13 @@ static __init int hardware_setup(void)
5968 5979
5969 return alloc_kvm_area(); 5980 return alloc_kvm_area();
5970 5981
5971out7: 5982out8:
5972 free_page((unsigned long)vmx_vmwrite_bitmap); 5983 free_page((unsigned long)vmx_vmwrite_bitmap);
5973out6: 5984out7:
5974 free_page((unsigned long)vmx_vmread_bitmap); 5985 free_page((unsigned long)vmx_vmread_bitmap);
5986out6:
5987 if (nested)
5988 free_page((unsigned long)vmx_msr_bitmap_nested);
5975out5: 5989out5:
5976 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic); 5990 free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
5977out4: 5991out4:
@@ -5998,6 +6012,8 @@ static __exit void hardware_unsetup(void)
5998 free_page((unsigned long)vmx_io_bitmap_a); 6012 free_page((unsigned long)vmx_io_bitmap_a);
5999 free_page((unsigned long)vmx_vmwrite_bitmap); 6013 free_page((unsigned long)vmx_vmwrite_bitmap);
6000 free_page((unsigned long)vmx_vmread_bitmap); 6014 free_page((unsigned long)vmx_vmread_bitmap);
6015 if (nested)
6016 free_page((unsigned long)vmx_msr_bitmap_nested);
6001 6017
6002 free_kvm_area(); 6018 free_kvm_area();
6003} 6019}
@@ -8455,6 +8471,38 @@ static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
8455 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL); 8471 ns_to_ktime(preemption_timeout), HRTIMER_MODE_REL);
8456} 8472}
8457 8473
8474static int nested_vmx_check_msr_bitmap_controls(struct kvm_vcpu *vcpu,
8475 struct vmcs12 *vmcs12)
8476{
8477 int maxphyaddr;
8478 u64 addr;
8479
8480 if (!nested_cpu_has(vmcs12, CPU_BASED_USE_MSR_BITMAPS))
8481 return 0;
8482
8483 if (vmcs12_read_any(vcpu, MSR_BITMAP, &addr)) {
8484 WARN_ON(1);
8485 return -EINVAL;
8486 }
8487 maxphyaddr = cpuid_maxphyaddr(vcpu);
8488
8489 if (!PAGE_ALIGNED(vmcs12->msr_bitmap) ||
8490 ((addr + PAGE_SIZE) >> maxphyaddr))
8491 return -EINVAL;
8492
8493 return 0;
8494}
8495
/*
 * Merge L0's and L1's MSR bitmap, return false to indicate that
 * we do not use the hardware.
 *
 * NOTE(review): placeholder — always falls back to intercepting every
 * MSR access of L2 in software.  A future change is expected to build
 * the merged bitmap into vmx_msr_bitmap_nested and return true.
 */
static inline bool nested_vmx_merge_msr_bitmap(struct kvm_vcpu *vcpu,
					       struct vmcs12 *vmcs12)
{
	return false;
}
8505
8458static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu, 8506static int nested_vmx_check_msr_switch(struct kvm_vcpu *vcpu,
8459 unsigned long count_field, 8507 unsigned long count_field,
8460 unsigned long addr_field, 8508 unsigned long addr_field,
@@ -8787,11 +8835,17 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
8787 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold); 8835 vmcs_write32(TPR_THRESHOLD, vmcs12->tpr_threshold);
8788 } 8836 }
8789 8837
8838 if (cpu_has_vmx_msr_bitmap() &&
8839 exec_control & CPU_BASED_USE_MSR_BITMAPS &&
8840 nested_vmx_merge_msr_bitmap(vcpu, vmcs12)) {
8841 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_nested));
8842 } else
8843 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
8844
8790 /* 8845 /*
8791 * Merging of IO and MSR bitmaps not currently supported. 8846 * Merging of IO bitmap not currently supported.
8792 * Rather, exit every time. 8847 * Rather, exit every time.
8793 */ 8848 */
8794 exec_control &= ~CPU_BASED_USE_MSR_BITMAPS;
8795 exec_control &= ~CPU_BASED_USE_IO_BITMAPS; 8849 exec_control &= ~CPU_BASED_USE_IO_BITMAPS;
8796 exec_control |= CPU_BASED_UNCOND_IO_EXITING; 8850 exec_control |= CPU_BASED_UNCOND_IO_EXITING;
8797 8851
@@ -8942,15 +8996,13 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
8942 return 1; 8996 return 1;
8943 } 8997 }
8944 8998
8945 if ((vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_MSR_BITMAPS) && 8999 if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
8946 !PAGE_ALIGNED(vmcs12->msr_bitmap)) {
8947 /*TODO: Also verify bits beyond physical address width are 0*/ 9000 /*TODO: Also verify bits beyond physical address width are 0*/
8948 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 9001 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
8949 return 1; 9002 return 1;
8950 } 9003 }
8951 9004
8952 if (!nested_get_vmcs12_pages(vcpu, vmcs12)) { 9005 if (nested_vmx_check_msr_bitmap_controls(vcpu, vmcs12)) {
8953 /*TODO: Also verify bits beyond physical address width are 0*/
8954 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD); 9006 nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
8955 return 1; 9007 return 1;
8956 } 9008 }
@@ -9506,6 +9558,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
9506 kvm_set_dr(vcpu, 7, 0x400); 9558 kvm_set_dr(vcpu, 7, 0x400);
9507 vmcs_write64(GUEST_IA32_DEBUGCTL, 0); 9559 vmcs_write64(GUEST_IA32_DEBUGCTL, 0);
9508 9560
9561 if (cpu_has_vmx_msr_bitmap())
9562 vmx_set_msr_bitmap(vcpu);
9563
9509 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr, 9564 if (nested_vmx_load_msr(vcpu, vmcs12->vm_exit_msr_load_addr,
9510 vmcs12->vm_exit_msr_load_count)) 9565 vmcs12->vm_exit_msr_load_count))
9511 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL); 9566 nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);