author    Jan Kiszka <jan.kiszka@web.de>  2013-04-20 04:52:36 -0400
committer Gleb Natapov <gleb@redhat.com>  2013-04-22 05:53:42 -0400
commit    384bb783275145b70d769acf4c687957d1c61802 (patch)
tree      8db3286f89b5a1cf38ba2e3231b94c822b229199 /arch/x86
parent    ea8ceb8354e1c84a13cf2a8e915dc74f96759393 (diff)
KVM: nVMX: Validate EFER values for VM_ENTRY/EXIT_LOAD_IA32_EFER
As we may emulate the loading of EFER on VM-entry and VM-exit, implement the
checks that VMX performs on the guest and host values on vmlaunch/vmresume.
Factor out kvm_valid_efer for this purpose, which checks for set reserved bits.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
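The reserved-bit test that kvm_valid_efer centralizes can be pictured as a
standalone predicate. This is only a sketch: the allowed-bit mask below is
hypothetical, while KVM builds its real efer_reserved_bits mask at init time
and relaxes it through kvm_enable_efer_bits().

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical allowed-bit mask; KVM derives the real one at runtime. */
    #define EFER_SCE (1ULL << 0)   /* syscall extensions */
    #define EFER_LME (1ULL << 8)   /* long mode enable */
    #define EFER_LMA (1ULL << 10)  /* long mode active */
    #define EFER_NX  (1ULL << 11)  /* no-execute enable */

    static const uint64_t efer_reserved =
            ~(EFER_SCE | EFER_LME | EFER_LMA | EFER_NX);

    /* Valid iff no reserved bit is set in the proposed EFER value. */
    static bool efer_value_valid(uint64_t efer)
    {
            return (efer & efer_reserved) == 0;
    }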
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  1 +
-rw-r--r--  arch/x86/kvm/vmx.c              | 40 ++++++++++++++++++++
-rw-r--r--  arch/x86/kvm/x86.c              | 29 ++++++++++-----
3 files changed, 60 insertions(+), 10 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 599f98b612d4..18635ae42a8e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -809,6 +809,7 @@ static inline int emulate_instruction(struct kvm_vcpu *vcpu,
 }
 
 void kvm_enable_efer_bits(u64);
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer);
 int kvm_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *data);
 int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 11ea3cbbb78c..d7ef55686227 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7558,6 +7558,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	int cpu;
 	struct loaded_vmcs *vmcs02;
+	bool ia32e;
 
 	if (!nested_vmx_check_permission(vcpu) ||
 	    !nested_vmx_check_vmcs12(vcpu))
@@ -7649,6 +7650,45 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	}
 
 	/*
+	 * If the “load IA32_EFER” VM-entry control is 1, the following checks
+	 * are performed on the field for the IA32_EFER MSR:
+	 * - Bits reserved in the IA32_EFER MSR must be 0.
+	 * - Bit 10 (corresponding to IA32_EFER.LMA) must equal the value of
+	 *   the IA-32e mode guest VM-entry control. It must also be identical
+	 *   to bit 8 (LME) if bit 31 in the CR0 field (corresponding to
+	 *   CR0.PG) is 1.
+	 */
+	if (vmcs12->vm_entry_controls & VM_ENTRY_LOAD_IA32_EFER) {
+		ia32e = (vmcs12->vm_entry_controls & VM_ENTRY_IA32E_MODE) != 0;
+		if (!kvm_valid_efer(vcpu, vmcs12->guest_ia32_efer) ||
+		    ia32e != !!(vmcs12->guest_ia32_efer & EFER_LMA) ||
+		    ((vmcs12->guest_cr0 & X86_CR0_PG) &&
+		     ia32e != !!(vmcs12->guest_ia32_efer & EFER_LME))) {
+			nested_vmx_entry_failure(vcpu, vmcs12,
+				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
+			return 1;
+		}
+	}
+
+	/*
+	 * If the load IA32_EFER VM-exit control is 1, bits reserved in the
+	 * IA32_EFER MSR must be 0 in the field for that register. In addition,
+	 * the values of the LMA and LME bits in the field must each be that of
+	 * the host address-space size VM-exit control.
+	 */
+	if (vmcs12->vm_exit_controls & VM_EXIT_LOAD_IA32_EFER) {
+		ia32e = (vmcs12->vm_exit_controls &
+			 VM_EXIT_HOST_ADDR_SPACE_SIZE) != 0;
+		if (!kvm_valid_efer(vcpu, vmcs12->host_ia32_efer) ||
+		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LMA) ||
+		    ia32e != !!(vmcs12->host_ia32_efer & EFER_LME)) {
+			nested_vmx_entry_failure(vcpu, vmcs12,
+				EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
+			return 1;
+		}
+	}
+
+	/*
 	 * We're finally done with prerequisite checking, and can start with
 	 * the nested entry.
 	 */
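For a concrete feel of the VM-entry consistency rule in the hunk above, the
following self-contained program feeds two example configurations through an
equivalent predicate. The bit positions match the SDM but are hardcoded here
for illustration, the function name is made up, and the reserved-bit test
(kvm_valid_efer) is elided:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Control and EFER bit positions per the Intel SDM; hardcoded
     * purely for illustration. */
    #define VM_ENTRY_IA32E_MODE     (1u << 9)
    #define VM_ENTRY_LOAD_IA32_EFER (1u << 15)
    #define EFER_LME                (1ULL << 8)
    #define EFER_LMA                (1ULL << 10)
    #define X86_CR0_PG              (1ULL << 31)

    /* Mirrors the VM-entry EFER consistency check in the hunk above. */
    static bool entry_efer_valid(uint32_t entry_controls,
                                 uint64_t guest_efer, uint64_t guest_cr0)
    {
            bool ia32e;

            if (!(entry_controls & VM_ENTRY_LOAD_IA32_EFER))
                    return true; /* EFER not loaded on entry: no check */

            ia32e = (entry_controls & VM_ENTRY_IA32E_MODE) != 0;
            if (ia32e != !!(guest_efer & EFER_LMA))
                    return false;
            if ((guest_cr0 & X86_CR0_PG) &&
                ia32e != !!(guest_efer & EFER_LME))
                    return false;
            return true;
    }

    int main(void)
    {
            uint32_t ctls = VM_ENTRY_LOAD_IA32_EFER | VM_ENTRY_IA32E_MODE;

            /* 64-bit guest, LMA+LME set, paging on: consistent -> 1 */
            printf("%d\n", entry_efer_valid(ctls, EFER_LMA | EFER_LME,
                                            X86_CR0_PG));
            /* IA-32e entry but LMA clear: inconsistent -> 0 */
            printf("%d\n", entry_efer_valid(ctls, 0, X86_CR0_PG));
            return 0;
    }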
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e730a462ed05..2a434bf3918d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -845,23 +845,17 @@ static const u32 emulated_msrs[] = {
 	MSR_IA32_MCG_CTL,
 };
 
-static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+bool kvm_valid_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
-	u64 old_efer = vcpu->arch.efer;
-
 	if (efer & efer_reserved_bits)
-		return 1;
-
-	if (is_paging(vcpu)
-	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
-		return 1;
+		return false;
 
 	if (efer & EFER_FFXSR) {
 		struct kvm_cpuid_entry2 *feat;
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->edx & bit(X86_FEATURE_FXSR_OPT)))
-			return 1;
+			return false;
 	}
 
 	if (efer & EFER_SVME) {
@@ -869,9 +863,24 @@ static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
 
 		feat = kvm_find_cpuid_entry(vcpu, 0x80000001, 0);
 		if (!feat || !(feat->ecx & bit(X86_FEATURE_SVM)))
-			return 1;
+			return false;
 	}
 
+	return true;
+}
+EXPORT_SYMBOL_GPL(kvm_valid_efer);
+
+static int set_efer(struct kvm_vcpu *vcpu, u64 efer)
+{
+	u64 old_efer = vcpu->arch.efer;
+
+	if (!kvm_valid_efer(vcpu, efer))
+		return 1;
+
+	if (is_paging(vcpu)
+	    && (vcpu->arch.efer & EFER_LME) != (efer & EFER_LME))
+		return 1;
+
 	efer &= ~EFER_LMA;
 	efer |= vcpu->arch.efer & EFER_LMA;
 
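Note the design choice visible in this hunk: the LME-toggle-under-paging test
stays behind in set_efer() instead of moving into kvm_valid_efer(), since that
restriction applies to a guest WRMSR but not to the EFER load performed on
VM-entry/VM-exit, which is exactly what the nVMX code above validates. A toy
model of the WRMSR-only rule (names are illustrative, not kernel API):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EFER_LME (1ULL << 8)

    /*
     * WRMSR-only rule kept in set_efer(): LME must not change while
     * paging is enabled. A VM-entry/VM-exit EFER load is exempt, which
     * is why kvm_valid_efer() does not contain this test.
     */
    static bool wrmsr_may_set_efer(uint64_t old_efer, uint64_t new_efer,
                                   bool paging)
    {
            return !(paging && ((old_efer ^ new_efer) & EFER_LME));
    }

    int main(void)
    {
            printf("%d\n", wrmsr_may_set_efer(0, EFER_LME, false)); /* 1 */
            printf("%d\n", wrmsr_may_set_efer(0, EFER_LME, true));  /* 0 */
            return 0;
    }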