aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm
diff options
context:
space:
mode:
authorBandan Das <bsd@redhat.com>2014-05-06 02:19:18 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2014-05-06 13:00:43 -0400
commit4291b58885f5af560488a5b9667ca6930b9fdc3d (patch)
tree077d4bfbb946157b1be6720835df21fa5d306d33 /arch/x86/kvm
parent96ec146330d18a938b4773be8d6dd1f93399507c (diff)
KVM: nVMX: move vmclear and vmptrld pre-checks to nested_vmx_check_vmptr
Some checks are common to all, and moreover, according to the spec, the check for whether any bits beyond the physical address width are set is also applicable to all of them.

Signed-off-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--arch/x86/kvm/vmx.c83
1 file changed, 37 insertions, 46 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d7e7279f1b4..a5fd47e4abfc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5850,8 +5850,10 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
5850 * - if it's 4KB aligned 5850 * - if it's 4KB aligned
5851 * - No bits beyond the physical address width are set 5851 * - No bits beyond the physical address width are set
5852 * - Returns 0 on success or else 1 5852 * - Returns 0 on success or else 1
5853 * (Intel SDM Section 30.3)
5853 */ 5854 */
5854static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason) 5855static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
5856 gpa_t *vmpointer)
5855{ 5857{
5856 gva_t gva; 5858 gva_t gva;
5857 gpa_t vmptr; 5859 gpa_t vmptr;
@@ -5899,11 +5901,42 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason)
5899 kunmap(page); 5901 kunmap(page);
5900 vmx->nested.vmxon_ptr = vmptr; 5902 vmx->nested.vmxon_ptr = vmptr;
5901 break; 5903 break;
5904 case EXIT_REASON_VMCLEAR:
5905 if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
5906 nested_vmx_failValid(vcpu,
5907 VMXERR_VMCLEAR_INVALID_ADDRESS);
5908 skip_emulated_instruction(vcpu);
5909 return 1;
5910 }
5902 5911
5912 if (vmptr == vmx->nested.vmxon_ptr) {
5913 nested_vmx_failValid(vcpu,
5914 VMXERR_VMCLEAR_VMXON_POINTER);
5915 skip_emulated_instruction(vcpu);
5916 return 1;
5917 }
5918 break;
5919 case EXIT_REASON_VMPTRLD:
5920 if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
5921 nested_vmx_failValid(vcpu,
5922 VMXERR_VMPTRLD_INVALID_ADDRESS);
5923 skip_emulated_instruction(vcpu);
5924 return 1;
5925 }
5926
5927 if (vmptr == vmx->nested.vmxon_ptr) {
5928 nested_vmx_failValid(vcpu,
5929 VMXERR_VMCLEAR_VMXON_POINTER);
5930 skip_emulated_instruction(vcpu);
5931 return 1;
5932 }
5933 break;
5903 default: 5934 default:
5904 return 1; /* shouldn't happen */ 5935 return 1; /* shouldn't happen */
5905 } 5936 }
5906 5937
5938 if (vmpointer)
5939 *vmpointer = vmptr;
5907 return 0; 5940 return 0;
5908} 5941}
5909 5942
@@ -5946,7 +5979,7 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
5946 return 1; 5979 return 1;
5947 } 5980 }
5948 5981
5949 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON)) 5982 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON, NULL))
5950 return 1; 5983 return 1;
5951 5984
5952 if (vmx->nested.vmxon) { 5985 if (vmx->nested.vmxon) {
@@ -6075,37 +6108,16 @@ static int handle_vmoff(struct kvm_vcpu *vcpu)
6075static int handle_vmclear(struct kvm_vcpu *vcpu) 6108static int handle_vmclear(struct kvm_vcpu *vcpu)
6076{ 6109{
6077 struct vcpu_vmx *vmx = to_vmx(vcpu); 6110 struct vcpu_vmx *vmx = to_vmx(vcpu);
6078 gva_t gva;
6079 gpa_t vmptr; 6111 gpa_t vmptr;
6080 struct vmcs12 *vmcs12; 6112 struct vmcs12 *vmcs12;
6081 struct page *page; 6113 struct page *page;
6082 struct x86_exception e;
6083 6114
6084 if (!nested_vmx_check_permission(vcpu)) 6115 if (!nested_vmx_check_permission(vcpu))
6085 return 1; 6116 return 1;
6086 6117
6087 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 6118 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMCLEAR, &vmptr))
6088 vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
6089 return 1; 6119 return 1;
6090 6120
6091 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
6092 sizeof(vmptr), &e)) {
6093 kvm_inject_page_fault(vcpu, &e);
6094 return 1;
6095 }
6096
6097 if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
6098 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_INVALID_ADDRESS);
6099 skip_emulated_instruction(vcpu);
6100 return 1;
6101 }
6102
6103 if (vmptr == vmx->nested.vmxon_ptr) {
6104 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
6105 skip_emulated_instruction(vcpu);
6106 return 1;
6107 }
6108
6109 if (vmptr == vmx->nested.current_vmptr) { 6121 if (vmptr == vmx->nested.current_vmptr) {
6110 nested_release_vmcs12(vmx); 6122 nested_release_vmcs12(vmx);
6111 vmx->nested.current_vmptr = -1ull; 6123 vmx->nested.current_vmptr = -1ull;
@@ -6425,35 +6437,14 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
6425static int handle_vmptrld(struct kvm_vcpu *vcpu) 6437static int handle_vmptrld(struct kvm_vcpu *vcpu)
6426{ 6438{
6427 struct vcpu_vmx *vmx = to_vmx(vcpu); 6439 struct vcpu_vmx *vmx = to_vmx(vcpu);
6428 gva_t gva;
6429 gpa_t vmptr; 6440 gpa_t vmptr;
6430 struct x86_exception e;
6431 u32 exec_control; 6441 u32 exec_control;
6432 6442
6433 if (!nested_vmx_check_permission(vcpu)) 6443 if (!nested_vmx_check_permission(vcpu))
6434 return 1; 6444 return 1;
6435 6445
6436 if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION), 6446 if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMPTRLD, &vmptr))
6437 vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
6438 return 1;
6439
6440 if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
6441 sizeof(vmptr), &e)) {
6442 kvm_inject_page_fault(vcpu, &e);
6443 return 1;
6444 }
6445
6446 if (!IS_ALIGNED(vmptr, PAGE_SIZE)) {
6447 nested_vmx_failValid(vcpu, VMXERR_VMPTRLD_INVALID_ADDRESS);
6448 skip_emulated_instruction(vcpu);
6449 return 1; 6447 return 1;
6450 }
6451
6452 if (vmptr == vmx->nested.vmxon_ptr) {
6453 nested_vmx_failValid(vcpu, VMXERR_VMCLEAR_VMXON_POINTER);
6454 skip_emulated_instruction(vcpu);
6455 return 1;
6456 }
6457 6448
6458 if (vmx->nested.current_vmptr != vmptr) { 6449 if (vmx->nested.current_vmptr != vmptr) {
6459 struct vmcs12 *new_vmcs12; 6450 struct vmcs12 *new_vmcs12;