author		Eugene Korenevsky <ekorenevsky@gmail.com>	2015-04-16 22:22:21 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-07-23 02:26:39 -0400
commit		f9eb4af67c9dde195bae965a86c35c35402249c0 (patch)
tree		fab333824144143d7ed92295575e85efd8e60bc2
parent		0da029ed7ee5fdf49a2a0e14160c3e9999be9292 (diff)
KVM: nVMX: VMX instructions: add checks for #GP/#SS exceptions
According to Intel SDM several checks must be applied for memory operands
of VMX instructions.

Long mode: #GP(0) or #SS(0) depending on the segment must be thrown
if the memory address is in a non-canonical form.

Protected mode, checks in chronological order:
- The segment type must be checked with the access type (read or write)
  taken into account.
  For write access: #GP(0) must be generated if the destination operand
  is located in a read-only data segment or any code segment.
  For read access: #GP(0) must be generated if the source operand is
  located in an execute-only code segment.
- Usability of the segment must be checked. #GP(0) or #SS(0) depending
  on the segment must be thrown if the segment is unusable.
- Limit check. #GP(0) or #SS(0) depending on the segment must be thrown
  if the memory operand effective address is outside the segment limit.

Signed-off-by: Eugene Korenevsky <ekorenevsky@gmail.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
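To make the bit masks in the patch easier to read: in the segment descriptor
type field (Intel SDM Vol. 3A), bit 3 (0x8) marks a code segment, and bit 1
(0x2) means writable for data segments and readable for code segments. A
minimal standalone sketch of the two type checks, with illustrative helper
names that do not appear in the patch:

#include <stdbool.h>

/* Write access must fault on a read-only data segment
 * ((type & 0xa) == 0: data segment with the writable bit clear)
 * and on any code segment (type & 8). */
static bool vmx_write_faults(unsigned int seg_type)
{
	return (seg_type & 0xa) == 0 || (seg_type & 8);
}

/* Read access must fault only on an execute-only code segment:
 * code bit set, readable bit clear. */
static bool vmx_read_faults(unsigned int seg_type)
{
	return (seg_type & 0xa) == 8;
}

These are the same conditions the patch computes inline on s.type before
raising #GP(0).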
-rw-r--r--	arch/x86/kvm/vmx.c	77
1 file changed, 61 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 83b7b5cd75d5..65f0a50f1e9c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -6408,8 +6408,12 @@ static enum hrtimer_restart vmx_preemption_timer_fn(struct hrtimer *timer)
  */
 static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 			       unsigned long exit_qualification,
-			       u32 vmx_instruction_info, gva_t *ret)
+			       u32 vmx_instruction_info, bool wr, gva_t *ret)
 {
+	gva_t off;
+	bool exn;
+	struct kvm_segment s;
+
 	/*
 	 * According to Vol. 3B, "Information for VM Exits Due to Instruction
 	 * Execution", on an exit, vmx_instruction_info holds most of the
@@ -6434,22 +6438,63 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 
 	/* Addr = segment_base + offset */
 	/* offset = base + [index * scale] + displacement */
-	*ret = vmx_get_segment_base(vcpu, seg_reg);
+	off = exit_qualification; /* holds the displacement */
 	if (base_is_valid)
-		*ret += kvm_register_read(vcpu, base_reg);
+		off += kvm_register_read(vcpu, base_reg);
 	if (index_is_valid)
-		*ret += kvm_register_read(vcpu, index_reg)<<scaling;
-	*ret += exit_qualification; /* holds the displacement */
+		off += kvm_register_read(vcpu, index_reg)<<scaling;
+	vmx_get_segment(vcpu, &s, seg_reg);
+	*ret = s.base + off;
 
 	if (addr_size == 1) /* 32 bit */
 		*ret &= 0xffffffff;
 
-	/*
-	 * TODO: throw #GP (and return 1) in various cases that the VM*
-	 * instructions require it - e.g., offset beyond segment limit,
-	 * unusable or unreadable/unwritable segment, non-canonical 64-bit
-	 * address, and so on. Currently these are not checked.
-	 */
+	/* Checks for #GP/#SS exceptions. */
+	exn = false;
+	if (is_protmode(vcpu)) {
+		/* Protected mode: apply checks for segment validity in the
+		 * following order:
+		 * - segment type check (#GP(0) may be thrown)
+		 * - usability check (#GP(0)/#SS(0))
+		 * - limit check (#GP(0)/#SS(0))
+		 */
+		if (wr)
+			/* #GP(0) if the destination operand is located in a
+			 * read-only data segment or any code segment.
+			 */
+			exn = ((s.type & 0xa) == 0 || (s.type & 8));
+		else
+			/* #GP(0) if the source operand is located in an
+			 * execute-only code segment
+			 */
+			exn = ((s.type & 0xa) == 8);
+	}
+	if (exn) {
+		kvm_queue_exception_e(vcpu, GP_VECTOR, 0);
+		return 1;
+	}
+	if (is_long_mode(vcpu)) {
+		/* Long mode: #GP(0)/#SS(0) if the memory address is in a
+		 * non-canonical form. This is the only check for long mode.
+		 */
+		exn = is_noncanonical_address(*ret);
+	} else if (is_protmode(vcpu)) {
+		/* Protected mode: #GP(0)/#SS(0) if the segment is unusable.
+		 */
+		exn = (s.unusable != 0);
+		/* Protected mode: #GP(0)/#SS(0) if the memory
+		 * operand is outside the segment limit.
+		 */
+		exn = exn || (off + sizeof(u64) > s.limit);
+	}
+	if (exn) {
+		kvm_queue_exception_e(vcpu,
+				      seg_reg == VCPU_SREG_SS ?
+						SS_VECTOR : GP_VECTOR,
+				      0);
+		return 1;
+	}
+
 	return 0;
 }
 
@@ -6471,7 +6516,7 @@ static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason,
 	int maxphyaddr = cpuid_maxphyaddr(vcpu);
 
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
-			vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+			vmcs_read32(VMX_INSTRUCTION_INFO), false, &gva))
 		return 1;
 
 	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
@@ -6999,7 +7044,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 			field_value);
 	} else {
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, &gva))
+				vmx_instruction_info, true, &gva))
 			return 1;
 		/* _system ok, as nested_vmx_check_permission verified cpl=0 */
 		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
@@ -7036,7 +7081,7 @@ static int handle_vmwrite(struct kvm_vcpu *vcpu)
 			(((vmx_instruction_info) >> 3) & 0xf));
 	else {
 		if (get_vmx_mem_address(vcpu, exit_qualification,
-				vmx_instruction_info, &gva))
+				vmx_instruction_info, false, &gva))
 			return 1;
 		if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva,
 			   &field_value, (is_64_bit_mode(vcpu) ? 8 : 4), &e)) {
@@ -7128,7 +7173,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 		return 1;
 
 	if (get_vmx_mem_address(vcpu, exit_qualification,
-			vmx_instruction_info, &vmcs_gva))
+			vmx_instruction_info, true, &vmcs_gva))
 		return 1;
 	/* ok to use *_system, as nested_vmx_check_permission verified cpl=0 */
 	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
@@ -7184,7 +7229,7 @@ static int handle_invept(struct kvm_vcpu *vcpu)
 	 * operand is read even if it isn't needed (e.g., for type==global)
 	 */
 	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
-			vmx_instruction_info, &gva))
+			vmx_instruction_info, false, &gva))
 		return 1;
 	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &operand,
 			sizeof(operand), &e)) {
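A note on the long-mode path above: is_noncanonical_address() (as defined in
this era of the kernel) checks whether bits 63:47 of the linear address are a
sign extension of bit 47, i.e. it assumes 48-bit virtual addresses. A minimal
standalone sketch of that test, mirroring the kernel's sign-extension idiom
(which relies on arithmetic right shift of signed integers, a behavior the
kernel's supported compilers guarantee):

#include <stdbool.h>
#include <stdint.h>

/* An address is canonical iff sign-extending it from bit 47
 * leaves it unchanged. */
static bool noncanonical(uint64_t la)
{
	return (uint64_t)(((int64_t)la << 16) >> 16) != la;
}

Also note that the limit check in the patch, off + sizeof(u64) > s.limit,
conservatively assumes the largest (8-byte) memory operand rather than
decoding the actual operand size.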