aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm
diff options
context:
space:
mode:
authorNadav Amit <namit@cs.technion.ac.il>2014-11-19 10:43:09 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2014-11-19 12:17:10 -0500
commit1c1c35ae4b75d6fc08393a1f73a5a06dc68eebb8 (patch)
treef8a03ce263b63e50c06266415b193785f5a1ce1a /arch/x86/kvm
parent7d882ffa81d52a19071952acf4460c06a38861e5 (diff)
KVM: x86: Stack size is overridden by __linearize
When performing segmented-read/write in the emulator for stack operations, it ignores the stack size, and uses the ad_bytes as indication for the pointer size. As a result, a wrong address may be accessed. To fix this behavior, we can remove the masking of address in __linearize and perform it beforehand. It is already done for the operands (so currently it is inefficiently done twice). It is missing in two cases: 1. When using rip_relative 2. On fetch_bit_operand that changes the address. This patch masks the address on these two occasions, and removes the masking from __linearize. Note that it does not mask EIP during fetch. In protected/legacy mode code fetch when RIP >= 2^32 should result in #GP and not wrap-around. Since we make limit checks within __linearize, this is the expected behavior. Partial revert of commit 518547b32ab4 (KVM: x86: Emulator does not calculate address correctly, 2014-09-30). Signed-off-by: Nadav Amit <namit@cs.technion.ac.il> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--arch/x86/kvm/emulate.c9
1 file changed, 5 insertions, 4 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5d47714d895d..1317560d0823 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -665,8 +665,7 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
665 u16 sel; 665 u16 sel;
666 unsigned cpl; 666 unsigned cpl;
667 667
668 la = seg_base(ctxt, addr.seg) + 668 la = seg_base(ctxt, addr.seg) + addr.ea;
669 (fetch || ctxt->ad_bytes == 8 ? addr.ea : (u32)addr.ea);
670 *max_size = 0; 669 *max_size = 0;
671 switch (ctxt->mode) { 670 switch (ctxt->mode) {
672 case X86EMUL_MODE_PROT64: 671 case X86EMUL_MODE_PROT64:
@@ -1289,7 +1288,8 @@ static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
1289 else 1288 else
1290 sv = (s64)ctxt->src.val & (s64)mask; 1289 sv = (s64)ctxt->src.val & (s64)mask;
1291 1290
1292 ctxt->dst.addr.mem.ea += (sv >> 3); 1291 ctxt->dst.addr.mem.ea = address_mask(ctxt,
1292 ctxt->dst.addr.mem.ea + (sv >> 3));
1293 } 1293 }
1294 1294
1295 /* only subword offset */ 1295 /* only subword offset */
@@ -4638,7 +4638,8 @@ done_prefixes:
4638 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); 4638 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4639 4639
4640 if (ctxt->rip_relative) 4640 if (ctxt->rip_relative)
4641 ctxt->memopp->addr.mem.ea += ctxt->_eip; 4641 ctxt->memopp->addr.mem.ea = address_mask(ctxt,
4642 ctxt->memopp->addr.mem.ea + ctxt->_eip);
4642 4643
4643done: 4644done:
4644 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; 4645 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;