author	Avi Kivity <avi@redhat.com>	2010-11-17 08:28:21 -0500
committer	Avi Kivity <avi@redhat.com>	2011-01-12 04:29:35 -0500
commit	90de84f50b425805bf7ddc430143ed2e224ebd8e (patch)
tree	259b70846fc9972c77e60c85e0afe172b4d87d7d /arch
parent	d53db5efc2f6026f7cb0871c91b887ed55e0f265 (diff)
KVM: x86 emulator: preserve an operand's segment identity
Currently the x86 emulator converts the segment register associated with
an operand into a segment base, which is added into the operand address.
This loss of information results in us not doing segment limit checks
properly.

Replace struct operand's addr.mem field by a segmented_address structure
which holds both the effective address and the segment.  This will allow
us to do the limit check at the point of access.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
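To make the idea concrete, here is a small standalone sketch, not the emulator's code: struct segment, linearize() and the error-return fault model are illustrative assumptions. It shows why keeping the segment identity matters — the limit check can only happen where both the effective address and the segment are still known, i.e. at access time:

	/* Hypothetical, simplified model of the change: an operand carries
	 * (seg, ea) instead of a pre-computed linear address, so the segment
	 * limit can still be checked when the access finally happens. */
	#include <stdio.h>

	struct segment {
		unsigned long base;
		unsigned long limit;
	};

	struct segmented_address {
		unsigned long ea;	/* effective address within the segment */
		unsigned seg;		/* segment register identity */
	};

	/* Resolve to a linear address at the point of access; returns
	 * nonzero on a limit violation (a #GP/#SS in a real emulator). */
	static int linearize(const struct segment *segs,
			     struct segmented_address addr,
			     unsigned long *la)
	{
		if (addr.ea > segs[addr.seg].limit)
			return -1;
		*la = segs[addr.seg].base + addr.ea;
		return 0;
	}

	int main(void)
	{
		struct segment segs[1] = { { 0x10000, 0xffff } };
		struct segmented_address a = { 0x1234, 0 };
		unsigned long la;

		if (linearize(segs, a, &la) == 0)
			printf("linear address: 0x%lx\n", la);
		return 0;
	}

Had the segment already been folded into the address, the limit in linearize() would be unavailable — which is exactly the information this patch preserves.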
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/kvm_emulate.h	5
-rw-r--r--	arch/x86/kvm/emulate.c	106
2 files changed, 59 insertions(+), 52 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index b36c6b3fe144..b48c133c95ab 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -159,7 +159,10 @@ struct operand {
 	};
 	union {
 		unsigned long *reg;
-		unsigned long mem;
+		struct segmented_address {
+			ulong ea;
+			unsigned seg;
+		} mem;
 	} addr;
 	union {
 		unsigned long val;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 3325b4747394..e96705542634 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -410,9 +410,9 @@ address_mask(struct decode_cache *c, unsigned long reg)
 }
 
 static inline unsigned long
-register_address(struct decode_cache *c, unsigned long base, unsigned long reg)
+register_address(struct decode_cache *c, unsigned long reg)
 {
-	return base + address_mask(c, reg);
+	return address_mask(c, reg);
 }
 
 static inline void
@@ -444,26 +444,26 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
 	return ops->get_cached_segment_base(seg, ctxt->vcpu);
 }
 
-static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
-				       struct x86_emulate_ops *ops,
-				       struct decode_cache *c)
+static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
+			     struct x86_emulate_ops *ops,
+			     struct decode_cache *c)
 {
 	if (!c->has_seg_override)
 		return 0;
 
-	return seg_base(ctxt, ops, c->seg_override);
+	return c->seg_override;
 }
 
-static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static ulong linear(struct x86_emulate_ctxt *ctxt,
+		    struct segmented_address addr)
 {
-	return seg_base(ctxt, ops, VCPU_SREG_ES);
-}
+	struct decode_cache *c = &ctxt->decode;
+	ulong la;
 
-static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
-{
-	return seg_base(ctxt, ops, VCPU_SREG_SS);
-}
+	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+	if (c->ad_bytes != 8)
+		la &= (u32)-1;
+	return la;
+}
 
 static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
@@ -556,7 +556,7 @@ static void *decode_register(u8 modrm_reg, unsigned long *regs,
 
 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 			   struct x86_emulate_ops *ops,
-			   ulong addr,
+			   struct segmented_address addr,
 			   u16 *size, unsigned long *address, int op_bytes)
 {
 	int rc;
@@ -564,10 +564,12 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (op_bytes == 2)
 		op_bytes = 3;
 	*address = 0;
-	rc = ops->read_std(addr, (unsigned long *)size, 2, ctxt->vcpu, NULL);
+	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
+			   ctxt->vcpu, NULL);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = ops->read_std(addr + 2, address, op_bytes, ctxt->vcpu, NULL);
+	rc = ops->read_std(linear(ctxt, addr) + 2, address, op_bytes,
+			   ctxt->vcpu, NULL);
 	return rc;
 }
 
@@ -760,7 +762,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 			break;
 		}
 	}
-	op->addr.mem = modrm_ea;
+	op->addr.mem.ea = modrm_ea;
 done:
 	return rc;
 }
@@ -775,13 +777,13 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt,
 	op->type = OP_MEM;
 	switch (c->ad_bytes) {
 	case 2:
-		op->addr.mem = insn_fetch(u16, 2, c->eip);
+		op->addr.mem.ea = insn_fetch(u16, 2, c->eip);
 		break;
 	case 4:
-		op->addr.mem = insn_fetch(u32, 4, c->eip);
+		op->addr.mem.ea = insn_fetch(u32, 4, c->eip);
 		break;
 	case 8:
-		op->addr.mem = insn_fetch(u64, 8, c->eip);
+		op->addr.mem.ea = insn_fetch(u64, 8, c->eip);
 		break;
 	}
 done:
@@ -800,7 +802,7 @@ static void fetch_bit_operand(struct decode_cache *c)
 		else if (c->src.bytes == 4)
 			sv = (s32)c->src.val & (s32)mask;
 
-		c->dst.addr.mem += (sv >> 3);
+		c->dst.addr.mem.ea += (sv >> 3);
 	}
 
 	/* only subword offset */
@@ -1093,7 +1095,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 	case OP_MEM:
 		if (c->lock_prefix)
 			rc = ops->cmpxchg_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.orig_val,
 					&c->dst.val,
 					c->dst.bytes,
@@ -1101,7 +1103,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 					ctxt->vcpu);
 		else
 			rc = ops->write_emulated(
-					c->dst.addr.mem,
+					linear(ctxt, c->dst.addr.mem),
 					&c->dst.val,
 					c->dst.bytes,
 					&err,
@@ -1129,8 +1131,8 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
 	c->dst.bytes = c->op_bytes;
 	c->dst.val = c->src.val;
 	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
-	c->dst.addr.mem = register_address(c, ss_base(ctxt, ops),
-					   c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	c->dst.addr.mem.seg = VCPU_SREG_SS;
 }
 
 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1139,10 +1141,11 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc;
+	struct segmented_address addr;
 
-	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
-						       c->regs[VCPU_REGS_RSP]),
-			   dest, len);
+	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	addr.seg = VCPU_SREG_SS;
+	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -2223,14 +2226,15 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
 }
 
-static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
 			    int reg, struct operand *op)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
 
 	register_address_increment(c, &c->regs[reg], df * op->bytes);
-	op->addr.mem = register_address(c, base, c->regs[reg]);
+	op->addr.mem.ea = register_address(c, c->regs[reg]);
+	op->addr.mem.seg = seg;
 }
 
 static int em_push(struct x86_emulate_ctxt *ctxt)
@@ -2639,7 +2643,7 @@ static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op,
 
 	op->type = OP_IMM;
 	op->bytes = size;
-	op->addr.mem = c->eip;
+	op->addr.mem.ea = c->eip;
 	/* NB. Immediates are sign-extended as necessary. */
 	switch (op->bytes) {
 	case 1:
@@ -2821,14 +2825,13 @@ done_prefixes:
 	if (!c->has_seg_override)
 		set_seg_override(c, VCPU_SREG_DS);
 
-	if (memop.type == OP_MEM && !(!c->twobyte && c->b == 0x8d))
-		memop.addr.mem += seg_override_base(ctxt, ops, c);
+	memop.addr.mem.seg = seg_override(ctxt, ops, c);
 
 	if (memop.type == OP_MEM && c->ad_bytes != 8)
-		memop.addr.mem = (u32)memop.addr.mem;
+		memop.addr.mem.ea = (u32)memop.addr.mem.ea;
 
 	if (memop.type == OP_MEM && c->rip_relative)
-		memop.addr.mem += c->eip;
+		memop.addr.mem.ea += c->eip;
 
 	/*
 	 * Decode and fetch the source operand: register, memory
@@ -2880,14 +2883,14 @@ done_prefixes:
 	case SrcSI:
 		c->src.type = OP_MEM;
 		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->src.addr.mem =
-			register_address(c, seg_override_base(ctxt, ops, c),
-					 c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RSI]);
+		c->src.addr.mem.seg = seg_override(ctxt, ops, c),
 		c->src.val = 0;
 		break;
 	case SrcImmFAddr:
 		c->src.type = OP_IMM;
-		c->src.addr.mem = c->eip;
+		c->src.addr.mem.ea = c->eip;
 		c->src.bytes = c->op_bytes + 2;
 		insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
 		break;
@@ -2934,7 +2937,7 @@ done_prefixes:
 		break;
 	case DstImmUByte:
 		c->dst.type = OP_IMM;
-		c->dst.addr.mem = c->eip;
+		c->dst.addr.mem.ea = c->eip;
 		c->dst.bytes = 1;
 		c->dst.val = insn_fetch(u8, 1, c->eip);
 		break;
@@ -2959,9 +2962,9 @@ done_prefixes:
 	case DstDI:
 		c->dst.type = OP_MEM;
 		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
-		c->dst.addr.mem =
-			register_address(c, es_base(ctxt, ops),
-					 c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.ea =
+			register_address(c, c->regs[VCPU_REGS_RDI]);
+		c->dst.addr.mem.seg = VCPU_SREG_ES;
 		c->dst.val = 0;
 		break;
 	case ImplicitOps:
@@ -3040,7 +3043,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) {
-		rc = read_emulated(ctxt, ops, c->src.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem),
 				   c->src.valptr, c->src.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3048,7 +3051,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if (c->src2.type == OP_MEM) {
-		rc = read_emulated(ctxt, ops, c->src2.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem),
 				   &c->src2.val, c->src2.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3060,7 +3063,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
 	if ((c->dst.type == OP_MEM) && !(c->d & Mov)) {
 		/* optimisation - avoid slow emulated read if Mov */
-		rc = read_emulated(ctxt, ops, c->dst.addr.mem,
+		rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem),
 				   &c->dst.val, c->dst.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			goto done;
@@ -3211,7 +3214,7 @@ special_insn:
 		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
 		break;
 	case 0x8d: /* lea r16/r32, m */
-		c->dst.val = c->src.addr.mem;
+		c->dst.val = c->src.addr.mem.ea;
 		break;
 	case 0x8e: { /* mov seg, r/m16 */
 		uint16_t sel;
@@ -3438,11 +3441,11 @@ writeback:
 	c->dst.type = saved_dst_type;
 
 	if ((c->d & SrcMask) == SrcSI)
-		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
+		string_addr_inc(ctxt, seg_override(ctxt, ops, c),
 				VCPU_REGS_RSI, &c->src);
 
 	if ((c->d & DstMask) == DstDI)
-		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
+		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
 				&c->dst);
 
 	if (c->rep_prefix && (c->d & String)) {
@@ -3535,7 +3538,8 @@ twobyte_insn:
 			emulate_ud(ctxt);
 			goto done;
 		case 7: /* invlpg*/
-			emulate_invlpg(ctxt->vcpu, c->src.addr.mem);
+			emulate_invlpg(ctxt->vcpu,
+				       linear(ctxt, c->src.addr.mem));
 			/* Disable writeback. */
 			c->dst.type = OP_NONE;
 			break;