Diffstat (limited to 'arch/x86/kvm/emulate.c')
 -rw-r--r--  arch/x86/kvm/emulate.c  408
 1 file changed, 218 insertions(+), 190 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9f8a2faf5040..169b09d76ddd 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -123,6 +123,7 @@
 #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */
 #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */
 #define Escape (5<<15) /* Escape to coprocessor instruction */
+#define InstrDual (6<<15) /* Alternate instruction decoding of mod == 3 */
 #define Sse (1<<18) /* SSE Vector instruction */
 /* Generic ModRM decode. */
 #define ModRM (1<<19)
@@ -166,6 +167,8 @@
 #define CheckPerm ((u64)1 << 49) /* Has valid check_perm field */
 #define NoBigReal ((u64)1 << 50) /* No big real mode */
 #define PrivUD ((u64)1 << 51) /* #UD instead of #GP on CPL > 0 */
+#define NearBranch ((u64)1 << 52) /* Near branches */
+#define No16 ((u64)1 << 53) /* No 16 bit operand */

 #define DstXacc (DstAccLo | SrcAccHi | SrcWrite)

@@ -209,6 +212,7 @@ struct opcode {
 		const struct group_dual *gdual;
 		const struct gprefix *gprefix;
 		const struct escape *esc;
+		const struct instr_dual *idual;
 		void (*fastop)(struct fastop *fake);
 	} u;
 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
@@ -231,6 +235,11 @@ struct escape {
 	struct opcode high[64];
 };

+struct instr_dual {
+	struct opcode mod012;
+	struct opcode mod3;
+};
+
 /* EFLAGS bit definitions. */
 #define EFLG_ID (1<<21)
 #define EFLG_VIP (1<<20)
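The new InstrDual node gives one opcode-table slot two decodings, chosen by the ModRM mod field: mod012 for the memory forms, mod3 for the register form. A minimal sketch of the selection step, mirroring the decode hunk further down in this diff:

/* Sketch: how the decoder consumes an instr_dual entry.
 * ctxt->modrm >> 6 is the two-bit mod field; 3 means the r/m
 * operand is a register, anything else means memory.
 */
if ((ctxt->modrm >> 6) == 3)
	opcode = opcode.u.idual->mod3;		/* register form */
else
	opcode = opcode.u.idual->mod012;	/* memory form (mod 0, 1 or 2) */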
@@ -379,6 +388,15 @@ static int fastop(struct x86_emulate_ctxt *ctxt, void (*fop)(struct fastop *));
 	ON64(FOP2E(op##q, rax, cl)) \
 	FOP_END

+/* 2 operand, src and dest are reversed */
+#define FASTOP2R(op, name) \
+	FOP_START(name) \
+	FOP2E(op##b, dl, al) \
+	FOP2E(op##w, dx, ax) \
+	FOP2E(op##l, edx, eax) \
+	ON64(FOP2E(op##q, rdx, rax)) \
+	FOP_END
+
 #define FOP3E(op, dst, src, src2) \
 	FOP_ALIGN #op " %" #src2 ", %" #src ", %" #dst " \n\t" FOP_RET

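FASTOP2R mirrors FASTOP2 with the two operands swapped, so the accumulator becomes the source rather than the destination. Given how FOP2E pastes its arguments into inline assembly, the byte variant of FASTOP2R(cmp, cmp_r) should expand to roughly the stub below (a sketch of the expansion, not a literal dump):

/* FOP2E(cmpb, dl, al) emits approximately:
 *	.align 8
 *	cmpb %al, %dl
 *	ret
 * so the flags reflect src - dst (the SI side minus the DI side),
 * which is the order the string compares need; plain FASTOP2(cmp)
 * would compute dst - src.
 */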
@@ -477,9 +495,9 @@ address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
 }

 static inline unsigned long
-register_address(struct x86_emulate_ctxt *ctxt, unsigned long reg)
+register_address(struct x86_emulate_ctxt *ctxt, int reg)
 {
-	return address_mask(ctxt, reg);
+	return address_mask(ctxt, reg_read(ctxt, reg));
 }

 static void masked_increment(ulong *reg, ulong mask, int inc)
@@ -488,7 +506,7 @@ static void masked_increment(ulong *reg, ulong mask, int inc)
 }

 static inline void
-register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, int inc)
+register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
 {
 	ulong mask;

@@ -496,7 +514,7 @@ register_address_increment(struct x86_emulate_ctxt *ctxt, int reg, int inc)
 		mask = ~0UL;
 	else
 		mask = ad_mask(ctxt);
-	masked_increment(reg, mask, inc);
+	masked_increment(reg_rmw(ctxt, reg), mask, inc);
 }

 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
@@ -564,40 +582,6 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 }

-static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
-				 int cs_l)
-{
-	switch (ctxt->op_bytes) {
-	case 2:
-		ctxt->_eip = (u16)dst;
-		break;
-	case 4:
-		ctxt->_eip = (u32)dst;
-		break;
-#ifdef CONFIG_X86_64
-	case 8:
-		if ((cs_l && is_noncanonical_address(dst)) ||
-		    (!cs_l && (dst >> 32) != 0))
-			return emulate_gp(ctxt, 0);
-		ctxt->_eip = dst;
-		break;
-#endif
-	default:
-		WARN(1, "unsupported eip assignment size\n");
-	}
-	return X86EMUL_CONTINUE;
-}
-
-static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
-{
-	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
-}
-
-static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
-{
-	return assign_eip_near(ctxt, ctxt->_eip + rel);
-}
-
 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 {
 	u16 selector;
@@ -641,25 +625,24 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 	return true;
 }

-static int __linearize(struct x86_emulate_ctxt *ctxt,
+static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 		       struct segmented_address addr,
 		       unsigned *max_size, unsigned size,
 		       bool write, bool fetch,
-		       ulong *linear)
+		       enum x86emul_mode mode, ulong *linear)
 {
 	struct desc_struct desc;
 	bool usable;
 	ulong la;
 	u32 lim;
 	u16 sel;
-	unsigned cpl;

 	la = seg_base(ctxt, addr.seg) + addr.ea;
 	*max_size = 0;
-	switch (ctxt->mode) {
+	switch (mode) {
 	case X86EMUL_MODE_PROT64:
-		if (((signed long)la << 16) >> 16 != la)
-			return emulate_gp(ctxt, 0);
+		if (is_noncanonical_address(la))
+			goto bad;

 		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
 		if (size > *max_size)
@@ -678,46 +661,20 @@ static __always_inline int __linearize(struct x86_emulate_ctxt *ctxt,
 		if (!fetch && (desc.type & 8) && !(desc.type & 2))
 			goto bad;
 		lim = desc_limit_scaled(&desc);
-		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
-		    (ctxt->d & NoBigReal)) {
-			/* la is between zero and 0xffff */
-			if (la > 0xffff)
-				goto bad;
-			*max_size = 0x10000 - la;
-		} else if ((desc.type & 8) || !(desc.type & 4)) {
-			/* expand-up segment */
-			if (addr.ea > lim)
-				goto bad;
-			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
-		} else {
+		if (!(desc.type & 8) && (desc.type & 4)) {
 			/* expand-down segment */
 			if (addr.ea <= lim)
 				goto bad;
 			lim = desc.d ? 0xffffffff : 0xffff;
-			if (addr.ea > lim)
-				goto bad;
-			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		}
+		if (addr.ea > lim)
+			goto bad;
+		*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		if (size > *max_size)
 			goto bad;
-		cpl = ctxt->ops->cpl(ctxt);
-		if (!(desc.type & 8)) {
-			/* data segment */
-			if (cpl > desc.dpl)
-				goto bad;
-		} else if ((desc.type & 8) && !(desc.type & 4)) {
-			/* nonconforming code segment */
-			if (cpl != desc.dpl)
-				goto bad;
-		} else if ((desc.type & 8) && (desc.type & 4)) {
-			/* conforming code segment */
-			if (cpl < desc.dpl)
-				goto bad;
-		}
+		la &= (u32)-1;
 		break;
 	}
-	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
-		la &= (u32)-1;
 	if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
 		return emulate_gp(ctxt, 0);
 	*linear = la;
@@ -735,9 +692,55 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     ulong *linear)
 {
 	unsigned max_size;
-	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
+	return __linearize(ctxt, addr, &max_size, size, write, false,
+			   ctxt->mode, linear);
+}
+
+static inline int assign_eip(struct x86_emulate_ctxt *ctxt, ulong dst,
+			     enum x86emul_mode mode)
+{
+	ulong linear;
+	int rc;
+	unsigned max_size;
+	struct segmented_address addr = { .seg = VCPU_SREG_CS,
+					  .ea = dst };
+
+	if (ctxt->op_bytes != sizeof(unsigned long))
+		addr.ea = dst & ((1UL << (ctxt->op_bytes << 3)) - 1);
+	rc = __linearize(ctxt, addr, &max_size, 1, false, true, mode, &linear);
+	if (rc == X86EMUL_CONTINUE)
+		ctxt->_eip = addr.ea;
+	return rc;
+}
+
+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+{
+	return assign_eip(ctxt, dst, ctxt->mode);
 }

+static int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+			  const struct desc_struct *cs_desc)
+{
+	enum x86emul_mode mode = ctxt->mode;
+
+#ifdef CONFIG_X86_64
+	if (ctxt->mode >= X86EMUL_MODE_PROT32 && cs_desc->l) {
+		u64 efer = 0;
+
+		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+		if (efer & EFER_LMA)
+			mode = X86EMUL_MODE_PROT64;
+	}
+#endif
+	if (mode == X86EMUL_MODE_PROT16 || mode == X86EMUL_MODE_PROT32)
+		mode = cs_desc->d ? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+	return assign_eip(ctxt, dst, mode);
+}
+
+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+{
+	return assign_eip_near(ctxt, ctxt->_eip + rel);
+}

 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 			      struct segmented_address addr,
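Every branch target now funnels through __linearize() with fetch=true, so near and far branches are checked against the CS limit (or for canonicality in 64-bit mode) instead of the removed ad-hoc switch in the old assign_eip_far(). The operand-size masking deserves a worked example; the numbers are illustrative only:

/* assign_eip() with a 16-bit operand size (op_bytes == 2):
 *	dst     = 0x0000000012345678
 *	mask    = (1UL << (2 << 3)) - 1 = 0xffff
 *	addr.ea = dst & mask            = 0x5678
 * A near branch taken with a 0x66 prefix therefore truncates the
 * target to 16 bits before __linearize() validates it.
 */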
@@ -776,7 +779,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * boundary check itself. Instead, we use max_size to check
 	 * against op_size.
 	 */
-	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
+	rc = __linearize(ctxt, addr, &max_size, 0, false, true, ctxt->mode,
+			 &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;

@@ -911,6 +915,8 @@ FASTOP2W(btc);

 FASTOP2(xadd);

+FASTOP2R(cmp, cmp_r);
+
 static u8 test_cc(unsigned int condition, unsigned long flags)
 {
 	u8 rc;
@@ -1221,6 +1227,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 			if (index_reg != 4)
 				modrm_ea += reg_read(ctxt, index_reg) << scale;
 		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
+			modrm_ea += insn_fetch(s32, ctxt);
 			if (ctxt->mode == X86EMUL_MODE_PROT64)
 				ctxt->rip_relative = 1;
 		} else {
@@ -1229,10 +1236,6 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 			adjust_modrm_seg(ctxt, base_reg);
 		}
 		switch (ctxt->modrm_mod) {
-		case 0:
-			if (ctxt->modrm_rm == 5)
-				modrm_ea += insn_fetch(s32, ctxt);
-			break;
 		case 1:
 			modrm_ea += insn_fetch(s8, ctxt);
 			break;
@@ -1284,7 +1287,8 @@ static void fetch_bit_operand(struct x86_emulate_ctxt *ctxt)
 		else
 			sv = (s64)ctxt->src.val & (s64)mask;

-		ctxt->dst.addr.mem.ea += (sv >> 3);
+		ctxt->dst.addr.mem.ea = address_mask(ctxt,
+					ctxt->dst.addr.mem.ea + (sv >> 3));
 	}

 	/* only subword offset */
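Masking the adjusted address matters for the bit instructions (BT/BTS/BTR/BTC) with a register bit offset, where the memory operand is displaced by offset >> 3 bytes and must wrap at the address-size boundary. A worked example under a 16-bit address size (values are illustrative):

/* bt %ax, (%bx) with dst.addr.mem.ea = 0xfffe and sv = 64:
 *	0xfffe + (64 >> 3) = 0x10006
 *	address_mask(ctxt, 0x10006) = 0x10006 & 0xffff = 0x0006
 * Without the mask the emulator would address past the 64 KiB
 * wrap point instead of wrapping like the hardware does.
 */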
@@ -1610,6 +1614,9 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				sizeof(base3), &ctxt->exception);
 		if (ret != X86EMUL_CONTINUE)
 			return ret;
+		if (is_noncanonical_address(get_desc_base(&seg_desc) |
+					     ((u64)base3 << 32)))
+			return emulate_gp(ctxt, 0);
 	}
 load:
 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
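The new check rejects 16-byte system descriptors (LDT/TSS, whose high base dword arrives in base3) when the reassembled 64-bit base is non-canonical. A canonical address on 48-bit implementations is one whose upper bits sign-extend bit 47 — the same shift trick the old PROT64 check in __linearize() used; a sketch of the idea (helper name is illustrative):

/* Sketch of the canonicality test for 48-bit virtual addresses:
 * sign-extend bit 47 and compare with the original value.
 */
static inline bool noncanonical_sketch(u64 la)
{
	return (u64)(((s64)la << 16) >> 16) != la;
}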
@@ -1807,6 +1814,10 @@ static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
 	int seg = ctxt->src2.val;

 	ctxt->src.val = get_segment_selector(ctxt, seg);
+	if (ctxt->op_bytes == 4) {
+		rsp_increment(ctxt, -2);
+		ctxt->op_bytes = 2;
+	}

 	return em_push(ctxt);
 }
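This reproduces hardware behaviour for a push of a segment register with a 32-bit operand size: the stack pointer still drops by four, but only the low word of the slot is written. A hypothetical trace, assuming ESP starts at 0x1000:

/* push %es, op_bytes == 4:
 *	rsp_increment(ctxt, -2);   ESP = 0x0ffe  (skip the high word)
 *	em_push() of 2 bytes;      ESP = 0x0ffc, [0x0ffc] = ES
 * Net: ESP -= 4, and bytes 0x0ffe-0x0fff are left untouched,
 * matching processors that do a 16-bit write for this case.
 */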
@@ -1850,7 +1861,7 @@ static int em_pusha(struct x86_emulate_ctxt *ctxt)

 static int em_pushf(struct x86_emulate_ctxt *ctxt)
 {
-	ctxt->src.val = (unsigned long)ctxt->eflags;
+	ctxt->src.val = (unsigned long)ctxt->eflags & ~EFLG_VM;
 	return em_push(ctxt);
 }

@@ -2035,7 +2046,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

-	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
 	if (rc != X86EMUL_CONTINUE) {
 		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		/* assigning eip failed; restore the old cs */
@@ -2045,31 +2056,22 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 	return rc;
 }

-static int em_grp45(struct x86_emulate_ctxt *ctxt)
+static int em_jmp_abs(struct x86_emulate_ctxt *ctxt)
 {
-	int rc = X86EMUL_CONTINUE;
+	return assign_eip_near(ctxt, ctxt->src.val);
+}

-	switch (ctxt->modrm_reg) {
-	case 2: /* call near abs */ {
-		long int old_eip;
-		old_eip = ctxt->_eip;
-		rc = assign_eip_near(ctxt, ctxt->src.val);
-		if (rc != X86EMUL_CONTINUE)
-			break;
-		ctxt->src.val = old_eip;
-		rc = em_push(ctxt);
-		break;
-	}
-	case 4: /* jmp abs */
-		rc = assign_eip_near(ctxt, ctxt->src.val);
-		break;
-	case 5: /* jmp far */
-		rc = em_jmp_far(ctxt);
-		break;
-	case 6: /* push */
-		rc = em_push(ctxt);
-		break;
-	}
+static int em_call_near_abs(struct x86_emulate_ctxt *ctxt)
+{
+	int rc;
+	long int old_eip;
+
+	old_eip = ctxt->_eip;
+	rc = assign_eip_near(ctxt, ctxt->src.val);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	ctxt->src.val = old_eip;
+	rc = em_push(ctxt);
 	return rc;
 }

@@ -2128,11 +2130,11 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 	/* Outer-privilege level return is not implemented */
 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
 		return X86EMUL_UNHANDLEABLE;
-	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
+	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
 				       &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = assign_eip_far(ctxt, eip, new_desc.l);
+	rc = assign_eip_far(ctxt, eip, &new_desc);
 	if (rc != X86EMUL_CONTINUE) {
 		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
@@ -2316,6 +2318,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)

 		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
 		ctxt->eflags &= ~msr_data;
+		ctxt->eflags |= EFLG_RESERVED_ONE_MASK;
 #endif
 	} else {
 		/* legacy mode */
@@ -2349,11 +2352,9 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
 	    && !vendor_intel(ctxt))
 		return emulate_ud(ctxt);

-	/* XXX sysenter/sysexit have not been tested in 64bit mode.
-	 * Therefore, we inject an #UD.
-	 */
+	/* sysenter/sysexit have not been tested in 64bit mode. */
 	if (ctxt->mode == X86EMUL_MODE_PROT64)
-		return emulate_ud(ctxt);
+		return X86EMUL_UNHANDLEABLE;

 	setup_syscalls_segments(ctxt, &cs, &ss);

@@ -2425,6 +2426,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 		if ((msr_data & 0xfffc) == 0x0)
 			return emulate_gp(ctxt, 0);
 		ss_sel = (u16)(msr_data + 24);
+		rcx = (u32)rcx;
+		rdx = (u32)rdx;
 		break;
 	case X86EMUL_MODE_PROT64:
 		cs_sel = (u16)(msr_data + 32);
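Truncating RCX and RDX before they are loaded into the new stack and instruction pointers matches what SYSEXIT does when returning to 32-bit user space (RIP comes from EDX, RSP from ECX); without it, stale upper halves could leak into the new context. For example:

/* 32-bit sysexit with rdx = 0xffffffff00401000:
 *	rdx = (u32)rdx = 0x00401000
 * so the emulated return lands at the 32-bit EIP, just as the
 * hardware zero-extends EDX/ECX into RIP/RSP in this mode.
 */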
@@ -2599,7 +2602,6 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 	ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
 			    &ctxt->exception);
 	if (ret != X86EMUL_CONTINUE)
-		/* FIXME: need to provide precise fault address */
 		return ret;

 	save_state_to_tss16(ctxt, &tss_seg);
@@ -2607,13 +2609,11 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
 			     &ctxt->exception);
 	if (ret != X86EMUL_CONTINUE)
-		/* FIXME: need to provide precise fault address */
 		return ret;

 	ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg,
 			    &ctxt->exception);
 	if (ret != X86EMUL_CONTINUE)
-		/* FIXME: need to provide precise fault address */
 		return ret;

 	if (old_tss_sel != 0xffff) {
@@ -2624,7 +2624,6 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 					 sizeof tss_seg.prev_task_link,
 					 &ctxt->exception);
 		if (ret != X86EMUL_CONTINUE)
-			/* FIXME: need to provide precise fault address */
 			return ret;
 	}

@@ -2813,7 +2812,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	 *
 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
 	 * 2. Exception/IRQ/iret: No check is performed
-	 * 3. jmp/call to TSS: Check against DPL of the TSS
+	 * 3. jmp/call to TSS/task-gate: No check is performed since the
+	 *    hardware checks it before exiting.
 	 */
 	if (reason == TASK_SWITCH_GATE) {
 		if (idt_index != -1) {
@@ -2830,13 +2830,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 			if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
 				return emulate_gp(ctxt, (idt_index << 3) | 0x2);
 		}
-	} else if (reason != TASK_SWITCH_IRET) {
-		int dpl = next_tss_desc.dpl;
-		if ((tss_selector & 3) > dpl || ops->cpl(ctxt) > dpl)
-			return emulate_gp(ctxt, tss_selector);
 	}

-
 	desc_limit = desc_limit_scaled(&next_tss_desc);
 	if (!next_tss_desc.p ||
 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
@@ -2913,8 +2908,8 @@ static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
 {
 	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;

-	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
-	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
+	register_address_increment(ctxt, reg, df * op->bytes);
+	op->addr.mem.ea = register_address(ctxt, reg);
 }

 static int em_das(struct x86_emulate_ctxt *ctxt)
@@ -3025,7 +3020,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	if (rc != X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;

-	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+	rc = assign_eip_far(ctxt, ctxt->src.val, &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		goto fail;

@@ -3215,6 +3210,8 @@ static int em_mov_rm_sreg(struct x86_emulate_ctxt *ctxt)
 		return emulate_ud(ctxt);

 	ctxt->dst.val = get_segment_selector(ctxt, ctxt->modrm_reg);
+	if (ctxt->dst.bytes == 4 && ctxt->dst.type == OP_MEM)
+		ctxt->dst.bytes = 2;
 	return X86EMUL_CONTINUE;
 }

@@ -3317,7 +3314,7 @@ static int em_sidt(struct x86_emulate_ctxt *ctxt)
 	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
 }

-static int em_lgdt(struct x86_emulate_ctxt *ctxt)
+static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
 {
 	struct desc_ptr desc_ptr;
 	int rc;
@@ -3329,12 +3326,23 @@ static int em_lgdt_lidt(struct x86_emulate_ctxt *ctxt, bool lgdt)
 			     ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	ctxt->ops->set_gdt(ctxt, &desc_ptr);
+	if (ctxt->mode == X86EMUL_MODE_PROT64 &&
+	    is_noncanonical_address(desc_ptr.address))
+		return emulate_gp(ctxt, 0);
+	if (lgdt)
+		ctxt->ops->set_gdt(ctxt, &desc_ptr);
+	else
+		ctxt->ops->set_idt(ctxt, &desc_ptr);
 	/* Disable writeback. */
 	ctxt->dst.type = OP_NONE;
 	return X86EMUL_CONTINUE;
 }

+static int em_lgdt(struct x86_emulate_ctxt *ctxt)
+{
+	return em_lgdt_lidt(ctxt, true);
+}
+
 static int em_vmmcall(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
@@ -3348,20 +3356,7 @@ static int em_vmmcall(struct x86_emulate_ctxt *ctxt)

 static int em_lidt(struct x86_emulate_ctxt *ctxt)
 {
-	struct desc_ptr desc_ptr;
-	int rc;
-
-	if (ctxt->mode == X86EMUL_MODE_PROT64)
-		ctxt->op_bytes = 8;
-	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
-			     &desc_ptr.size, &desc_ptr.address,
-			     ctxt->op_bytes);
-	if (rc != X86EMUL_CONTINUE)
-		return rc;
-	ctxt->ops->set_idt(ctxt, &desc_ptr);
-	/* Disable writeback. */
-	ctxt->dst.type = OP_NONE;
-	return X86EMUL_CONTINUE;
+	return em_lgdt_lidt(ctxt, false);
 }

 static int em_smsw(struct x86_emulate_ctxt *ctxt)
@@ -3384,7 +3379,7 @@ static int em_loop(struct x86_emulate_ctxt *ctxt)
 {
 	int rc = X86EMUL_CONTINUE;

-	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
+	register_address_increment(ctxt, VCPU_REGS_RCX, -1);
 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
 		rc = jmp_rel(ctxt, ctxt->src.val);
@@ -3554,7 +3549,7 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)

 		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 		if (efer & EFER_LMA)
-			rsvd = CR3_L_MODE_RESERVED_BITS;
+			rsvd = CR3_L_MODE_RESERVED_BITS & ~CR3_PCID_INVD;

 		if (new_val & rsvd)
 			return emulate_gp(ctxt, 0);
@@ -3596,8 +3591,15 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
 		return emulate_ud(ctxt);

-	if (check_dr7_gd(ctxt))
+	if (check_dr7_gd(ctxt)) {
+		ulong dr6;
+
+		ctxt->ops->get_dr(ctxt, 6, &dr6);
+		dr6 &= ~15;
+		dr6 |= DR6_BD | DR6_RTM;
+		ctxt->ops->set_dr(ctxt, 6, dr6);
 		return emulate_db(ctxt);
+	}

 	return X86EMUL_CONTINUE;
 }
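When DR7.GD intercepts a debug-register access, hardware also rewrites DR6 before raising #DB: the B0-B3 status bits are cleared, BD is set, and the (active-low) RTM bit is set. Assuming the kernel's usual DR6_BD (bit 13) and DR6_RTM (bit 16) definitions, the update amounts to:

/* Resulting DR6 after the general-detect update:
 *	dr6 &= ~15;                clear B0-B3
 *	dr6 |= DR6_BD | DR6_RTM;   set bit 13 (debug-register access
 *	                           detected) and bit 16 (no RTM)
 */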
@@ -3684,6 +3686,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
 #define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
 #define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
+#define ID(_f, _i) { .flags = ((_f) | InstrDual | ModRM), .u.idual = (_i) }
 #define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
 #define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
@@ -3780,11 +3783,11 @@ static const struct opcode group4[] = {
 static const struct opcode group5[] = {
 	F(DstMem | SrcNone | Lock, em_inc),
 	F(DstMem | SrcNone | Lock, em_dec),
-	I(SrcMem | Stack, em_grp45),
+	I(SrcMem | NearBranch, em_call_near_abs),
 	I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
-	I(SrcMem | Stack, em_grp45),
-	I(SrcMemFAddr | ImplicitOps, em_grp45),
-	I(SrcMem | Stack, em_grp45), D(Undefined),
+	I(SrcMem | NearBranch, em_jmp_abs),
+	I(SrcMemFAddr | ImplicitOps, em_jmp_far),
+	I(SrcMem | Stack, em_push), D(Undefined),
 };

 static const struct opcode group6[] = {
@@ -3845,8 +3848,12 @@ static const struct gprefix pfx_0f_6f_0f_7f = {
 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
 };

+static const struct instr_dual instr_dual_0f_2b = {
+	I(0, em_mov), N
+};
+
 static const struct gprefix pfx_0f_2b = {
-	I(0, em_mov), I(0, em_mov), N, N,
+	ID(0, &instr_dual_0f_2b), ID(0, &instr_dual_0f_2b), N, N,
 };

 static const struct gprefix pfx_0f_28_0f_29 = {
@@ -3920,6 +3927,10 @@ static const struct escape escape_dd = { {
 	N, N, N, N, N, N, N, N,
 } };

+static const struct instr_dual instr_dual_0f_c3 = {
+	I(DstMem | SrcReg | ModRM | No16 | Mov, em_mov), N
+};
+
 static const struct opcode opcode_table[256] = {
 	/* 0x00 - 0x07 */
 	F6ALU(Lock, em_add),
@@ -3964,7 +3975,7 @@ static const struct opcode opcode_table[256] = {
 	I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
 	I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
 	/* 0x70 - 0x7F */
-	X16(D(SrcImmByte)),
+	X16(D(SrcImmByte | NearBranch)),
 	/* 0x80 - 0x87 */
 	G(ByteOp | DstMem | SrcImm, group1),
 	G(DstMem | SrcImm, group1),
@@ -3991,20 +4002,20 @@ static const struct opcode opcode_table[256] = {
 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
 	I2bv(SrcSI | DstDI | Mov | String, em_mov),
-	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp),
+	F2bv(SrcSI | DstDI | String | NoWrite, em_cmp_r),
 	/* 0xA8 - 0xAF */
 	F2bv(DstAcc | SrcImm | NoWrite, em_test),
 	I2bv(SrcAcc | DstDI | Mov | String, em_mov),
 	I2bv(SrcSI | DstAcc | Mov | String, em_mov),
-	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp),
+	F2bv(SrcAcc | DstDI | String | NoWrite, em_cmp_r),
 	/* 0xB0 - 0xB7 */
 	X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)),
 	/* 0xB8 - 0xBF */
 	X8(I(DstReg | SrcImm64 | Mov, em_mov)),
 	/* 0xC0 - 0xC7 */
 	G(ByteOp | Src2ImmByte, group2), G(Src2ImmByte, group2),
-	I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm),
-	I(ImplicitOps | Stack, em_ret),
+	I(ImplicitOps | NearBranch | SrcImmU16, em_ret_near_imm),
+	I(ImplicitOps | NearBranch, em_ret),
 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2ES, em_lseg),
 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
 	G(ByteOp, group11), G(0, group11),
@@ -4024,13 +4035,14 @@ static const struct opcode opcode_table[256] = {
 	/* 0xD8 - 0xDF */
 	N, E(0, &escape_d9), N, E(0, &escape_db), N, E(0, &escape_dd), N, N,
 	/* 0xE0 - 0xE7 */
-	X3(I(SrcImmByte, em_loop)),
-	I(SrcImmByte, em_jcxz),
+	X3(I(SrcImmByte | NearBranch, em_loop)),
+	I(SrcImmByte | NearBranch, em_jcxz),
 	I2bvIP(SrcImmUByte | DstAcc, em_in, in, check_perm_in),
 	I2bvIP(SrcAcc | DstImmUByte, em_out, out, check_perm_out),
 	/* 0xE8 - 0xEF */
-	I(SrcImm | Stack, em_call), D(SrcImm | ImplicitOps),
-	I(SrcImmFAddr | No64, em_jmp_far), D(SrcImmByte | ImplicitOps),
+	I(SrcImm | NearBranch, em_call), D(SrcImm | ImplicitOps | NearBranch),
+	I(SrcImmFAddr | No64, em_jmp_far),
+	D(SrcImmByte | ImplicitOps | NearBranch),
 	I2bvIP(SrcDX | DstAcc, em_in, in, check_perm_in),
 	I2bvIP(SrcAcc | DstDX, em_out, out, check_perm_out),
 	/* 0xF0 - 0xF7 */
@@ -4090,7 +4102,7 @@ static const struct opcode twobyte_table[256] = {
 	N, N, N, N,
 	N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f),
 	/* 0x80 - 0x8F */
-	X16(D(SrcImm)),
+	X16(D(SrcImm | NearBranch)),
 	/* 0x90 - 0x9F */
 	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
 	/* 0xA0 - 0xA7 */
@@ -4121,7 +4133,7 @@ static const struct opcode twobyte_table[256] = {
 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
 	/* 0xC0 - 0xC7 */
 	F2bv(DstMem | SrcReg | ModRM | SrcWrite | Lock, em_xadd),
-	N, D(DstMem | SrcReg | ModRM | Mov),
+	N, ID(0, &instr_dual_0f_c3),
 	N, N, N, GD(0, &group9),
 	/* 0xC8 - 0xCF */
 	X8(I(DstReg, em_bswap)),
@@ -4134,12 +4146,20 @@ static const struct opcode twobyte_table[256] = {
 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N
 };

+static const struct instr_dual instr_dual_0f_38_f0 = {
+	I(DstReg | SrcMem | Mov, em_movbe), N
+};
+
+static const struct instr_dual instr_dual_0f_38_f1 = {
+	I(DstMem | SrcReg | Mov, em_movbe), N
+};
+
 static const struct gprefix three_byte_0f_38_f0 = {
-	I(DstReg | SrcMem | Mov, em_movbe), N, N, N
+	ID(0, &instr_dual_0f_38_f0), N, N, N
 };

 static const struct gprefix three_byte_0f_38_f1 = {
-	I(DstMem | SrcReg | Mov, em_movbe), N, N, N
+	ID(0, &instr_dual_0f_38_f1), N, N, N
 };

 /*
@@ -4152,8 +4172,8 @@ static const struct opcode opcode_map_0f_38[256] = {
 	/* 0x80 - 0xef */
 	X16(N), X16(N), X16(N), X16(N), X16(N), X16(N), X16(N),
 	/* 0xf0 - 0xf1 */
-	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f0),
-	GP(EmulateOnUD | ModRM | Prefix, &three_byte_0f_38_f1),
+	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f0),
+	GP(EmulateOnUD | ModRM, &three_byte_0f_38_f1),
 	/* 0xf2 - 0xff */
 	N, N, X4(N), X8(N)
 };
@@ -4275,7 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		op->type = OP_MEM;
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
 		op->addr.mem.ea =
-			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
+			register_address(ctxt, VCPU_REGS_RDI);
 		op->addr.mem.seg = VCPU_SREG_ES;
 		op->val = 0;
 		op->count = 1;
@@ -4329,7 +4349,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		op->type = OP_MEM;
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
 		op->addr.mem.ea =
-			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
+			register_address(ctxt, VCPU_REGS_RSI);
 		op->addr.mem.seg = ctxt->seg_override;
 		op->val = 0;
 		op->count = 1;
@@ -4338,7 +4358,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		op->type = OP_MEM;
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
 		op->addr.mem.ea =
-			register_address(ctxt,
+			address_mask(ctxt,
 				reg_read(ctxt, VCPU_REGS_RBX) +
 				(reg_read(ctxt, VCPU_REGS_RAX) & 0xff));
 		op->addr.mem.seg = ctxt->seg_override;
@@ -4510,8 +4530,7 @@ done_prefixes:

 	/* vex-prefix instructions are not implemented */
 	if (ctxt->opcode_len == 1 && (ctxt->b == 0xc5 || ctxt->b == 0xc4) &&
-	    (mode == X86EMUL_MODE_PROT64 ||
-	    (mode >= X86EMUL_MODE_PROT16 && (ctxt->modrm & 0x80)))) {
+	    (mode == X86EMUL_MODE_PROT64 || (ctxt->modrm & 0xc0) == 0xc0)) {
 		ctxt->d = NotImpl;
 	}

@@ -4549,6 +4568,12 @@ done_prefixes:
 		else
 			opcode = opcode.u.esc->op[(ctxt->modrm >> 3) & 7];
 		break;
+	case InstrDual:
+		if ((ctxt->modrm >> 6) == 3)
+			opcode = opcode.u.idual->mod3;
+		else
+			opcode = opcode.u.idual->mod012;
+		break;
 	default:
 		return EMULATION_FAILED;
 	}
@@ -4567,7 +4592,8 @@ done_prefixes:
 		return EMULATION_FAILED;

 	if (unlikely(ctxt->d &
-	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm))) {
+	    (NotImpl|Stack|Op3264|Sse|Mmx|Intercept|CheckPerm|NearBranch|
+	     No16))) {
 		/*
 		 * These are copied unconditionally here, and checked unconditionally
 		 * in x86_emulate_insn.
@@ -4578,8 +4604,12 @@ done_prefixes:
 		if (ctxt->d & NotImpl)
 			return EMULATION_FAILED;

-		if (mode == X86EMUL_MODE_PROT64 && (ctxt->d & Stack))
-			ctxt->op_bytes = 8;
+		if (mode == X86EMUL_MODE_PROT64) {
+			if (ctxt->op_bytes == 4 && (ctxt->d & Stack))
+				ctxt->op_bytes = 8;
+			else if (ctxt->d & NearBranch)
+				ctxt->op_bytes = 8;
+		}

 		if (ctxt->d & Op3264) {
 			if (mode == X86EMUL_MODE_PROT64)
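The rewritten block separates two 64-bit-mode rules: stack operations promote only the default 32-bit operand size to 64 (so a 0x66 prefix can still select a 16-bit push/pop), while near branches always get a 64-bit operand so their targets are never silently truncated to 32 bits. A compact restatement:

/* 64-bit mode operand-size fixups after this hunk:
 *	Stack:      op_bytes 4 -> 8  (op_bytes 2 is left alone)
 *	NearBranch: op_bytes   -> 8  (regardless of prefixes)
 */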
@@ -4588,6 +4618,9 @@ done_prefixes:
 				ctxt->op_bytes = 4;
 		}

+		if ((ctxt->d & No16) && ctxt->op_bytes == 2)
+			ctxt->op_bytes = 4;
+
 		if (ctxt->d & Sse)
 			ctxt->op_bytes = 16;
 		else if (ctxt->d & Mmx)
@@ -4631,7 +4664,8 @@ done_prefixes:
 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);

 	if (ctxt->rip_relative)
-		ctxt->memopp->addr.mem.ea += ctxt->_eip;
+		ctxt->memopp->addr.mem.ea = address_mask(ctxt,
+					ctxt->memopp->addr.mem.ea + ctxt->_eip);

 done:
 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
@@ -4775,6 +4809,12 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 			goto done;
 		}

+		/* Instruction can only be executed in protected mode */
+		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
+			rc = emulate_ud(ctxt);
+			goto done;
+		}
+
 		/* Privileged instruction can be executed only in CPL=0 */
 		if ((ctxt->d & Priv) && ops->cpl(ctxt)) {
 			if (ctxt->d & PrivUD)
@@ -4784,12 +4824,6 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 			goto done;
 		}

-		/* Instruction can only be executed in protected mode */
-		if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
-			rc = emulate_ud(ctxt);
-			goto done;
-		}
-
 		/* Do instruction specific permission checks */
 		if (ctxt->d & CheckPerm) {
 			rc = ctxt->check_perm(ctxt);
@@ -4974,8 +5008,7 @@ writeback:
 			count = ctxt->src.count;
 		else
 			count = ctxt->dst.count;
-		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
-					   -count);
+		register_address_increment(ctxt, VCPU_REGS_RCX, -count);

 		if (!string_insn_completed(ctxt)) {
 			/*
@@ -5053,11 +5086,6 @@ twobyte_insn:
 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
 							(s16) ctxt->src.val;
 		break;
-	case 0xc3:		/* movnti */
-		ctxt->dst.bytes = ctxt->op_bytes;
-		ctxt->dst.val = (ctxt->op_bytes == 8) ? (u64) ctxt->src.val :
-							(u32) ctxt->src.val;
-		break;
 	default:
 		goto cannot_emulate;
 	}