Diffstat (limited to 'arch/x86/kvm/emulate.c')
 arch/x86/kvm/emulate.c | 307 ++++++++++++++++++++++++++++++++++-----------
 1 file changed, 231 insertions(+), 76 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a46207a05835..9f8a2faf5040 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
 	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
 }
 
-static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
-{
-	register_address_increment(ctxt, &ctxt->_eip, rel);
-}
-
 static u32 desc_limit_scaled(struct desc_struct *desc)
 {
 	u32 limit = get_desc_limit(desc);
@@ -569,6 +564,40 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 }
 
+static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
+				 int cs_l)
+{
+	switch (ctxt->op_bytes) {
+	case 2:
+		ctxt->_eip = (u16)dst;
+		break;
+	case 4:
+		ctxt->_eip = (u32)dst;
+		break;
+#ifdef CONFIG_X86_64
+	case 8:
+		if ((cs_l && is_noncanonical_address(dst)) ||
+		    (!cs_l && (dst >> 32) != 0))
+			return emulate_gp(ctxt, 0);
+		ctxt->_eip = dst;
+		break;
+#endif
+	default:
+		WARN(1, "unsupported eip assignment size\n");
+	}
+	return X86EMUL_CONTINUE;
+}
+
+static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
+{
+	return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
+}
+
+static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
+{
+	return assign_eip_near(ctxt, ctxt->_eip + rel);
+}
+
 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
 {
 	u16 selector;
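The 64-bit case of assign_eip_far() above hinges on the canonical-address rule: bits 63:48 must be a sign-extension of bit 47. A minimal standalone sketch of that test, assuming 48 virtual-address bits (the same shift trick __linearize uses below); the helper name is a hypothetical stand-in, not the kernel's is_noncanonical_address():

#include <stdint.h>
#include <stdbool.h>

/* hypothetical stand-in, 48 VA bits assumed */
static bool noncanonical(uint64_t va)
{
	/* canonical iff shifting bits 47:0 up and sign-extending back is lossless */
	return (uint64_t)((int64_t)(va << 16) >> 16) != va;
}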
@@ -614,7 +643,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 
 static int __linearize(struct x86_emulate_ctxt *ctxt,
 		       struct segmented_address addr,
-		       unsigned size, bool write, bool fetch,
+		       unsigned *max_size, unsigned size,
+		       bool write, bool fetch,
 		       ulong *linear)
 {
 	struct desc_struct desc;
@@ -625,10 +655,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	unsigned cpl;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
+	*max_size = 0;
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT64:
 		if (((signed long)la << 16) >> 16 != la)
 			return emulate_gp(ctxt, 0);
+
+		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
+		if (size > *max_size)
+			goto bad;
 		break;
 	default:
 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
@@ -646,20 +681,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
 		    (ctxt->d & NoBigReal)) {
 			/* la is between zero and 0xffff */
-			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
+			if (la > 0xffff)
 				goto bad;
+			*max_size = 0x10000 - la;
 		} else if ((desc.type & 8) || !(desc.type & 4)) {
 			/* expand-up segment */
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		} else {
 			/* expand-down segment */
-			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
+			if (addr.ea <= lim)
 				goto bad;
 			lim = desc.d ? 0xffffffff : 0xffff;
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		}
+		if (size > *max_size)
+			goto bad;
 		cpl = ctxt->ops->cpl(ctxt);
 		if (!(desc.type & 8)) {
 			/* data segment */
@@ -684,9 +724,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 bad:
 	if (addr.seg == VCPU_SREG_SS)
-		return emulate_ss(ctxt, sel);
+		return emulate_ss(ctxt, 0);
 	else
-		return emulate_gp(ctxt, sel);
+		return emulate_gp(ctxt, 0);
 }
 
 static int linearize(struct x86_emulate_ctxt *ctxt,
@@ -694,7 +734,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     unsigned size, bool write,
 		     ulong *linear)
 {
-	return __linearize(ctxt, addr, size, write, false, linear);
+	unsigned max_size;
+	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
 }
 
 
@@ -719,17 +760,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
 	int rc;
-	unsigned size;
+	unsigned size, max_size;
 	unsigned long linear;
 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = ctxt->eip + cur_size };
 
-	size = 15UL ^ cur_size;
-	rc = __linearize(ctxt, addr, size, false, true, &linear);
+	/*
+	 * We do not know exactly how many bytes will be needed, and
+	 * __linearize is expensive, so fetch as much as possible.  We
+	 * just have to avoid going beyond the 15 byte limit, the end
+	 * of the segment, or the end of the page.
+	 *
+	 * __linearize is called with size 0 so that it does not do any
+	 * boundary check itself.  Instead, we use max_size to check
+	 * against op_size.
+	 */
+	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 
+	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 
 	/*
@@ -739,7 +790,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * still, we must have hit the 15-byte boundary.
 	 */
 	if (unlikely(size < op_size))
-		return X86EMUL_UNHANDLEABLE;
+		return emulate_gp(ctxt, 0);
+
 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 			      size, &ctxt->exception);
 	if (unlikely(rc != X86EMUL_CONTINUE))
@@ -751,8 +803,10 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
 					       unsigned size)
 {
-	if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
-		return __do_insn_fetch_bytes(ctxt, size);
+	unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
+
+	if (unlikely(done_size < size))
+		return __do_insn_fetch_bytes(ctxt, size - done_size);
 	else
 		return X86EMUL_CONTINUE;
 }
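One non-obvious bit retained above: `size = min_t(unsigned, 15UL ^ cur_size, max_size)` uses XOR where subtraction is meant. Since the fetch buffer holds at most 15 bytes, cur_size stays in [0, 15], and XOR against 0b1111 only clears set bits, which equals subtraction with no borrow. A quick standalone check of that identity (the range claim is an assumption drawn from the 15-byte instruction limit):

#include <assert.h>

int main(void)
{
	/* for 0 <= n <= 15, 15 ^ n == 15 - n */
	for (unsigned int n = 0; n <= 15; n++)
		assert((15UL ^ n) == 15UL - n);
	return 0;
}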
@@ -1416,7 +1470,9 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 /* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				     u16 selector, int seg, u8 cpl, bool in_task_switch)
+				     u16 selector, int seg, u8 cpl,
+				     bool in_task_switch,
+				     struct desc_struct *desc)
 {
 	struct desc_struct seg_desc, old_desc;
 	u8 dpl, rpl;
@@ -1557,6 +1613,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	}
 load:
 	ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
+	if (desc)
+		*desc = seg_desc;
 	return X86EMUL_CONTINUE;
 exception:
 	return emulate_exception(ctxt, err_vec, err_code, true);
@@ -1566,7 +1624,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   u16 selector, int seg)
 {
 	u8 cpl = ctxt->ops->cpl(ctxt);
-	return __load_segment_descriptor(ctxt, selector, seg, cpl, false);
+	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
 }
 
 static void write_register_operand(struct operand *op)
@@ -1960,17 +2018,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
 static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
-	unsigned short sel;
+	unsigned short sel, old_sel;
+	struct desc_struct old_desc, new_desc;
+	const struct x86_emulate_ops *ops = ctxt->ops;
+	u8 cpl = ctxt->ops->cpl(ctxt);
+
+	/* Assignment of RIP may only fail in 64-bit mode */
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
+				 VCPU_SREG_CS);
 
 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
 
-	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
+	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
+				       &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	ctxt->_eip = 0;
-	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
-	return X86EMUL_CONTINUE;
+	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+	if (rc != X86EMUL_CONTINUE) {
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
+		/* assigning eip failed; restore the old cs */
+		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
+		return rc;
+	}
+	return rc;
 }
 
 static int em_grp45(struct x86_emulate_ctxt *ctxt)
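em_jmp_far() above commits the new CS before assign_eip_far() can still fail (only in 64-bit mode, hence the WARN_ON), so the old selector and descriptor are snapshotted and written back on error. A compact sketch of that commit-then-rollback shape, with all names hypothetical rather than emulator API:

#include <stdbool.h>

struct cs_state { unsigned short sel; bool l; };	/* toy stand-in */

static bool rip_ok(unsigned long long dst, bool cs_l)
{
	return cs_l || (dst >> 32) == 0;	/* mirrors the !cs_l check */
}

static int far_jump(struct cs_state *cs, struct cs_state new_cs,
		    unsigned long long dst)
{
	struct cs_state old = *cs;

	*cs = new_cs;			/* CS committed first */
	if (!rip_ok(dst, new_cs.l)) {
		*cs = old;		/* restore on RIP failure */
		return -1;
	}
	return 0;
}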
@@ -1981,13 +2053,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
 	case 2: /* call near abs */ {
 		long int old_eip;
 		old_eip = ctxt->_eip;
-		ctxt->_eip = ctxt->src.val;
+		rc = assign_eip_near(ctxt, ctxt->src.val);
+		if (rc != X86EMUL_CONTINUE)
+			break;
 		ctxt->src.val = old_eip;
 		rc = em_push(ctxt);
 		break;
 	}
 	case 4: /* jmp abs */
-		ctxt->_eip = ctxt->src.val;
+		rc = assign_eip_near(ctxt, ctxt->src.val);
 		break;
 	case 5: /* jmp far */
 		rc = em_jmp_far(ctxt);
@@ -2022,30 +2096,47 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
 
 static int em_ret(struct x86_emulate_ctxt *ctxt)
 {
-	ctxt->dst.type = OP_REG;
-	ctxt->dst.addr.reg = &ctxt->_eip;
-	ctxt->dst.bytes = ctxt->op_bytes;
-	return em_pop(ctxt);
+	int rc;
+	unsigned long eip;
+
+	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	return assign_eip_near(ctxt, eip);
 }
 
 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
-	unsigned long cs;
+	unsigned long eip, cs;
+	u16 old_cs;
 	int cpl = ctxt->ops->cpl(ctxt);
+	struct desc_struct old_desc, new_desc;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 
-	rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes);
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
+				 VCPU_SREG_CS);
+
+	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	if (ctxt->op_bytes == 4)
-		ctxt->_eip = (u32)ctxt->_eip;
 	rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	/* Outer-privilege level return is not implemented */
 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
 		return X86EMUL_UNHANDLEABLE;
-	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
+	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
+				       &new_desc);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	rc = assign_eip_far(ctxt, eip, new_desc.l);
+	if (rc != X86EMUL_CONTINUE) {
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
+		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
+	}
 	return rc;
 }
 
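Note that em_ret_far() no longer open-codes the `(u32)` truncation: assign_eip_near()/assign_eip_far() now derive the truncation from op_bytes and reserve the canonicality check for 8-byte targets. A standalone sketch of that dispatch (the helper name is hypothetical):

#include <stdint.h>

/* hypothetical mirror of the op_bytes-driven truncation in assign_eip_far() */
static uint64_t truncate_ip(uint64_t dst, int op_bytes)
{
	switch (op_bytes) {
	case 2: return (uint16_t)dst;
	case 4: return (uint32_t)dst;
	default: return dst;	/* 8 bytes: canonicality is checked instead */
	}
}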
@@ -2306,7 +2397,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 {
 	const struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct cs, ss;
-	u64 msr_data;
+	u64 msr_data, rcx, rdx;
 	int usermode;
 	u16 cs_sel = 0, ss_sel = 0;
 
@@ -2322,6 +2413,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 	else
 		usermode = X86EMUL_MODE_PROT32;
 
+	rcx = reg_read(ctxt, VCPU_REGS_RCX);
+	rdx = reg_read(ctxt, VCPU_REGS_RDX);
+
 	cs.dpl = 3;
 	ss.dpl = 3;
 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
@@ -2339,6 +2433,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 		ss_sel = cs_sel + 8;
 		cs.d = 0;
 		cs.l = 1;
+		if (is_noncanonical_address(rcx) ||
+		    is_noncanonical_address(rdx))
+			return emulate_gp(ctxt, 0);
 		break;
 	}
 	cs_sel |= SELECTOR_RPL_MASK;
@@ -2347,8 +2444,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
-	ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
-	*reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
+	ctxt->_eip = rdx;
+	*reg_write(ctxt, VCPU_REGS_RSP) = rcx;
 
 	return X86EMUL_CONTINUE;
 }
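The SYSEXIT change reads RCX and RDX into locals and, on the 64-bit path, raises #GP(0) for non-canonical values before CS/SS are loaded, so a bad guest value can no longer be committed to RIP/RSP after segment state has already changed. A standalone sketch of that ordering, assuming 48 VA bits and hypothetical helper names:

#include <stdint.h>
#include <stdbool.h>

static bool noncanonical(uint64_t va)
{
	return (uint64_t)((int64_t)(va << 16) >> 16) != va;
}

/* fail before mutating any state; only then commit segments and RIP/RSP */
static bool sysexit_targets_ok(uint64_t rcx, uint64_t rdx, bool sixty_four_bit)
{
	return !sixty_four_bit || (!noncanonical(rcx) && !noncanonical(rdx));
}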
@@ -2466,19 +2563,24 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	 * Now load segment descriptors. If fault happens at this stage
 	 * it is handled in a context of new task
 	 */
-	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2603,25 +2705,32 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	 * Now load segment descriptors. If fault happenes at this stage
 	 * it is handled in a context of new task
 	 */
-	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
+					cpl, true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true);
+	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
+					true, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2888,10 +2997,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
 
 static int em_call(struct x86_emulate_ctxt *ctxt)
 {
+	int rc;
 	long rel = ctxt->src.val;
 
 	ctxt->src.val = (unsigned long)ctxt->_eip;
-	jmp_rel(ctxt, rel);
+	rc = jmp_rel(ctxt, rel);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
 	return em_push(ctxt);
 }
 
@@ -2900,34 +3012,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	u16 sel, old_cs;
 	ulong old_eip;
 	int rc;
+	struct desc_struct old_desc, new_desc;
+	const struct x86_emulate_ops *ops = ctxt->ops;
+	int cpl = ctxt->ops->cpl(ctxt);
 
-	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
 	old_eip = ctxt->_eip;
+	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
 
 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
-	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
+	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
+				       &new_desc);
+	if (rc != X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
 
-	ctxt->_eip = 0;
-	memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes);
+	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
+	if (rc != X86EMUL_CONTINUE)
+		goto fail;
 
 	ctxt->src.val = old_cs;
 	rc = em_push(ctxt);
 	if (rc != X86EMUL_CONTINUE)
-		return rc;
+		goto fail;
 
 	ctxt->src.val = old_eip;
-	return em_push(ctxt);
+	rc = em_push(ctxt);
+	/* If we failed, we tainted the memory, but the very least we should
+	   restore cs */
+	if (rc != X86EMUL_CONTINUE)
+		goto fail;
+	return rc;
+fail:
+	ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
+	return rc;
+
 }
 
 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
+	unsigned long eip;
 
-	ctxt->dst.type = OP_REG;
-	ctxt->dst.addr.reg = &ctxt->_eip;
-	ctxt->dst.bytes = ctxt->op_bytes;
-	rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
+	rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
+	rc = assign_eip_near(ctxt, eip);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
 	rsp_increment(ctxt, ctxt->src.val);
@@ -3254,20 +3382,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
 
 static int em_loop(struct x86_emulate_ctxt *ctxt)
 {
+	int rc = X86EMUL_CONTINUE;
+
 	register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
 	if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
 	    (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
-		jmp_rel(ctxt, ctxt->src.val);
+		rc = jmp_rel(ctxt, ctxt->src.val);
 
-	return X86EMUL_CONTINUE;
+	return rc;
 }
 
 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
 {
+	int rc = X86EMUL_CONTINUE;
+
 	if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
-		jmp_rel(ctxt, ctxt->src.val);
+		rc = jmp_rel(ctxt, ctxt->src.val);
 
-	return X86EMUL_CONTINUE;
+	return rc;
 }
 
 static int em_in(struct x86_emulate_ctxt *ctxt)
@@ -3355,6 +3487,12 @@ static int em_bswap(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+static int em_clflush(struct x86_emulate_ctxt *ctxt)
+{
+	/* emulating clflush regardless of cpuid */
+	return X86EMUL_CONTINUE;
+}
+
 static bool valid_cr(int nr)
 {
 	switch (nr) {
@@ -3693,6 +3831,16 @@ static const struct opcode group11[] = {
 	X7(D(Undefined)),
 };
 
+static const struct gprefix pfx_0f_ae_7 = {
+	I(SrcMem | ByteOp, em_clflush), N, N, N,
+};
+
+static const struct group_dual group15 = { {
+	N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
+}, {
+	N, N, N, N, N, N, N, N,
+} };
+
 static const struct gprefix pfx_0f_6f_0f_7f = {
 	I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
 };
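For the new tables: group15 splits 0F AE on ModRM.mod (first half memory forms, second half register forms, if the usual mod012/mod3 layout applies), and pfx_0f_ae_7 then splits /7 on the mandatory prefix, so only the no-prefix memory form reaches em_clflush. A simplified, hypothetical mirror of that decode path:

#include <stdio.h>

/* toy model of group15 + pfx_0f_ae_7; the index order and mod split are assumptions */
static const char *decode_0f_ae(int mod, int reg, int mandatory_prefix)
{
	if (mod == 3)			/* register forms: all undefined here */
		return "UD";
	if (reg != 7)			/* only /7 routes into pfx_0f_ae_7 */
		return "UD";
	return mandatory_prefix == 0 ? "clflush" : "UD";
}

int main(void)
{
	printf("%s\n", decode_0f_ae(0, 7, 0));	/* clflush */
	printf("%s\n", decode_0f_ae(3, 7, 0));	/* UD */
	return 0;
}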
@@ -3901,10 +4049,11 @@ static const struct opcode twobyte_table[256] = {
 	N, I(ImplicitOps | EmulateOnUD, em_syscall),
 	II(ImplicitOps | Priv, em_clts, clts), N,
 	DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
-	N, D(ImplicitOps | ModRM), N, N,
+	N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
 	/* 0x10 - 0x1F */
 	N, N, N, N, N, N, N, N,
-	D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM),
+	D(ImplicitOps | ModRM | SrcMem | NoAccess),
+	N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
 	/* 0x20 - 0x2F */
 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
 	DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
@@ -3956,7 +4105,7 @@ static const struct opcode twobyte_table[256] = {
 	F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
 	F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
 	F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
-	D(ModRM), F(DstReg | SrcMem | ModRM, em_imul),
+	GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
 	/* 0xB0 - 0xB7 */
 	I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
 	I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
@@ -4138,6 +4287,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		fetch_register_operand(op);
 		break;
 	case OpCL:
+		op->type = OP_IMM;
 		op->bytes = 1;
 		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
 		break;
@@ -4145,6 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		rc = decode_imm(ctxt, op, 1, true);
 		break;
 	case OpOne:
+		op->type = OP_IMM;
 		op->bytes = 1;
 		op->val = 1;
 		break;
@@ -4203,21 +4354,27 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		ctxt->memop.bytes = ctxt->op_bytes + 2;
 		goto mem_common;
 	case OpES:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_ES;
 		break;
 	case OpCS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_CS;
 		break;
 	case OpSS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_SS;
 		break;
 	case OpDS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_DS;
 		break;
 	case OpFS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_FS;
 		break;
 	case OpGS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_GS;
 		break;
 	case OpImplicit:
@@ -4473,10 +4630,10 @@ done_prefixes:
 	/* Decode and fetch the destination operand: register or memory. */
 	rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
 
-done:
 	if (ctxt->rip_relative)
 		ctxt->memopp->addr.mem.ea += ctxt->_eip;
 
+done:
 	return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
@@ -4726,7 +4883,7 @@ special_insn:
 		break;
 	case 0x70 ... 0x7f: /* jcc (short) */
 		if (test_cc(ctxt->b, ctxt->eflags))
-			jmp_rel(ctxt, ctxt->src.val);
+			rc = jmp_rel(ctxt, ctxt->src.val);
 		break;
 	case 0x8d: /* lea r16/r32, m */
 		ctxt->dst.val = ctxt->src.addr.mem.ea;
@@ -4756,7 +4913,7 @@ special_insn:
 		break;
 	case 0xe9: /* jmp rel */
 	case 0xeb: /* jmp rel short */
-		jmp_rel(ctxt, ctxt->src.val);
+		rc = jmp_rel(ctxt, ctxt->src.val);
 		ctxt->dst.type = OP_NONE; /* Disable writeback. */
 		break;
 	case 0xf4: /* hlt */
@@ -4881,13 +5038,11 @@ twobyte_insn:
 		break;
 	case 0x80 ... 0x8f: /* jnz rel, etc*/
 		if (test_cc(ctxt->b, ctxt->eflags))
-			jmp_rel(ctxt, ctxt->src.val);
+			rc = jmp_rel(ctxt, ctxt->src.val);
 		break;
 	case 0x90 ... 0x9f: /* setcc r/m8 */
 		ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
 		break;
-	case 0xae: /* clflush */
-		break;
 	case 0xb6 ... 0xb7: /* movzx */
 		ctxt->dst.bytes = ctxt->op_bytes;
 		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val