 arch/x86/include/asm/kvm_emulate.h |  20
 arch/x86/kvm/emulate.c             | 299
 arch/x86/kvm/x86.c                 |  45
 3 files changed, 220 insertions(+), 144 deletions(-)
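This change replaces the emulator's eagerly synchronized GPR array with an on-demand register cache: reg_read() pulls a register through the new ->read_gpr callback the first time it is touched and marks it valid, reg_write() and reg_rmw() additionally mark it dirty, and writeback_registers() pushes only the dirty registers back through ->write_gpr once emulation succeeds. The payoff (per the TODO comment removed from x86.c) is that expensive per-register backend accesses, e.g. VMCS reads on Intel, happen only for registers an instruction actually uses. A minimal standalone sketch of that valid/dirty bitmap pattern follows; every name in it (demo_ctxt, backend_read, backend_write) is a hypothetical stand-in, and only the bitmap logic mirrors the patch:

    #include <stdio.h>

    #define NR_REGS 16

    struct demo_ctxt {
            unsigned long regs[NR_REGS];
            unsigned int regs_valid;        /* bit n set: regs[n] holds a fetched value */
            unsigned int regs_dirty;        /* bit n set: regs[n] must be written back */
    };

    /* Stand-ins for the expensive per-register accessors. */
    static unsigned long backend_regs[NR_REGS];
    static unsigned long backend_read(unsigned nr) { return backend_regs[nr]; }
    static void backend_write(unsigned nr, unsigned long v) { backend_regs[nr] = v; }

    static unsigned long demo_reg_read(struct demo_ctxt *c, unsigned nr)
    {
            if (!(c->regs_valid & (1u << nr))) {    /* first touch: fetch once */
                    c->regs_valid |= 1u << nr;
                    c->regs[nr] = backend_read(nr);
            }
            return c->regs[nr];
    }

    static unsigned long *demo_reg_write(struct demo_ctxt *c, unsigned nr)
    {
            c->regs_valid |= 1u << nr;              /* cached value is now authoritative */
            c->regs_dirty |= 1u << nr;              /* and must be flushed later */
            return &c->regs[nr];
    }

    static void demo_writeback(struct demo_ctxt *c)
    {
            unsigned nr;

            for (nr = 0; nr < NR_REGS; nr++)        /* flush only what changed */
                    if (c->regs_dirty & (1u << nr))
                            backend_write(nr, c->regs[nr]);
            c->regs_dirty = c->regs_valid = 0;
    }

    int main(void)
    {
            struct demo_ctxt c = {{0}};
            unsigned long old;

            backend_regs[0] = 41;
            old = demo_reg_read(&c, 0);             /* fetch on first touch */
            *demo_reg_write(&c, 0) = old + 1;       /* update cached copy, mark dirty */
            demo_writeback(&c);                     /* one backend write, for reg 0 only */
            printf("%lu\n", backend_regs[0]);       /* prints 42 */
            return 0;
    }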
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index c764f43b71c5..282aee5d6ac1 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -86,6 +86,19 @@ struct x86_instruction_info {
 
 struct x86_emulate_ops {
         /*
+         * read_gpr: read a general purpose register (rax - r15)
+         *
+         * @reg: gpr number.
+         */
+        ulong (*read_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg);
+        /*
+         * write_gpr: write a general purpose register (rax - r15)
+         *
+         * @reg: gpr number.
+         * @val: value to write.
+         */
+        void (*write_gpr)(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val);
+        /*
          * read_std: Read bytes of standard (non-emulated/special) memory.
          *           Used for descriptor reading.
          * @addr:  [IN ] Linear address from which to read.
@@ -281,8 +294,10 @@ struct x86_emulate_ctxt {
         bool rip_relative;
         unsigned long _eip;
         struct operand memop;
+        u32 regs_valid;  /* bitmaps of registers in _regs[] that can be read */
+        u32 regs_dirty;  /* bitmaps of registers in _regs[] that have been written */
         /* Fields above regs are cleared together. */
-        unsigned long regs[NR_VCPU_REGS];
+        unsigned long _regs[NR_VCPU_REGS];
         struct operand *memopp;
         struct fetch_cache fetch;
         struct read_cache io_read;
@@ -394,4 +409,7 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
                          u16 tss_selector, int idt_index, int reason,
                          bool has_error_code, u32 error_code);
 int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
+void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
+void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);
+
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e8fb6c5c6c0a..5e27ba532613 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -202,6 +202,42 @@ struct gprefix {
 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 #define EFLG_RESERVED_ONE_MASK 2
 
+static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
+{
+        if (!(ctxt->regs_valid & (1 << nr))) {
+                ctxt->regs_valid |= 1 << nr;
+                ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
+        }
+        return ctxt->_regs[nr];
+}
+
+static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
+{
+        ctxt->regs_valid |= 1 << nr;
+        ctxt->regs_dirty |= 1 << nr;
+        return &ctxt->_regs[nr];
+}
+
+static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
+{
+        reg_read(ctxt, nr);
+        return reg_write(ctxt, nr);
+}
+
+static void writeback_registers(struct x86_emulate_ctxt *ctxt)
+{
+        unsigned reg;
+
+        for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
+                ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
+}
+
+static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
+{
+        ctxt->regs_dirty = 0;
+        ctxt->regs_valid = 0;
+}
+
 /*
  * Instruction emulation:
  * Most instructions are emulated directly via a fragment of inline assembly
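The three accessors above split register traffic by intent: reg_read() is for read-only operands, reg_write() marks a register valid and dirty without fetching it (safe only when the full register is overwritten), and reg_rmw() fetches and then marks dirty for in-place or partial-width updates; that is why byte-register accesses in decode_register() below go through reg_rmw(). A hypothetical caller (not part of the patch, and purely illustrative) would choose among them like this:

    static void demo_accessor_choice(struct x86_emulate_ctxt *ctxt)
    {
            ulong rcx = reg_read(ctxt, VCPU_REGS_RCX);      /* read-only: fetch once, no dirtying */

            *reg_write(ctxt, VCPU_REGS_RDX) = rcx >> 32;    /* full overwrite: no fetch needed */
            *reg_rmw(ctxt, VCPU_REGS_RAX) += rcx & 0xffff;  /* read-modify-write: fetch, then dirty */
    }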
@@ -374,8 +410,8 @@ struct gprefix {
 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex) \
         do { \
                 unsigned long _tmp; \
-                ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX]; \
-                ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX]; \
+                ulong *rax = reg_rmw((ctxt), VCPU_REGS_RAX); \
+                ulong *rdx = reg_rmw((ctxt), VCPU_REGS_RDX); \
                 \
                 __asm__ __volatile__ ( \
                         _PRE_EFLAGS("0", "5", "1") \
@@ -494,7 +530,7 @@ register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, in
 
 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
 {
-        masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
+        masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
 }
 
 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -786,14 +822,15 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
  * pointer into the block that addresses the relevant register.
  * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
  */
-static void *decode_register(u8 modrm_reg, unsigned long *regs,
+static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
                              int highbyte_regs)
 {
         void *p;
 
-        p = &regs[modrm_reg];
         if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
-                p = (unsigned char *)&regs[modrm_reg & 3] + 1;
+                p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
+        else
+                p = reg_rmw(ctxt, modrm_reg);
         return p;
 }
 
@@ -982,10 +1019,10 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
 
         op->type = OP_REG;
         if (ctxt->d & ByteOp) {
-                op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
+                op->addr.reg = decode_register(ctxt, reg, highbyte_regs);
                 op->bytes = 1;
         } else {
-                op->addr.reg = decode_register(reg, ctxt->regs, 0);
+                op->addr.reg = decode_register(ctxt, reg, 0);
                 op->bytes = ctxt->op_bytes;
         }
         fetch_register_operand(op);
@@ -1020,8 +1057,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
         if (ctxt->modrm_mod == 3) {
                 op->type = OP_REG;
                 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
-                op->addr.reg = decode_register(ctxt->modrm_rm,
-                                               ctxt->regs, ctxt->d & ByteOp);
+                op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp);
                 if (ctxt->d & Sse) {
                         op->type = OP_XMM;
                         op->bytes = 16;
@@ -1042,10 +1078,10 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
         op->type = OP_MEM;
 
         if (ctxt->ad_bytes == 2) {
-                unsigned bx = ctxt->regs[VCPU_REGS_RBX];
-                unsigned bp = ctxt->regs[VCPU_REGS_RBP];
-                unsigned si = ctxt->regs[VCPU_REGS_RSI];
-                unsigned di = ctxt->regs[VCPU_REGS_RDI];
+                unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
+                unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
+                unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
+                unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
 
                 /* 16-bit ModR/M decode. */
                 switch (ctxt->modrm_mod) {
@@ -1102,17 +1138,17 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
                 if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
                         modrm_ea += insn_fetch(s32, ctxt);
                 else {
-                        modrm_ea += ctxt->regs[base_reg];
+                        modrm_ea += reg_read(ctxt, base_reg);
                         adjust_modrm_seg(ctxt, base_reg);
                 }
                 if (index_reg != 4)
-                        modrm_ea += ctxt->regs[index_reg] << scale;
+                        modrm_ea += reg_read(ctxt, index_reg) << scale;
         } else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
                 if (ctxt->mode == X86EMUL_MODE_PROT64)
                         ctxt->rip_relative = 1;
         } else {
                 base_reg = ctxt->modrm_rm;
-                modrm_ea += ctxt->regs[base_reg];
+                modrm_ea += reg_read(ctxt, base_reg);
                 adjust_modrm_seg(ctxt, base_reg);
         }
         switch (ctxt->modrm_mod) {
@@ -1250,10 +1286,10 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
         if (rc->pos == rc->end) { /* refill pio read ahead */
                 unsigned int in_page, n;
                 unsigned int count = ctxt->rep_prefix ?
-                        address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
+                        address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
                 in_page = (ctxt->eflags & EFLG_DF) ?
-                        offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
-                        PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
+                        offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
+                        PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
                 n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
                         count);
                 if (n == 0)
@@ -1533,7 +1569,7 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
         struct segmented_address addr;
 
         rsp_increment(ctxt, -bytes);
-        addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
+        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
         addr.seg = VCPU_SREG_SS;
 
         return segmented_write(ctxt, addr, data, bytes);
@@ -1552,7 +1588,7 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
         int rc;
         struct segmented_address addr;
 
-        addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
+        addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
         addr.seg = VCPU_SREG_SS;
         rc = segmented_read(ctxt, addr, dest, len);
         if (rc != X86EMUL_CONTINUE)
@@ -1620,26 +1656,28 @@ static int em_enter(struct x86_emulate_ctxt *ctxt)
         int rc;
         unsigned frame_size = ctxt->src.val;
         unsigned nesting_level = ctxt->src2.val & 31;
+        ulong rbp;
 
         if (nesting_level)
                 return X86EMUL_UNHANDLEABLE;
 
-        rc = push(ctxt, &ctxt->regs[VCPU_REGS_RBP], stack_size(ctxt));
+        rbp = reg_read(ctxt, VCPU_REGS_RBP);
+        rc = push(ctxt, &rbp, stack_size(ctxt));
         if (rc != X86EMUL_CONTINUE)
                 return rc;
-        assign_masked(&ctxt->regs[VCPU_REGS_RBP], ctxt->regs[VCPU_REGS_RSP],
+        assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
                       stack_mask(ctxt));
-        assign_masked(&ctxt->regs[VCPU_REGS_RSP],
-                      ctxt->regs[VCPU_REGS_RSP] - frame_size,
+        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
+                      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
                       stack_mask(ctxt));
         return X86EMUL_CONTINUE;
 }
 
 static int em_leave(struct x86_emulate_ctxt *ctxt)
 {
-        assign_masked(&ctxt->regs[VCPU_REGS_RSP], ctxt->regs[VCPU_REGS_RBP],
+        assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
                       stack_mask(ctxt));
-        return emulate_pop(ctxt, &ctxt->regs[VCPU_REGS_RBP], ctxt->op_bytes);
+        return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
 }
 
 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
@@ -1667,13 +1705,13 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
 
 static int em_pusha(struct x86_emulate_ctxt *ctxt)
 {
-        unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
+        unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
         int rc = X86EMUL_CONTINUE;
         int reg = VCPU_REGS_RAX;
 
         while (reg <= VCPU_REGS_RDI) {
                 (reg == VCPU_REGS_RSP) ?
-                (ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
+                (ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
 
                 rc = em_push(ctxt);
                 if (rc != X86EMUL_CONTINUE)
@@ -1702,7 +1740,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
                         --reg;
                 }
 
-                rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
+                rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
                 if (rc != X86EMUL_CONTINUE)
                         break;
                 --reg;
@@ -1710,7 +1748,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
         return rc;
 }
 
-int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
+static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
 {
         struct x86_emulate_ops *ops = ctxt->ops;
         int rc;
@@ -1759,11 +1797,22 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
         return rc;
 }
 
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
+{
+        int rc;
+
+        invalidate_registers(ctxt);
+        rc = __emulate_int_real(ctxt, irq);
+        if (rc == X86EMUL_CONTINUE)
+                writeback_registers(ctxt);
+        return rc;
+}
+
 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
 {
         switch(ctxt->mode) {
         case X86EMUL_MODE_REAL:
-                return emulate_int_real(ctxt, irq);
+                return __emulate_int_real(ctxt, irq);
         case X86EMUL_MODE_VM86:
         case X86EMUL_MODE_PROT16:
         case X86EMUL_MODE_PROT32:
@@ -1970,14 +2019,14 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
 {
         u64 old = ctxt->dst.orig_val64;
 
-        if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
-            ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
-                ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
-                ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
+        if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
+            ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
+                *reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
+                *reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
                 ctxt->eflags &= ~EFLG_ZF;
         } else {
-                ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
-                        (u32) ctxt->regs[VCPU_REGS_RBX];
+                ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
+                        (u32) reg_read(ctxt, VCPU_REGS_RBX);
 
                 ctxt->eflags |= EFLG_ZF;
         }
@@ -2013,7 +2062,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
 {
         /* Save real source value, then compare EAX against destination. */
         ctxt->src.orig_val = ctxt->src.val;
-        ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
+        ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX);
         emulate_2op_SrcV(ctxt, "cmp");
 
         if (ctxt->eflags & EFLG_ZF) {
@@ -2022,7 +2071,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
         } else {
                 /* Failure: write the value we saw to EAX. */
                 ctxt->dst.type = OP_REG;
-                ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
+                ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
         }
         return X86EMUL_CONTINUE;
 }
@@ -2159,10 +2208,10 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
         ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
         ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
-        ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
+        *reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
         if (efer & EFER_LMA) {
 #ifdef CONFIG_X86_64
-                ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
+                *reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF;
 
                 ops->get_msr(ctxt,
                              ctxt->mode == X86EMUL_MODE_PROT64 ?
@@ -2241,7 +2290,7 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
         ctxt->_eip = msr_data;
 
         ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
-        ctxt->regs[VCPU_REGS_RSP] = msr_data;
+        *reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
 
         return X86EMUL_CONTINUE;
 }
@@ -2291,8 +2340,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
         ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
         ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
-        ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
-        ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
+        ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
+        *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
 
         return X86EMUL_CONTINUE;
 }
@@ -2361,14 +2410,14 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
 {
         tss->ip = ctxt->_eip;
         tss->flag = ctxt->eflags;
-        tss->ax = ctxt->regs[VCPU_REGS_RAX];
-        tss->cx = ctxt->regs[VCPU_REGS_RCX];
-        tss->dx = ctxt->regs[VCPU_REGS_RDX];
-        tss->bx = ctxt->regs[VCPU_REGS_RBX];
-        tss->sp = ctxt->regs[VCPU_REGS_RSP];
-        tss->bp = ctxt->regs[VCPU_REGS_RBP];
-        tss->si = ctxt->regs[VCPU_REGS_RSI];
-        tss->di = ctxt->regs[VCPU_REGS_RDI];
+        tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
+        tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
+        tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
+        tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
+        tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
+        tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
+        tss->si = reg_read(ctxt, VCPU_REGS_RSI);
+        tss->di = reg_read(ctxt, VCPU_REGS_RDI);
 
         tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
         tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
@@ -2384,14 +2433,14 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 
         ctxt->_eip = tss->ip;
         ctxt->eflags = tss->flag | 2;
-        ctxt->regs[VCPU_REGS_RAX] = tss->ax;
-        ctxt->regs[VCPU_REGS_RCX] = tss->cx;
-        ctxt->regs[VCPU_REGS_RDX] = tss->dx;
-        ctxt->regs[VCPU_REGS_RBX] = tss->bx;
-        ctxt->regs[VCPU_REGS_RSP] = tss->sp;
-        ctxt->regs[VCPU_REGS_RBP] = tss->bp;
-        ctxt->regs[VCPU_REGS_RSI] = tss->si;
-        ctxt->regs[VCPU_REGS_RDI] = tss->di;
+        *reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
+        *reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
+        *reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
+        *reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
+        *reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
+        *reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
+        *reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
+        *reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
 
         /*
          * SDM says that segment selectors are loaded before segment
@@ -2476,14 +2525,14 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
         tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
         tss->eip = ctxt->_eip;
         tss->eflags = ctxt->eflags;
-        tss->eax = ctxt->regs[VCPU_REGS_RAX];
-        tss->ecx = ctxt->regs[VCPU_REGS_RCX];
-        tss->edx = ctxt->regs[VCPU_REGS_RDX];
-        tss->ebx = ctxt->regs[VCPU_REGS_RBX];
-        tss->esp = ctxt->regs[VCPU_REGS_RSP];
-        tss->ebp = ctxt->regs[VCPU_REGS_RBP];
-        tss->esi = ctxt->regs[VCPU_REGS_RSI];
-        tss->edi = ctxt->regs[VCPU_REGS_RDI];
+        tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
+        tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
+        tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
+        tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
+        tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
+        tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
+        tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
+        tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
 
         tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
         tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
@@ -2505,14 +2554,14 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
         ctxt->eflags = tss->eflags | 2;
 
         /* General purpose registers */
-        ctxt->regs[VCPU_REGS_RAX] = tss->eax;
-        ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
-        ctxt->regs[VCPU_REGS_RDX] = tss->edx;
-        ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
-        ctxt->regs[VCPU_REGS_RSP] = tss->esp;
-        ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
-        ctxt->regs[VCPU_REGS_RSI] = tss->esi;
-        ctxt->regs[VCPU_REGS_RDI] = tss->edi;
+        *reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
+        *reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
+        *reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
+        *reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
+        *reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
+        *reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
+        *reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
+        *reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
 
         /*
          * SDM says that segment selectors are loaded before segment
@@ -2727,14 +2776,17 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 {
         int rc;
 
+        invalidate_registers(ctxt);
         ctxt->_eip = ctxt->eip;
         ctxt->dst.type = OP_NONE;
 
         rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
                                      has_error_code, error_code);
 
-        if (rc == X86EMUL_CONTINUE)
+        if (rc == X86EMUL_CONTINUE) {
                 ctxt->eip = ctxt->_eip;
+                writeback_registers(ctxt);
+        }
 
         return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 }
@@ -2744,8 +2796,8 @@ static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
 {
         int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
 
-        register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
-        op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
+        register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
+        op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
         op->addr.mem.seg = seg;
 }
 
@@ -2921,7 +2973,7 @@ static int em_cwd(struct x86_emulate_ctxt *ctxt)
 {
         ctxt->dst.type = OP_REG;
         ctxt->dst.bytes = ctxt->src.bytes;
-        ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX];
+        ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
         ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
 
         return X86EMUL_CONTINUE;
@@ -2932,8 +2984,8 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
         u64 tsc = 0;
 
         ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
-        ctxt->regs[VCPU_REGS_RAX] = (u32)tsc;
-        ctxt->regs[VCPU_REGS_RDX] = tsc >> 32;
+        *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
+        *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
         return X86EMUL_CONTINUE;
 }
 
@@ -2941,10 +2993,10 @@ static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
 {
         u64 pmc;
 
-        if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc))
+        if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
                 return emulate_gp(ctxt, 0);
-        ctxt->regs[VCPU_REGS_RAX] = (u32)pmc;
-        ctxt->regs[VCPU_REGS_RDX] = pmc >> 32;
+        *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
+        *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
         return X86EMUL_CONTINUE;
 }
 
@@ -2986,9 +3038,9 @@ static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
 {
         u64 msr_data;
 
-        msr_data = (u32)ctxt->regs[VCPU_REGS_RAX]
-                | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32);
-        if (ctxt->ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data))
+        msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
+                | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
+        if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
                 return emulate_gp(ctxt, 0);
 
         return X86EMUL_CONTINUE;
@@ -2998,11 +3050,11 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
 {
         u64 msr_data;
 
-        if (ctxt->ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data))
+        if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
                 return emulate_gp(ctxt, 0);
 
-        ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data;
-        ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32;
+        *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
+        *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
         return X86EMUL_CONTINUE;
 }
 
@@ -3182,8 +3234,8 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
 
 static int em_loop(struct x86_emulate_ctxt *ctxt)
 {
-        register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
-        if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) &&
+        register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
+        if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
             (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
                 jmp_rel(ctxt, ctxt->src.val);
 
@@ -3192,7 +3244,7 @@ static int em_loop(struct x86_emulate_ctxt *ctxt)
 
 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
 {
-        if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0)
+        if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
                 jmp_rel(ctxt, ctxt->src.val);
 
         return X86EMUL_CONTINUE;
@@ -3280,20 +3332,20 @@ static int em_cpuid(struct x86_emulate_ctxt *ctxt)
 {
         u32 eax, ebx, ecx, edx;
 
-        eax = ctxt->regs[VCPU_REGS_RAX];
-        ecx = ctxt->regs[VCPU_REGS_RCX];
+        eax = reg_read(ctxt, VCPU_REGS_RAX);
+        ecx = reg_read(ctxt, VCPU_REGS_RCX);
         ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
-        ctxt->regs[VCPU_REGS_RAX] = eax;
-        ctxt->regs[VCPU_REGS_RBX] = ebx;
-        ctxt->regs[VCPU_REGS_RCX] = ecx;
-        ctxt->regs[VCPU_REGS_RDX] = edx;
+        *reg_write(ctxt, VCPU_REGS_RAX) = eax;
+        *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
+        *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
+        *reg_write(ctxt, VCPU_REGS_RDX) = edx;
         return X86EMUL_CONTINUE;
 }
 
 static int em_lahf(struct x86_emulate_ctxt *ctxt)
 {
-        ctxt->regs[VCPU_REGS_RAX] &= ~0xff00UL;
-        ctxt->regs[VCPU_REGS_RAX] |= (ctxt->eflags & 0xff) << 8;
+        *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
+        *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
         return X86EMUL_CONTINUE;
 }
 
@@ -3450,7 +3502,7 @@ static int check_svme(struct x86_emulate_ctxt *ctxt)
 
 static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
 {
-        u64 rax = ctxt->regs[VCPU_REGS_RAX];
+        u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
 
         /* Valid physical address? */
         if (rax & 0xffff000000000000ULL)
@@ -3472,7 +3524,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
 {
         u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
-        u64 rcx = ctxt->regs[VCPU_REGS_RCX];
+        u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
 
         if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
             (rcx > 3))
@@ -3930,7 +3982,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
         case OpAcc:
                 op->type = OP_REG;
                 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
-                op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
+                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
                 fetch_register_operand(op);
                 op->orig_val = op->val;
                 break;
@@ -3938,19 +3990,19 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                 op->type = OP_MEM;
                 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                 op->addr.mem.ea =
-                        register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
+                        register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
                 op->addr.mem.seg = VCPU_SREG_ES;
                 op->val = 0;
                 break;
         case OpDX:
                 op->type = OP_REG;
                 op->bytes = 2;
-                op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
+                op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
                 fetch_register_operand(op);
                 break;
         case OpCL:
                 op->bytes = 1;
-                op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
+                op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
                 break;
         case OpImmByte:
                 rc = decode_imm(ctxt, op, 1, true);
@@ -3981,7 +4033,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
                 op->type = OP_MEM;
                 op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
                 op->addr.mem.ea =
-                        register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
+                        register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
                 op->addr.mem.seg = seg_override(ctxt);
                 op->val = 0;
                 break;
@@ -4287,6 +4339,7 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
                 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
 }
 
+
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
         struct x86_emulate_ops *ops = ctxt->ops;
@@ -4371,7 +4424,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
         if (ctxt->rep_prefix && (ctxt->d & String)) {
                 /* All REP prefixes have the same first termination condition */
-                if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
+                if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
                         ctxt->eip = ctxt->_eip;
                         goto done;
                 }
@@ -4444,7 +4497,7 @@ special_insn:
                 ctxt->dst.val = ctxt->src.addr.mem.ea;
                 break;
         case 0x90 ... 0x97: /* nop / xchg reg, rax */
-                if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
+                if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
                         break;
                 rc = em_xchg(ctxt);
                 break;
@@ -4472,7 +4525,7 @@ special_insn:
                 rc = em_grp2(ctxt);
                 break;
         case 0xd2 ... 0xd3: /* Grp2 */
-                ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
+                ctxt->src.val = reg_read(ctxt, VCPU_REGS_RCX);
                 rc = em_grp2(ctxt);
                 break;
         case 0xe9: /* jmp rel */
@@ -4527,14 +4580,14 @@ writeback:
 
         if (ctxt->rep_prefix && (ctxt->d & String)) {
                 struct read_cache *r = &ctxt->io_read;
-                register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
+                register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
 
                 if (!string_insn_completed(ctxt)) {
                         /*
                          * Re-enter guest when pio read ahead buffer is empty
                          * or, if it is not used, after each 1024 iteration.
                          */
-                        if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
+                        if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
                             (r->end == 0 || r->end != r->pos)) {
                                 /*
                                  * Reset read cache. Usually happens before
@@ -4542,6 +4595,7 @@ writeback:
                                  * we have to do it here.
                                  */
                                 ctxt->mem_read.end = 0;
+                                writeback_registers(ctxt);
                                 return EMULATION_RESTART;
                         }
                         goto done; /* skip rip writeback */
@@ -4556,6 +4610,9 @@ done:
         if (rc == X86EMUL_INTERCEPTED)
                 return EMULATION_INTERCEPTED;
 
+        if (rc == X86EMUL_CONTINUE)
+                writeback_registers(ctxt);
+
         return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 
 twobyte_insn:
@@ -4628,3 +4685,13 @@ twobyte_insn:
 cannot_emulate:
         return EMULATION_FAILED;
 }
+
+void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
+{
+        invalidate_registers(ctxt);
+}
+
+void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
+{
+        writeback_registers(ctxt);
+}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 42bbf4187d20..e00050ce7a6a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4313,7 +4313,19 @@ static void emulator_get_cpuid(struct x86_emulate_ctxt *ctxt,
         kvm_cpuid(emul_to_vcpu(ctxt), eax, ebx, ecx, edx);
 }
 
+static ulong emulator_read_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg)
+{
+        return kvm_register_read(emul_to_vcpu(ctxt), reg);
+}
+
+static void emulator_write_gpr(struct x86_emulate_ctxt *ctxt, unsigned reg, ulong val)
+{
+        kvm_register_write(emul_to_vcpu(ctxt), reg, val);
+}
+
 static struct x86_emulate_ops emulate_ops = {
+        .read_gpr            = emulator_read_gpr,
+        .write_gpr           = emulator_write_gpr,
         .read_std            = kvm_read_guest_virt_system,
         .write_std           = kvm_write_guest_virt_system,
         .fetch               = kvm_fetch_guest_virt,
@@ -4348,14 +4360,6 @@ static struct x86_emulate_ops emulate_ops = {
         .get_cpuid           = emulator_get_cpuid,
 };
 
-static void cache_all_regs(struct kvm_vcpu *vcpu)
-{
-        kvm_register_read(vcpu, VCPU_REGS_RAX);
-        kvm_register_read(vcpu, VCPU_REGS_RSP);
-        kvm_register_read(vcpu, VCPU_REGS_RIP);
-        vcpu->arch.regs_dirty = ~0;
-}
-
 static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 {
         u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(vcpu, mask);
@@ -4382,12 +4386,10 @@ static void inject_emulated_exception(struct kvm_vcpu *vcpu)
         kvm_queue_exception(vcpu, ctxt->exception.vector);
 }
 
-static void init_decode_cache(struct x86_emulate_ctxt *ctxt,
-                              const unsigned long *regs)
+static void init_decode_cache(struct x86_emulate_ctxt *ctxt)
 {
         memset(&ctxt->twobyte, 0,
-               (void *)&ctxt->regs - (void *)&ctxt->twobyte);
-        memcpy(ctxt->regs, regs, sizeof(ctxt->regs));
+               (void *)&ctxt->_regs - (void *)&ctxt->twobyte);
 
         ctxt->fetch.start = 0;
         ctxt->fetch.end = 0;
@@ -4402,14 +4404,6 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
         int cs_db, cs_l;
 
-        /*
-         * TODO: fix emulate.c to use guest_read/write_register
-         * instead of direct ->regs accesses, can save hundred cycles
-         * on Intel for instructions that don't read/change RSP, for
-         * for example.
-         */
-        cache_all_regs(vcpu);
-
         kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
         ctxt->eflags = kvm_get_rflags(vcpu);
@@ -4421,7 +4415,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
                                                             X86EMUL_MODE_PROT16;
         ctxt->guest_mode = is_guest_mode(vcpu);
 
-        init_decode_cache(ctxt, vcpu->arch.regs);
+        init_decode_cache(ctxt);
         vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
 }
 
@@ -4441,7 +4435,6 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
                 return EMULATE_FAIL;
 
         ctxt->eip = ctxt->_eip;
-        memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
         kvm_rip_write(vcpu, ctxt->eip);
         kvm_set_rflags(vcpu, ctxt->eflags);
 
@@ -4599,7 +4592,7 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
            changes registers values during IO operation */
         if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {
                 vcpu->arch.emulate_regs_need_sync_from_vcpu = false;
-                memcpy(ctxt->regs, vcpu->arch.regs, sizeof ctxt->regs);
+                emulator_invalidate_register_cache(ctxt);
         }
 
 restart:
@@ -4637,7 +4630,6 @@ restart:
                 toggle_interruptibility(vcpu, ctxt->interruptibility);
                 kvm_set_rflags(vcpu, ctxt->eflags);
                 kvm_make_request(KVM_REQ_EVENT, vcpu);
-                memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
                 kvm_rip_write(vcpu, ctxt->eip);
         } else
@@ -5591,8 +5583,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
                  * that usually, but some bad designed PV devices (vmware
                  * backdoor interface) need this to work
                  */
-                struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
-                memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
+                emulator_writeback_register_cache(&vcpu->arch.emulate_ctxt);
                 vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
         }
         regs->rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
@@ -5723,6 +5714,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
 {
         struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
         int ret;
+        unsigned reg;
 
         init_emulate_ctxt(vcpu);
 
@@ -5732,7 +5724,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
         if (ret)
                 return EMULATE_FAIL;
 
-        memcpy(vcpu->arch.regs, ctxt->regs, sizeof ctxt->regs);
         kvm_rip_write(vcpu, ctxt->eip);
         kvm_set_rflags(vcpu, ctxt->eflags);
         kvm_make_request(KVM_REQ_EVENT, vcpu);
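The ops table ends up routing the emulator's on-demand accesses to kvm_register_read()/kvm_register_write(), which keep their own availability and dirty tracking in vcpu->arch (the removed cache_all_regs() manipulated exactly that state). A toy version of this ops-table indirection, with made-up names throughout:

    #include <stdio.h>

    struct demo_ops {
            unsigned long (*read_gpr)(void *vcpu, unsigned reg);
            void (*write_gpr)(void *vcpu, unsigned reg, unsigned long val);
    };

    static unsigned long fake_vcpu_regs[16];

    static unsigned long fake_read_gpr(void *vcpu, unsigned reg)
    {
            (void)vcpu;                      /* a real backend would use the vcpu handle */
            return fake_vcpu_regs[reg];
    }

    static void fake_write_gpr(void *vcpu, unsigned reg, unsigned long val)
    {
            (void)vcpu;
            fake_vcpu_regs[reg] = val;
    }

    static const struct demo_ops demo_emulate_ops = {
            .read_gpr  = fake_read_gpr,
            .write_gpr = fake_write_gpr,
    };

    int main(void)
    {
            demo_emulate_ops.write_gpr(NULL, 0, 42);
            printf("%lu\n", demo_emulate_ops.read_gpr(NULL, 0));  /* prints 42 */
            return 0;
    }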