Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r--	arch/x86/kvm/emulate.c	538
1 file changed, 309 insertions(+), 229 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a3b57a27be88..39171cb307ea 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -161,9 +161,9 @@ struct opcode {
 	u64 intercept : 8;
 	union {
 		int (*execute)(struct x86_emulate_ctxt *ctxt);
-		struct opcode *group;
-		struct group_dual *gdual;
-		struct gprefix *gprefix;
+		const struct opcode *group;
+		const struct group_dual *gdual;
+		const struct gprefix *gprefix;
 	} u;
 	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 };
@@ -202,6 +202,42 @@ struct gprefix {
 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 #define EFLG_RESERVED_ONE_MASK 2
 
+static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
+{
+	if (!(ctxt->regs_valid & (1 << nr))) {
+		ctxt->regs_valid |= 1 << nr;
+		ctxt->_regs[nr] = ctxt->ops->read_gpr(ctxt, nr);
+	}
+	return ctxt->_regs[nr];
+}
+
+static ulong *reg_write(struct x86_emulate_ctxt *ctxt, unsigned nr)
+{
+	ctxt->regs_valid |= 1 << nr;
+	ctxt->regs_dirty |= 1 << nr;
+	return &ctxt->_regs[nr];
+}
+
+static ulong *reg_rmw(struct x86_emulate_ctxt *ctxt, unsigned nr)
+{
+	reg_read(ctxt, nr);
+	return reg_write(ctxt, nr);
+}
+
+static void writeback_registers(struct x86_emulate_ctxt *ctxt)
+{
+	unsigned reg;
+
+	for_each_set_bit(reg, (ulong *)&ctxt->regs_dirty, 16)
+		ctxt->ops->write_gpr(ctxt, reg, ctxt->_regs[reg]);
+}
+
+static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
+{
+	ctxt->regs_dirty = 0;
+	ctxt->regs_valid = 0;
+}
+
 /*
  * Instruction emulation:
  * Most instructions are emulated directly via a fragment of inline assembly
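[Editor's note: the hunk above is the heart of this change. reg_read()/reg_write()/reg_rmw() replace direct ctxt->regs[] accesses with a lazily filled cache: regs_valid tracks which GPRs have been fetched through ->read_gpr(), regs_dirty tracks which must be written back, and writeback_registers() flushes only the dirty ones. A minimal user-space sketch of the same bitmap pattern; the struct, the fetch/commit callbacks, and all names here are illustrative stand-ins, not the kernel's API:

#include <stdio.h>

#define NR_REGS 16

struct reg_cache {
	unsigned long regs[NR_REGS];
	unsigned valid;		/* bit n set: regs[n] holds a fetched value */
	unsigned dirty;		/* bit n set: regs[n] must be written back */
	unsigned long (*fetch)(unsigned nr);	/* slow path, e.g. a VMCS read */
	void (*commit)(unsigned nr, unsigned long val);
};

static unsigned long cache_read(struct reg_cache *c, unsigned nr)
{
	if (!(c->valid & (1u << nr))) {	/* first touch: fill from source */
		c->valid |= 1u << nr;
		c->regs[nr] = c->fetch(nr);
	}
	return c->regs[nr];
}

static unsigned long *cache_write(struct reg_cache *c, unsigned nr)
{
	/* no fetch: the caller overwrites the whole register */
	c->valid |= 1u << nr;
	c->dirty |= 1u << nr;
	return &c->regs[nr];
}

static unsigned long *cache_rmw(struct reg_cache *c, unsigned nr)
{
	cache_read(c, nr);	/* make sure the current value is present */
	return cache_write(c, nr);
}

static void cache_writeback(struct reg_cache *c)
{
	for (unsigned nr = 0; nr < NR_REGS; nr++)
		if (c->dirty & (1u << nr))
			c->commit(nr, c->regs[nr]);
	c->dirty = c->valid = 0;
}

static unsigned long slow_fetch(unsigned nr) { return nr * 100; }
static void slow_commit(unsigned nr, unsigned long v)
{
	printf("write back reg %u = %lu\n", nr, v);
}

int main(void)
{
	struct reg_cache c = { .fetch = slow_fetch, .commit = slow_commit };

	*cache_rmw(&c, 3) += 1;	/* fetches 300, stores 301 */
	cache_writeback(&c);	/* flushes only reg 3 */
	return 0;
}

Only registers an instruction actually touches cost a fetch, and only modified ones cost a writeback; that is why the rest of the diff routes every access through these helpers.]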
@@ -374,8 +410,8 @@ struct gprefix {
 #define __emulate_1op_rax_rdx(ctxt, _op, _suffix, _ex)	\
 	do {						\
 		unsigned long _tmp;			\
-		ulong *rax = &(ctxt)->regs[VCPU_REGS_RAX]; \
-		ulong *rdx = &(ctxt)->regs[VCPU_REGS_RDX]; \
+		ulong *rax = reg_rmw((ctxt), VCPU_REGS_RAX); \
+		ulong *rdx = reg_rmw((ctxt), VCPU_REGS_RDX); \
 							\
 		__asm__ __volatile__ (			\
 			_PRE_EFLAGS("0", "5", "1")	\
@@ -494,7 +530,7 @@ register_address_increment(struct x86_emulate_ctxt *ctxt, unsigned long *reg, in
 
 static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
 {
-	masked_increment(&ctxt->regs[VCPU_REGS_RSP], stack_mask(ctxt), inc);
+	masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
 }
 
 static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
@@ -632,8 +668,6 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
 	switch (ctxt->mode) {
-	case X86EMUL_MODE_REAL:
-		break;
 	case X86EMUL_MODE_PROT64:
 		if (((signed long)la << 16) >> 16 != la)
 			return emulate_gp(ctxt, 0);
@@ -655,7 +689,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
 				goto bad;
 		} else {
-			/* exapand-down segment */
+			/* expand-down segment */
 			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
 				goto bad;
 			lim = desc.d ? 0xffffffff : 0xffff;
@@ -663,7 +697,10 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 				goto bad;
 		}
 		cpl = ctxt->ops->cpl(ctxt);
-		rpl = sel & 3;
+		if (ctxt->mode == X86EMUL_MODE_REAL)
+			rpl = 0;
+		else
+			rpl = sel & 3;
 		cpl = max(cpl, rpl);
 		if (!(desc.type & 8)) {
 			/* data segment */
@@ -688,9 +725,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 bad:
 	if (addr.seg == VCPU_SREG_SS)
-		return emulate_ss(ctxt, addr.seg);
+		return emulate_ss(ctxt, sel);
 	else
-		return emulate_gp(ctxt, addr.seg);
+		return emulate_gp(ctxt, sel);
 }
 
 static int linearize(struct x86_emulate_ctxt *ctxt,
@@ -786,14 +823,15 @@ static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
  * pointer into the block that addresses the relevant register.
  * @highbyte_regs specifies whether to decode AH,CH,DH,BH.
  */
-static void *decode_register(u8 modrm_reg, unsigned long *regs,
-			     int highbyte_regs)
+static void *decode_register(struct x86_emulate_ctxt *ctxt, u8 modrm_reg,
+			     int highbyte_regs)
 {
 	void *p;
 
-	p = &regs[modrm_reg];
 	if (highbyte_regs && modrm_reg >= 4 && modrm_reg < 8)
-		p = (unsigned char *)&regs[modrm_reg & 3] + 1;
+		p = (unsigned char *)reg_rmw(ctxt, modrm_reg & 3) + 1;
+	else
+		p = reg_rmw(ctxt, modrm_reg);
 	return p;
 }
 
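[Editor's note: decode_register() now hands out a pointer into the cached _regs[] array via reg_rmw(), so byte-sized writes through that pointer automatically mark the containing register dirty. The high-byte quirk is worth spelling out: without a REX prefix, register encodings 4-7 name AH/CH/DH/BH, which alias byte 1 of RAX/RCX/RDX/RBX. A little-endian user-space illustration; the function and names are made up for this sketch:

#include <stdint.h>
#include <stdio.h>

/* On little-endian x86, AH/CH/DH/BH (encodings 4-7, no REX prefix)
 * alias byte 1 of RAX/RCX/RDX/RBX (encodings 0-3). */
static void *decode_byte_reg(uint64_t *regs, unsigned enc, int highbyte_regs)
{
	if (highbyte_regs && enc >= 4 && enc < 8)
		return (uint8_t *)&regs[enc & 3] + 1;	/* 4 (AH) -> RAX byte 1 */
	return &regs[enc];
}

int main(void)
{
	uint64_t regs[16] = { 0x1122334455667788ull };	/* regs[0] = RAX */
	uint8_t *ah = decode_byte_reg(regs, 4, 1);

	printf("AH = %02x\n", *ah);	/* prints 77 */
	return 0;
}
]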
@@ -871,23 +909,23 @@ static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
 {
 	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
-	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
-	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
-	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
-	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
-	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
-	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
-	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
-	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
+	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
+	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
+	case 2: asm("movdqa %%xmm2, %0" : "=m"(*data)); break;
+	case 3: asm("movdqa %%xmm3, %0" : "=m"(*data)); break;
+	case 4: asm("movdqa %%xmm4, %0" : "=m"(*data)); break;
+	case 5: asm("movdqa %%xmm5, %0" : "=m"(*data)); break;
+	case 6: asm("movdqa %%xmm6, %0" : "=m"(*data)); break;
+	case 7: asm("movdqa %%xmm7, %0" : "=m"(*data)); break;
 #ifdef CONFIG_X86_64
-	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
-	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
-	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
-	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
-	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
-	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
-	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
-	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
+	case 8: asm("movdqa %%xmm8, %0" : "=m"(*data)); break;
+	case 9: asm("movdqa %%xmm9, %0" : "=m"(*data)); break;
+	case 10: asm("movdqa %%xmm10, %0" : "=m"(*data)); break;
+	case 11: asm("movdqa %%xmm11, %0" : "=m"(*data)); break;
+	case 12: asm("movdqa %%xmm12, %0" : "=m"(*data)); break;
+	case 13: asm("movdqa %%xmm13, %0" : "=m"(*data)); break;
+	case 14: asm("movdqa %%xmm14, %0" : "=m"(*data)); break;
+	case 15: asm("movdqa %%xmm15, %0" : "=m"(*data)); break;
 #endif
 	default: BUG();
 	}
@@ -899,23 +937,23 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 {
 	ctxt->ops->get_fpu(ctxt);
 	switch (reg) {
-	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
-	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
-	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
-	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
-	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
-	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
-	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
-	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
+	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
+	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
+	case 2: asm("movdqa %0, %%xmm2" : : "m"(*data)); break;
+	case 3: asm("movdqa %0, %%xmm3" : : "m"(*data)); break;
+	case 4: asm("movdqa %0, %%xmm4" : : "m"(*data)); break;
+	case 5: asm("movdqa %0, %%xmm5" : : "m"(*data)); break;
+	case 6: asm("movdqa %0, %%xmm6" : : "m"(*data)); break;
+	case 7: asm("movdqa %0, %%xmm7" : : "m"(*data)); break;
 #ifdef CONFIG_X86_64
-	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
-	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
-	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
-	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
-	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
-	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
-	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
-	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
+	case 8: asm("movdqa %0, %%xmm8" : : "m"(*data)); break;
+	case 9: asm("movdqa %0, %%xmm9" : : "m"(*data)); break;
+	case 10: asm("movdqa %0, %%xmm10" : : "m"(*data)); break;
+	case 11: asm("movdqa %0, %%xmm11" : : "m"(*data)); break;
+	case 12: asm("movdqa %0, %%xmm12" : : "m"(*data)); break;
+	case 13: asm("movdqa %0, %%xmm13" : : "m"(*data)); break;
+	case 14: asm("movdqa %0, %%xmm14" : : "m"(*data)); break;
+	case 15: asm("movdqa %0, %%xmm15" : : "m"(*data)); break;
 #endif
 	default: BUG();
 	}
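[Editor's note: both SSE accessors switch from movdqu (unaligned) to movdqa (aligned) moves. movdqa raises #GP on a misaligned memory operand, so this is only correct because the sse128_t buffers in the emulator context are 16-byte aligned; in exchange, the aligned form is never slower. The same aligned/unaligned pair exists as compiler intrinsics; a user-space illustration of the distinction (not the kernel's inline-asm code, compile with SSE2 enabled):

#include <emmintrin.h>	/* SSE2 intrinsics */
#include <stdio.h>

int main(void)
{
	/* _Alignas(16) provides the guarantee that movdqa-style loads need. */
	_Alignas(16) unsigned char buf[16] = { 1, 2, 3, 4 };

	__m128i a = _mm_load_si128((const __m128i *)buf);	/* movdqa: 16-byte aligned only */
	__m128i u = _mm_loadu_si128((const __m128i *)buf);	/* movdqu: any alignment */

	printf("%d %d\n", _mm_extract_epi16(a, 0), _mm_extract_epi16(u, 0));
	return 0;
}
]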
@@ -982,10 +1020,10 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
 
 	op->type = OP_REG;
 	if (ctxt->d & ByteOp) {
-		op->addr.reg = decode_register(reg, ctxt->regs, highbyte_regs);
+		op->addr.reg = decode_register(ctxt, reg, highbyte_regs);
 		op->bytes = 1;
 	} else {
-		op->addr.reg = decode_register(reg, ctxt->regs, 0);
+		op->addr.reg = decode_register(ctxt, reg, 0);
 		op->bytes = ctxt->op_bytes;
 	}
 	fetch_register_operand(op);
@@ -1020,8 +1058,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 	if (ctxt->modrm_mod == 3) {
 		op->type = OP_REG;
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
-		op->addr.reg = decode_register(ctxt->modrm_rm,
-					       ctxt->regs, ctxt->d & ByteOp);
+		op->addr.reg = decode_register(ctxt, ctxt->modrm_rm, ctxt->d & ByteOp);
 		if (ctxt->d & Sse) {
 			op->type = OP_XMM;
 			op->bytes = 16;
@@ -1042,10 +1079,10 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 	op->type = OP_MEM;
 
 	if (ctxt->ad_bytes == 2) {
-		unsigned bx = ctxt->regs[VCPU_REGS_RBX];
-		unsigned bp = ctxt->regs[VCPU_REGS_RBP];
-		unsigned si = ctxt->regs[VCPU_REGS_RSI];
-		unsigned di = ctxt->regs[VCPU_REGS_RDI];
+		unsigned bx = reg_read(ctxt, VCPU_REGS_RBX);
+		unsigned bp = reg_read(ctxt, VCPU_REGS_RBP);
+		unsigned si = reg_read(ctxt, VCPU_REGS_RSI);
+		unsigned di = reg_read(ctxt, VCPU_REGS_RDI);
 
 		/* 16-bit ModR/M decode. */
 		switch (ctxt->modrm_mod) {
@@ -1102,17 +1139,17 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
 				modrm_ea += insn_fetch(s32, ctxt);
 			else {
-				modrm_ea += ctxt->regs[base_reg];
+				modrm_ea += reg_read(ctxt, base_reg);
 				adjust_modrm_seg(ctxt, base_reg);
 			}
 			if (index_reg != 4)
-				modrm_ea += ctxt->regs[index_reg] << scale;
+				modrm_ea += reg_read(ctxt, index_reg) << scale;
 		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
 			if (ctxt->mode == X86EMUL_MODE_PROT64)
 				ctxt->rip_relative = 1;
 		} else {
 			base_reg = ctxt->modrm_rm;
-			modrm_ea += ctxt->regs[base_reg];
+			modrm_ea += reg_read(ctxt, base_reg);
 			adjust_modrm_seg(ctxt, base_reg);
 		}
 		switch (ctxt->modrm_mod) {
@@ -1179,24 +1216,21 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	struct read_cache *mc = &ctxt->mem_read;
 
-	while (size) {
-		int n = min(size, 8u);
-		size -= n;
-		if (mc->pos < mc->end)
-			goto read_cached;
-
-		rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
-					      &ctxt->exception);
-		if (rc != X86EMUL_CONTINUE)
-			return rc;
-		mc->end += n;
-
-	read_cached:
-		memcpy(dest, mc->data + mc->pos, n);
-		mc->pos += n;
-		dest += n;
-		addr += n;
-	}
+	if (mc->pos < mc->end)
+		goto read_cached;
+
+	WARN_ON((mc->end + size) >= sizeof(mc->data));
+
+	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, size,
+				      &ctxt->exception);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	mc->end += size;
+
+read_cached:
+	memcpy(dest, mc->data + mc->pos, size);
+	mc->pos += size;
 	return X86EMUL_CONTINUE;
 }
 
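[Editor's note: read_emulated() loses its 8-byte chunking loop; the whole operand is now fetched through a single ->read_emulated() callback and appended to the mem_read cache, with a WARN_ON guarding against overflow instead of silent wraparound. The cache exists so that when an instruction is re-executed (for example after an MMIO exit completes), it observes exactly the same bytes. A toy model of that replay behaviour; the types and the bus_read() stand-in are illustrative:

#include <string.h>
#include <stdio.h>

struct read_cache {
	unsigned char data[1024];
	unsigned pos, end;
};

static int bus_read(unsigned long addr, void *dst, unsigned size)
{
	memset(dst, 0xab, size);	/* stand-in for a real MMIO read */
	return 0;
}

static int cached_read(struct read_cache *mc, unsigned long addr,
		       void *dest, unsigned size)
{
	if (mc->pos < mc->end)
		goto read_cached;	/* replay: bytes already fetched */

	if (mc->end + size >= sizeof(mc->data))
		return -1;		/* would overflow the cache */

	if (bus_read(addr, mc->data + mc->end, size))
		return -1;
	mc->end += size;

read_cached:
	memcpy(dest, mc->data + mc->pos, size);
	mc->pos += size;
	return 0;
}

int main(void)
{
	struct read_cache mc = { 0 };
	unsigned char buf[8];

	cached_read(&mc, 0x1000, buf, 8);	/* first pass fills the cache */
	mc.pos = 0;				/* instruction is replayed */
	cached_read(&mc, 0x1000, buf, 8);	/* served from the cache */
	printf("pos=%u end=%u\n", mc.pos, mc.end);
	return 0;
}
]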
@@ -1253,10 +1287,10 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 	if (rc->pos == rc->end) { /* refill pio read ahead */
 		unsigned int in_page, n;
 		unsigned int count = ctxt->rep_prefix ?
-			address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) : 1;
+			address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) : 1;
 		in_page = (ctxt->eflags & EFLG_DF) ?
-			offset_in_page(ctxt->regs[VCPU_REGS_RDI]) :
-			PAGE_SIZE - offset_in_page(ctxt->regs[VCPU_REGS_RDI]);
+			offset_in_page(reg_read(ctxt, VCPU_REGS_RDI)) :
+			PAGE_SIZE - offset_in_page(reg_read(ctxt, VCPU_REGS_RDI));
 		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
 			count);
 		if (n == 0)
@@ -1267,8 +1301,15 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 		rc->end = n * size;
 	}
 
-	memcpy(dest, rc->data + rc->pos, size);
-	rc->pos += size;
+	if (ctxt->rep_prefix && !(ctxt->eflags & EFLG_DF)) {
+		ctxt->dst.data = rc->data + rc->pos;
+		ctxt->dst.type = OP_MEM_STR;
+		ctxt->dst.count = (rc->end - rc->pos) / size;
+		rc->pos = rc->end;
+	} else {
+		memcpy(dest, rc->data + rc->pos, size);
+		rc->pos += size;
+	}
 	return 1;
 }
 
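[Editor's note: for rep ins with the direction flag clear, pio_in_emulated() no longer copies one element per emulated iteration. It publishes the whole read-ahead buffer as a single OP_MEM_STR destination operand carrying a count, and the matching OP_MEM_STR case added to writeback() further down flushes count * bytes to guest memory in one segmented_write(). The batching idea in isolation, with toy types and illustrative names:

#include <stdio.h>
#include <string.h>

struct pio_buf { unsigned char data[64]; unsigned pos, end; };

/* Drain everything the read-ahead buffer holds in one destination write
 * instead of one element per iteration. */
static unsigned drain(struct pio_buf *rc, unsigned size,
		      unsigned char *dst_mem)
{
	unsigned count = (rc->end - rc->pos) / size;	/* elements available */

	memcpy(dst_mem, rc->data + rc->pos, count * size);	/* one big write */
	rc->pos = rc->end;
	return count;	/* lets the caller advance RDI/RCX by count */
}

int main(void)
{
	struct pio_buf rc = { .end = 16 };
	unsigned char guest_mem[64];

	memset(rc.data, 0x5a, sizeof(rc.data));
	printf("flushed %u words\n", drain(&rc, 2, guest_mem));	/* 8 */
	return 0;
}
]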
@@ -1291,7 +1332,7 @@ static int read_interrupt_descriptor(struct x86_emulate_ctxt *ctxt,
 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 				     u16 selector, struct desc_ptr *dt)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 
 	if (selector & 1 << 2) {
 		struct desc_struct desc;
@@ -1355,19 +1396,15 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
 	ulong desc_addr;
 	int ret;
+	u16 dummy;
 
 	memset(&seg_desc, 0, sizeof seg_desc);
 
 	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
 	    || ctxt->mode == X86EMUL_MODE_REAL) {
 		/* set real mode segment descriptor */
+		ctxt->ops->get_segment(ctxt, &dummy, &seg_desc, NULL, seg);
 		set_desc_base(&seg_desc, selector << 4);
-		set_desc_limit(&seg_desc, 0xffff);
-		seg_desc.type = 3;
-		seg_desc.p = 1;
-		seg_desc.s = 1;
-		if (ctxt->mode == X86EMUL_MODE_VM86)
-			seg_desc.dpl = 3;
 		goto load;
 	}
 
@@ -1396,7 +1433,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	err_code = selector & 0xfffc;
 	err_vec = GP_VECTOR;
 
-	/* can't load system descriptor into segment selecor */
+	/* can't load system descriptor into segment selector */
 	if (seg <= VCPU_SREG_GS && !seg_desc.s)
 		goto exception;
 
@@ -1516,6 +1553,14 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		break;
+	case OP_MEM_STR:
+		rc = segmented_write(ctxt,
+				ctxt->dst.addr.mem,
+				ctxt->dst.data,
+				ctxt->dst.bytes * ctxt->dst.count);
+		if (rc != X86EMUL_CONTINUE)
+			return rc;
+		break;
 	case OP_XMM:
 		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
 		break;
@@ -1536,7 +1581,7 @@ static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
 	struct segmented_address addr;
 
 	rsp_increment(ctxt, -bytes);
-	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
+	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 
 	return segmented_write(ctxt, addr, data, bytes);
@@ -1555,7 +1600,7 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	struct segmented_address addr;
 
-	addr.ea = ctxt->regs[VCPU_REGS_RSP] & stack_mask(ctxt);
+	addr.ea = reg_read(ctxt, VCPU_REGS_RSP) & stack_mask(ctxt);
 	addr.seg = VCPU_SREG_SS;
 	rc = segmented_read(ctxt, addr, dest, len);
 	if (rc != X86EMUL_CONTINUE)
@@ -1623,26 +1668,28 @@ static int em_enter(struct x86_emulate_ctxt *ctxt)
 	int rc;
 	unsigned frame_size = ctxt->src.val;
 	unsigned nesting_level = ctxt->src2.val & 31;
+	ulong rbp;
 
 	if (nesting_level)
 		return X86EMUL_UNHANDLEABLE;
 
-	rc = push(ctxt, &ctxt->regs[VCPU_REGS_RBP], stack_size(ctxt));
+	rbp = reg_read(ctxt, VCPU_REGS_RBP);
+	rc = push(ctxt, &rbp, stack_size(ctxt));
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	assign_masked(&ctxt->regs[VCPU_REGS_RBP], ctxt->regs[VCPU_REGS_RSP],
+	assign_masked(reg_rmw(ctxt, VCPU_REGS_RBP), reg_read(ctxt, VCPU_REGS_RSP),
 		      stack_mask(ctxt));
-	assign_masked(&ctxt->regs[VCPU_REGS_RSP],
-		      ctxt->regs[VCPU_REGS_RSP] - frame_size,
+	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP),
+		      reg_read(ctxt, VCPU_REGS_RSP) - frame_size,
 		      stack_mask(ctxt));
 	return X86EMUL_CONTINUE;
 }
 
 static int em_leave(struct x86_emulate_ctxt *ctxt)
 {
-	assign_masked(&ctxt->regs[VCPU_REGS_RSP], ctxt->regs[VCPU_REGS_RBP],
+	assign_masked(reg_rmw(ctxt, VCPU_REGS_RSP), reg_read(ctxt, VCPU_REGS_RBP),
 		      stack_mask(ctxt));
-	return emulate_pop(ctxt, &ctxt->regs[VCPU_REGS_RBP], ctxt->op_bytes);
+	return emulate_pop(ctxt, reg_rmw(ctxt, VCPU_REGS_RBP), ctxt->op_bytes);
 }
 
 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
@@ -1670,13 +1717,13 @@ static int em_pop_sreg(struct x86_emulate_ctxt *ctxt)
 
 static int em_pusha(struct x86_emulate_ctxt *ctxt)
 {
-	unsigned long old_esp = ctxt->regs[VCPU_REGS_RSP];
+	unsigned long old_esp = reg_read(ctxt, VCPU_REGS_RSP);
 	int rc = X86EMUL_CONTINUE;
 	int reg = VCPU_REGS_RAX;
 
 	while (reg <= VCPU_REGS_RDI) {
 		(reg == VCPU_REGS_RSP) ?
-		(ctxt->src.val = old_esp) : (ctxt->src.val = ctxt->regs[reg]);
+		(ctxt->src.val = old_esp) : (ctxt->src.val = reg_read(ctxt, reg));
 
 		rc = em_push(ctxt);
 		if (rc != X86EMUL_CONTINUE)
@@ -1705,7 +1752,7 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
 			--reg;
 		}
 
-		rc = emulate_pop(ctxt, &ctxt->regs[reg], ctxt->op_bytes);
+		rc = emulate_pop(ctxt, reg_rmw(ctxt, reg), ctxt->op_bytes);
 		if (rc != X86EMUL_CONTINUE)
 			break;
 		--reg;
@@ -1713,9 +1760,9 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
 	return rc;
 }
 
-int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
+static int __emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	int rc;
 	struct desc_ptr dt;
 	gva_t cs_addr;
@@ -1762,11 +1809,22 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
 	return rc;
 }
 
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
+{
+	int rc;
+
+	invalidate_registers(ctxt);
+	rc = __emulate_int_real(ctxt, irq);
+	if (rc == X86EMUL_CONTINUE)
+		writeback_registers(ctxt);
+	return rc;
+}
+
 static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
 {
 	switch(ctxt->mode) {
 	case X86EMUL_MODE_REAL:
-		return emulate_int_real(ctxt, irq);
+		return __emulate_int_real(ctxt, irq);
 	case X86EMUL_MODE_VM86:
 	case X86EMUL_MODE_PROT16:
 	case X86EMUL_MODE_PROT32:
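[Editor's note: emulate_int_real() is now a thin exported wrapper. The internal __emulate_int_real() keeps working on the register cache, while the wrapper brackets it with invalidate_registers() and, on success, writeback_registers(); emulator_task_switch() below gets the same treatment. Every entry point called from outside the emulator must start with an empty cache and commit dirty registers when done, while internal callers such as emulate_int() use the double-underscore variant to avoid flushing mid-instruction. The shape of the pattern, with stand-in names:

#include <stdio.h>

struct ctx { unsigned valid, dirty; };

static void invalidate_cache(struct ctx *c)	{ c->valid = c->dirty = 0; }
static void writeback_cache(struct ctx *c)	{ printf("flush %x\n", c->dirty); }
static int do_int_real(struct ctx *c, int irq)	{ c->dirty |= 1; return 0; }

/* Exported entry point: bracket the internal worker with cache setup
 * and commit, exactly as emulate_int_real() wraps __emulate_int_real(). */
int emulate_int_real_like(struct ctx *c, int irq)
{
	int rc;

	invalidate_cache(c);	/* start from authoritative state */
	rc = do_int_real(c, irq);
	if (rc == 0)
		writeback_cache(c);	/* commit only on success */
	return rc;
}

int main(void) { struct ctx c; return emulate_int_real_like(&c, 8); }
]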
@@ -1973,14 +2031,14 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
 {
 	u64 old = ctxt->dst.orig_val64;
 
-	if (((u32) (old >> 0) != (u32) ctxt->regs[VCPU_REGS_RAX]) ||
-	    ((u32) (old >> 32) != (u32) ctxt->regs[VCPU_REGS_RDX])) {
-		ctxt->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
-		ctxt->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
+	if (((u32) (old >> 0) != (u32) reg_read(ctxt, VCPU_REGS_RAX)) ||
+	    ((u32) (old >> 32) != (u32) reg_read(ctxt, VCPU_REGS_RDX))) {
+		*reg_write(ctxt, VCPU_REGS_RAX) = (u32) (old >> 0);
+		*reg_write(ctxt, VCPU_REGS_RDX) = (u32) (old >> 32);
 		ctxt->eflags &= ~EFLG_ZF;
 	} else {
-		ctxt->dst.val64 = ((u64)ctxt->regs[VCPU_REGS_RCX] << 32) |
-			(u32) ctxt->regs[VCPU_REGS_RBX];
+		ctxt->dst.val64 = ((u64)reg_read(ctxt, VCPU_REGS_RCX) << 32) |
+			(u32) reg_read(ctxt, VCPU_REGS_RBX);
 
 		ctxt->eflags |= EFLG_ZF;
 	}
@@ -2016,7 +2074,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
 {
 	/* Save real source value, then compare EAX against destination. */
 	ctxt->src.orig_val = ctxt->src.val;
-	ctxt->src.val = ctxt->regs[VCPU_REGS_RAX];
+	ctxt->src.val = reg_read(ctxt, VCPU_REGS_RAX);
 	emulate_2op_SrcV(ctxt, "cmp");
 
 	if (ctxt->eflags & EFLG_ZF) {
@@ -2025,7 +2083,7 @@ static int em_cmpxchg(struct x86_emulate_ctxt *ctxt)
 	} else {
 		/* Failure: write the value we saw to EAX. */
 		ctxt->dst.type = OP_REG;
-		ctxt->dst.addr.reg = (unsigned long *)&ctxt->regs[VCPU_REGS_RAX];
+		ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
 	}
 	return X86EMUL_CONTINUE;
 }
@@ -2050,12 +2108,6 @@ static void
 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 			struct desc_struct *cs, struct desc_struct *ss)
 {
-	u16 selector;
-
-	memset(cs, 0, sizeof(struct desc_struct));
-	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
-	memset(ss, 0, sizeof(struct desc_struct));
-
 	cs->l = 0;		/* will be adjusted later */
 	set_desc_base(cs, 0);	/* flat segment */
 	cs->g = 1;		/* 4kb granularity */
@@ -2065,6 +2117,7 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 	cs->dpl = 0;		/* will be adjusted later */
 	cs->p = 1;
 	cs->d = 1;
+	cs->avl = 0;
 
 	set_desc_base(ss, 0);	/* flat segment */
 	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
@@ -2074,6 +2127,8 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 	ss->d = 1;		/* 32bit stack segment */
 	ss->dpl = 0;
 	ss->p = 1;
+	ss->l = 0;
+	ss->avl = 0;
 }
 
 static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
@@ -2089,7 +2144,7 @@ static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
 
 static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	u32 eax, ebx, ecx, edx;
 
 	/*
@@ -2133,7 +2188,7 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
 
 static int em_syscall(struct x86_emulate_ctxt *ctxt)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct cs, ss;
 	u64 msr_data;
 	u16 cs_sel, ss_sel;
@@ -2165,10 +2220,10 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
-	ctxt->regs[VCPU_REGS_RCX] = ctxt->_eip;
+	*reg_write(ctxt, VCPU_REGS_RCX) = ctxt->_eip;
 	if (efer & EFER_LMA) {
 #ifdef CONFIG_X86_64
-		ctxt->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
+		*reg_write(ctxt, VCPU_REGS_R11) = ctxt->eflags & ~EFLG_RF;
 
 		ops->get_msr(ctxt,
 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
@@ -2191,7 +2246,7 @@ static int em_syscall(struct x86_emulate_ctxt *ctxt)
 
 static int em_sysenter(struct x86_emulate_ctxt *ctxt)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct cs, ss;
 	u64 msr_data;
 	u16 cs_sel, ss_sel;
@@ -2228,6 +2283,8 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
 		if (msr_data == 0x0)
 			return emulate_gp(ctxt, 0);
 		break;
+	default:
+		break;
 	}
 
 	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
@@ -2247,14 +2304,14 @@ static int em_sysenter(struct x86_emulate_ctxt *ctxt)
 	ctxt->_eip = msr_data;
 
 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
-	ctxt->regs[VCPU_REGS_RSP] = msr_data;
+	*reg_write(ctxt, VCPU_REGS_RSP) = msr_data;
 
 	return X86EMUL_CONTINUE;
 }
 
 static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct cs, ss;
 	u64 msr_data;
 	int usermode;
@@ -2297,8 +2354,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
 	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
 	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
-	ctxt->_eip = ctxt->regs[VCPU_REGS_RDX];
-	ctxt->regs[VCPU_REGS_RSP] = ctxt->regs[VCPU_REGS_RCX];
+	ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
+	*reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
 
 	return X86EMUL_CONTINUE;
 }
@@ -2317,7 +2374,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 					    u16 port, u16 len)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct tr_seg;
 	u32 base3;
 	int r;
@@ -2367,14 +2424,14 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
 {
 	tss->ip = ctxt->_eip;
 	tss->flag = ctxt->eflags;
-	tss->ax = ctxt->regs[VCPU_REGS_RAX];
-	tss->cx = ctxt->regs[VCPU_REGS_RCX];
-	tss->dx = ctxt->regs[VCPU_REGS_RDX];
-	tss->bx = ctxt->regs[VCPU_REGS_RBX];
-	tss->sp = ctxt->regs[VCPU_REGS_RSP];
-	tss->bp = ctxt->regs[VCPU_REGS_RBP];
-	tss->si = ctxt->regs[VCPU_REGS_RSI];
-	tss->di = ctxt->regs[VCPU_REGS_RDI];
+	tss->ax = reg_read(ctxt, VCPU_REGS_RAX);
+	tss->cx = reg_read(ctxt, VCPU_REGS_RCX);
+	tss->dx = reg_read(ctxt, VCPU_REGS_RDX);
+	tss->bx = reg_read(ctxt, VCPU_REGS_RBX);
+	tss->sp = reg_read(ctxt, VCPU_REGS_RSP);
+	tss->bp = reg_read(ctxt, VCPU_REGS_RBP);
+	tss->si = reg_read(ctxt, VCPU_REGS_RSI);
+	tss->di = reg_read(ctxt, VCPU_REGS_RDI);
 
 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
@@ -2390,14 +2447,14 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 
 	ctxt->_eip = tss->ip;
 	ctxt->eflags = tss->flag | 2;
-	ctxt->regs[VCPU_REGS_RAX] = tss->ax;
-	ctxt->regs[VCPU_REGS_RCX] = tss->cx;
-	ctxt->regs[VCPU_REGS_RDX] = tss->dx;
-	ctxt->regs[VCPU_REGS_RBX] = tss->bx;
-	ctxt->regs[VCPU_REGS_RSP] = tss->sp;
-	ctxt->regs[VCPU_REGS_RBP] = tss->bp;
-	ctxt->regs[VCPU_REGS_RSI] = tss->si;
-	ctxt->regs[VCPU_REGS_RDI] = tss->di;
+	*reg_write(ctxt, VCPU_REGS_RAX) = tss->ax;
+	*reg_write(ctxt, VCPU_REGS_RCX) = tss->cx;
+	*reg_write(ctxt, VCPU_REGS_RDX) = tss->dx;
+	*reg_write(ctxt, VCPU_REGS_RBX) = tss->bx;
+	*reg_write(ctxt, VCPU_REGS_RSP) = tss->sp;
+	*reg_write(ctxt, VCPU_REGS_RBP) = tss->bp;
+	*reg_write(ctxt, VCPU_REGS_RSI) = tss->si;
+	*reg_write(ctxt, VCPU_REGS_RDI) = tss->di;
 
 	/*
 	 * SDM says that segment selectors are loaded before segment
@@ -2410,7 +2467,7 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
 
 	/*
-	 * Now load segment descriptors. If fault happenes at this stage
+	 * Now load segment descriptors. If fault happens at this stage
 	 * it is handled in a context of new task
 	 */
 	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
@@ -2436,7 +2493,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 			  u16 tss_selector, u16 old_tss_sel,
 			  ulong old_tss_base, struct desc_struct *new_desc)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	struct tss_segment_16 tss_seg;
 	int ret;
 	u32 new_tss_base = get_desc_base(new_desc);
@@ -2482,14 +2539,14 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
 	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
 	tss->eip = ctxt->_eip;
 	tss->eflags = ctxt->eflags;
-	tss->eax = ctxt->regs[VCPU_REGS_RAX];
-	tss->ecx = ctxt->regs[VCPU_REGS_RCX];
-	tss->edx = ctxt->regs[VCPU_REGS_RDX];
-	tss->ebx = ctxt->regs[VCPU_REGS_RBX];
-	tss->esp = ctxt->regs[VCPU_REGS_RSP];
-	tss->ebp = ctxt->regs[VCPU_REGS_RBP];
-	tss->esi = ctxt->regs[VCPU_REGS_RSI];
-	tss->edi = ctxt->regs[VCPU_REGS_RDI];
+	tss->eax = reg_read(ctxt, VCPU_REGS_RAX);
+	tss->ecx = reg_read(ctxt, VCPU_REGS_RCX);
+	tss->edx = reg_read(ctxt, VCPU_REGS_RDX);
+	tss->ebx = reg_read(ctxt, VCPU_REGS_RBX);
+	tss->esp = reg_read(ctxt, VCPU_REGS_RSP);
+	tss->ebp = reg_read(ctxt, VCPU_REGS_RBP);
+	tss->esi = reg_read(ctxt, VCPU_REGS_RSI);
+	tss->edi = reg_read(ctxt, VCPU_REGS_RDI);
 
 	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
 	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
@@ -2511,14 +2568,14 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	ctxt->eflags = tss->eflags | 2;
 
 	/* General purpose registers */
-	ctxt->regs[VCPU_REGS_RAX] = tss->eax;
-	ctxt->regs[VCPU_REGS_RCX] = tss->ecx;
-	ctxt->regs[VCPU_REGS_RDX] = tss->edx;
-	ctxt->regs[VCPU_REGS_RBX] = tss->ebx;
-	ctxt->regs[VCPU_REGS_RSP] = tss->esp;
-	ctxt->regs[VCPU_REGS_RBP] = tss->ebp;
-	ctxt->regs[VCPU_REGS_RSI] = tss->esi;
-	ctxt->regs[VCPU_REGS_RDI] = tss->edi;
+	*reg_write(ctxt, VCPU_REGS_RAX) = tss->eax;
+	*reg_write(ctxt, VCPU_REGS_RCX) = tss->ecx;
+	*reg_write(ctxt, VCPU_REGS_RDX) = tss->edx;
+	*reg_write(ctxt, VCPU_REGS_RBX) = tss->ebx;
+	*reg_write(ctxt, VCPU_REGS_RSP) = tss->esp;
+	*reg_write(ctxt, VCPU_REGS_RBP) = tss->ebp;
+	*reg_write(ctxt, VCPU_REGS_RSI) = tss->esi;
+	*reg_write(ctxt, VCPU_REGS_RDI) = tss->edi;
 
 	/*
 	 * SDM says that segment selectors are loaded before segment
@@ -2583,7 +2640,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 			  u16 tss_selector, u16 old_tss_sel,
 			  ulong old_tss_base, struct desc_struct *new_desc)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	struct tss_segment_32 tss_seg;
 	int ret;
 	u32 new_tss_base = get_desc_base(new_desc);
@@ -2627,7 +2684,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 				   u16 tss_selector, int idt_index, int reason,
 				   bool has_error_code, u32 error_code)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct curr_tss_desc, next_tss_desc;
 	int ret;
 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
@@ -2652,7 +2709,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	 *
 	 * 1. jmp/call/int to task gate: Check against DPL of the task gate
	 * 2. Exception/IRQ/iret: No check is performed
-	 * 3. jmp/call to TSS: Check agains DPL of the TSS
+	 * 3. jmp/call to TSS: Check against DPL of the TSS
 	 */
 	if (reason == TASK_SWITCH_GATE) {
 		if (idt_index != -1) {
@@ -2693,7 +2750,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
 
 	/* set back link to prev task only if NT bit is set in eflags
-	   note that old_tss_sel is not used afetr this point */
+	   note that old_tss_sel is not used after this point */
 	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
 		old_tss_sel = 0xffff;
 
@@ -2733,26 +2790,28 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 {
 	int rc;
 
+	invalidate_registers(ctxt);
 	ctxt->_eip = ctxt->eip;
 	ctxt->dst.type = OP_NONE;
 
 	rc = emulator_do_task_switch(ctxt, tss_selector, idt_index, reason,
 				     has_error_code, error_code);
 
-	if (rc == X86EMUL_CONTINUE)
+	if (rc == X86EMUL_CONTINUE) {
 		ctxt->eip = ctxt->_eip;
+		writeback_registers(ctxt);
+	}
 
 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 }
 
-static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg,
-			    int reg, struct operand *op)
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, int reg,
+			    struct operand *op)
 {
-	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
+	int df = (ctxt->eflags & EFLG_DF) ? -op->count : op->count;
 
-	register_address_increment(ctxt, &ctxt->regs[reg], df * op->bytes);
-	op->addr.mem.ea = register_address(ctxt, ctxt->regs[reg]);
-	op->addr.mem.seg = seg;
+	register_address_increment(ctxt, reg_rmw(ctxt, reg), df * op->bytes);
+	op->addr.mem.ea = register_address(ctxt, reg_read(ctxt, reg));
 }
 
 static int em_das(struct x86_emulate_ctxt *ctxt)
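[Editor's note: because a batched string operand can now cover op->count elements, string_addr_inc() advances RSI/RDI by count elements per call (negated when EFLAGS.DF is set) instead of always one, and the now-redundant segment assignment is dropped since the segment is already set when the operand is decoded. The arithmetic in isolation, with illustrative names:

#include <stdio.h>

/* After a (possibly batched) string op, advance the index register by
 * count elements, forwards or backwards per the direction flag. */
static void string_addr_inc(unsigned long *reg, unsigned bytes,
			    unsigned count, int df_set)
{
	long step = df_set ? -(long)count : (long)count;

	*reg += step * (long)bytes;
}

int main(void)
{
	unsigned long rdi = 0x1000;

	string_addr_inc(&rdi, 2, 8, 0);	/* rep insw, 8 words, DF clear */
	printf("rdi = %#lx\n", rdi);	/* 0x1010 */
	return 0;
}
]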
@@ -2927,7 +2986,7 @@ static int em_cwd(struct x86_emulate_ctxt *ctxt)
2927{ 2986{
2928 ctxt->dst.type = OP_REG; 2987 ctxt->dst.type = OP_REG;
2929 ctxt->dst.bytes = ctxt->src.bytes; 2988 ctxt->dst.bytes = ctxt->src.bytes;
2930 ctxt->dst.addr.reg = &ctxt->regs[VCPU_REGS_RDX]; 2989 ctxt->dst.addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
2931 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1); 2990 ctxt->dst.val = ~((ctxt->src.val >> (ctxt->src.bytes * 8 - 1)) - 1);
2932 2991
2933 return X86EMUL_CONTINUE; 2992 return X86EMUL_CONTINUE;
@@ -2938,8 +2997,8 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
2938 u64 tsc = 0; 2997 u64 tsc = 0;
2939 2998
2940 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); 2999 ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
2941 ctxt->regs[VCPU_REGS_RAX] = (u32)tsc; 3000 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)tsc;
2942 ctxt->regs[VCPU_REGS_RDX] = tsc >> 32; 3001 *reg_write(ctxt, VCPU_REGS_RDX) = tsc >> 32;
2943 return X86EMUL_CONTINUE; 3002 return X86EMUL_CONTINUE;
2944} 3003}
2945 3004
@@ -2947,10 +3006,10 @@ static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
2947{ 3006{
2948 u64 pmc; 3007 u64 pmc;
2949 3008
2950 if (ctxt->ops->read_pmc(ctxt, ctxt->regs[VCPU_REGS_RCX], &pmc)) 3009 if (ctxt->ops->read_pmc(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &pmc))
2951 return emulate_gp(ctxt, 0); 3010 return emulate_gp(ctxt, 0);
2952 ctxt->regs[VCPU_REGS_RAX] = (u32)pmc; 3011 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)pmc;
2953 ctxt->regs[VCPU_REGS_RDX] = pmc >> 32; 3012 *reg_write(ctxt, VCPU_REGS_RDX) = pmc >> 32;
2954 return X86EMUL_CONTINUE; 3013 return X86EMUL_CONTINUE;
2955} 3014}
2956 3015
@@ -2992,9 +3051,9 @@ static int em_wrmsr(struct x86_emulate_ctxt *ctxt)
2992{ 3051{
2993 u64 msr_data; 3052 u64 msr_data;
2994 3053
2995 msr_data = (u32)ctxt->regs[VCPU_REGS_RAX] 3054 msr_data = (u32)reg_read(ctxt, VCPU_REGS_RAX)
2996 | ((u64)ctxt->regs[VCPU_REGS_RDX] << 32); 3055 | ((u64)reg_read(ctxt, VCPU_REGS_RDX) << 32);
2997 if (ctxt->ops->set_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], msr_data)) 3056 if (ctxt->ops->set_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), msr_data))
2998 return emulate_gp(ctxt, 0); 3057 return emulate_gp(ctxt, 0);
2999 3058
3000 return X86EMUL_CONTINUE; 3059 return X86EMUL_CONTINUE;
@@ -3004,11 +3063,11 @@ static int em_rdmsr(struct x86_emulate_ctxt *ctxt)
3004{ 3063{
3005 u64 msr_data; 3064 u64 msr_data;
3006 3065
3007 if (ctxt->ops->get_msr(ctxt, ctxt->regs[VCPU_REGS_RCX], &msr_data)) 3066 if (ctxt->ops->get_msr(ctxt, reg_read(ctxt, VCPU_REGS_RCX), &msr_data))
3008 return emulate_gp(ctxt, 0); 3067 return emulate_gp(ctxt, 0);
3009 3068
3010 ctxt->regs[VCPU_REGS_RAX] = (u32)msr_data; 3069 *reg_write(ctxt, VCPU_REGS_RAX) = (u32)msr_data;
3011 ctxt->regs[VCPU_REGS_RDX] = msr_data >> 32; 3070 *reg_write(ctxt, VCPU_REGS_RDX) = msr_data >> 32;
3012 return X86EMUL_CONTINUE; 3071 return X86EMUL_CONTINUE;
3013} 3072}
3014 3073
@@ -3188,8 +3247,8 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3188 3247
3189static int em_loop(struct x86_emulate_ctxt *ctxt) 3248static int em_loop(struct x86_emulate_ctxt *ctxt)
3190{ 3249{
3191 register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1); 3250 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3192 if ((address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) != 0) && 3251 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3193 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) 3252 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3194 jmp_rel(ctxt, ctxt->src.val); 3253 jmp_rel(ctxt, ctxt->src.val);
3195 3254
@@ -3198,7 +3257,7 @@ static int em_loop(struct x86_emulate_ctxt *ctxt)
3198 3257
3199static int em_jcxz(struct x86_emulate_ctxt *ctxt) 3258static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3200{ 3259{
3201 if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) 3260 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3202 jmp_rel(ctxt, ctxt->src.val); 3261 jmp_rel(ctxt, ctxt->src.val);
3203 3262
3204 return X86EMUL_CONTINUE; 3263 return X86EMUL_CONTINUE;
@@ -3286,20 +3345,20 @@ static int em_cpuid(struct x86_emulate_ctxt *ctxt)
3286{ 3345{
3287 u32 eax, ebx, ecx, edx; 3346 u32 eax, ebx, ecx, edx;
3288 3347
3289 eax = ctxt->regs[VCPU_REGS_RAX]; 3348 eax = reg_read(ctxt, VCPU_REGS_RAX);
3290 ecx = ctxt->regs[VCPU_REGS_RCX]; 3349 ecx = reg_read(ctxt, VCPU_REGS_RCX);
3291 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx); 3350 ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
3292 ctxt->regs[VCPU_REGS_RAX] = eax; 3351 *reg_write(ctxt, VCPU_REGS_RAX) = eax;
3293 ctxt->regs[VCPU_REGS_RBX] = ebx; 3352 *reg_write(ctxt, VCPU_REGS_RBX) = ebx;
3294 ctxt->regs[VCPU_REGS_RCX] = ecx; 3353 *reg_write(ctxt, VCPU_REGS_RCX) = ecx;
3295 ctxt->regs[VCPU_REGS_RDX] = edx; 3354 *reg_write(ctxt, VCPU_REGS_RDX) = edx;
3296 return X86EMUL_CONTINUE; 3355 return X86EMUL_CONTINUE;
3297} 3356}
3298 3357
3299static int em_lahf(struct x86_emulate_ctxt *ctxt) 3358static int em_lahf(struct x86_emulate_ctxt *ctxt)
3300{ 3359{
3301 ctxt->regs[VCPU_REGS_RAX] &= ~0xff00UL; 3360 *reg_rmw(ctxt, VCPU_REGS_RAX) &= ~0xff00UL;
3302 ctxt->regs[VCPU_REGS_RAX] |= (ctxt->eflags & 0xff) << 8; 3361 *reg_rmw(ctxt, VCPU_REGS_RAX) |= (ctxt->eflags & 0xff) << 8;
3303 return X86EMUL_CONTINUE; 3362 return X86EMUL_CONTINUE;
3304} 3363}
3305 3364
@@ -3456,7 +3515,7 @@ static int check_svme(struct x86_emulate_ctxt *ctxt)
3456 3515
3457static int check_svme_pa(struct x86_emulate_ctxt *ctxt) 3516static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
3458{ 3517{
3459 u64 rax = ctxt->regs[VCPU_REGS_RAX]; 3518 u64 rax = reg_read(ctxt, VCPU_REGS_RAX);
3460 3519
3461 /* Valid physical address? */ 3520 /* Valid physical address? */
3462 if (rax & 0xffff000000000000ULL) 3521 if (rax & 0xffff000000000000ULL)
@@ -3478,7 +3537,7 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
3478static int check_rdpmc(struct x86_emulate_ctxt *ctxt) 3537static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
3479{ 3538{
3480 u64 cr4 = ctxt->ops->get_cr(ctxt, 4); 3539 u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
3481 u64 rcx = ctxt->regs[VCPU_REGS_RCX]; 3540 u64 rcx = reg_read(ctxt, VCPU_REGS_RCX);
3482 3541
3483 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || 3542 if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
3484 (rcx > 3)) 3543 (rcx > 3))
@@ -3531,13 +3590,13 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3531 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ 3590 I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \
3532 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) 3591 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3533 3592
3534static struct opcode group7_rm1[] = { 3593static const struct opcode group7_rm1[] = {
3535 DI(SrcNone | Priv, monitor), 3594 DI(SrcNone | Priv, monitor),
3536 DI(SrcNone | Priv, mwait), 3595 DI(SrcNone | Priv, mwait),
3537 N, N, N, N, N, N, 3596 N, N, N, N, N, N,
3538}; 3597};
3539 3598
3540static struct opcode group7_rm3[] = { 3599static const struct opcode group7_rm3[] = {
3541 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa), 3600 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3542 II(SrcNone | Prot | VendorSpecific, em_vmmcall, vmmcall), 3601 II(SrcNone | Prot | VendorSpecific, em_vmmcall, vmmcall),
3543 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa), 3602 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
@@ -3548,13 +3607,13 @@ static struct opcode group7_rm3[] = {
3548 DIP(SrcNone | Prot | Priv, invlpga, check_svme), 3607 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3549}; 3608};
3550 3609
3551static struct opcode group7_rm7[] = { 3610static const struct opcode group7_rm7[] = {
3552 N, 3611 N,
3553 DIP(SrcNone, rdtscp, check_rdtsc), 3612 DIP(SrcNone, rdtscp, check_rdtsc),
3554 N, N, N, N, N, N, 3613 N, N, N, N, N, N,
3555}; 3614};
3556 3615
3557static struct opcode group1[] = { 3616static const struct opcode group1[] = {
3558 I(Lock, em_add), 3617 I(Lock, em_add),
3559 I(Lock | PageTable, em_or), 3618 I(Lock | PageTable, em_or),
3560 I(Lock, em_adc), 3619 I(Lock, em_adc),
@@ -3565,11 +3624,11 @@ static struct opcode group1[] = {
3565 I(0, em_cmp), 3624 I(0, em_cmp),
3566}; 3625};
3567 3626
3568static struct opcode group1A[] = { 3627static const struct opcode group1A[] = {
3569 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N, 3628 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3570}; 3629};
3571 3630
3572static struct opcode group3[] = { 3631static const struct opcode group3[] = {
3573 I(DstMem | SrcImm, em_test), 3632 I(DstMem | SrcImm, em_test),
3574 I(DstMem | SrcImm, em_test), 3633 I(DstMem | SrcImm, em_test),
3575 I(DstMem | SrcNone | Lock, em_not), 3634 I(DstMem | SrcNone | Lock, em_not),
@@ -3580,13 +3639,13 @@ static struct opcode group3[] = {
3580 I(SrcMem, em_idiv_ex), 3639 I(SrcMem, em_idiv_ex),
3581}; 3640};
3582 3641
3583static struct opcode group4[] = { 3642static const struct opcode group4[] = {
3584 I(ByteOp | DstMem | SrcNone | Lock, em_grp45), 3643 I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
3585 I(ByteOp | DstMem | SrcNone | Lock, em_grp45), 3644 I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
3586 N, N, N, N, N, N, 3645 N, N, N, N, N, N,
3587}; 3646};
3588 3647
3589static struct opcode group5[] = { 3648static const struct opcode group5[] = {
3590 I(DstMem | SrcNone | Lock, em_grp45), 3649 I(DstMem | SrcNone | Lock, em_grp45),
3591 I(DstMem | SrcNone | Lock, em_grp45), 3650 I(DstMem | SrcNone | Lock, em_grp45),
3592 I(SrcMem | Stack, em_grp45), 3651 I(SrcMem | Stack, em_grp45),
@@ -3596,7 +3655,7 @@ static struct opcode group5[] = {
3596 I(SrcMem | Stack, em_grp45), N, 3655 I(SrcMem | Stack, em_grp45), N,
3597}; 3656};
3598 3657
3599static struct opcode group6[] = { 3658static const struct opcode group6[] = {
3600 DI(Prot, sldt), 3659 DI(Prot, sldt),
3601 DI(Prot, str), 3660 DI(Prot, str),
3602 II(Prot | Priv | SrcMem16, em_lldt, lldt), 3661 II(Prot | Priv | SrcMem16, em_lldt, lldt),
@@ -3604,7 +3663,7 @@ static struct opcode group6[] = {
3604 N, N, N, N, 3663 N, N, N, N,
3605}; 3664};
3606 3665
3607static struct group_dual group7 = { { 3666static const struct group_dual group7 = { {
3608 II(Mov | DstMem | Priv, em_sgdt, sgdt), 3667 II(Mov | DstMem | Priv, em_sgdt, sgdt),
3609 II(Mov | DstMem | Priv, em_sidt, sidt), 3668 II(Mov | DstMem | Priv, em_sidt, sidt),
3610 II(SrcMem | Priv, em_lgdt, lgdt), 3669 II(SrcMem | Priv, em_lgdt, lgdt),
@@ -3621,7 +3680,7 @@ static struct group_dual group7 = { {
3621 EXT(0, group7_rm7), 3680 EXT(0, group7_rm7),
3622} }; 3681} };
3623 3682
3624static struct opcode group8[] = { 3683static const struct opcode group8[] = {
3625 N, N, N, N, 3684 N, N, N, N,
3626 I(DstMem | SrcImmByte, em_bt), 3685 I(DstMem | SrcImmByte, em_bt),
3627 I(DstMem | SrcImmByte | Lock | PageTable, em_bts), 3686 I(DstMem | SrcImmByte | Lock | PageTable, em_bts),
@@ -3629,26 +3688,26 @@ static struct opcode group8[] = {
3629 I(DstMem | SrcImmByte | Lock | PageTable, em_btc), 3688 I(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3630}; 3689};
3631 3690
3632static struct group_dual group9 = { { 3691static const struct group_dual group9 = { {
3633 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, 3692 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3634}, { 3693}, {
3635 N, N, N, N, N, N, N, N, 3694 N, N, N, N, N, N, N, N,
3636} }; 3695} };
3637 3696
3638static struct opcode group11[] = { 3697static const struct opcode group11[] = {
3639 I(DstMem | SrcImm | Mov | PageTable, em_mov), 3698 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3640 X7(D(Undefined)), 3699 X7(D(Undefined)),
3641}; 3700};
3642 3701
3643static struct gprefix pfx_0f_6f_0f_7f = { 3702static const struct gprefix pfx_0f_6f_0f_7f = {
3644 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), 3703 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3645}; 3704};
3646 3705
3647static struct gprefix pfx_vmovntpx = { 3706static const struct gprefix pfx_vmovntpx = {
3648 I(0, em_mov), N, N, N, 3707 I(0, em_mov), N, N, N,
3649}; 3708};
3650 3709
3651static struct opcode opcode_table[256] = { 3710static const struct opcode opcode_table[256] = {
3652 /* 0x00 - 0x07 */ 3711 /* 0x00 - 0x07 */
3653 I6ALU(Lock, em_add), 3712 I6ALU(Lock, em_add),
3654 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg), 3713 I(ImplicitOps | Stack | No64 | Src2ES, em_push_sreg),
@@ -3689,7 +3748,7 @@ static struct opcode opcode_table[256] = {
3689 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), 3748 I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op),
3690 I(SrcImmByte | Mov | Stack, em_push), 3749 I(SrcImmByte | Mov | Stack, em_push),
3691 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), 3750 I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op),
3692 I2bvIP(DstDI | SrcDX | Mov | String, em_in, ins, check_perm_in), /* insb, insw/insd */ 3751 I2bvIP(DstDI | SrcDX | Mov | String | Unaligned, em_in, ins, check_perm_in), /* insb, insw/insd */
3693 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */ 3752 I2bvIP(SrcSI | DstDX | String, em_out, outs, check_perm_out), /* outsb, outsw/outsd */
3694 /* 0x70 - 0x7F */ 3753 /* 0x70 - 0x7F */
3695 X16(D(SrcImmByte)), 3754 X16(D(SrcImmByte)),
@@ -3765,7 +3824,7 @@ static struct opcode opcode_table[256] = {
3765 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), 3824 D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5),
3766}; 3825};
3767 3826
3768static struct opcode twobyte_table[256] = { 3827static const struct opcode twobyte_table[256] = {
3769 /* 0x00 - 0x0F */ 3828 /* 0x00 - 0x0F */
3770 G(0, group6), GD(0, &group7), N, N, 3829 G(0, group6), GD(0, &group7), N, N,
3771 N, I(ImplicitOps | VendorSpecific, em_syscall), 3830 N, I(ImplicitOps | VendorSpecific, em_syscall),
@@ -3936,7 +3995,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 	case OpAcc:
 		op->type = OP_REG;
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
-		op->addr.reg = &ctxt->regs[VCPU_REGS_RAX];
+		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RAX);
 		fetch_register_operand(op);
 		op->orig_val = op->val;
 		break;
@@ -3944,19 +4003,20 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		op->type = OP_MEM;
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
 		op->addr.mem.ea =
-			register_address(ctxt, ctxt->regs[VCPU_REGS_RDI]);
+			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RDI));
 		op->addr.mem.seg = VCPU_SREG_ES;
 		op->val = 0;
+		op->count = 1;
 		break;
 	case OpDX:
 		op->type = OP_REG;
 		op->bytes = 2;
-		op->addr.reg = &ctxt->regs[VCPU_REGS_RDX];
+		op->addr.reg = reg_rmw(ctxt, VCPU_REGS_RDX);
 		fetch_register_operand(op);
 		break;
 	case OpCL:
 		op->bytes = 1;
-		op->val = ctxt->regs[VCPU_REGS_RCX] & 0xff;
+		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
 		break;
 	case OpImmByte:
 		rc = decode_imm(ctxt, op, 1, true);
@@ -3987,9 +4047,10 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		op->type = OP_MEM;
 		op->bytes = (ctxt->d & ByteOp) ? 1 : ctxt->op_bytes;
 		op->addr.mem.ea =
-			register_address(ctxt, ctxt->regs[VCPU_REGS_RSI]);
+			register_address(ctxt, reg_read(ctxt, VCPU_REGS_RSI));
 		op->addr.mem.seg = seg_override(ctxt);
 		op->val = 0;
+		op->count = 1;
 		break;
 	case OpImmFAddr:
 		op->type = OP_IMM;
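The reg_read()/reg_rmw() calls that replace direct ctxt->regs[] accesses above implement a lazy, dirty-tracked GPR cache: a register is fetched from the vcpu only on first use, and only registers actually written are flushed back. A self-contained model of that pattern; the names here (regcache, backing_regs, NR_REGS) are invented for illustration:

#include <stdio.h>

#define NR_REGS 16

static unsigned long backing_regs[NR_REGS];	/* stands in for vcpu state */

struct regcache {
	unsigned long regs[NR_REGS];
	unsigned int valid;	/* bit n: regs[n] holds a fetched value */
	unsigned int dirty;	/* bit n: regs[n] must be written back */
};

static unsigned long cache_read(struct regcache *c, unsigned nr)
{
	if (!(c->valid & (1u << nr))) {
		c->valid |= 1u << nr;
		c->regs[nr] = backing_regs[nr];	/* expensive fetch, once */
	}
	return c->regs[nr];
}

static unsigned long *cache_rmw(struct regcache *c, unsigned nr)
{
	cache_read(c, nr);	/* make sure the current value is loaded */
	c->dirty |= 1u << nr;	/* mark for writeback */
	return &c->regs[nr];
}

static void cache_writeback(struct regcache *c)
{
	for (unsigned nr = 0; nr < NR_REGS; nr++)
		if (c->dirty & (1u << nr))
			backing_regs[nr] = c->regs[nr];
}

int main(void)
{
	struct regcache c = { { 0 }, 0, 0 };

	backing_regs[1] = 41;
	*cache_rmw(&c, 1) += 1;		/* read-modify-write reg 1 */
	cache_writeback(&c);		/* only reg 1 is written back */
	printf("%lu\n", backing_regs[1]);	/* prints 42 */
	return 0;
}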
@@ -4293,9 +4354,10 @@ static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
 		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
 }
 
+
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
+	const struct x86_emulate_ops *ops = ctxt->ops;
 	int rc = X86EMUL_CONTINUE;
 	int saved_dst_type = ctxt->dst.type;
 
@@ -4356,7 +4418,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	/* Instruction can only be executed in protected mode */
-	if ((ctxt->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) {
+	if ((ctxt->d & Prot) && ctxt->mode < X86EMUL_MODE_PROT16) {
 		rc = emulate_ud(ctxt);
 		goto done;
 	}
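The rewritten protected-mode test relies on the emulator mode constants forming an ordered enum, with real and vm86 mode sorting below every protected-mode value. A sketch under that assumption; the constants below are illustrative stand-ins, not taken from this diff:

enum mode_sketch {
	MODE_REAL,	/* real mode */
	MODE_VM86,	/* virtual 8086 mode */
	MODE_PROT16,	/* 16-bit protected mode */
	MODE_PROT32,	/* 32-bit protected mode */
	MODE_PROT64,	/* 64-bit (long) mode */
};

static int is_protected(enum mode_sketch m)
{
	return m >= MODE_PROT16;	/* everything from PROT16 upward */
}

int main(void)
{
	/* exits 0 when the ordering behaves as the test expects */
	return !(is_protected(MODE_PROT32) && !is_protected(MODE_VM86));
}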
@@ -4377,7 +4439,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
 	if (ctxt->rep_prefix && (ctxt->d & String)) {
 		/* All REP prefixes have the same first termination condition */
-		if (address_mask(ctxt, ctxt->regs[VCPU_REGS_RCX]) == 0) {
+		if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) {
 			ctxt->eip = ctxt->_eip;
 			goto done;
 		}
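The termination test masks the count register to the effective address size, so with 16-bit addressing only CX (not all of RCX) decides whether a REP string instruction stops immediately. An illustrative standalone sketch of that masking, with an invented name:

#include <stdio.h>

static unsigned long long address_mask_sketch(int ad_bytes,
					      unsigned long long v)
{
	if (ad_bytes == 8)
		return v;	/* full 64-bit count */
	return v & ((1ULL << (ad_bytes * 8)) - 1);
}

int main(void)
{
	unsigned long long rcx = 0xdead0000ULL;	/* low 16 bits are zero */

	/* 2-byte addressing: masked count is 0, so terminate at once */
	printf("%llx\n", address_mask_sketch(2, rcx));	/* 0 */
	printf("%llx\n", address_mask_sketch(4, rcx));	/* dead0000 */
	return 0;
}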
@@ -4450,7 +4512,7 @@ special_insn:
 		ctxt->dst.val = ctxt->src.addr.mem.ea;
 		break;
 	case 0x90 ... 0x97: /* nop / xchg reg, rax */
-		if (ctxt->dst.addr.reg == &ctxt->regs[VCPU_REGS_RAX])
+		if (ctxt->dst.addr.reg == reg_rmw(ctxt, VCPU_REGS_RAX))
 			break;
 		rc = em_xchg(ctxt);
 		break;
@@ -4478,7 +4540,7 @@ special_insn:
 		rc = em_grp2(ctxt);
 		break;
 	case 0xd2 ... 0xd3: /* Grp2 */
-		ctxt->src.val = ctxt->regs[VCPU_REGS_RCX];
+		ctxt->src.val = reg_read(ctxt, VCPU_REGS_RCX);
 		rc = em_grp2(ctxt);
 		break;
 	case 0xe9: /* jmp rel */
@@ -4524,23 +4586,27 @@ writeback:
 	ctxt->dst.type = saved_dst_type;
 
 	if ((ctxt->d & SrcMask) == SrcSI)
-		string_addr_inc(ctxt, seg_override(ctxt),
-				VCPU_REGS_RSI, &ctxt->src);
+		string_addr_inc(ctxt, VCPU_REGS_RSI, &ctxt->src);
 
 	if ((ctxt->d & DstMask) == DstDI)
-		string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI,
-				&ctxt->dst);
+		string_addr_inc(ctxt, VCPU_REGS_RDI, &ctxt->dst);
 
 	if (ctxt->rep_prefix && (ctxt->d & String)) {
+		unsigned int count;
 		struct read_cache *r = &ctxt->io_read;
-		register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RCX], -1);
+		if ((ctxt->d & SrcMask) == SrcSI)
+			count = ctxt->src.count;
+		else
+			count = ctxt->dst.count;
+		register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX),
+				-count);
 
 		if (!string_insn_completed(ctxt)) {
 			/*
 			 * Re-enter guest when pio read ahead buffer is empty
 			 * or, if it is not used, after each 1024 iteration.
 			 */
-			if ((r->end != 0 || ctxt->regs[VCPU_REGS_RCX] & 0x3ff) &&
+			if ((r->end != 0 || reg_read(ctxt, VCPU_REGS_RCX) & 0x3ff) &&
 			    (r->end == 0 || r->end != r->pos)) {
 				/*
 				 * Reset read cache. Usually happens before
@@ -4548,6 +4614,7 @@ writeback:
 				 * we have to do it here.
 				 */
 				ctxt->mem_read.end = 0;
+				writeback_registers(ctxt);
 				return EMULATION_RESTART;
 			}
 			goto done; /* skip rip writeback */
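With read-ahead, a single emulator pass can complete several string iterations, so the hunk above decrements RCX by the per-operand count rather than by 1, using a masked increment so only the address-size-wide part of the register changes. A sketch of that masked update; the function name is invented for illustration:

#include <stdio.h>

static void masked_increment_sketch(unsigned long long *reg,
				    unsigned long long mask, long long inc)
{
	/* only the masked (address-size-wide) slice of the register moves */
	*reg = (*reg & ~mask) | ((*reg + inc) & mask);
}

int main(void)
{
	unsigned long long rcx = 0xffff0004ULL;
	unsigned int count = 4;	/* iterations completed this pass */

	/* 16-bit address size: CX drops by count, upper bits are kept */
	masked_increment_sketch(&rcx, 0xffffULL, -(long long)count);
	printf("%llx\n", rcx);	/* prints ffff0000 */
	return 0;
}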
@@ -4562,6 +4629,9 @@ done:
 	if (rc == X86EMUL_INTERCEPTED)
 		return EMULATION_INTERCEPTED;
 
+	if (rc == X86EMUL_CONTINUE)
+		writeback_registers(ctxt);
+
 	return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK;
 
 twobyte_insn:
@@ -4634,3 +4704,13 @@ twobyte_insn:
 cannot_emulate:
 	return EMULATION_FAILED;
 }
+
+void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt)
+{
+	invalidate_registers(ctxt);
+}
+
+void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt)
+{
+	writeback_registers(ctxt);
+}
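The two exported wrappers added above give code outside the emulator explicit control over the GPR cache. Their real call sites are not part of this diff, so the following is only a plausible usage shape, with hypothetical caller names and forward declarations so the fragment compiles standalone:

/* forward declarations standing in for the emulator's header */
struct x86_emulate_ctxt;
void emulator_invalidate_register_cache(struct x86_emulate_ctxt *ctxt);
void emulator_writeback_register_cache(struct x86_emulate_ctxt *ctxt);

/* hypothetical caller, before handing the context to the emulator */
static void example_begin_emulation(struct x86_emulate_ctxt *ctxt)
{
	/* drop any values cached by a previous emulation */
	emulator_invalidate_register_cache(ctxt);
}

/* hypothetical caller, before other code reads vcpu GPRs directly */
static void example_sync_registers(struct x86_emulate_ctxt *ctxt)
{
	/* flush dirty cached GPRs back through ->write_gpr() */
	emulator_writeback_register_cache(ctxt);
}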