Diffstat (limited to 'arch/x86/kvm/emulate.c')
 -rw-r--r--  arch/x86/kvm/emulate.c | 749
 1 file changed, 422 insertions, 327 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 5ac0bb465ed6..b38bd8b92aa6 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -9,6 +9,7 @@
  * privileged instructions:
  *
  * Copyright (C) 2006 Qumranet
+ * Copyright 2010 Red Hat, Inc. and/or its affilates.
  *
  * Avi Kivity <avi@qumranet.com>
  * Yaniv Kamay <yaniv@qumranet.com>
@@ -67,6 +68,9 @@
 #define SrcImmUByte (8<<4)      /* 8-bit unsigned immediate operand. */
 #define SrcImmU     (9<<4)      /* Immediate operand, unsigned */
 #define SrcSI       (0xa<<4)    /* Source is in the DS:RSI */
+#define SrcImmFAddr (0xb<<4)    /* Source is immediate far address */
+#define SrcMemFAddr (0xc<<4)    /* Source is far address in memory */
+#define SrcAcc      (0xd<<4)    /* Source Accumulator */
 #define SrcMask     (0xf<<4)
 /* Generic ModRM decode. */
 #define ModRM       (1<<8)
@@ -88,10 +92,6 @@
 #define Src2CL      (1<<29)
 #define Src2ImmByte (2<<29)
 #define Src2One     (3<<29)
-#define Src2Imm16   (4<<29)
-#define Src2Mem16   (5<<29)     /* Used for Ep encoding. First argument has to be
-                                   in memory and second argument is located
-                                   immediately after the first one in memory. */
 #define Src2Mask    (7<<29)
 
 enum {
@@ -124,15 +124,15 @@ static u32 opcode_table[256] = {
        /* 0x20 - 0x27 */
        ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
        ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-       DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
+       ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
        /* 0x28 - 0x2F */
        ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
        ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-       0, 0, 0, 0,
+       ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
        /* 0x30 - 0x37 */
        ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock,
        ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
-       0, 0, 0, 0,
+       ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0,
        /* 0x38 - 0x3F */
        ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM,
        ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM,
@@ -170,20 +170,20 @@ static u32 opcode_table[256] = {
        /* 0x88 - 0x8F */
        ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov,
        ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov,
-       DstMem | SrcReg | ModRM | Mov, ModRM | DstReg,
-       DstReg | SrcMem | ModRM | Mov, Group | Group1A,
+       DstMem | SrcNone | ModRM | Mov, ModRM | DstReg,
+       ImplicitOps | SrcMem16 | ModRM, Group | Group1A,
        /* 0x90 - 0x97 */
        DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg,
        /* 0x98 - 0x9F */
-       0, 0, SrcImm | Src2Imm16 | No64, 0,
+       0, 0, SrcImmFAddr | No64, 0,
        ImplicitOps | Stack, ImplicitOps | Stack, 0, 0,
        /* 0xA0 - 0xA7 */
-       ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
-       ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
+       ByteOp | DstAcc | SrcMem | Mov | MemAbs, DstAcc | SrcMem | Mov | MemAbs,
+       ByteOp | DstMem | SrcAcc | Mov | MemAbs, DstMem | SrcAcc | Mov | MemAbs,
        ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
        ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
        /* 0xA8 - 0xAF */
-       0, 0, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
+       DstAcc | SrcImmByte | ByteOp, DstAcc | SrcImm, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
        ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
        ByteOp | DstDI | String, DstDI | String,
        /* 0xB0 - 0xB7 */
@@ -215,7 +215,7 @@ static u32 opcode_table[256] = {
        ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
        /* 0xE8 - 0xEF */
        SrcImm | Stack, SrcImm | ImplicitOps,
-       SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
+       SrcImmFAddr | No64, SrcImmByte | ImplicitOps,
        SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
        SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
        /* 0xF0 - 0xF7 */
@@ -337,20 +337,20 @@ static u32 group_table[] = {
        [Group1A*8] =
        DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0,
        [Group3_Byte*8] =
-       ByteOp | SrcImm | DstMem | ModRM, 0,
+       ByteOp | SrcImm | DstMem | ModRM, ByteOp | SrcImm | DstMem | ModRM,
        ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
        0, 0, 0, 0,
        [Group3*8] =
-       DstMem | SrcImm | ModRM, 0,
+       DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM,
        DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
        0, 0, 0, 0,
        [Group4*8] =
-       ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM,
+       ByteOp | DstMem | SrcNone | ModRM | Lock, ByteOp | DstMem | SrcNone | ModRM | Lock,
        0, 0, 0, 0, 0, 0,
        [Group5*8] =
-       DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
+       DstMem | SrcNone | ModRM | Lock, DstMem | SrcNone | ModRM | Lock,
        SrcMem | ModRM | Stack, 0,
-       SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
+       SrcMem | ModRM | Stack, SrcMemFAddr | ModRM | ImplicitOps,
        SrcMem | ModRM | Stack, 0,
        [Group7*8] =
        0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
@@ -576,6 +576,13 @@ static u32 group2_table[] = {
        (_type)_x;                                                      \
 })
 
+#define insn_fetch_arr(_arr, _size, _eip)                               \
+({     rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size));            \
+       if (rc != X86EMUL_CONTINUE)                                     \
+               goto done;                                              \
+       (_eip) += (_size);                                              \
+})
+
 static inline unsigned long ad_mask(struct decode_cache *c)
 {
        return (1UL << (c->ad_bytes << 3)) - 1;
@@ -617,31 +624,66 @@ static void set_seg_override(struct decode_cache *c, int seg)
        c->seg_override = seg;
 }
 
-static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
+static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
+                             struct x86_emulate_ops *ops, int seg)
 {
        if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
                return 0;
 
-       return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg);
+       return ops->get_cached_segment_base(seg, ctxt->vcpu);
 }
 
 static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
+                                      struct x86_emulate_ops *ops,
                                       struct decode_cache *c)
 {
        if (!c->has_seg_override)
                return 0;
 
-       return seg_base(ctxt, c->seg_override);
+       return seg_base(ctxt, ops, c->seg_override);
+}
+
+static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
+                            struct x86_emulate_ops *ops)
+{
+       return seg_base(ctxt, ops, VCPU_SREG_ES);
+}
+
+static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
+                            struct x86_emulate_ops *ops)
+{
+       return seg_base(ctxt, ops, VCPU_SREG_SS);
+}
+
+static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
+                             u32 error, bool valid)
+{
+       ctxt->exception = vec;
+       ctxt->error_code = error;
+       ctxt->error_code_valid = valid;
+       ctxt->restart = false;
+}
+
+static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
+{
+       emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
-static unsigned long es_base(struct x86_emulate_ctxt *ctxt)
+static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
+                      int err)
 {
-       return seg_base(ctxt, VCPU_SREG_ES);
+       ctxt->cr2 = addr;
+       emulate_exception(ctxt, PF_VECTOR, err, true);
 }
 
-static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
+static void emulate_ud(struct x86_emulate_ctxt *ctxt)
 {
-       return seg_base(ctxt, VCPU_SREG_SS);
+       emulate_exception(ctxt, UD_VECTOR, 0, false);
+}
+
+static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
+{
+       emulate_exception(ctxt, TS_VECTOR, err, true);
 }
 
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
@@ -932,12 +974,9 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
        /* we cannot decode insn before we complete previous rep insn */
        WARN_ON(ctxt->restart);
 
-       /* Shadow copy of register state. Committed on successful emulation. */
-       memset(c, 0, sizeof(struct decode_cache));
        c->eip = ctxt->eip;
        c->fetch.start = c->fetch.end = c->eip;
-       ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
-       memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
+       ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
 
        switch (mode) {
        case X86EMUL_MODE_REAL:
@@ -1060,7 +1099,7 @@ done_prefixes:
                set_seg_override(c, VCPU_SREG_DS);
 
        if (!(!c->twobyte && c->b == 0x8d))
-               c->modrm_ea += seg_override_base(ctxt, c);
+               c->modrm_ea += seg_override_base(ctxt, ops, c);
 
        if (c->ad_bytes != 8)
                c->modrm_ea = (u32)c->modrm_ea;
@@ -1148,6 +1187,25 @@ done_prefixes:
                else
                        c->src.val = insn_fetch(u8, 1, c->eip);
                break;
+       case SrcAcc:
+               c->src.type = OP_REG;
+               c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+               c->src.ptr = &c->regs[VCPU_REGS_RAX];
+               switch (c->src.bytes) {
+                       case 1:
+                               c->src.val = *(u8 *)c->src.ptr;
+                               break;
+                       case 2:
+                               c->src.val = *(u16 *)c->src.ptr;
+                               break;
+                       case 4:
+                               c->src.val = *(u32 *)c->src.ptr;
+                               break;
+                       case 8:
+                               c->src.val = *(u64 *)c->src.ptr;
+                               break;
+               }
+               break;
        case SrcOne:
                c->src.bytes = 1;
                c->src.val = 1;
@@ -1156,10 +1214,21 @@ done_prefixes:
                c->src.type = OP_MEM;
                c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
                c->src.ptr = (unsigned long *)
-                       register_address(c, seg_override_base(ctxt, c),
+                       register_address(c, seg_override_base(ctxt, ops, c),
                                         c->regs[VCPU_REGS_RSI]);
                c->src.val = 0;
                break;
+       case SrcImmFAddr:
+               c->src.type = OP_IMM;
+               c->src.ptr = (unsigned long *)c->eip;
+               c->src.bytes = c->op_bytes + 2;
+               insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip);
+               break;
+       case SrcMemFAddr:
+               c->src.type = OP_MEM;
+               c->src.ptr = (unsigned long *)c->modrm_ea;
+               c->src.bytes = c->op_bytes + 2;
+               break;
        }
 
        /*
@@ -1179,22 +1248,10 @@ done_prefixes:
                c->src2.bytes = 1;
                c->src2.val = insn_fetch(u8, 1, c->eip);
                break;
-       case Src2Imm16:
-               c->src2.type = OP_IMM;
-               c->src2.ptr = (unsigned long *)c->eip;
-               c->src2.bytes = 2;
-               c->src2.val = insn_fetch(u16, 2, c->eip);
-               break;
        case Src2One:
                c->src2.bytes = 1;
                c->src2.val = 1;
                break;
-       case Src2Mem16:
-               c->src2.type = OP_MEM;
-               c->src2.bytes = 2;
-               c->src2.ptr = (unsigned long *)(c->modrm_ea + c->src.bytes);
-               c->src2.val = 0;
-               break;
        }
 
        /* Decode and fetch the destination operand: register or memory. */
@@ -1253,7 +1310,7 @@ done_prefixes:
                c->dst.type = OP_MEM;
                c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
                c->dst.ptr = (unsigned long *)
-                       register_address(c, es_base(ctxt),
+                       register_address(c, es_base(ctxt, ops),
                                         c->regs[VCPU_REGS_RDI]);
                c->dst.val = 0;
                break;
@@ -1263,6 +1320,37 @@ done:
        return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
 }
 
+static int read_emulated(struct x86_emulate_ctxt *ctxt,
+                        struct x86_emulate_ops *ops,
+                        unsigned long addr, void *dest, unsigned size)
+{
+       int rc;
+       struct read_cache *mc = &ctxt->decode.mem_read;
+       u32 err;
+
+       while (size) {
+               int n = min(size, 8u);
+               size -= n;
+               if (mc->pos < mc->end)
+                       goto read_cached;
+
+               rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
+                                       ctxt->vcpu);
+               if (rc == X86EMUL_PROPAGATE_FAULT)
+                       emulate_pf(ctxt, addr, err);
+               if (rc != X86EMUL_CONTINUE)
+                       return rc;
+               mc->end += n;
+
+       read_cached:
+               memcpy(dest, mc->data + mc->pos, n);
+               mc->pos += n;
+               dest += n;
+               addr += n;
+       }
+       return X86EMUL_CONTINUE;
+}
+
 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
                           struct x86_emulate_ops *ops,
                           unsigned int size, unsigned short port,
@@ -1330,13 +1418,13 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
        if (dt.size < index * 8 + 7) {
-               kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+               emulate_gp(ctxt, selector & 0xfffc);
                return X86EMUL_PROPAGATE_FAULT;
        }
        addr = dt.address + index * 8;
        ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
        if (ret == X86EMUL_PROPAGATE_FAULT)
-               kvm_inject_page_fault(ctxt->vcpu, addr, err);
+               emulate_pf(ctxt, addr, err);
 
        return ret;
 }
@@ -1355,14 +1443,14 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
        get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
        if (dt.size < index * 8 + 7) {
-               kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+               emulate_gp(ctxt, selector & 0xfffc);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
        addr = dt.address + index * 8;
        ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
        if (ret == X86EMUL_PROPAGATE_FAULT)
-               kvm_inject_page_fault(ctxt->vcpu, addr, err);
+               emulate_pf(ctxt, addr, err);
 
        return ret;
 }
@@ -1481,11 +1569,70 @@ load:
        ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
        return X86EMUL_CONTINUE;
 exception:
-       kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
+       emulate_exception(ctxt, err_vec, err_code, true);
        return X86EMUL_PROPAGATE_FAULT;
 }
 
-static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
+static inline int writeback(struct x86_emulate_ctxt *ctxt,
+                           struct x86_emulate_ops *ops)
+{
+       int rc;
+       struct decode_cache *c = &ctxt->decode;
+       u32 err;
+
+       switch (c->dst.type) {
+       case OP_REG:
+               /* The 4-byte case *is* correct:
+                * in 64-bit mode we zero-extend.
+                */
+               switch (c->dst.bytes) {
+               case 1:
+                       *(u8 *)c->dst.ptr = (u8)c->dst.val;
+                       break;
+               case 2:
+                       *(u16 *)c->dst.ptr = (u16)c->dst.val;
+                       break;
+               case 4:
+                       *c->dst.ptr = (u32)c->dst.val;
+                       break;  /* 64b: zero-ext */
+               case 8:
+                       *c->dst.ptr = c->dst.val;
+                       break;
+               }
+               break;
+       case OP_MEM:
+               if (c->lock_prefix)
+                       rc = ops->cmpxchg_emulated(
+                                       (unsigned long)c->dst.ptr,
+                                       &c->dst.orig_val,
+                                       &c->dst.val,
+                                       c->dst.bytes,
+                                       &err,
+                                       ctxt->vcpu);
+               else
+                       rc = ops->write_emulated(
+                                       (unsigned long)c->dst.ptr,
+                                       &c->dst.val,
+                                       c->dst.bytes,
+                                       &err,
+                                       ctxt->vcpu);
+               if (rc == X86EMUL_PROPAGATE_FAULT)
+                       emulate_pf(ctxt,
+                                  (unsigned long)c->dst.ptr, err);
+               if (rc != X86EMUL_CONTINUE)
+                       return rc;
+               break;
+       case OP_NONE:
+               /* no writeback */
+               break;
+       default:
+               break;
+       }
+       return X86EMUL_CONTINUE;
+}
+
+static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
+                               struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
 
@@ -1493,7 +1640,7 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
        c->dst.bytes = c->op_bytes;
        c->dst.val = c->src.val;
        register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
-       c->dst.ptr = (void *) register_address(c, ss_base(ctxt),
+       c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops),
                                               c->regs[VCPU_REGS_RSP]);
 }
 
@@ -1504,9 +1651,9 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
        struct decode_cache *c = &ctxt->decode;
        int rc;
 
-       rc = ops->read_emulated(register_address(c, ss_base(ctxt),
+       rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
                                                 c->regs[VCPU_REGS_RSP]),
-                               dest, len, ctxt->vcpu);
+                          dest, len);
        if (rc != X86EMUL_CONTINUE)
                return rc;
 
@@ -1541,7 +1688,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
                break;
        case X86EMUL_MODE_VM86:
                if (iopl < 3) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
+                       emulate_gp(ctxt, 0);
                        return X86EMUL_PROPAGATE_FAULT;
                }
                change_mask |= EFLG_IF;
@@ -1557,15 +1704,14 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
        return rc;
 }
 
-static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
+static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
+                             struct x86_emulate_ops *ops, int seg)
 {
        struct decode_cache *c = &ctxt->decode;
-       struct kvm_segment segment;
 
-       kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);
+       c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
 
-       c->src.val = segment.selector;
-       emulate_push(ctxt);
+       emulate_push(ctxt, ops);
 }
 
 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
@@ -1583,19 +1729,31 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
        return rc;
 }
 
-static void emulate_pusha(struct x86_emulate_ctxt *ctxt)
+static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
+                        struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
        unsigned long old_esp = c->regs[VCPU_REGS_RSP];
+       int rc = X86EMUL_CONTINUE;
        int reg = VCPU_REGS_RAX;
 
        while (reg <= VCPU_REGS_RDI) {
                (reg == VCPU_REGS_RSP) ?
                (c->src.val = old_esp) : (c->src.val = c->regs[reg]);
 
-               emulate_push(ctxt);
+               emulate_push(ctxt, ops);
+
+               rc = writeback(ctxt, ops);
+               if (rc != X86EMUL_CONTINUE)
+                       return rc;
+
                ++reg;
        }
+
+       /* Disable writeback. */
+       c->dst.type = OP_NONE;
+
+       return rc;
 }
 
 static int emulate_popa(struct x86_emulate_ctxt *ctxt,
@@ -1695,14 +1853,14 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
                old_eip = c->eip;
                c->eip = c->src.val;
                c->src.val = old_eip;
-               emulate_push(ctxt);
+               emulate_push(ctxt, ops);
                break;
        }
        case 4: /* jmp abs */
                c->eip = c->src.val;
                break;
        case 6: /* push */
-               emulate_push(ctxt);
+               emulate_push(ctxt, ops);
                break;
        }
        return X86EMUL_CONTINUE;
@@ -1748,145 +1906,82 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
        return rc;
 }
 
-static inline int writeback(struct x86_emulate_ctxt *ctxt,
-                           struct x86_emulate_ops *ops)
-{
-       int rc;
-       struct decode_cache *c = &ctxt->decode;
-
-       switch (c->dst.type) {
-       case OP_REG:
-               /* The 4-byte case *is* correct:
-                * in 64-bit mode we zero-extend.
-                */
-               switch (c->dst.bytes) {
-               case 1:
-                       *(u8 *)c->dst.ptr = (u8)c->dst.val;
-                       break;
-               case 2:
-                       *(u16 *)c->dst.ptr = (u16)c->dst.val;
-                       break;
-               case 4:
-                       *c->dst.ptr = (u32)c->dst.val;
-                       break;  /* 64b: zero-ext */
-               case 8:
-                       *c->dst.ptr = c->dst.val;
-                       break;
-               }
-               break;
-       case OP_MEM:
-               if (c->lock_prefix)
-                       rc = ops->cmpxchg_emulated(
-                                       (unsigned long)c->dst.ptr,
-                                       &c->dst.orig_val,
-                                       &c->dst.val,
-                                       c->dst.bytes,
-                                       ctxt->vcpu);
-               else
-                       rc = ops->write_emulated(
-                                       (unsigned long)c->dst.ptr,
-                                       &c->dst.val,
-                                       c->dst.bytes,
-                                       ctxt->vcpu);
-               if (rc != X86EMUL_CONTINUE)
-                       return rc;
-               break;
-       case OP_NONE:
-               /* no writeback */
-               break;
-       default:
-               break;
-       }
-       return X86EMUL_CONTINUE;
-}
-
-static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
-{
-       u32 int_shadow = kvm_x86_ops->get_interrupt_shadow(ctxt->vcpu, mask);
-       /*
-        * an sti; sti; sequence only disable interrupts for the first
-        * instruction. So, if the last instruction, be it emulated or
-        * not, left the system with the INT_STI flag enabled, it
-        * means that the last instruction is an sti. We should not
-        * leave the flag on in this case. The same goes for mov ss
-        */
-       if (!(int_shadow & mask))
-               ctxt->interruptibility = mask;
-}
-
 static inline void
 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
-                       struct kvm_segment *cs, struct kvm_segment *ss)
+                       struct x86_emulate_ops *ops, struct desc_struct *cs,
+                       struct desc_struct *ss)
 {
-       memset(cs, 0, sizeof(struct kvm_segment));
-       kvm_x86_ops->get_segment(ctxt->vcpu, cs, VCPU_SREG_CS);
-       memset(ss, 0, sizeof(struct kvm_segment));
+       memset(cs, 0, sizeof(struct desc_struct));
+       ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
+       memset(ss, 0, sizeof(struct desc_struct));
 
        cs->l = 0;              /* will be adjusted later */
-       cs->base = 0;           /* flat segment */
+       set_desc_base(cs, 0);   /* flat segment */
        cs->g = 1;              /* 4kb granularity */
-       cs->limit = 0xffffffff; /* 4GB limit */
+       set_desc_limit(cs, 0xfffff);    /* 4GB limit */
        cs->type = 0x0b;        /* Read, Execute, Accessed */
        cs->s = 1;
        cs->dpl = 0;            /* will be adjusted later */
-       cs->present = 1;
-       cs->db = 1;
+       cs->p = 1;
+       cs->d = 1;
 
-       ss->unusable = 0;
-       ss->base = 0;           /* flat segment */
-       ss->limit = 0xffffffff; /* 4GB limit */
+       set_desc_base(ss, 0);   /* flat segment */
+       set_desc_limit(ss, 0xfffff);    /* 4GB limit */
        ss->g = 1;              /* 4kb granularity */
        ss->s = 1;
        ss->type = 0x03;        /* Read/Write, Accessed */
-       ss->db = 1;             /* 32bit stack segment */
+       ss->d = 1;              /* 32bit stack segment */
        ss->dpl = 0;
-       ss->present = 1;
+       ss->p = 1;
 }
 
 static int
-emulate_syscall(struct x86_emulate_ctxt *ctxt)
+emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
-       struct kvm_segment cs, ss;
+       struct desc_struct cs, ss;
        u64 msr_data;
+       u16 cs_sel, ss_sel;
 
        /* syscall is not available in real mode */
        if (ctxt->mode == X86EMUL_MODE_REAL ||
            ctxt->mode == X86EMUL_MODE_VM86) {
-               kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+               emulate_ud(ctxt);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
-       setup_syscalls_segments(ctxt, &cs, &ss);
+       setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
-       kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+       ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
        msr_data >>= 32;
-       cs.selector = (u16)(msr_data & 0xfffc);
-       ss.selector = (u16)(msr_data + 8);
+       cs_sel = (u16)(msr_data & 0xfffc);
+       ss_sel = (u16)(msr_data + 8);
 
        if (is_long_mode(ctxt->vcpu)) {
-               cs.db = 0;
+               cs.d = 0;
                cs.l = 1;
        }
-       kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
-       kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+       ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+       ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
+       ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+       ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
        c->regs[VCPU_REGS_RCX] = c->eip;
        if (is_long_mode(ctxt->vcpu)) {
 #ifdef CONFIG_X86_64
                c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
 
-               kvm_x86_ops->get_msr(ctxt->vcpu,
+               ops->get_msr(ctxt->vcpu,
                             ctxt->mode == X86EMUL_MODE_PROT64 ?
                             MSR_LSTAR : MSR_CSTAR, &msr_data);
                c->eip = msr_data;
 
-               kvm_x86_ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
+               ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
                ctxt->eflags &= ~(msr_data | EFLG_RF);
 #endif
        } else {
                /* legacy mode */
-               kvm_x86_ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+               ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
                c->eip = (u32)msr_data;
 
                ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
@@ -1896,15 +1991,16 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
 }
 
 static int
-emulate_sysenter(struct x86_emulate_ctxt *ctxt)
+emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
-       struct kvm_segment cs, ss;
+       struct desc_struct cs, ss;
        u64 msr_data;
+       u16 cs_sel, ss_sel;
 
        /* inject #GP if in real mode */
        if (ctxt->mode == X86EMUL_MODE_REAL) {
-               kvm_inject_gp(ctxt->vcpu, 0);
+               emulate_gp(ctxt, 0);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
@@ -1912,67 +2008,70 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
         * Therefore, we inject an #UD.
         */
        if (ctxt->mode == X86EMUL_MODE_PROT64) {
-               kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+               emulate_ud(ctxt);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
-       setup_syscalls_segments(ctxt, &cs, &ss);
+       setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
-       kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+       ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
        switch (ctxt->mode) {
        case X86EMUL_MODE_PROT32:
                if ((msr_data & 0xfffc) == 0x0) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
+                       emulate_gp(ctxt, 0);
                        return X86EMUL_PROPAGATE_FAULT;
                }
                break;
        case X86EMUL_MODE_PROT64:
                if (msr_data == 0x0) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
+                       emulate_gp(ctxt, 0);
                        return X86EMUL_PROPAGATE_FAULT;
                }
                break;
        }
 
        ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
-       cs.selector = (u16)msr_data;
-       cs.selector &= ~SELECTOR_RPL_MASK;
-       ss.selector = cs.selector + 8;
-       ss.selector &= ~SELECTOR_RPL_MASK;
+       cs_sel = (u16)msr_data;
+       cs_sel &= ~SELECTOR_RPL_MASK;
+       ss_sel = cs_sel + 8;
+       ss_sel &= ~SELECTOR_RPL_MASK;
        if (ctxt->mode == X86EMUL_MODE_PROT64
                || is_long_mode(ctxt->vcpu)) {
-               cs.db = 0;
+               cs.d = 0;
                cs.l = 1;
        }
 
-       kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
-       kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+       ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+       ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
+       ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+       ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
-       kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
+       ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
        c->eip = msr_data;
 
-       kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
+       ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
        c->regs[VCPU_REGS_RSP] = msr_data;
 
        return X86EMUL_CONTINUE;
 }
 
 static int
-emulate_sysexit(struct x86_emulate_ctxt *ctxt)
+emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
        struct decode_cache *c = &ctxt->decode;
-       struct kvm_segment cs, ss;
+       struct desc_struct cs, ss;
        u64 msr_data;
        int usermode;
+       u16 cs_sel, ss_sel;
 
        /* inject #GP if in real mode or Virtual 8086 mode */
        if (ctxt->mode == X86EMUL_MODE_REAL ||
            ctxt->mode == X86EMUL_MODE_VM86) {
-               kvm_inject_gp(ctxt->vcpu, 0);
+               emulate_gp(ctxt, 0);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
-       setup_syscalls_segments(ctxt, &cs, &ss);
+       setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
        if ((c->rex_prefix & 0x8) != 0x0)
                usermode = X86EMUL_MODE_PROT64;
@@ -1981,35 +2080,37 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
 
        cs.dpl = 3;
        ss.dpl = 3;
-       kvm_x86_ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+       ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
        switch (usermode) {
        case X86EMUL_MODE_PROT32:
-               cs.selector = (u16)(msr_data + 16);
+               cs_sel = (u16)(msr_data + 16);
                if ((msr_data & 0xfffc) == 0x0) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
+                       emulate_gp(ctxt, 0);
                        return X86EMUL_PROPAGATE_FAULT;
                }
-               ss.selector = (u16)(msr_data + 24);
+               ss_sel = (u16)(msr_data + 24);
                break;
        case X86EMUL_MODE_PROT64:
-               cs.selector = (u16)(msr_data + 32);
+               cs_sel = (u16)(msr_data + 32);
                if (msr_data == 0x0) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
+                       emulate_gp(ctxt, 0);
                        return X86EMUL_PROPAGATE_FAULT;
                }
-               ss.selector = cs.selector + 8;
-               cs.db = 0;
+               ss_sel = cs_sel + 8;
+               cs.d = 0;
                cs.l = 1;
                break;
        }
-       cs.selector |= SELECTOR_RPL_MASK;
-       ss.selector |= SELECTOR_RPL_MASK;
+       cs_sel |= SELECTOR_RPL_MASK;
+       ss_sel |= SELECTOR_RPL_MASK;
 
-       kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
-       kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+       ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+       ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
+       ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+       ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
-       c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
-       c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];
+       c->eip = c->regs[VCPU_REGS_RDX];
+       c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
 
        return X86EMUL_CONTINUE;
 }
@@ -2030,25 +2131,25 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
                                            struct x86_emulate_ops *ops,
                                            u16 port, u16 len)
 {
-       struct kvm_segment tr_seg;
+       struct desc_struct tr_seg;
        int r;
        u16 io_bitmap_ptr;
        u8 perm, bit_idx = port & 0x7;
        unsigned mask = (1 << len) - 1;
 
-       kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
-       if (tr_seg.unusable)
+       ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
+       if (!tr_seg.p)
                return false;
-       if (tr_seg.limit < 103)
+       if (desc_limit_scaled(&tr_seg) < 103)
                return false;
-       r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
-                         NULL);
+       r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
+                         ctxt->vcpu, NULL);
        if (r != X86EMUL_CONTINUE)
                return false;
-       if (io_bitmap_ptr + port/8 > tr_seg.limit)
+       if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
                return false;
-       r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
-                         ctxt->vcpu, NULL);
+       r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
+                         &perm, 1, ctxt->vcpu, NULL);
        if (r != X86EMUL_CONTINUE)
                return false;
        if ((perm >> bit_idx) & mask)
@@ -2066,17 +2167,6 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
        return true;
 }
 
-static u32 get_cached_descriptor_base(struct x86_emulate_ctxt *ctxt,
-                                     struct x86_emulate_ops *ops,
-                                     int seg)
-{
-       struct desc_struct desc;
-       if (ops->get_cached_descriptor(&desc, seg, ctxt->vcpu))
-               return get_desc_base(&desc);
-       else
-               return ~0;
-}
-
 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
                                struct x86_emulate_ops *ops,
                                struct tss_segment_16 *tss)
@@ -2165,7 +2255,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                            &err);
        if (ret == X86EMUL_PROPAGATE_FAULT) {
                /* FIXME: need to provide precise fault address */
-               kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+               emulate_pf(ctxt, old_tss_base, err);
                return ret;
        }
 
@@ -2175,7 +2265,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                             &err);
        if (ret == X86EMUL_PROPAGATE_FAULT) {
                /* FIXME: need to provide precise fault address */
-               kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+               emulate_pf(ctxt, old_tss_base, err);
                return ret;
        }
 
@@ -2183,7 +2273,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                            &err);
        if (ret == X86EMUL_PROPAGATE_FAULT) {
                /* FIXME: need to provide precise fault address */
-               kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+               emulate_pf(ctxt, new_tss_base, err);
                return ret;
        }
 
@@ -2196,7 +2286,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
                                     ctxt->vcpu, &err);
                if (ret == X86EMUL_PROPAGATE_FAULT) {
                        /* FIXME: need to provide precise fault address */
-                       kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+                       emulate_pf(ctxt, new_tss_base, err);
                        return ret;
                }
        }
@@ -2238,7 +2328,10 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
        struct decode_cache *c = &ctxt->decode;
        int ret;
 
-       ops->set_cr(3, tss->cr3, ctxt->vcpu);
+       if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
+               emulate_gp(ctxt, 0);
+               return X86EMUL_PROPAGATE_FAULT;
+       }
        c->eip = tss->eip;
        ctxt->eflags = tss->eflags | 2;
        c->regs[VCPU_REGS_RAX] = tss->eax;
@@ -2304,7 +2397,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                            &err);
        if (ret == X86EMUL_PROPAGATE_FAULT) {
                /* FIXME: need to provide precise fault address */
-               kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+               emulate_pf(ctxt, old_tss_base, err);
                return ret;
        }
 
@@ -2314,7 +2407,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                             &err);
        if (ret == X86EMUL_PROPAGATE_FAULT) {
                /* FIXME: need to provide precise fault address */
-               kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+               emulate_pf(ctxt, old_tss_base, err);
                return ret;
        }
 
@@ -2322,7 +2415,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                            &err);
        if (ret == X86EMUL_PROPAGATE_FAULT) {
                /* FIXME: need to provide precise fault address */
-               kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+               emulate_pf(ctxt, new_tss_base, err);
                return ret;
        }
 
@@ -2335,7 +2428,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
                                     ctxt->vcpu, &err);
                if (ret == X86EMUL_PROPAGATE_FAULT) {
                        /* FIXME: need to provide precise fault address */
-                       kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+                       emulate_pf(ctxt, new_tss_base, err);
                        return ret;
                }
        }
@@ -2352,7 +2445,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
        int ret;
        u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
        ulong old_tss_base =
-               get_cached_descriptor_base(ctxt, ops, VCPU_SREG_TR);
+               ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
        u32 desc_limit;
 
        /* FIXME: old_tss_base == ~0 ? */
@@ -2369,7 +2462,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
        if (reason != TASK_SWITCH_IRET) {
                if ((tss_selector & 3) > next_tss_desc.dpl ||
                    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
-                       kvm_inject_gp(ctxt->vcpu, 0);
+                       emulate_gp(ctxt, 0);
                        return X86EMUL_PROPAGATE_FAULT;
                }
        }
@@ -2378,8 +2471,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
        if (!next_tss_desc.p ||
            ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
             desc_limit < 0x2b)) {
-               kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
-                                     tss_selector & 0xfffc);
+               emulate_ts(ctxt, tss_selector & 0xfffc);
                return X86EMUL_PROPAGATE_FAULT;
        }
 
@@ -2425,7 +2517,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
                c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
                c->lock_prefix = 0;
                c->src.val = (unsigned long) error_code;
-               emulate_push(ctxt);
+               emulate_push(ctxt, ops);
        }
 
        return ret;
@@ -2439,18 +2531,16 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
        struct decode_cache *c = &ctxt->decode;
        int rc;
 
-       memset(c, 0, sizeof(struct decode_cache));
        c->eip = ctxt->eip;
-       memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
        c->dst.type = OP_NONE;
 
        rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
                                     has_error_code, error_code);
 
        if (rc == X86EMUL_CONTINUE) {
-               memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
-               kvm_rip_write(ctxt->vcpu, c->eip);
                rc = writeback(ctxt, ops);
+               if (rc == X86EMUL_CONTINUE)
+                       ctxt->eip = c->eip;
        }
 
        return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
@@ -2474,29 +2564,22 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
        int rc = X86EMUL_CONTINUE;
        int saved_dst_type = c->dst.type;
 
-       ctxt->interruptibility = 0;
-
-       /* Shadow copy of register state. Committed on successful emulation.
-        * NOTE: we can copy them from vcpu as x86_decode_insn() doesn't
-        * modify them.
-        */
-
-       memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
+       ctxt->decode.mem_read.pos = 0;
 
        if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
-               kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+               emulate_ud(ctxt);
                goto done;
        }
 
        /* LOCK prefix is allowed only with some instructions */
        if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
-               kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+               emulate_ud(ctxt);
                goto done;
        }
 
        /* Privileged instruction can be executed only in CPL=0 */
        if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
-               kvm_inject_gp(ctxt->vcpu, 0);
+               emulate_gp(ctxt, 0);
                goto done;
        }
 
@@ -2506,7 +2589,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2506 | if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) { | 2589 | if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) { |
2507 | string_done: | 2590 | string_done: |
2508 | ctxt->restart = false; | 2591 | ctxt->restart = false; |
2509 | kvm_rip_write(ctxt->vcpu, c->eip); | 2592 | ctxt->eip = c->eip; |
2510 | goto done; | 2593 | goto done; |
2511 | } | 2594 | } |
2512 | /* The second termination condition only applies for REPE | 2595 | /* The second termination condition only applies for REPE |
@@ -2529,20 +2612,16 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2529 | } | 2612 | } |
2530 | 2613 | ||
2531 | if (c->src.type == OP_MEM) { | 2614 | if (c->src.type == OP_MEM) { |
2532 | rc = ops->read_emulated((unsigned long)c->src.ptr, | 2615 | rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr, |
2533 | &c->src.val, | 2616 | c->src.valptr, c->src.bytes); |
2534 | c->src.bytes, | ||
2535 | ctxt->vcpu); | ||
2536 | if (rc != X86EMUL_CONTINUE) | 2617 | if (rc != X86EMUL_CONTINUE) |
2537 | goto done; | 2618 | goto done; |
2538 | c->src.orig_val = c->src.val; | 2619 | c->src.orig_val = c->src.val; |
2539 | } | 2620 | } |
2540 | 2621 | ||
2541 | if (c->src2.type == OP_MEM) { | 2622 | if (c->src2.type == OP_MEM) { |
2542 | rc = ops->read_emulated((unsigned long)c->src2.ptr, | 2623 | rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr, |
2543 | &c->src2.val, | 2624 | &c->src2.val, c->src2.bytes); |
2544 | c->src2.bytes, | ||
2545 | ctxt->vcpu); | ||
2546 | if (rc != X86EMUL_CONTINUE) | 2625 | if (rc != X86EMUL_CONTINUE) |
2547 | goto done; | 2626 | goto done; |
2548 | } | 2627 | } |
@@ -2553,8 +2632,8 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2553 | 2632 | ||
2554 | if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { | 2633 | if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { |
2555 | /* optimisation - avoid slow emulated read if Mov */ | 2634 | /* optimisation - avoid slow emulated read if Mov */ |
2556 | rc = ops->read_emulated((unsigned long)c->dst.ptr, &c->dst.val, | 2635 | rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr, |
2557 | c->dst.bytes, ctxt->vcpu); | 2636 | &c->dst.val, c->dst.bytes); |
2558 | if (rc != X86EMUL_CONTINUE) | 2637 | if (rc != X86EMUL_CONTINUE) |
2559 | goto done; | 2638 | goto done; |
2560 | } | 2639 | } |
@@ -2571,7 +2650,7 @@ special_insn: | |||
2571 | emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); | 2650 | emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); |
2572 | break; | 2651 | break; |
2573 | case 0x06: /* push es */ | 2652 | case 0x06: /* push es */ |
2574 | emulate_push_sreg(ctxt, VCPU_SREG_ES); | 2653 | emulate_push_sreg(ctxt, ops, VCPU_SREG_ES); |
2575 | break; | 2654 | break; |
2576 | case 0x07: /* pop es */ | 2655 | case 0x07: /* pop es */ |
2577 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); | 2656 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); |
@@ -2583,14 +2662,14 @@ special_insn: | |||
2583 | emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); | 2662 | emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); |
2584 | break; | 2663 | break; |
2585 | case 0x0e: /* push cs */ | 2664 | case 0x0e: /* push cs */ |
2586 | emulate_push_sreg(ctxt, VCPU_SREG_CS); | 2665 | emulate_push_sreg(ctxt, ops, VCPU_SREG_CS); |
2587 | break; | 2666 | break; |
2588 | case 0x10 ... 0x15: | 2667 | case 0x10 ... 0x15: |
2589 | adc: /* adc */ | 2668 | adc: /* adc */ |
2590 | emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags); | 2669 | emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags); |
2591 | break; | 2670 | break; |
2592 | case 0x16: /* push ss */ | 2671 | case 0x16: /* push ss */ |
2593 | emulate_push_sreg(ctxt, VCPU_SREG_SS); | 2672 | emulate_push_sreg(ctxt, ops, VCPU_SREG_SS); |
2594 | break; | 2673 | break; |
2595 | case 0x17: /* pop ss */ | 2674 | case 0x17: /* pop ss */ |
2596 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); | 2675 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); |
@@ -2602,7 +2681,7 @@ special_insn: | |||
2602 | emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); | 2681 | emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); |
2603 | break; | 2682 | break; |
2604 | case 0x1e: /* push ds */ | 2683 | case 0x1e: /* push ds */ |
2605 | emulate_push_sreg(ctxt, VCPU_SREG_DS); | 2684 | emulate_push_sreg(ctxt, ops, VCPU_SREG_DS); |
2606 | break; | 2685 | break; |
2607 | case 0x1f: /* pop ds */ | 2686 | case 0x1f: /* pop ds */ |
2608 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); | 2687 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); |
@@ -2632,7 +2711,7 @@ special_insn: | |||
2632 | emulate_1op("dec", c->dst, ctxt->eflags); | 2711 | emulate_1op("dec", c->dst, ctxt->eflags); |
2633 | break; | 2712 | break; |
2634 | case 0x50 ... 0x57: /* push reg */ | 2713 | case 0x50 ... 0x57: /* push reg */ |
2635 | emulate_push(ctxt); | 2714 | emulate_push(ctxt, ops); |
2636 | break; | 2715 | break; |
2637 | case 0x58 ... 0x5f: /* pop reg */ | 2716 | case 0x58 ... 0x5f: /* pop reg */ |
2638 | pop_instruction: | 2717 | pop_instruction: |
@@ -2641,7 +2720,9 @@ special_insn: | |||
2641 | goto done; | 2720 | goto done; |
2642 | break; | 2721 | break; |
2643 | case 0x60: /* pusha */ | 2722 | case 0x60: /* pusha */ |
2644 | emulate_pusha(ctxt); | 2723 | rc = emulate_pusha(ctxt, ops); |
2724 | if (rc != X86EMUL_CONTINUE) | ||
2725 | goto done; | ||
2645 | break; | 2726 | break; |
2646 | case 0x61: /* popa */ | 2727 | case 0x61: /* popa */ |
2647 | rc = emulate_popa(ctxt, ops); | 2728 | rc = emulate_popa(ctxt, ops); |
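emulate_pusha() can now fail, so its result is checked like the other stack helpers. For reference, PUSHA stores the eight general registers in a fixed order, with the pre-instruction stack pointer in the SP slot; a toy 16-bit model (names invented):

#include <stdint.h>

/* Toy 16-bit PUSHA: regs[] holds AX,CX,DX,BX,SP,BP,SI,DI; the value
 * pushed in the SP slot is the stack pointer from before the
 * instruction.  stack_top points one past the current top of stack. */
static void pusha16(const uint16_t regs[8], uint16_t *stack_top)
{
        uint16_t old_sp = regs[4];

        for (int i = 0; i < 8; i++)
                *--stack_top = (i == 4) ? old_sp : regs[i];
}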
@@ -2655,14 +2736,14 @@ special_insn: | |||
2655 | break; | 2736 | break; |
2656 | case 0x68: /* push imm */ | 2737 | case 0x68: /* push imm */ |
2657 | case 0x6a: /* push imm8 */ | 2738 | case 0x6a: /* push imm8 */ |
2658 | emulate_push(ctxt); | 2739 | emulate_push(ctxt, ops); |
2659 | break; | 2740 | break; |
2660 | case 0x6c: /* insb */ | 2741 | case 0x6c: /* insb */ |
2661 | case 0x6d: /* insw/insd */ | 2742 | case 0x6d: /* insw/insd */ |
2662 | c->dst.bytes = min(c->dst.bytes, 4u); | 2743 | c->dst.bytes = min(c->dst.bytes, 4u); |
2663 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], | 2744 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], |
2664 | c->dst.bytes)) { | 2745 | c->dst.bytes)) { |
2665 | kvm_inject_gp(ctxt->vcpu, 0); | 2746 | emulate_gp(ctxt, 0); |
2666 | goto done; | 2747 | goto done; |
2667 | } | 2748 | } |
2668 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, | 2749 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, |
@@ -2674,7 +2755,7 @@ special_insn: | |||
2674 | c->src.bytes = min(c->src.bytes, 4u); | 2755 | c->src.bytes = min(c->src.bytes, 4u); |
2675 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], | 2756 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], |
2676 | c->src.bytes)) { | 2757 | c->src.bytes)) { |
2677 | kvm_inject_gp(ctxt->vcpu, 0); | 2758 | emulate_gp(ctxt, 0); |
2678 | goto done; | 2759 | goto done; |
2679 | } | 2760 | } |
2680 | ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX], | 2761 | ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX], |
@@ -2707,6 +2788,7 @@ special_insn: | |||
2707 | } | 2788 | } |
2708 | break; | 2789 | break; |
2709 | case 0x84 ... 0x85: | 2790 | case 0x84 ... 0x85: |
2791 | test: | ||
2710 | emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); | 2792 | emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); |
2711 | break; | 2793 | break; |
2712 | case 0x86 ... 0x87: /* xchg */ | 2794 | case 0x86 ... 0x87: /* xchg */ |
@@ -2735,18 +2817,13 @@ special_insn: | |||
2735 | break; | 2817 | break; |
2736 | case 0x88 ... 0x8b: /* mov */ | 2818 | case 0x88 ... 0x8b: /* mov */ |
2737 | goto mov; | 2819 | goto mov; |
2738 | case 0x8c: { /* mov r/m, sreg */ | 2820 | case 0x8c: /* mov r/m, sreg */ |
2739 | struct kvm_segment segreg; | 2821 | if (c->modrm_reg > VCPU_SREG_GS) { |
2740 | 2822 | emulate_ud(ctxt); | |
2741 | if (c->modrm_reg <= VCPU_SREG_GS) | ||
2742 | kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg); | ||
2743 | else { | ||
2744 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | ||
2745 | goto done; | 2823 | goto done; |
2746 | } | 2824 | } |
2747 | c->dst.val = segreg.selector; | 2825 | c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu); |
2748 | break; | 2826 | break; |
2749 | } | ||
2750 | case 0x8d: /* lea r16/r32, m */ | 2827 | case 0x8d: /* lea r16/r32, m */ |
2751 | c->dst.val = c->modrm_ea; | 2828 | c->dst.val = c->modrm_ea; |
2752 | break; | 2829 | break; |
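The rewritten 0x8c case above rejects ModRM reg encodings greater than GS before reading the selector, since encodings 6 and 7 are reserved. A small sketch of that decode step (enum names illustrative):

#include <stdint.h>

enum seg { ES, CS, SS, DS, FS, GS };    /* x86 sreg encodings 0..5 */

/* Sketch: the ModRM reg field picks the segment register; encodings 6
 * and 7 are reserved and must raise #UD, mirroring the check above. */
static int decode_sreg(uint8_t modrm, enum seg *out)
{
        uint8_t reg = (modrm >> 3) & 7;

        if (reg > GS)
                return -1;              /* reserved -> #UD */
        *out = (enum seg)reg;
        return 0;
}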
@@ -2757,12 +2834,12 @@ special_insn: | |||
2757 | 2834 | ||
2758 | if (c->modrm_reg == VCPU_SREG_CS || | 2835 | if (c->modrm_reg == VCPU_SREG_CS || |
2759 | c->modrm_reg > VCPU_SREG_GS) { | 2836 | c->modrm_reg > VCPU_SREG_GS) { |
2760 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | 2837 | emulate_ud(ctxt); |
2761 | goto done; | 2838 | goto done; |
2762 | } | 2839 | } |
2763 | 2840 | ||
2764 | if (c->modrm_reg == VCPU_SREG_SS) | 2841 | if (c->modrm_reg == VCPU_SREG_SS) |
2765 | toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS); | 2842 | ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS; |
2766 | 2843 | ||
2767 | rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg); | 2844 | rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg); |
2768 | 2845 | ||
@@ -2775,19 +2852,19 @@ special_insn: | |||
2775 | goto done; | 2852 | goto done; |
2776 | break; | 2853 | break; |
2777 | case 0x90: /* nop / xchg r8,rax */ | 2854 | case 0x90: /* nop / xchg r8,rax */ |
2778 | if (!(c->rex_prefix & 1)) { /* nop */ | 2855 | if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) { |
2779 | c->dst.type = OP_NONE; | 2856 | c->dst.type = OP_NONE; /* nop */ |
2780 | break; | 2857 | break; |
2781 | } | 2858 | } |
2782 | case 0x91 ... 0x97: /* xchg reg,rax */ | 2859 | case 0x91 ... 0x97: /* xchg reg,rax */ |
2783 | c->src.type = c->dst.type = OP_REG; | 2860 | c->src.type = OP_REG; |
2784 | c->src.bytes = c->dst.bytes = c->op_bytes; | 2861 | c->src.bytes = c->op_bytes; |
2785 | c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX]; | 2862 | c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX]; |
2786 | c->src.val = *(c->src.ptr); | 2863 | c->src.val = *(c->src.ptr); |
2787 | goto xchg; | 2864 | goto xchg; |
2788 | case 0x9c: /* pushf */ | 2865 | case 0x9c: /* pushf */ |
2789 | c->src.val = (unsigned long) ctxt->eflags; | 2866 | c->src.val = (unsigned long) ctxt->eflags; |
2790 | emulate_push(ctxt); | 2867 | emulate_push(ctxt, ops); |
2791 | break; | 2868 | break; |
2792 | case 0x9d: /* popf */ | 2869 | case 0x9d: /* popf */ |
2793 | c->dst.type = OP_REG; | 2870 | c->dst.type = OP_REG; |
@@ -2797,19 +2874,15 @@ special_insn: | |||
2797 | if (rc != X86EMUL_CONTINUE) | 2874 | if (rc != X86EMUL_CONTINUE) |
2798 | goto done; | 2875 | goto done; |
2799 | break; | 2876 | break; |
2800 | case 0xa0 ... 0xa1: /* mov */ | 2877 | case 0xa0 ... 0xa3: /* mov */ |
2801 | c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; | ||
2802 | c->dst.val = c->src.val; | ||
2803 | break; | ||
2804 | case 0xa2 ... 0xa3: /* mov */ | ||
2805 | c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX]; | ||
2806 | break; | ||
2807 | case 0xa4 ... 0xa5: /* movs */ | 2878 | case 0xa4 ... 0xa5: /* movs */ |
2808 | goto mov; | 2879 | goto mov; |
2809 | case 0xa6 ... 0xa7: /* cmps */ | 2880 | case 0xa6 ... 0xa7: /* cmps */ |
2810 | c->dst.type = OP_NONE; /* Disable writeback. */ | 2881 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2811 | DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); | 2882 | DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); |
2812 | goto cmp; | 2883 | goto cmp; |
2884 | case 0xa8 ... 0xa9: /* test ax, imm */ | ||
2885 | goto test; | ||
2813 | case 0xaa ... 0xab: /* stos */ | 2886 | case 0xaa ... 0xab: /* stos */ |
2814 | c->dst.val = c->regs[VCPU_REGS_RAX]; | 2887 | c->dst.val = c->regs[VCPU_REGS_RAX]; |
2815 | break; | 2888 | break; |
@@ -2855,19 +2928,23 @@ special_insn: | |||
2855 | long int rel = c->src.val; | 2928 | long int rel = c->src.val; |
2856 | c->src.val = (unsigned long) c->eip; | 2929 | c->src.val = (unsigned long) c->eip; |
2857 | jmp_rel(c, rel); | 2930 | jmp_rel(c, rel); |
2858 | emulate_push(ctxt); | 2931 | emulate_push(ctxt, ops); |
2859 | break; | 2932 | break; |
2860 | } | 2933 | } |
2861 | case 0xe9: /* jmp rel */ | 2934 | case 0xe9: /* jmp rel */ |
2862 | goto jmp; | 2935 | goto jmp; |
2863 | case 0xea: /* jmp far */ | 2936 | case 0xea: { /* jmp far */ |
2937 | unsigned short sel; | ||
2864 | jump_far: | 2938 | jump_far: |
2865 | if (load_segment_descriptor(ctxt, ops, c->src2.val, | 2939 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); |
2866 | VCPU_SREG_CS)) | 2940 | |
2941 | if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS)) | ||
2867 | goto done; | 2942 | goto done; |
2868 | 2943 | ||
2869 | c->eip = c->src.val; | 2944 | c->eip = 0; |
2945 | memcpy(&c->eip, c->src.valptr, c->op_bytes); | ||
2870 | break; | 2946 | break; |
2947 | } | ||
2871 | case 0xeb: | 2948 | case 0xeb: |
2872 | jmp: /* jmp rel short */ | 2949 | jmp: /* jmp rel short */ |
2873 | jmp_rel(c, c->src.val); | 2950 | jmp_rel(c, c->src.val); |
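The reworked 0xea case reads the far-pointer immediate in its wire layout: the 2- or 4-byte offset first, then the 2-byte selector, which is why the selector sits at c->src.valptr + c->op_bytes. A standalone sketch of that layout:

#include <stdint.h>
#include <string.h>

/* Sketch of a ptr16:16/ptr16:32 far pointer as it sits in the
 * instruction stream: offset first (op_bytes wide), selector after. */
static void decode_far_ptr(const uint8_t *imm, int op_bytes,
                           uint16_t *sel, uint32_t *off)
{
        *off = 0;
        memcpy(off, imm, op_bytes);     /* 2- or 4-byte offset */
        memcpy(sel, imm + op_bytes, 2); /* 16-bit selector follows */
}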
@@ -2879,20 +2956,20 @@ special_insn: | |||
2879 | do_io_in: | 2956 | do_io_in: |
2880 | c->dst.bytes = min(c->dst.bytes, 4u); | 2957 | c->dst.bytes = min(c->dst.bytes, 4u); |
2881 | if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { | 2958 | if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { |
2882 | kvm_inject_gp(ctxt->vcpu, 0); | 2959 | emulate_gp(ctxt, 0); |
2883 | goto done; | 2960 | goto done; |
2884 | } | 2961 | } |
2885 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, | 2962 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, |
2886 | &c->dst.val)) | 2963 | &c->dst.val)) |
2887 | goto done; /* IO is needed */ | 2964 | goto done; /* IO is needed */ |
2888 | break; | 2965 | break; |
2889 | case 0xee: /* out al,dx */ | 2966 | case 0xee: /* out dx,al */ |
2890 | case 0xef: /* out (e/r)ax,dx */ | 2967 | case 0xef: /* out dx,(e/r)ax */ |
2891 | c->src.val = c->regs[VCPU_REGS_RDX]; | 2968 | c->src.val = c->regs[VCPU_REGS_RDX]; |
2892 | do_io_out: | 2969 | do_io_out: |
2893 | c->dst.bytes = min(c->dst.bytes, 4u); | 2970 | c->dst.bytes = min(c->dst.bytes, 4u); |
2894 | if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { | 2971 | if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { |
2895 | kvm_inject_gp(ctxt->vcpu, 0); | 2972 | emulate_gp(ctxt, 0); |
2896 | goto done; | 2973 | goto done; |
2897 | } | 2974 | } |
2898 | ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1, | 2975 | ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1, |
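Both port I/O paths now raise #GP(0) through emulate_gp() when emulator_io_permited() refuses the port. Conceptually the check walks the TSS I/O permission bitmap; a toy model, not the kernel's implementation:

#include <stdint.h>

/* Toy model of a TSS I/O permission bitmap: one bit per port, a set
 * bit denies access, and every byte of the access must be allowed. */
static int io_permitted(const uint8_t *bitmap, uint16_t port, int len)
{
        for (int i = 0; i < len; i++, port++)
                if (bitmap[port / 8] & (1 << (port % 8)))
                        return 0;       /* denied -> #GP(0) */
        return 1;
}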
@@ -2916,18 +2993,20 @@ special_insn: | |||
2916 | c->dst.type = OP_NONE; /* Disable writeback. */ | 2993 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2917 | break; | 2994 | break; |
2918 | case 0xfa: /* cli */ | 2995 | case 0xfa: /* cli */ |
2919 | if (emulator_bad_iopl(ctxt, ops)) | 2996 | if (emulator_bad_iopl(ctxt, ops)) { |
2920 | kvm_inject_gp(ctxt->vcpu, 0); | 2997 | emulate_gp(ctxt, 0); |
2921 | else { | 2998 | goto done; |
2999 | } else { | ||
2922 | ctxt->eflags &= ~X86_EFLAGS_IF; | 3000 | ctxt->eflags &= ~X86_EFLAGS_IF; |
2923 | c->dst.type = OP_NONE; /* Disable writeback. */ | 3001 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2924 | } | 3002 | } |
2925 | break; | 3003 | break; |
2926 | case 0xfb: /* sti */ | 3004 | case 0xfb: /* sti */ |
2927 | if (emulator_bad_iopl(ctxt, ops)) | 3005 | if (emulator_bad_iopl(ctxt, ops)) { |
2928 | kvm_inject_gp(ctxt->vcpu, 0); | 3006 | emulate_gp(ctxt, 0); |
2929 | else { | 3007 | goto done; |
2930 | toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI); | 3008 | } else { |
3009 | ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; | ||
2931 | ctxt->eflags |= X86_EFLAGS_IF; | 3010 | ctxt->eflags |= X86_EFLAGS_IF; |
2932 | c->dst.type = OP_NONE; /* Disable writeback. */ | 3011 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2933 | } | 3012 | } |
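CLI and STI now jump to done after injecting #GP, so the fault path skips writeback. The privilege test itself compares CPL against the IOPL field in EFLAGS bits 12-13; a protected-mode-only sketch:

#include <stdint.h>

/* Protected-mode-only sketch of the IOPL test: CLI/STI (and port I/O
 * not granted by the bitmap) fault when CPL exceeds IOPL, which lives
 * in EFLAGS bits 12-13. */
static int bad_iopl(uint32_t eflags, int cpl)
{
        int iopl = (eflags >> 12) & 3;

        return cpl > iopl;
}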
@@ -2964,11 +3043,12 @@ writeback: | |||
2964 | c->dst.type = saved_dst_type; | 3043 | c->dst.type = saved_dst_type; |
2965 | 3044 | ||
2966 | if ((c->d & SrcMask) == SrcSI) | 3045 | if ((c->d & SrcMask) == SrcSI) |
2967 | string_addr_inc(ctxt, seg_override_base(ctxt, c), VCPU_REGS_RSI, | 3046 | string_addr_inc(ctxt, seg_override_base(ctxt, ops, c), |
2968 | &c->src); | 3047 | VCPU_REGS_RSI, &c->src); |
2969 | 3048 | ||
2970 | if ((c->d & DstMask) == DstDI) | 3049 | if ((c->d & DstMask) == DstDI) |
2971 | string_addr_inc(ctxt, es_base(ctxt), VCPU_REGS_RDI, &c->dst); | 3050 | string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI, |
3051 | &c->dst); | ||
2972 | 3052 | ||
2973 | if (c->rep_prefix && (c->d & String)) { | 3053 | if (c->rep_prefix && (c->d & String)) { |
2974 | struct read_cache *rc = &ctxt->decode.io_read; | 3054 | struct read_cache *rc = &ctxt->decode.io_read; |
@@ -2981,11 +3061,12 @@ writeback: | |||
2981 | (rc->end != 0 && rc->end == rc->pos)) | 3061 | (rc->end != 0 && rc->end == rc->pos)) |
2982 | ctxt->restart = false; | 3062 | ctxt->restart = false; |
2983 | } | 3063 | } |
2984 | 3064 | /* | |
2985 | /* Commit shadow register state. */ | 3065 | * reset read cache here in case string instruction is restarted |
2986 | memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs); | 3066 | * without decoding |
2987 | kvm_rip_write(ctxt->vcpu, c->eip); | 3067 | */ |
2988 | ops->set_rflags(ctxt->vcpu, ctxt->eflags); | 3068 | ctxt->decode.mem_read.end = 0; |
3069 | ctxt->eip = c->eip; | ||
2989 | 3070 | ||
2990 | done: | 3071 | done: |
2991 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; | 3072 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; |
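The writeback tail now clears mem_read.end so a restarted string instruction rereads memory cleanly, and records the next RIP in ctxt->eip rather than writing it back directly. The restart loop stops once the count register, masked to the current address size, hits zero; a sketch of that mask:

#include <stdint.h>

/* Sketch: REP termination looks at the count register only through the
 * current address size, so 16-bit code spins on CX alone. */
static uint64_t address_mask(uint64_t val, int ad_bytes)
{
        if (ad_bytes == 8)
                return val;
        return val & ((1ULL << (ad_bytes * 8)) - 1);
}

static int rep_done(uint64_t rcx, int ad_bytes)
{
        return address_mask(rcx, ad_bytes) == 0;
}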
@@ -3051,7 +3132,7 @@ twobyte_insn: | |||
3051 | c->dst.type = OP_NONE; | 3132 | c->dst.type = OP_NONE; |
3052 | break; | 3133 | break; |
3053 | case 5: /* not defined */ | 3134 | case 5: /* not defined */ |
3054 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | 3135 | emulate_ud(ctxt); |
3055 | goto done; | 3136 | goto done; |
3056 | case 7: /* invlpg */ | 3137 | case 7: /* invlpg */ |
3057 | emulate_invlpg(ctxt->vcpu, c->modrm_ea); | 3138 | emulate_invlpg(ctxt->vcpu, c->modrm_ea); |
@@ -3063,7 +3144,7 @@ twobyte_insn: | |||
3063 | } | 3144 | } |
3064 | break; | 3145 | break; |
3065 | case 0x05: /* syscall */ | 3146 | case 0x05: /* syscall */ |
3066 | rc = emulate_syscall(ctxt); | 3147 | rc = emulate_syscall(ctxt, ops); |
3067 | if (rc != X86EMUL_CONTINUE) | 3148 | if (rc != X86EMUL_CONTINUE) |
3068 | goto done; | 3149 | goto done; |
3069 | else | 3150 | else |
@@ -3073,8 +3154,11 @@ twobyte_insn: | |||
3073 | emulate_clts(ctxt->vcpu); | 3154 | emulate_clts(ctxt->vcpu); |
3074 | c->dst.type = OP_NONE; | 3155 | c->dst.type = OP_NONE; |
3075 | break; | 3156 | break; |
3076 | case 0x08: /* invd */ | ||
3077 | case 0x09: /* wbinvd */ | 3157 | case 0x09: /* wbinvd */ |
3158 | kvm_emulate_wbinvd(ctxt->vcpu); | ||
3159 | c->dst.type = OP_NONE; | ||
3160 | break; | ||
3161 | case 0x08: /* invd */ | ||
3078 | case 0x0d: /* GrpP (prefetch) */ | 3162 | case 0x0d: /* GrpP (prefetch) */ |
3079 | case 0x18: /* Grp16 (prefetch/nop) */ | 3163 | case 0x18: /* Grp16 (prefetch/nop) */ |
3080 | c->dst.type = OP_NONE; | 3164 | c->dst.type = OP_NONE; |
@@ -3084,7 +3168,7 @@ twobyte_insn: | |||
3084 | case 1: | 3168 | case 1: |
3085 | case 5 ... 7: | 3169 | case 5 ... 7: |
3086 | case 9 ... 15: | 3170 | case 9 ... 15: |
3087 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | 3171 | emulate_ud(ctxt); |
3088 | goto done; | 3172 | goto done; |
3089 | } | 3173 | } |
3090 | c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu); | 3174 | c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu); |
@@ -3093,31 +3177,42 @@ twobyte_insn: | |||
3093 | case 0x21: /* mov from dr to reg */ | 3177 | case 0x21: /* mov from dr to reg */ |
3094 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && | 3178 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && |
3095 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { | 3179 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { |
3096 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | 3180 | emulate_ud(ctxt); |
3097 | goto done; | 3181 | goto done; |
3098 | } | 3182 | } |
3099 | emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]); | 3183 | ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu); |
3100 | c->dst.type = OP_NONE; /* no writeback */ | 3184 | c->dst.type = OP_NONE; /* no writeback */ |
3101 | break; | 3185 | break; |
3102 | case 0x22: /* mov reg, cr */ | 3186 | case 0x22: /* mov reg, cr */ |
3103 | ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu); | 3187 | if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) { |
3188 | emulate_gp(ctxt, 0); | ||
3189 | goto done; | ||
3190 | } | ||
3104 | c->dst.type = OP_NONE; | 3191 | c->dst.type = OP_NONE; |
3105 | break; | 3192 | break; |
3106 | case 0x23: /* mov from reg to dr */ | 3193 | case 0x23: /* mov from reg to dr */ |
3107 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && | 3194 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && |
3108 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { | 3195 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { |
3109 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | 3196 | emulate_ud(ctxt); |
3197 | goto done; | ||
3198 | } | ||
3199 | |||
3200 | if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] & | ||
3201 | ((ctxt->mode == X86EMUL_MODE_PROT64) ? | ||
3202 | ~0ULL : ~0U), ctxt->vcpu) < 0) { | ||
3203 | /* #UD condition is already handled by the code above */ | ||
3204 | emulate_gp(ctxt, 0); | ||
3110 | goto done; | 3205 | goto done; |
3111 | } | 3206 | } |
3112 | emulator_set_dr(ctxt, c->modrm_reg, c->regs[c->modrm_rm]); | 3207 | |
3113 | c->dst.type = OP_NONE; /* no writeback */ | 3208 | c->dst.type = OP_NONE; /* no writeback */ |
3114 | break; | 3209 | break; |
3115 | case 0x30: | 3210 | case 0x30: |
3116 | /* wrmsr */ | 3211 | /* wrmsr */ |
3117 | msr_data = (u32)c->regs[VCPU_REGS_RAX] | 3212 | msr_data = (u32)c->regs[VCPU_REGS_RAX] |
3118 | | ((u64)c->regs[VCPU_REGS_RDX] << 32); | 3213 | | ((u64)c->regs[VCPU_REGS_RDX] << 32); |
3119 | if (kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) { | 3214 | if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) { |
3120 | kvm_inject_gp(ctxt->vcpu, 0); | 3215 | emulate_gp(ctxt, 0); |
3121 | goto done; | 3216 | goto done; |
3122 | } | 3217 | } |
3123 | rc = X86EMUL_CONTINUE; | 3218 | rc = X86EMUL_CONTINUE; |
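WRMSR takes its 64-bit payload split across EDX:EAX, which is what the msr_data expression above reassembles before ops->set_msr() may reject it with #GP(0). In isolation:

#include <stdint.h>

/* EDX:EAX reassembly as done for msr_data above. */
static uint64_t msr_payload(uint32_t eax, uint32_t edx)
{
        return (uint64_t)eax | ((uint64_t)edx << 32);
}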
@@ -3125,8 +3220,8 @@ twobyte_insn: | |||
3125 | break; | 3220 | break; |
3126 | case 0x32: | 3221 | case 0x32: |
3127 | /* rdmsr */ | 3222 | /* rdmsr */ |
3128 | if (kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) { | 3223 | if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) { |
3129 | kvm_inject_gp(ctxt->vcpu, 0); | 3224 | emulate_gp(ctxt, 0); |
3130 | goto done; | 3225 | goto done; |
3131 | } else { | 3226 | } else { |
3132 | c->regs[VCPU_REGS_RAX] = (u32)msr_data; | 3227 | c->regs[VCPU_REGS_RAX] = (u32)msr_data; |
@@ -3136,14 +3231,14 @@ twobyte_insn: | |||
3136 | c->dst.type = OP_NONE; | 3231 | c->dst.type = OP_NONE; |
3137 | break; | 3232 | break; |
3138 | case 0x34: /* sysenter */ | 3233 | case 0x34: /* sysenter */ |
3139 | rc = emulate_sysenter(ctxt); | 3234 | rc = emulate_sysenter(ctxt, ops); |
3140 | if (rc != X86EMUL_CONTINUE) | 3235 | if (rc != X86EMUL_CONTINUE) |
3141 | goto done; | 3236 | goto done; |
3142 | else | 3237 | else |
3143 | goto writeback; | 3238 | goto writeback; |
3144 | break; | 3239 | break; |
3145 | case 0x35: /* sysexit */ | 3240 | case 0x35: /* sysexit */ |
3146 | rc = emulate_sysexit(ctxt); | 3241 | rc = emulate_sysexit(ctxt, ops); |
3147 | if (rc != X86EMUL_CONTINUE) | 3242 | if (rc != X86EMUL_CONTINUE) |
3148 | goto done; | 3243 | goto done; |
3149 | else | 3244 | else |
@@ -3160,7 +3255,7 @@ twobyte_insn: | |||
3160 | c->dst.type = OP_NONE; | 3255 | c->dst.type = OP_NONE; |
3161 | break; | 3256 | break; |
3162 | case 0xa0: /* push fs */ | 3257 | case 0xa0: /* push fs */ |
3163 | emulate_push_sreg(ctxt, VCPU_SREG_FS); | 3258 | emulate_push_sreg(ctxt, ops, VCPU_SREG_FS); |
3164 | break; | 3259 | break; |
3165 | case 0xa1: /* pop fs */ | 3260 | case 0xa1: /* pop fs */ |
3166 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); | 3261 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); |
@@ -3179,7 +3274,7 @@ twobyte_insn: | |||
3179 | emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); | 3274 | emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); |
3180 | break; | 3275 | break; |
3181 | case 0xa8: /* push gs */ | 3276 | case 0xa8: /* push gs */ |
3182 | emulate_push_sreg(ctxt, VCPU_SREG_GS); | 3277 | emulate_push_sreg(ctxt, ops, VCPU_SREG_GS); |
3183 | break; | 3278 | break; |
3184 | case 0xa9: /* pop gs */ | 3279 | case 0xa9: /* pop gs */ |
3185 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); | 3280 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); |