Diffstat (limited to 'arch/x86/kvm/emulate.c')
 arch/x86/kvm/emulate.c | 1247 ++++++++++++++++++++++++++++++++++++++++---------
 1 file changed, 916 insertions(+), 331 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4dade6ac0827..5ac0bb465ed6 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -33,6 +33,7 @@
 #include <asm/kvm_emulate.h>
 
 #include "x86.h"
+#include "tss.h"
 
 /*
  * Opcode effective-address decode tables.
@@ -50,6 +51,8 @@
 #define DstReg      (2<<1)	/* Register operand. */
 #define DstMem      (3<<1)	/* Memory operand. */
 #define DstAcc      (4<<1)	/* Destination Accumulator */
+#define DstDI       (5<<1)	/* Destination is in ES:(E)DI */
+#define DstMem64    (6<<1)	/* 64bit memory operand */
 #define DstMask     (7<<1)
 /* Source operand type. */
 #define SrcNone     (0<<4)	/* No source operand. */
@@ -63,6 +66,7 @@
 #define SrcOne      (7<<4)	/* Implied '1' */
 #define SrcImmUByte (8<<4)	/* 8-bit unsigned immediate operand. */
 #define SrcImmU     (9<<4)	/* Immediate operand, unsigned */
+#define SrcSI       (0xa<<4)	/* Source is in the DS:RSI */
 #define SrcMask     (0xf<<4)
 /* Generic ModRM decode. */
 #define ModRM       (1<<8)
@@ -85,6 +89,9 @@
 #define Src2ImmByte (2<<29)
 #define Src2One     (3<<29)
 #define Src2Imm16   (4<<29)
+#define Src2Mem16   (5<<29) /* Used for Ep encoding. First argument has to be
+			       in memory and second argument is located
+			       immediately after the first one in memory. */
 #define Src2Mask    (7<<29)
 
 enum {
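For orientation, Src2Mem16 describes the second half of an Ep operand (the far jmp/call-through-memory forms in Group 5): the 16/32-bit offset comes first, and the 16-bit segment selector sits immediately after it in memory. A standalone C sketch of that split; the byte values and the manual decode are purely illustrative, while the emulator itself just computes modrm_ea + src.bytes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        /* Far pointer in memory, 32-bit operand size: a 4-byte offset
         * followed immediately by a 2-byte segment selector. */
        uint8_t bytes[6] = { 0x78, 0x56, 0x34, 0x12, 0x1b, 0x00 };
        uint32_t offset = bytes[0] | bytes[1] << 8 | bytes[2] << 16 |
                          (uint32_t)bytes[3] << 24;
        uint16_t selector = bytes[4] | bytes[5] << 8;

        printf("offset=0x%x selector=0x%x\n", offset, selector);
        return 0;
    }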
@@ -147,8 +154,8 @@ static u32 opcode_table[256] = {
 	0, 0, 0, 0,
 	/* 0x68 - 0x6F */
 	SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0,
-	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* insb, insw/insd */
-	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps, /* outsb, outsw/outsd */
+	DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */
+	SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */
 	/* 0x70 - 0x77 */
 	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
 	SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte,
@@ -173,12 +180,12 @@ static u32 opcode_table[256] = {
 	/* 0xA0 - 0xA7 */
 	ByteOp | DstReg | SrcMem | Mov | MemAbs, DstReg | SrcMem | Mov | MemAbs,
 	ByteOp | DstMem | SrcReg | Mov | MemAbs, DstMem | SrcReg | Mov | MemAbs,
-	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
-	ByteOp | ImplicitOps | String, ImplicitOps | String,
+	ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String,
+	ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String,
 	/* 0xA8 - 0xAF */
-	0, 0, ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
-	ByteOp | ImplicitOps | Mov | String, ImplicitOps | Mov | String,
-	ByteOp | ImplicitOps | String, ImplicitOps | String,
+	0, 0, ByteOp | DstDI | Mov | String, DstDI | Mov | String,
+	ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String,
+	ByteOp | DstDI | String, DstDI | String,
 	/* 0xB0 - 0xB7 */
 	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
 	ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov,
@@ -204,13 +211,13 @@ static u32 opcode_table[256] = {
 	0, 0, 0, 0, 0, 0, 0, 0,
 	/* 0xE0 - 0xE7 */
 	0, 0, 0, 0,
-	ByteOp | SrcImmUByte, SrcImmUByte,
-	ByteOp | SrcImmUByte, SrcImmUByte,
+	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
+	ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc,
 	/* 0xE8 - 0xEF */
 	SrcImm | Stack, SrcImm | ImplicitOps,
 	SrcImmU | Src2Imm16 | No64, SrcImmByte | ImplicitOps,
-	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
-	SrcNone | ByteOp | ImplicitOps, SrcNone | ImplicitOps,
+	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
+	SrcNone | ByteOp | DstAcc, SrcNone | DstAcc,
 	/* 0xF0 - 0xF7 */
 	0, 0, 0, 0,
 	ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3,
@@ -343,7 +350,8 @@ static u32 group_table[] = {
 	[Group5*8] =
 	DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM,
 	SrcMem | ModRM | Stack, 0,
-	SrcMem | ModRM | Stack, 0, SrcMem | ModRM | Stack, 0,
+	SrcMem | ModRM | Stack, SrcMem | ModRM | Src2Mem16 | ImplicitOps,
+	SrcMem | ModRM | Stack, 0,
 	[Group7*8] =
 	0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv,
 	SrcNone | ModRM | DstMem | Mov, 0,
@@ -353,14 +361,14 @@ static u32 group_table[] = {
 	DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock,
 	DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock,
 	[Group9*8] =
-	0, ImplicitOps | ModRM | Lock, 0, 0, 0, 0, 0, 0,
+	0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0,
 };
 
 static u32 group2_table[] = {
 	[Group7*8] =
-	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM,
+	SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv,
 	SrcNone | ModRM | DstMem | Mov, 0,
-	SrcMem16 | ModRM | Mov, 0,
+	SrcMem16 | ModRM | Mov | Priv, 0,
 	[Group9*8] =
 	0, 0, 0, 0, 0, 0, 0, 0,
 };
@@ -562,7 +570,7 @@ static u32 group2_table[] = {
 #define insn_fetch(_type, _size, _eip)                                  \
 ({	unsigned long _x;						\
 	rc = do_insn_fetch(ctxt, ops, (_eip), &_x, (_size));		\
-	if (rc != 0)							\
+	if (rc != X86EMUL_CONTINUE)					\
 		goto done;						\
 	(_eip) += (_size);						\
 	(_type)_x;							\
@@ -638,40 +646,40 @@ static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
 
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 			      struct x86_emulate_ops *ops,
-			      unsigned long linear, u8 *dest)
+			      unsigned long eip, u8 *dest)
 {
 	struct fetch_cache *fc = &ctxt->decode.fetch;
 	int rc;
-	int size;
+	int size, cur_size;
 
-	if (linear < fc->start || linear >= fc->end) {
-		size = min(15UL, PAGE_SIZE - offset_in_page(linear));
-		rc = ops->fetch(linear, fc->data, size, ctxt->vcpu, NULL);
-		if (rc)
+	if (eip == fc->end) {
+		cur_size = fc->end - fc->start;
+		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
+		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
+				size, ctxt->vcpu, NULL);
+		if (rc != X86EMUL_CONTINUE)
 			return rc;
-		fc->start = linear;
-		fc->end = linear + size;
+		fc->end += size;
 	}
-	*dest = fc->data[linear - fc->start];
-	return 0;
+	*dest = fc->data[eip - fc->start];
+	return X86EMUL_CONTINUE;
 }
 
 static int do_insn_fetch(struct x86_emulate_ctxt *ctxt,
			 struct x86_emulate_ops *ops,
			 unsigned long eip, void *dest, unsigned size)
 {
-	int rc = 0;
+	int rc;
 
 	/* x86 instructions are limited to 15 bytes. */
-	if (eip + size - ctxt->decode.eip_orig > 15)
+	if (eip + size - ctxt->eip > 15)
 		return X86EMUL_UNHANDLEABLE;
-	eip += ctxt->cs_base;
 	while (size--) {
 		rc = do_fetch_insn_byte(ctxt, ops, eip++, dest++);
-		if (rc)
+		if (rc != X86EMUL_CONTINUE)
 			return rc;
 	}
-	return 0;
+	return X86EMUL_CONTINUE;
 }
 
 /*
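The fetch cache now grows incrementally: x86_decode_insn() primes fc->start = fc->end = eip, and each miss appends only the bytes past fc->end instead of refilling the whole window, so bytes of the current instruction are never refetched. The caching logic reduced to standalone form; fetch_byte() is a stand-in for ops->fetch and fakes guest memory:

    #include <stdio.h>
    #include <string.h>

    struct fetch_cache {
        unsigned char data[15];        /* x86 insns are at most 15 bytes */
        unsigned long start, end;
    };

    /* Stand-in for the real fetch: append one byte of fake guest memory. */
    static void fetch_byte(struct fetch_cache *fc, unsigned long eip)
    {
        if (eip == fc->end) {          /* extend the cached window by one */
            fc->data[fc->end - fc->start] = (unsigned char)(0x90 + eip);
            fc->end++;
        }
    }

    int main(void)
    {
        struct fetch_cache fc;
        unsigned long eip = 100;

        memset(&fc, 0, sizeof(fc));
        fc.start = fc.end = eip;       /* mirrors x86_decode_insn() setup */
        for (int i = 0; i < 4; i++, eip++) {
            fetch_byte(&fc, eip);
            printf("byte %lu = %02x\n", eip, fc.data[eip - fc.start]);
        }
        return 0;
    }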
@@ -702,7 +710,7 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 	*address = 0;
 	rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2,
 			   ctxt->vcpu, NULL);
-	if (rc)
+	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes,
 			   ctxt->vcpu, NULL);
@@ -782,7 +790,7 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 	struct decode_cache *c = &ctxt->decode;
 	u8 sib;
 	int index_reg = 0, base_reg = 0, scale;
-	int rc = 0;
+	int rc = X86EMUL_CONTINUE;
 
 	if (c->rex_prefix) {
 		c->modrm_reg = (c->rex_prefix & 4) << 1;	/* REX.R */
@@ -895,7 +903,7 @@ static int decode_abs(struct x86_emulate_ctxt *ctxt,
 		      struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
-	int rc = 0;
+	int rc = X86EMUL_CONTINUE;
 
 	switch (c->ad_bytes) {
 	case 2:
@@ -916,14 +924,18 @@ int
 x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
-	int rc = 0;
+	int rc = X86EMUL_CONTINUE;
 	int mode = ctxt->mode;
 	int def_op_bytes, def_ad_bytes, group;
 
-	/* Shadow copy of register state. Committed on successful emulation. */
 
+	/* we cannot decode insn before we complete previous rep insn */
+	WARN_ON(ctxt->restart);
+
+	/* Shadow copy of register state. Committed on successful emulation. */
 	memset(c, 0, sizeof(struct decode_cache));
-	c->eip = c->eip_orig = kvm_rip_read(ctxt->vcpu);
+	c->eip = ctxt->eip;
+	c->fetch.start = c->fetch.end = c->eip;
 	ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
 	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
 
@@ -1015,11 +1027,6 @@ done_prefixes:
 		}
 	}
 
-	if (mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
-		kvm_report_emulation_failure(ctxt->vcpu, "invalid x86/64 instruction");
-		return -1;
-	}
-
 	if (c->d & Group) {
 		group = c->d & GroupMask;
 		c->modrm = insn_fetch(u8, 1, c->eip);
@@ -1046,7 +1053,7 @@ done_prefixes:
 		rc = decode_modrm(ctxt, ops);
 	else if (c->d & MemAbs)
 		rc = decode_abs(ctxt, ops);
-	if (rc)
+	if (rc != X86EMUL_CONTINUE)
 		goto done;
 
 	if (!c->has_seg_override)
@@ -1057,6 +1064,10 @@ done_prefixes:
 
 	if (c->ad_bytes != 8)
 		c->modrm_ea = (u32)c->modrm_ea;
+
+	if (c->rip_relative)
+		c->modrm_ea += c->eip;
+
 	/*
 	 * Decode and fetch the source operand: register, memory
 	 * or immediate.
@@ -1091,6 +1102,8 @@ done_prefixes:
 			break;
 		}
 		c->src.type = OP_MEM;
+		c->src.ptr = (unsigned long *)c->modrm_ea;
+		c->src.val = 0;
 		break;
 	case SrcImm:
 	case SrcImmU:
@@ -1139,6 +1152,14 @@ done_prefixes:
 		c->src.bytes = 1;
 		c->src.val = 1;
 		break;
+	case SrcSI:
+		c->src.type = OP_MEM;
+		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+		c->src.ptr = (unsigned long *)
+			register_address(c, seg_override_base(ctxt, c),
+					 c->regs[VCPU_REGS_RSI]);
+		c->src.val = 0;
+		break;
 	}
 
 	/*
@@ -1168,6 +1189,12 @@ done_prefixes:
 		c->src2.bytes = 1;
 		c->src2.val = 1;
 		break;
+	case Src2Mem16:
+		c->src2.type = OP_MEM;
+		c->src2.bytes = 2;
+		c->src2.ptr = (unsigned long *)(c->modrm_ea + c->src.bytes);
+		c->src2.val = 0;
+		break;
 	}
 
 	/* Decode and fetch the destination operand: register or memory. */
@@ -1180,6 +1207,7 @@ done_prefixes:
 			       c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
 		break;
 	case DstMem:
+	case DstMem64:
 		if ((c->d & ModRM) && c->modrm_mod == 3) {
 			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
 			c->dst.type = OP_REG;
@@ -1188,12 +1216,24 @@ done_prefixes:
 			break;
 		}
 		c->dst.type = OP_MEM;
+		c->dst.ptr = (unsigned long *)c->modrm_ea;
+		if ((c->d & DstMask) == DstMem64)
+			c->dst.bytes = 8;
+		else
+			c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+		c->dst.val = 0;
+		if (c->d & BitOp) {
+			unsigned long mask = ~(c->dst.bytes * 8 - 1);
+
+			c->dst.ptr = (void *)c->dst.ptr +
+						   (c->src.val & mask) / 8;
+		}
 		break;
 	case DstAcc:
 		c->dst.type = OP_REG;
-		c->dst.bytes = c->op_bytes;
+		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
 		c->dst.ptr = &c->regs[VCPU_REGS_RAX];
-		switch (c->op_bytes) {
+		switch (c->dst.bytes) {
 		case 1:
 			c->dst.val = *(u8 *)c->dst.ptr;
 			break;
@@ -1203,18 +1243,248 @@ done_prefixes:
 		case 4:
 			c->dst.val = *(u32 *)c->dst.ptr;
 			break;
+		case 8:
+			c->dst.val = *(u64 *)c->dst.ptr;
+			break;
 		}
 		c->dst.orig_val = c->dst.val;
 		break;
+	case DstDI:
+		c->dst.type = OP_MEM;
+		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
+		c->dst.ptr = (unsigned long *)
+			register_address(c, es_base(ctxt),
+					 c->regs[VCPU_REGS_RDI]);
+		c->dst.val = 0;
+		break;
 	}
 
-	if (c->rip_relative)
-		c->modrm_ea += c->eip;
-
 done:
 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
 }
 
+static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
+			   struct x86_emulate_ops *ops,
+			   unsigned int size, unsigned short port,
+			   void *dest)
+{
+	struct read_cache *rc = &ctxt->decode.io_read;
+
+	if (rc->pos == rc->end) { /* refill pio read ahead */
+		struct decode_cache *c = &ctxt->decode;
+		unsigned int in_page, n;
+		unsigned int count = c->rep_prefix ?
+			address_mask(c, c->regs[VCPU_REGS_RCX]) : 1;
+		in_page = (ctxt->eflags & EFLG_DF) ?
+			offset_in_page(c->regs[VCPU_REGS_RDI]) :
+			PAGE_SIZE - offset_in_page(c->regs[VCPU_REGS_RDI]);
+		n = min(min(in_page, (unsigned int)sizeof(rc->data)) / size,
+			count);
+		if (n == 0)
+			n = 1;
+		rc->pos = rc->end = 0;
+		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
+			return 0;
+		rc->end = n * size;
+	}
+
+	memcpy(dest, rc->data + rc->pos, size);
+	rc->pos += size;
+	return 1;
+}
+
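pio_in_emulated() batches REP IN through a read-ahead cache: one backend call fetches up to a buffer's worth of elements, capped by the remaining REP count and by the bytes left in the current page in the copy direction, so a batch never crosses a page boundary. A standalone sketch of the batch-size computation only; batch() is a hypothetical helper and PAGE_SIZE/BUF_SIZE are assumed values:

    #include <stdio.h>

    #define PAGE_SIZE 4096
    #define BUF_SIZE  1024

    static unsigned int batch(unsigned int size, unsigned long rdi,
                              unsigned long count, int df)
    {
        /* Bytes left in the page, in the direction we are moving. */
        unsigned int in_page = df ? rdi % PAGE_SIZE
                                  : PAGE_SIZE - rdi % PAGE_SIZE;
        unsigned int n = in_page < BUF_SIZE ? in_page : BUF_SIZE;

        n /= size;                      /* whole elements only */
        if (n > count)
            n = count;
        return n ? n : 1;               /* always make progress */
    }

    int main(void)
    {
        /* 4-byte ins, RDI 40 bytes before a page end, 100 reps left */
        printf("%u elements\n", batch(4, 4096 - 40, 100, 0)); /* -> 10 */
        return 0;
    }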
+static u32 desc_limit_scaled(struct desc_struct *desc)
+{
+	u32 limit = get_desc_limit(desc);
+
+	return desc->g ? (limit << 12) | 0xfff : limit;
+}
+
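desc_limit_scaled() expands the granularity bit: with G=1 the 20-bit limit counts 4KiB units, so it is shifted left by 12 with the low 12 bits set. Two worked cases as standalone C:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t limit_scaled(uint32_t limit, int g)
    {
        return g ? (limit << 12) | 0xfff : limit;
    }

    int main(void)
    {
        /* A flat 4GiB segment stores limit 0xfffff with G=1. */
        printf("0x%x\n", limit_scaled(0xfffff, 1)); /* 0xffffffff */
        /* Byte granularity: the limit is used as-is. */
        printf("0x%x\n", limit_scaled(0x67, 0));    /* 0x67 */
        return 0;
    }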
+static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+				     struct x86_emulate_ops *ops,
+				     u16 selector, struct desc_ptr *dt)
+{
+	if (selector & 1 << 2) {
+		struct desc_struct desc;
+		memset(dt, 0, sizeof *dt);
+		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
+			return;
+
+		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
+		dt->address = get_desc_base(&desc);
+	} else
+		ops->get_gdt(dt, ctxt->vcpu);
+}
+
+/* allowed just for 8 byte segments */
+static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+				   struct x86_emulate_ops *ops,
+				   u16 selector, struct desc_struct *desc)
+{
+	struct desc_ptr dt;
+	u16 index = selector >> 3;
+	int ret;
+	u32 err;
+	ulong addr;
+
+	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+	if (dt.size < index * 8 + 7) {
+		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+	addr = dt.address + index * 8;
+	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT)
+		kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+	return ret;
+}
+
+/* allowed just for 8 byte segments */
+static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+				    struct x86_emulate_ops *ops,
+				    u16 selector, struct desc_struct *desc)
+{
+	struct desc_ptr dt;
+	u16 index = selector >> 3;
+	u32 err;
+	ulong addr;
+	int ret;
+
+	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+	if (dt.size < index * 8 + 7) {
+		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
+	addr = dt.address + index * 8;
+	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT)
+		kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+	return ret;
+}
+
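Both helpers use the same addressing rule: bits 15:3 of the selector index an array of 8-byte descriptors, bit 2 selects the LDT over the GDT, and the table limit must cover the entire entry (index * 8 + 7). A small standalone illustration of that arithmetic; the selector, base and limit values are made up:

    #include <stdio.h>

    int main(void)
    {
        unsigned short selector = 0x2b;        /* index 5, TI=0 (GDT), RPL=3 */
        unsigned short index = selector >> 3;
        int uses_ldt = selector & (1 << 2);
        unsigned long dt_base = 0xfffd000;     /* hypothetical table base */
        unsigned int dt_limit = 0x7f;          /* 16 entries */

        if (dt_limit < index * 8 + 7)
            printf("#GP(0x%x)\n", selector & 0xfffc);
        else
            printf("%s entry at 0x%lx\n", uses_ldt ? "LDT" : "GDT",
                   dt_base + index * 8);
        return 0;
    }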
+static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+				   struct x86_emulate_ops *ops,
+				   u16 selector, int seg)
+{
+	struct desc_struct seg_desc;
+	u8 dpl, rpl, cpl;
+	unsigned err_vec = GP_VECTOR;
+	u32 err_code = 0;
+	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+	int ret;
+
+	memset(&seg_desc, 0, sizeof seg_desc);
+
+	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
+	    || ctxt->mode == X86EMUL_MODE_REAL) {
+		/* set real mode segment descriptor */
+		set_desc_base(&seg_desc, selector << 4);
+		set_desc_limit(&seg_desc, 0xffff);
+		seg_desc.type = 3;
+		seg_desc.p = 1;
+		seg_desc.s = 1;
+		goto load;
+	}
+
+	/* NULL selector is not valid for TR, CS and SS */
+	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+	    && null_selector)
+		goto exception;
+
+	/* TR should be in GDT only */
+	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+		goto exception;
+
+	if (null_selector) /* for NULL selector skip all following checks */
+		goto load;
+
+	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+
+	err_code = selector & 0xfffc;
+	err_vec = GP_VECTOR;
+
+	/* can't load system descriptor into segment selector */
+	if (seg <= VCPU_SREG_GS && !seg_desc.s)
+		goto exception;
+
+	if (!seg_desc.p) {
+		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
+		goto exception;
+	}
+
+	rpl = selector & 3;
+	dpl = seg_desc.dpl;
+	cpl = ops->cpl(ctxt->vcpu);
+
+	switch (seg) {
+	case VCPU_SREG_SS:
+		/*
+		 * segment is not a writable data segment or segment
+		 * selector's RPL != CPL or segment's DPL != CPL
+		 */
+		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
+			goto exception;
+		break;
+	case VCPU_SREG_CS:
+		if (!(seg_desc.type & 8))
+			goto exception;
+
+		if (seg_desc.type & 4) {
+			/* conforming */
+			if (dpl > cpl)
+				goto exception;
+		} else {
+			/* nonconforming */
+			if (rpl > cpl || dpl != cpl)
+				goto exception;
+		}
+		/* CS(RPL) <- CPL */
+		selector = (selector & 0xfffc) | cpl;
+		break;
+	case VCPU_SREG_TR:
+		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
+			goto exception;
+		break;
+	case VCPU_SREG_LDTR:
+		if (seg_desc.s || seg_desc.type != 2)
+			goto exception;
+		break;
+	default: /* DS, ES, FS, or GS */
+		/*
+		 * segment is not a data or readable code segment or
+		 * ((segment is a data or nonconforming code segment)
+		 * and (both RPL and CPL > DPL))
+		 */
+		if ((seg_desc.type & 0xa) == 0x8 ||
+		    (((seg_desc.type & 0xc) != 0xc) &&
+		     (rpl > dpl && cpl > dpl)))
+			goto exception;
+		break;
+	}
+
+	if (seg_desc.s) {
+		/* mark segment as accessed */
+		seg_desc.type |= 1;
+		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
+		if (ret != X86EMUL_CONTINUE)
+			return ret;
+	}
+load:
+	ops->set_segment_selector(selector, seg, ctxt->vcpu);
+	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
+	return X86EMUL_CONTINUE;
+exception:
+	kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
+	return X86EMUL_PROPAGATE_FAULT;
+}
+
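The switch above follows the SDM's segment-load checks per register class. Taking SS as one concrete case: only a present, writable data segment whose RPL and DPL both equal CPL is accepted. A standalone sketch; ss_ok() is a hypothetical distillation of the branch above (a not-present segment actually raises #SS rather than #GP):

    #include <stdbool.h>
    #include <stdio.h>

    /* type is the 4-bit descriptor type; 0x2 = read/write data segment. */
    static bool ss_ok(unsigned rpl, unsigned dpl, unsigned cpl,
                      unsigned type, bool present)
    {
        if (rpl != cpl || dpl != cpl)
            return false;
        if ((type & 0xa) != 0x2)   /* must be data (bit 3 clear), writable */
            return false;
        return present;            /* not present -> #SS, not #GP */
    }

    int main(void)
    {
        printf("%d\n", ss_ok(0, 0, 0, 0x2, true));  /* 1: accepted */
        printf("%d\n", ss_ok(3, 0, 3, 0x2, true));  /* 0: DPL != CPL */
        return 0;
    }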
 static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
@@ -1251,7 +1521,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	unsigned long val, change_mask;
 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-	int cpl = kvm_x86_ops->get_cpl(ctxt->vcpu);
+	int cpl = ops->cpl(ctxt->vcpu);
 
 	rc = emulate_pop(ctxt, ops, &val, len);
 	if (rc != X86EMUL_CONTINUE)
@@ -1306,10 +1576,10 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
 	int rc;
 
 	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
-	if (rc != 0)
+	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
+	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
 	return rc;
 }
 
@@ -1332,7 +1602,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
 				struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
-	int rc = 0;
+	int rc = X86EMUL_CONTINUE;
 	int reg = VCPU_REGS_RDI;
 
 	while (reg >= VCPU_REGS_RAX) {
@@ -1343,7 +1613,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
 		}
 
 		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
-		if (rc != 0)
+		if (rc != X86EMUL_CONTINUE)
 			break;
 		--reg;
 	}
@@ -1354,12 +1624,8 @@ static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
 				struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
-	int rc;
 
-	rc = emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
-	if (rc != 0)
-		return rc;
-	return 0;
+	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
 }
 
 static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
@@ -1395,7 +1661,6 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
 				struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
-	int rc = 0;
 
 	switch (c->modrm_reg) {
 	case 0 ... 1:	/* test */
@@ -1408,11 +1673,9 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
 		emulate_1op("neg", c->dst, ctxt->eflags);
 		break;
 	default:
-		DPRINTF("Cannot emulate %02x\n", c->b);
-		rc = X86EMUL_UNHANDLEABLE;
-		break;
+		return 0;
 	}
-	return rc;
+	return 1;
 }
 
 static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
@@ -1442,20 +1705,14 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
 		emulate_push(ctxt);
 		break;
 	}
-	return 0;
+	return X86EMUL_CONTINUE;
 }
 
 static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
-			       struct x86_emulate_ops *ops,
-			       unsigned long memop)
+			       struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
-	u64 old, new;
-	int rc;
-
-	rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
-	if (rc != X86EMUL_CONTINUE)
-		return rc;
+	u64 old = c->dst.orig_val;
 
 	if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
 	    ((u32) (old >> 32) != (u32) c->regs[VCPU_REGS_RDX])) {
@@ -1463,17 +1720,13 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
 		c->regs[VCPU_REGS_RAX] = (u32) (old >> 0);
 		c->regs[VCPU_REGS_RDX] = (u32) (old >> 32);
 		ctxt->eflags &= ~EFLG_ZF;
-
 	} else {
-		new = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
+		c->dst.val = ((u64)c->regs[VCPU_REGS_RCX] << 32) |
 		       (u32) c->regs[VCPU_REGS_RBX];
 
-		rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
-		if (rc != X86EMUL_CONTINUE)
-			return rc;
 		ctxt->eflags |= EFLG_ZF;
 	}
-	return 0;
+	return X86EMUL_CONTINUE;
 }
 
 static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
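This pair of hunks reworks CMPXCHG8B emulation: the 64-bit operand is now read by the common decode path into c->dst.orig_val (Group 9 is tagged DstMem64 in the table above), and the exchange result is left in c->dst.val for the shared writeback. The instruction's semantics in standalone form, with made-up register values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t mem = 0x1122334455667788ull;    /* the m64 operand */
        uint32_t eax = 0x55667788, edx = 0x11223344;
        uint32_t ebx = 0xdeadbeef, ecx = 0xcafebabe;
        int zf;

        if ((uint32_t)mem == eax && (uint32_t)(mem >> 32) == edx) {
            mem = ((uint64_t)ecx << 32) | ebx;   /* match: store ECX:EBX */
            zf = 1;
        } else {
            eax = (uint32_t)mem;                 /* miss: load EDX:EAX */
            edx = (uint32_t)(mem >> 32);
            zf = 0;
        }
        printf("zf=%d mem=%016llx\n", zf, (unsigned long long)mem);
        return 0;
    }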
@@ -1484,14 +1737,14 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
 	unsigned long cs;
 
 	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
-	if (rc)
+	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	if (c->op_bytes == 4)
 		c->eip = (u32)c->eip;
 	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
-	if (rc)
+	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
+	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
 	return rc;
 }
 
@@ -1544,7 +1797,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 	default:
 		break;
 	}
-	return 0;
+	return X86EMUL_CONTINUE;
 }
 
 static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
@@ -1598,8 +1851,11 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt)
 	u64 msr_data;
 
 	/* syscall is not available in real mode */
-	if (ctxt->mode == X86EMUL_MODE_REAL || ctxt->mode == X86EMUL_MODE_VM86)
-		return X86EMUL_UNHANDLEABLE;
+	if (ctxt->mode == X86EMUL_MODE_REAL ||
+	    ctxt->mode == X86EMUL_MODE_VM86) {
+		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
 
 	setup_syscalls_segments(ctxt, &cs, &ss);
 
@@ -1649,14 +1905,16 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt)
 	/* inject #GP if in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL) {
 		kvm_inject_gp(ctxt->vcpu, 0);
-		return X86EMUL_UNHANDLEABLE;
+		return X86EMUL_PROPAGATE_FAULT;
 	}
 
 	/* XXX sysenter/sysexit have not been tested in 64bit mode.
 	 * Therefore, we inject an #UD.
 	 */
-	if (ctxt->mode == X86EMUL_MODE_PROT64)
-		return X86EMUL_UNHANDLEABLE;
+	if (ctxt->mode == X86EMUL_MODE_PROT64) {
+		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
 
 	setup_syscalls_segments(ctxt, &cs, &ss);
 
@@ -1711,7 +1969,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
 	    ctxt->mode == X86EMUL_MODE_VM86) {
 		kvm_inject_gp(ctxt->vcpu, 0);
-		return X86EMUL_UNHANDLEABLE;
+		return X86EMUL_PROPAGATE_FAULT;
 	}
 
 	setup_syscalls_segments(ctxt, &cs, &ss);
@@ -1756,7 +2014,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
-static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
+			      struct x86_emulate_ops *ops)
 {
 	int iopl;
 	if (ctxt->mode == X86EMUL_MODE_REAL)
@@ -1764,7 +2023,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
 	if (ctxt->mode == X86EMUL_MODE_VM86)
 		return true;
 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-	return kvm_x86_ops->get_cpl(ctxt->vcpu) > iopl;
+	return ops->cpl(ctxt->vcpu) > iopl;
 }
 
 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
@@ -1801,22 +2060,419 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
 				 struct x86_emulate_ops *ops,
 				 u16 port, u16 len)
 {
-	if (emulator_bad_iopl(ctxt))
+	if (emulator_bad_iopl(ctxt, ops))
 		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
 			return false;
 	return true;
 }
 
+static u32 get_cached_descriptor_base(struct x86_emulate_ctxt *ctxt,
+				      struct x86_emulate_ops *ops,
+				      int seg)
+{
+	struct desc_struct desc;
+	if (ops->get_cached_descriptor(&desc, seg, ctxt->vcpu))
+		return get_desc_base(&desc);
+	else
+		return ~0;
+}
+
+static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
+				struct x86_emulate_ops *ops,
+				struct tss_segment_16 *tss)
+{
+	struct decode_cache *c = &ctxt->decode;
+
+	tss->ip = c->eip;
+	tss->flag = ctxt->eflags;
+	tss->ax = c->regs[VCPU_REGS_RAX];
+	tss->cx = c->regs[VCPU_REGS_RCX];
+	tss->dx = c->regs[VCPU_REGS_RDX];
+	tss->bx = c->regs[VCPU_REGS_RBX];
+	tss->sp = c->regs[VCPU_REGS_RSP];
+	tss->bp = c->regs[VCPU_REGS_RBP];
+	tss->si = c->regs[VCPU_REGS_RSI];
+	tss->di = c->regs[VCPU_REGS_RDI];
+
+	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
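save_state_to_tss16() relies on the 16-bit TSS frame now pulled in via tss.h. A sketch of the expected structure, for orientation only: the field names follow the code above, the offsets follow the SDM's 286 TSS format, and the authoritative definition lives in tss.h:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative only; the emulator uses the definition in tss.h. */
    struct tss_segment_16 {
        uint16_t prev_task_link;
        uint16_t sp0, ss0, sp1, ss1, sp2, ss2;   /* privilege-level stacks */
        uint16_t ip, flag;
        uint16_t ax, cx, dx, bx, sp, bp, si, di;
        uint16_t es, cs, ss, ds;
        uint16_t ldt;
    };

    int main(void)
    {
        printf("%zu\n", sizeof(struct tss_segment_16)); /* 44 bytes */
        return 0;
    }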
+static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+				 struct x86_emulate_ops *ops,
+				 struct tss_segment_16 *tss)
+{
+	struct decode_cache *c = &ctxt->decode;
+	int ret;
+
+	c->eip = tss->ip;
+	ctxt->eflags = tss->flag | 2;
+	c->regs[VCPU_REGS_RAX] = tss->ax;
+	c->regs[VCPU_REGS_RCX] = tss->cx;
+	c->regs[VCPU_REGS_RDX] = tss->dx;
+	c->regs[VCPU_REGS_RBX] = tss->bx;
+	c->regs[VCPU_REGS_RSP] = tss->sp;
+	c->regs[VCPU_REGS_RBP] = tss->bp;
+	c->regs[VCPU_REGS_RSI] = tss->si;
+	c->regs[VCPU_REGS_RDI] = tss->di;
+
+	/*
+	 * SDM says that segment selectors are loaded before segment
+	 * descriptors
+	 */
+	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
+	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+
+	/*
+	 * Now load segment descriptors. If a fault happens at this stage
+	 * it is handled in the context of the new task
+	 */
+	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+
+	return X86EMUL_CONTINUE;
+}
+
+static int task_switch_16(struct x86_emulate_ctxt *ctxt,
+			  struct x86_emulate_ops *ops,
+			  u16 tss_selector, u16 old_tss_sel,
+			  ulong old_tss_base, struct desc_struct *new_desc)
+{
+	struct tss_segment_16 tss_seg;
+	int ret;
+	u32 err, new_tss_base = get_desc_base(new_desc);
+
+	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			    &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		return ret;
+	}
+
+	save_state_to_tss16(ctxt, ops, &tss_seg);
+
+	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			     &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		return ret;
+	}
+
+	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			    &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+		return ret;
+	}
+
+	if (old_tss_sel != 0xffff) {
+		tss_seg.prev_task_link = old_tss_sel;
+
+		ret = ops->write_std(new_tss_base,
+				     &tss_seg.prev_task_link,
+				     sizeof tss_seg.prev_task_link,
+				     ctxt->vcpu, &err);
+		if (ret == X86EMUL_PROPAGATE_FAULT) {
+			/* FIXME: need to provide precise fault address */
+			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+			return ret;
+		}
+	}
+
+	return load_state_from_tss16(ctxt, ops, &tss_seg);
+}
+
+static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
+				struct x86_emulate_ops *ops,
+				struct tss_segment_32 *tss)
+{
+	struct decode_cache *c = &ctxt->decode;
+
+	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
+	tss->eip = c->eip;
+	tss->eflags = ctxt->eflags;
+	tss->eax = c->regs[VCPU_REGS_RAX];
+	tss->ecx = c->regs[VCPU_REGS_RCX];
+	tss->edx = c->regs[VCPU_REGS_RDX];
+	tss->ebx = c->regs[VCPU_REGS_RBX];
+	tss->esp = c->regs[VCPU_REGS_RSP];
+	tss->ebp = c->regs[VCPU_REGS_RBP];
+	tss->esi = c->regs[VCPU_REGS_RSI];
+	tss->edi = c->regs[VCPU_REGS_RDI];
+
+	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
+	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
+	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
+static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+				 struct x86_emulate_ops *ops,
+				 struct tss_segment_32 *tss)
+{
+	struct decode_cache *c = &ctxt->decode;
+	int ret;
+
+	ops->set_cr(3, tss->cr3, ctxt->vcpu);
+	c->eip = tss->eip;
+	ctxt->eflags = tss->eflags | 2;
+	c->regs[VCPU_REGS_RAX] = tss->eax;
+	c->regs[VCPU_REGS_RCX] = tss->ecx;
+	c->regs[VCPU_REGS_RDX] = tss->edx;
+	c->regs[VCPU_REGS_RBX] = tss->ebx;
+	c->regs[VCPU_REGS_RSP] = tss->esp;
+	c->regs[VCPU_REGS_RBP] = tss->ebp;
+	c->regs[VCPU_REGS_RSI] = tss->esi;
+	c->regs[VCPU_REGS_RDI] = tss->edi;
+
+	/*
+	 * SDM says that segment selectors are loaded before segment
+	 * descriptors
+	 */
+	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
+	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
+	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
+
+	/*
+	 * Now load segment descriptors. If a fault happens at this stage
+	 * it is handled in the context of the new task
+	 */
+	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+
+	return X86EMUL_CONTINUE;
+}
+
+static int task_switch_32(struct x86_emulate_ctxt *ctxt,
+			  struct x86_emulate_ops *ops,
+			  u16 tss_selector, u16 old_tss_sel,
+			  ulong old_tss_base, struct desc_struct *new_desc)
+{
+	struct tss_segment_32 tss_seg;
+	int ret;
+	u32 err, new_tss_base = get_desc_base(new_desc);
+
+	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			    &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		return ret;
+	}
+
+	save_state_to_tss32(ctxt, ops, &tss_seg);
+
+	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			     &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		return ret;
+	}
+
+	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			    &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+		return ret;
+	}
+
+	if (old_tss_sel != 0xffff) {
+		tss_seg.prev_task_link = old_tss_sel;
+
+		ret = ops->write_std(new_tss_base,
+				     &tss_seg.prev_task_link,
+				     sizeof tss_seg.prev_task_link,
+				     ctxt->vcpu, &err);
+		if (ret == X86EMUL_PROPAGATE_FAULT) {
+			/* FIXME: need to provide precise fault address */
+			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+			return ret;
+		}
+	}
+
+	return load_state_from_tss32(ctxt, ops, &tss_seg);
+}
+
+static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
+				   struct x86_emulate_ops *ops,
+				   u16 tss_selector, int reason,
+				   bool has_error_code, u32 error_code)
+{
+	struct desc_struct curr_tss_desc, next_tss_desc;
+	int ret;
+	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
+	ulong old_tss_base =
+		get_cached_descriptor_base(ctxt, ops, VCPU_SREG_TR);
+	u32 desc_limit;
+
+	/* FIXME: old_tss_base == ~0 ? */
+
+	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+
+	/* FIXME: check that next_tss_desc is tss */
+
+	if (reason != TASK_SWITCH_IRET) {
+		if ((tss_selector & 3) > next_tss_desc.dpl ||
+		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
+			kvm_inject_gp(ctxt->vcpu, 0);
+			return X86EMUL_PROPAGATE_FAULT;
+		}
+	}
+
+	desc_limit = desc_limit_scaled(&next_tss_desc);
+	if (!next_tss_desc.p ||
+	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
+	     desc_limit < 0x2b)) {
+		kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
+				      tss_selector & 0xfffc);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
+	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
+		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
+		write_segment_descriptor(ctxt, ops, old_tss_sel,
+					 &curr_tss_desc);
+	}
+
+	if (reason == TASK_SWITCH_IRET)
+		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
+
+	/* set back link to prev task only if NT bit is set in eflags
+	   note that old_tss_sel is not used after this point */
+	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
+		old_tss_sel = 0xffff;
+
+	if (next_tss_desc.type & 8)
+		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
+				     old_tss_base, &next_tss_desc);
+	else
+		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
+				     old_tss_base, &next_tss_desc);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+
+	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
+		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
+
+	if (reason != TASK_SWITCH_IRET) {
+		next_tss_desc.type |= (1 << 1); /* set busy flag */
+		write_segment_descriptor(ctxt, ops, tss_selector,
+					 &next_tss_desc);
+	}
+
+	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
+	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
+	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
+
+	if (has_error_code) {
+		struct decode_cache *c = &ctxt->decode;
+
+		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
+		c->lock_prefix = 0;
+		c->src.val = (unsigned long) error_code;
+		emulate_push(ctxt);
+	}
+
+	return ret;
+}
+
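Ordering matters in emulator_do_task_switch(): the busy flag is cleared on the old TSS only for IRET and JMP, NT is cleared on IRET and set for CALL and gate-initiated switches, the back link is written only when NT will be set, and the new TSS is marked busy unless the switch came from IRET. A standalone table of those decisions, purely illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    enum reason { SW_CALL, SW_IRET, SW_JMP, SW_GATE };

    int main(void)
    {
        for (enum reason r = SW_CALL; r <= SW_GATE; r++) {
            bool clear_old_busy = (r == SW_IRET || r == SW_JMP);
            bool link_back      = (r == SW_CALL || r == SW_GATE); /* NT set */
            bool set_new_busy   = (r != SW_IRET);
            printf("reason=%d: clear_busy=%d link=%d set_busy=%d\n",
                   (int)r, clear_old_busy, link_back, set_new_busy);
        }
        return 0;
    }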
+int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+			 struct x86_emulate_ops *ops,
+			 u16 tss_selector, int reason,
+			 bool has_error_code, u32 error_code)
+{
+	struct decode_cache *c = &ctxt->decode;
+	int rc;
+
+	memset(c, 0, sizeof(struct decode_cache));
+	c->eip = ctxt->eip;
+	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
+	c->dst.type = OP_NONE;
+
+	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
+				     has_error_code, error_code);
+
+	if (rc == X86EMUL_CONTINUE) {
+		memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
+		kvm_rip_write(ctxt->vcpu, c->eip);
+		rc = writeback(ctxt, ops);
+	}
+
+	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
+}
+
+static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base,
+			    int reg, struct operand *op)
+{
+	struct decode_cache *c = &ctxt->decode;
+	int df = (ctxt->eflags & EFLG_DF) ? -1 : 1;
+
+	register_address_increment(c, &c->regs[reg], df * op->bytes);
+	op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]);
+}
+
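string_addr_inc() advances a string operand by the element size in the direction selected by EFLAGS.DF, then recomputes the flat pointer from segment base plus register. The same arithmetic standalone, with made-up base and register values:

    #include <stdio.h>

    int main(void)
    {
        unsigned long base = 0x10000;   /* hypothetical segment base */
        unsigned long rsi = 0x100;
        int df = 0;                     /* EFLAGS.DF clear: move upward */
        int bytes = 4;                  /* e.g. movsd element size */

        rsi += (df ? -1 : 1) * bytes;   /* register_address_increment */
        printf("next element at 0x%lx\n", base + rsi); /* 0x10104 */
        return 0;
    }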
 int
 x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
-	unsigned long memop = 0;
 	u64 msr_data;
-	unsigned long saved_eip = 0;
 	struct decode_cache *c = &ctxt->decode;
-	unsigned int port;
-	int io_dir_in;
-	int rc = 0;
+	int rc = X86EMUL_CONTINUE;
+	int saved_dst_type = c->dst.type;
 
 	ctxt->interruptibility = 0;
 
@@ -1826,26 +2482,30 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
1826 | */ | 2482 | */ |
1827 | 2483 | ||
1828 | memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); | 2484 | memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs); |
1829 | saved_eip = c->eip; | 2485 | |
2486 | if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) { | ||
2487 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | ||
2488 | goto done; | ||
2489 | } | ||
1830 | 2490 | ||
1831 | /* LOCK prefix is allowed only with some instructions */ | 2491 | /* LOCK prefix is allowed only with some instructions */ |
1832 | if (c->lock_prefix && !(c->d & Lock)) { | 2492 | if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) { |
1833 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | 2493 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); |
1834 | goto done; | 2494 | goto done; |
1835 | } | 2495 | } |
1836 | 2496 | ||
1837 | /* Privileged instruction can be executed only in CPL=0 */ | 2497 | /* Privileged instruction can be executed only in CPL=0 */ |
1838 | if ((c->d & Priv) && kvm_x86_ops->get_cpl(ctxt->vcpu)) { | 2498 | if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) { |
1839 | kvm_inject_gp(ctxt->vcpu, 0); | 2499 | kvm_inject_gp(ctxt->vcpu, 0); |
1840 | goto done; | 2500 | goto done; |
1841 | } | 2501 | } |
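Editor's note: the two checks above are decode-time preconditions: the new LOCK test also requires a memory destination, and the privilege test now goes through the ops->cpl() callback. A hedged sketch of the decision, with local flag names:

    #include <stdbool.h>

    #define D_LOCK (1u << 0)
    #define D_PRIV (1u << 1)

    enum fault { FAULT_NONE, FAULT_UD, FAULT_GP };

    static enum fault check_prefixes(unsigned int d_flags, bool lock_prefix,
                                     bool dst_is_mem, unsigned int cpl)
    {
        if (lock_prefix && (!(d_flags & D_LOCK) || !dst_is_mem))
            return FAULT_UD;   /* #UD: LOCK on a non-lockable encoding */

        if ((d_flags & D_PRIV) && cpl != 0)
            return FAULT_GP;   /* #GP(0): privileged at CPL > 0 */

        return FAULT_NONE;
    }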
1842 | 2502 | ||
1843 | if (((c->d & ModRM) && (c->modrm_mod != 3)) || (c->d & MemAbs)) | ||
1844 | memop = c->modrm_ea; | ||
1845 | |||
1846 | if (c->rep_prefix && (c->d & String)) { | 2503 | if (c->rep_prefix && (c->d & String)) { |
2504 | ctxt->restart = true; | ||
1847 | /* All REP prefixes have the same first termination condition */ | 2505 | /* All REP prefixes have the same first termination condition */ |
1848 | if (c->regs[VCPU_REGS_RCX] == 0) { | 2506 | if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) { |
2507 | string_done: | ||
2508 | ctxt->restart = false; | ||
1849 | kvm_rip_write(ctxt->vcpu, c->eip); | 2509 | kvm_rip_write(ctxt->vcpu, c->eip); |
1850 | goto done; | 2510 | goto done; |
1851 | } | 2511 | } |
@@ -1857,25 +2517,18 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
1857 | * - if REPNE/REPNZ and ZF = 1 then done | 2517 | * - if REPNE/REPNZ and ZF = 1 then done |
1858 | */ | 2518 | */ |
1859 | if ((c->b == 0xa6) || (c->b == 0xa7) || | 2519 | if ((c->b == 0xa6) || (c->b == 0xa7) || |
1860 | (c->b == 0xae) || (c->b == 0xaf)) { | 2520 | (c->b == 0xae) || (c->b == 0xaf)) { |
1861 | if ((c->rep_prefix == REPE_PREFIX) && | 2521 | if ((c->rep_prefix == REPE_PREFIX) && |
1862 | ((ctxt->eflags & EFLG_ZF) == 0)) { | 2522 | ((ctxt->eflags & EFLG_ZF) == 0)) |
1863 | kvm_rip_write(ctxt->vcpu, c->eip); | 2523 | goto string_done; |
1864 | goto done; | ||
1865 | } | ||
1866 | if ((c->rep_prefix == REPNE_PREFIX) && | 2524 | if ((c->rep_prefix == REPNE_PREFIX) && |
1867 | ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) { | 2525 | ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) |
1868 | kvm_rip_write(ctxt->vcpu, c->eip); | 2526 | goto string_done; |
1869 | goto done; | ||
1870 | } | ||
1871 | } | 2527 | } |
1872 | c->regs[VCPU_REGS_RCX]--; | 2528 | c->eip = ctxt->eip; |
1873 | c->eip = kvm_rip_read(ctxt->vcpu); | ||
1874 | } | 2529 | } |
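Editor's note: the REP block above now masks RCX by the address size before testing it, and folds the CMPS/SCAS ZF conditions into a shared early-out. A standalone sketch of the termination test:

    #include <stdint.h>
    #include <stdbool.h>

    #define EFLG_ZF (1u << 6)

    enum rep { REP_NONE, REPE, REPNE };

    static bool rep_done(uint64_t rcx, uint64_t ad_mask, enum rep prefix,
                         bool is_cmps_or_scas, uint32_t eflags)
    {
        if ((rcx & ad_mask) == 0)       /* count exhausted */
            return true;

        if (is_cmps_or_scas) {
            if (prefix == REPE && !(eflags & EFLG_ZF))
                return true;            /* REPE/REPZ stops on ZF = 0 */
            if (prefix == REPNE && (eflags & EFLG_ZF))
                return true;            /* REPNE/REPNZ stops on ZF = 1 */
        }
        return false;
    }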
1875 | 2530 | ||
1876 | if (c->src.type == OP_MEM) { | 2531 | if (c->src.type == OP_MEM) { |
1877 | c->src.ptr = (unsigned long *)memop; | ||
1878 | c->src.val = 0; | ||
1879 | rc = ops->read_emulated((unsigned long)c->src.ptr, | 2532 | rc = ops->read_emulated((unsigned long)c->src.ptr, |
1880 | &c->src.val, | 2533 | &c->src.val, |
1881 | c->src.bytes, | 2534 | c->src.bytes, |
@@ -1885,29 +2538,25 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
1885 | c->src.orig_val = c->src.val; | 2538 | c->src.orig_val = c->src.val; |
1886 | } | 2539 | } |
1887 | 2540 | ||
2541 | if (c->src2.type == OP_MEM) { | ||
2542 | rc = ops->read_emulated((unsigned long)c->src2.ptr, | ||
2543 | &c->src2.val, | ||
2544 | c->src2.bytes, | ||
2545 | ctxt->vcpu); | ||
2546 | if (rc != X86EMUL_CONTINUE) | ||
2547 | goto done; | ||
2548 | } | ||
2549 | |||
1888 | if ((c->d & DstMask) == ImplicitOps) | 2550 | if ((c->d & DstMask) == ImplicitOps) |
1889 | goto special_insn; | 2551 | goto special_insn; |
1890 | 2552 | ||
1891 | 2553 | ||
1892 | if (c->dst.type == OP_MEM) { | 2554 | if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { |
1893 | c->dst.ptr = (unsigned long *)memop; | 2555 | /* optimisation - avoid slow emulated read if Mov */ |
1894 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | 2556 | rc = ops->read_emulated((unsigned long)c->dst.ptr, &c->dst.val, |
1895 | c->dst.val = 0; | 2557 | c->dst.bytes, ctxt->vcpu); |
1896 | if (c->d & BitOp) { | 2558 | if (rc != X86EMUL_CONTINUE) |
1897 | unsigned long mask = ~(c->dst.bytes * 8 - 1); | 2559 | goto done; |
1898 | |||
1899 | c->dst.ptr = (void *)c->dst.ptr + | ||
1900 | (c->src.val & mask) / 8; | ||
1901 | } | ||
1902 | if (!(c->d & Mov)) { | ||
1903 | /* optimisation - avoid slow emulated read */ | ||
1904 | rc = ops->read_emulated((unsigned long)c->dst.ptr, | ||
1905 | &c->dst.val, | ||
1906 | c->dst.bytes, | ||
1907 | ctxt->vcpu); | ||
1908 | if (rc != X86EMUL_CONTINUE) | ||
1909 | goto done; | ||
1910 | } | ||
1911 | } | 2560 | } |
1912 | c->dst.orig_val = c->dst.val; | 2561 | c->dst.orig_val = c->dst.val; |
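Editor's note: the rewritten destination setup keeps the old optimisation in a single spot: a pure Mov overwrites the whole destination, so the slow emulated read can be skipped. A sketch of the shortcut, with a stand-in read callback:

    struct dst_op { unsigned long addr; unsigned long val; unsigned bytes; };

    typedef int (*read_fn)(unsigned long addr, void *val, unsigned bytes);

    static int prime_dst(struct dst_op *dst, int is_mov, read_fn read_emulated)
    {
        if (is_mov)
            return 0; /* value is fully replaced; no read needed */

        /* read-modify-write ops need the old value for the ALU and flags */
        return read_emulated(dst->addr, &dst->val, dst->bytes);
    }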
1913 | 2562 | ||
@@ -1926,7 +2575,7 @@ special_insn: | |||
1926 | break; | 2575 | break; |
1927 | case 0x07: /* pop es */ | 2576 | case 0x07: /* pop es */ |
1928 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); | 2577 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); |
1929 | if (rc != 0) | 2578 | if (rc != X86EMUL_CONTINUE) |
1930 | goto done; | 2579 | goto done; |
1931 | break; | 2580 | break; |
1932 | case 0x08 ... 0x0d: | 2581 | case 0x08 ... 0x0d: |
@@ -1945,7 +2594,7 @@ special_insn: | |||
1945 | break; | 2594 | break; |
1946 | case 0x17: /* pop ss */ | 2595 | case 0x17: /* pop ss */ |
1947 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); | 2596 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); |
1948 | if (rc != 0) | 2597 | if (rc != X86EMUL_CONTINUE) |
1949 | goto done; | 2598 | goto done; |
1950 | break; | 2599 | break; |
1951 | case 0x18 ... 0x1d: | 2600 | case 0x18 ... 0x1d: |
@@ -1957,7 +2606,7 @@ special_insn: | |||
1957 | break; | 2606 | break; |
1958 | case 0x1f: /* pop ds */ | 2607 | case 0x1f: /* pop ds */ |
1959 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); | 2608 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); |
1960 | if (rc != 0) | 2609 | if (rc != X86EMUL_CONTINUE) |
1961 | goto done; | 2610 | goto done; |
1962 | break; | 2611 | break; |
1963 | case 0x20 ... 0x25: | 2612 | case 0x20 ... 0x25: |
@@ -1988,7 +2637,7 @@ special_insn: | |||
1988 | case 0x58 ... 0x5f: /* pop reg */ | 2637 | case 0x58 ... 0x5f: /* pop reg */ |
1989 | pop_instruction: | 2638 | pop_instruction: |
1990 | rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes); | 2639 | rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes); |
1991 | if (rc != 0) | 2640 | if (rc != X86EMUL_CONTINUE) |
1992 | goto done; | 2641 | goto done; |
1993 | break; | 2642 | break; |
1994 | case 0x60: /* pusha */ | 2643 | case 0x60: /* pusha */ |
@@ -1996,7 +2645,7 @@ special_insn: | |||
1996 | break; | 2645 | break; |
1997 | case 0x61: /* popa */ | 2646 | case 0x61: /* popa */ |
1998 | rc = emulate_popa(ctxt, ops); | 2647 | rc = emulate_popa(ctxt, ops); |
1999 | if (rc != 0) | 2648 | if (rc != X86EMUL_CONTINUE) |
2000 | goto done; | 2649 | goto done; |
2001 | break; | 2650 | break; |
2002 | case 0x63: /* movsxd */ | 2651 | case 0x63: /* movsxd */ |
@@ -2010,47 +2659,29 @@ special_insn: | |||
2010 | break; | 2659 | break; |
2011 | case 0x6c: /* insb */ | 2660 | case 0x6c: /* insb */ |
2012 | case 0x6d: /* insw/insd */ | 2661 | case 0x6d: /* insw/insd */ |
2662 | c->dst.bytes = min(c->dst.bytes, 4u); | ||
2013 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], | 2663 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], |
2014 | (c->d & ByteOp) ? 1 : c->op_bytes)) { | 2664 | c->dst.bytes)) { |
2015 | kvm_inject_gp(ctxt->vcpu, 0); | 2665 | kvm_inject_gp(ctxt->vcpu, 0); |
2016 | goto done; | 2666 | goto done; |
2017 | } | 2667 | } |
2018 | if (kvm_emulate_pio_string(ctxt->vcpu, | 2668 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, |
2019 | 1, | 2669 | c->regs[VCPU_REGS_RDX], &c->dst.val)) |
2020 | (c->d & ByteOp) ? 1 : c->op_bytes, | 2670 | goto done; /* IO is needed, skip writeback */ |
2021 | c->rep_prefix ? | 2671 | break; |
2022 | address_mask(c, c->regs[VCPU_REGS_RCX]) : 1, | ||
2023 | (ctxt->eflags & EFLG_DF), | ||
2024 | register_address(c, es_base(ctxt), | ||
2025 | c->regs[VCPU_REGS_RDI]), | ||
2026 | c->rep_prefix, | ||
2027 | c->regs[VCPU_REGS_RDX]) == 0) { | ||
2028 | c->eip = saved_eip; | ||
2029 | return -1; | ||
2030 | } | ||
2031 | return 0; | ||
2032 | case 0x6e: /* outsb */ | 2672 | case 0x6e: /* outsb */ |
2033 | case 0x6f: /* outsw/outsd */ | 2673 | case 0x6f: /* outsw/outsd */ |
2674 | c->src.bytes = min(c->src.bytes, 4u); | ||
2034 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], | 2675 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], |
2035 | (c->d & ByteOp) ? 1 : c->op_bytes)) { | 2676 | c->src.bytes)) { |
2036 | kvm_inject_gp(ctxt->vcpu, 0); | 2677 | kvm_inject_gp(ctxt->vcpu, 0); |
2037 | goto done; | 2678 | goto done; |
2038 | } | 2679 | } |
2039 | if (kvm_emulate_pio_string(ctxt->vcpu, | 2680 | ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX], |
2040 | 0, | 2681 | &c->src.val, 1, ctxt->vcpu); |
2041 | (c->d & ByteOp) ? 1 : c->op_bytes, | 2682 | |
2042 | c->rep_prefix ? | 2683 | c->dst.type = OP_NONE; /* nothing to writeback */ |
2043 | address_mask(c, c->regs[VCPU_REGS_RCX]) : 1, | 2684 | break; |
2044 | (ctxt->eflags & EFLG_DF), | ||
2045 | register_address(c, | ||
2046 | seg_override_base(ctxt, c), | ||
2047 | c->regs[VCPU_REGS_RSI]), | ||
2048 | c->rep_prefix, | ||
2049 | c->regs[VCPU_REGS_RDX]) == 0) { | ||
2050 | c->eip = saved_eip; | ||
2051 | return -1; | ||
2052 | } | ||
2053 | return 0; | ||
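Editor's note: the INS/OUTS rework replaces the monolithic kvm_emulate_pio_string() call with per-iteration helpers: clamp the transfer to 4 bytes, consult the TSS I/O bitmap, then move one element between the port and the DS:RSI / ES:RDI operand. A hedged sketch of one INS step; the callback names are stand-ins:

    #include <stdint.h>
    #include <stdbool.h>

    typedef bool (*io_permitted_fn)(uint16_t port, unsigned bytes);
    typedef bool (*pio_in_fn)(unsigned bytes, uint16_t port, unsigned long *val);

    static int emulate_ins_once(uint16_t port, unsigned bytes,
                                unsigned long *dst_val,
                                io_permitted_fn permitted, pio_in_fn pio_in)
    {
        if (bytes > 4)
            bytes = 4;                /* port I/O is at most a dword */

        if (!permitted(port, bytes))
            return -1;                /* caller injects #GP(0) */

        if (!pio_in(bytes, port, dst_val))
            return 1;                 /* exit to userspace for the I/O */

        return 0;                     /* value landed; writeback stores it */
    }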
2054 | case 0x70 ... 0x7f: /* jcc (short) */ | 2685 | case 0x70 ... 0x7f: /* jcc (short) */ |
2055 | if (test_cc(c->b, ctxt->eflags)) | 2686 | if (test_cc(c->b, ctxt->eflags)) |
2056 | jmp_rel(c, c->src.val); | 2687 | jmp_rel(c, c->src.val); |
@@ -2107,12 +2738,11 @@ special_insn: | |||
2107 | case 0x8c: { /* mov r/m, sreg */ | 2738 | case 0x8c: { /* mov r/m, sreg */ |
2108 | struct kvm_segment segreg; | 2739 | struct kvm_segment segreg; |
2109 | 2740 | ||
2110 | if (c->modrm_reg <= 5) | 2741 | if (c->modrm_reg <= VCPU_SREG_GS) |
2111 | kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg); | 2742 | kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg); |
2112 | else { | 2743 | else { |
2113 | printk(KERN_INFO "0x8c: Invalid segreg in modrm byte 0x%02x\n", | 2744 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); |
2114 | c->modrm); | 2745 | goto done; |
2115 | goto cannot_emulate; | ||
2116 | } | 2746 | } |
2117 | c->dst.val = segreg.selector; | 2747 | c->dst.val = segreg.selector; |
2118 | break; | 2748 | break; |
@@ -2132,16 +2762,16 @@ special_insn: | |||
2132 | } | 2762 | } |
2133 | 2763 | ||
2134 | if (c->modrm_reg == VCPU_SREG_SS) | 2764 | if (c->modrm_reg == VCPU_SREG_SS) |
2135 | toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS); | 2765 | toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_MOV_SS); |
2136 | 2766 | ||
2137 | rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg); | 2767 | rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg); |
2138 | 2768 | ||
2139 | c->dst.type = OP_NONE; /* Disable writeback. */ | 2769 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2140 | break; | 2770 | break; |
2141 | } | 2771 | } |
2142 | case 0x8f: /* pop (sole member of Grp1a) */ | 2772 | case 0x8f: /* pop (sole member of Grp1a) */ |
2143 | rc = emulate_grp1a(ctxt, ops); | 2773 | rc = emulate_grp1a(ctxt, ops); |
2144 | if (rc != 0) | 2774 | if (rc != X86EMUL_CONTINUE) |
2145 | goto done; | 2775 | goto done; |
2146 | break; | 2776 | break; |
2147 | case 0x90: /* nop / xchg r8,rax */ | 2777 | case 0x90: /* nop / xchg r8,rax */ |
@@ -2175,89 +2805,16 @@ special_insn: | |||
2175 | c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX]; | 2805 | c->dst.val = (unsigned long)c->regs[VCPU_REGS_RAX]; |
2176 | break; | 2806 | break; |
2177 | case 0xa4 ... 0xa5: /* movs */ | 2807 | case 0xa4 ... 0xa5: /* movs */ |
2178 | c->dst.type = OP_MEM; | 2808 | goto mov; |
2179 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
2180 | c->dst.ptr = (unsigned long *)register_address(c, | ||
2181 | es_base(ctxt), | ||
2182 | c->regs[VCPU_REGS_RDI]); | ||
2183 | rc = ops->read_emulated(register_address(c, | ||
2184 | seg_override_base(ctxt, c), | ||
2185 | c->regs[VCPU_REGS_RSI]), | ||
2186 | &c->dst.val, | ||
2187 | c->dst.bytes, ctxt->vcpu); | ||
2188 | if (rc != X86EMUL_CONTINUE) | ||
2189 | goto done; | ||
2190 | register_address_increment(c, &c->regs[VCPU_REGS_RSI], | ||
2191 | (ctxt->eflags & EFLG_DF) ? -c->dst.bytes | ||
2192 | : c->dst.bytes); | ||
2193 | register_address_increment(c, &c->regs[VCPU_REGS_RDI], | ||
2194 | (ctxt->eflags & EFLG_DF) ? -c->dst.bytes | ||
2195 | : c->dst.bytes); | ||
2196 | break; | ||
2197 | case 0xa6 ... 0xa7: /* cmps */ | 2809 | case 0xa6 ... 0xa7: /* cmps */ |
2198 | c->src.type = OP_NONE; /* Disable writeback. */ | ||
2199 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
2200 | c->src.ptr = (unsigned long *)register_address(c, | ||
2201 | seg_override_base(ctxt, c), | ||
2202 | c->regs[VCPU_REGS_RSI]); | ||
2203 | rc = ops->read_emulated((unsigned long)c->src.ptr, | ||
2204 | &c->src.val, | ||
2205 | c->src.bytes, | ||
2206 | ctxt->vcpu); | ||
2207 | if (rc != X86EMUL_CONTINUE) | ||
2208 | goto done; | ||
2209 | |||
2210 | c->dst.type = OP_NONE; /* Disable writeback. */ | 2810 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2211 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
2212 | c->dst.ptr = (unsigned long *)register_address(c, | ||
2213 | es_base(ctxt), | ||
2214 | c->regs[VCPU_REGS_RDI]); | ||
2215 | rc = ops->read_emulated((unsigned long)c->dst.ptr, | ||
2216 | &c->dst.val, | ||
2217 | c->dst.bytes, | ||
2218 | ctxt->vcpu); | ||
2219 | if (rc != X86EMUL_CONTINUE) | ||
2220 | goto done; | ||
2221 | |||
2222 | DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); | 2811 | DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); |
2223 | 2812 | goto cmp; | |
2224 | emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags); | ||
2225 | |||
2226 | register_address_increment(c, &c->regs[VCPU_REGS_RSI], | ||
2227 | (ctxt->eflags & EFLG_DF) ? -c->src.bytes | ||
2228 | : c->src.bytes); | ||
2229 | register_address_increment(c, &c->regs[VCPU_REGS_RDI], | ||
2230 | (ctxt->eflags & EFLG_DF) ? -c->dst.bytes | ||
2231 | : c->dst.bytes); | ||
2232 | |||
2233 | break; | ||
2234 | case 0xaa ... 0xab: /* stos */ | 2813 | case 0xaa ... 0xab: /* stos */ |
2235 | c->dst.type = OP_MEM; | ||
2236 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
2237 | c->dst.ptr = (unsigned long *)register_address(c, | ||
2238 | es_base(ctxt), | ||
2239 | c->regs[VCPU_REGS_RDI]); | ||
2240 | c->dst.val = c->regs[VCPU_REGS_RAX]; | 2814 | c->dst.val = c->regs[VCPU_REGS_RAX]; |
2241 | register_address_increment(c, &c->regs[VCPU_REGS_RDI], | ||
2242 | (ctxt->eflags & EFLG_DF) ? -c->dst.bytes | ||
2243 | : c->dst.bytes); | ||
2244 | break; | 2815 | break; |
2245 | case 0xac ... 0xad: /* lods */ | 2816 | case 0xac ... 0xad: /* lods */ |
2246 | c->dst.type = OP_REG; | 2817 | goto mov; |
2247 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
2248 | c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; | ||
2249 | rc = ops->read_emulated(register_address(c, | ||
2250 | seg_override_base(ctxt, c), | ||
2251 | c->regs[VCPU_REGS_RSI]), | ||
2252 | &c->dst.val, | ||
2253 | c->dst.bytes, | ||
2254 | ctxt->vcpu); | ||
2255 | if (rc != X86EMUL_CONTINUE) | ||
2256 | goto done; | ||
2257 | register_address_increment(c, &c->regs[VCPU_REGS_RSI], | ||
2258 | (ctxt->eflags & EFLG_DF) ? -c->dst.bytes | ||
2259 | : c->dst.bytes); | ||
2260 | break; | ||
2261 | case 0xae ... 0xaf: /* scas */ | 2818 | case 0xae ... 0xaf: /* scas */ |
2262 | DPRINTF("Urk! I don't handle SCAS.\n"); | 2819 | DPRINTF("Urk! I don't handle SCAS.\n"); |
2263 | goto cannot_emulate; | 2820 | goto cannot_emulate; |
@@ -2277,7 +2834,7 @@ special_insn: | |||
2277 | break; | 2834 | break; |
2278 | case 0xcb: /* ret far */ | 2835 | case 0xcb: /* ret far */ |
2279 | rc = emulate_ret_far(ctxt, ops); | 2836 | rc = emulate_ret_far(ctxt, ops); |
2280 | if (rc) | 2837 | if (rc != X86EMUL_CONTINUE) |
2281 | goto done; | 2838 | goto done; |
2282 | break; | 2839 | break; |
2283 | case 0xd0 ... 0xd1: /* Grp2 */ | 2840 | case 0xd0 ... 0xd1: /* Grp2 */ |
@@ -2290,14 +2847,10 @@ special_insn: | |||
2290 | break; | 2847 | break; |
2291 | case 0xe4: /* inb */ | 2848 | case 0xe4: /* inb */ |
2292 | case 0xe5: /* in */ | 2849 | case 0xe5: /* in */ |
2293 | port = c->src.val; | 2850 | goto do_io_in; |
2294 | io_dir_in = 1; | ||
2295 | goto do_io; | ||
2296 | case 0xe6: /* outb */ | 2851 | case 0xe6: /* outb */ |
2297 | case 0xe7: /* out */ | 2852 | case 0xe7: /* out */ |
2298 | port = c->src.val; | 2853 | goto do_io_out; |
2299 | io_dir_in = 0; | ||
2300 | goto do_io; | ||
2301 | case 0xe8: /* call (near) */ { | 2854 | case 0xe8: /* call (near) */ { |
2302 | long int rel = c->src.val; | 2855 | long int rel = c->src.val; |
2303 | c->src.val = (unsigned long) c->eip; | 2856 | c->src.val = (unsigned long) c->eip; |
@@ -2308,8 +2861,9 @@ special_insn: | |||
2308 | case 0xe9: /* jmp rel */ | 2861 | case 0xe9: /* jmp rel */ |
2309 | goto jmp; | 2862 | goto jmp; |
2310 | case 0xea: /* jmp far */ | 2863 | case 0xea: /* jmp far */ |
2311 | if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, | 2864 | jump_far: |
2312 | VCPU_SREG_CS)) | 2865 | if (load_segment_descriptor(ctxt, ops, c->src2.val, |
2866 | VCPU_SREG_CS)) | ||
2313 | goto done; | 2867 | goto done; |
2314 | 2868 | ||
2315 | c->eip = c->src.val; | 2869 | c->eip = c->src.val; |
@@ -2321,25 +2875,29 @@ special_insn: | |||
2321 | break; | 2875 | break; |
2322 | case 0xec: /* in al,dx */ | 2876 | case 0xec: /* in al,dx */ |
2323 | case 0xed: /* in (e/r)ax,dx */ | 2877 | case 0xed: /* in (e/r)ax,dx */ |
2324 | port = c->regs[VCPU_REGS_RDX]; | 2878 | c->src.val = c->regs[VCPU_REGS_RDX]; |
2325 | io_dir_in = 1; | 2879 | do_io_in: |
2326 | goto do_io; | 2880 | c->dst.bytes = min(c->dst.bytes, 4u); |
2881 | if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { | ||
2882 | kvm_inject_gp(ctxt->vcpu, 0); | ||
2883 | goto done; | ||
2884 | } | ||
2885 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, | ||
2886 | &c->dst.val)) | ||
2887 | goto done; /* IO is needed */ | ||
2888 | break; | ||
2327 | case 0xee: /* out al,dx */ | 2889 | case 0xee: /* out al,dx */ |
2328 | case 0xef: /* out (e/r)ax,dx */ | 2890 | case 0xef: /* out (e/r)ax,dx */ |
2329 | port = c->regs[VCPU_REGS_RDX]; | 2891 | c->src.val = c->regs[VCPU_REGS_RDX]; |
2330 | io_dir_in = 0; | 2892 | do_io_out: |
2331 | do_io: | 2893 | c->dst.bytes = min(c->dst.bytes, 4u); |
2332 | if (!emulator_io_permited(ctxt, ops, port, | 2894 | if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { |
2333 | (c->d & ByteOp) ? 1 : c->op_bytes)) { | ||
2334 | kvm_inject_gp(ctxt->vcpu, 0); | 2895 | kvm_inject_gp(ctxt->vcpu, 0); |
2335 | goto done; | 2896 | goto done; |
2336 | } | 2897 | } |
2337 | if (kvm_emulate_pio(ctxt->vcpu, io_dir_in, | 2898 | ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1, |
2338 | (c->d & ByteOp) ? 1 : c->op_bytes, | 2899 | ctxt->vcpu); |
2339 | port) != 0) { | 2900 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2340 | c->eip = saved_eip; | ||
2341 | goto cannot_emulate; | ||
2342 | } | ||
2343 | break; | 2901 | break; |
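Editor's note: after this change the immediate-port forms (0xE4/0xE5, 0xE6/0xE7) and the DX forms (0xEC/0xED, 0xEE/0xEF) funnel into the same do_io_in/do_io_out tails; the only difference is where the port number comes from, which the decoder leaves in c->src.val. A minimal sketch of the port selection:

    #include <stdint.h>

    static uint16_t in_port(uint8_t opcode, uint8_t imm8, uint64_t rdx)
    {
        /* 0xE4/0xE5 take an imm8 port; 0xEC/0xED take DX */
        return (opcode == 0xe4 || opcode == 0xe5) ? imm8 : (uint16_t)rdx;
    }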
2344 | case 0xf4: /* hlt */ | 2902 | case 0xf4: /* hlt */ |
2345 | ctxt->vcpu->arch.halt_request = 1; | 2903 | ctxt->vcpu->arch.halt_request = 1; |
@@ -2350,16 +2908,15 @@ special_insn: | |||
2350 | c->dst.type = OP_NONE; /* Disable writeback. */ | 2908 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2351 | break; | 2909 | break; |
2352 | case 0xf6 ... 0xf7: /* Grp3 */ | 2910 | case 0xf6 ... 0xf7: /* Grp3 */ |
2353 | rc = emulate_grp3(ctxt, ops); | 2911 | if (!emulate_grp3(ctxt, ops)) |
2354 | if (rc != 0) | 2912 | goto cannot_emulate; |
2355 | goto done; | ||
2356 | break; | 2913 | break; |
2357 | case 0xf8: /* clc */ | 2914 | case 0xf8: /* clc */ |
2358 | ctxt->eflags &= ~EFLG_CF; | 2915 | ctxt->eflags &= ~EFLG_CF; |
2359 | c->dst.type = OP_NONE; /* Disable writeback. */ | 2916 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2360 | break; | 2917 | break; |
2361 | case 0xfa: /* cli */ | 2918 | case 0xfa: /* cli */ |
2362 | if (emulator_bad_iopl(ctxt)) | 2919 | if (emulator_bad_iopl(ctxt, ops)) |
2363 | kvm_inject_gp(ctxt->vcpu, 0); | 2920 | kvm_inject_gp(ctxt->vcpu, 0); |
2364 | else { | 2921 | else { |
2365 | ctxt->eflags &= ~X86_EFLAGS_IF; | 2922 | ctxt->eflags &= ~X86_EFLAGS_IF; |
@@ -2367,10 +2924,10 @@ special_insn: | |||
2367 | } | 2924 | } |
2368 | break; | 2925 | break; |
2369 | case 0xfb: /* sti */ | 2926 | case 0xfb: /* sti */ |
2370 | if (emulator_bad_iopl(ctxt)) | 2927 | if (emulator_bad_iopl(ctxt, ops)) |
2371 | kvm_inject_gp(ctxt->vcpu, 0); | 2928 | kvm_inject_gp(ctxt->vcpu, 0); |
2372 | else { | 2929 | else { |
2373 | toggle_interruptibility(ctxt, X86_SHADOW_INT_STI); | 2930 | toggle_interruptibility(ctxt, KVM_X86_SHADOW_INT_STI); |
2374 | ctxt->eflags |= X86_EFLAGS_IF; | 2931 | ctxt->eflags |= X86_EFLAGS_IF; |
2375 | c->dst.type = OP_NONE; /* Disable writeback. */ | 2932 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2376 | } | 2933 | } |
@@ -2383,28 +2940,55 @@ special_insn: | |||
2383 | ctxt->eflags |= EFLG_DF; | 2940 | ctxt->eflags |= EFLG_DF; |
2384 | c->dst.type = OP_NONE; /* Disable writeback. */ | 2941 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2385 | break; | 2942 | break; |
2386 | case 0xfe ... 0xff: /* Grp4/Grp5 */ | 2943 | case 0xfe: /* Grp4 */ |
2944 | grp45: | ||
2387 | rc = emulate_grp45(ctxt, ops); | 2945 | rc = emulate_grp45(ctxt, ops); |
2388 | if (rc != 0) | 2946 | if (rc != X86EMUL_CONTINUE) |
2389 | goto done; | 2947 | goto done; |
2390 | break; | 2948 | break; |
2949 | case 0xff: /* Grp5 */ | ||
2950 | if (c->modrm_reg == 5) | ||
2951 | goto jump_far; | ||
2952 | goto grp45; | ||
2391 | } | 2953 | } |
2392 | 2954 | ||
2393 | writeback: | 2955 | writeback: |
2394 | rc = writeback(ctxt, ops); | 2956 | rc = writeback(ctxt, ops); |
2395 | if (rc != 0) | 2957 | if (rc != X86EMUL_CONTINUE) |
2396 | goto done; | 2958 | goto done; |
2397 | 2959 | ||
2960 | /* | ||
2961 | * restore dst type in case the decoding will be reused | ||
2962 | * (happens for string instructions) | ||
2963 | */ | ||
2964 | c->dst.type = saved_dst_type; | ||
2965 | |||
2966 | if ((c->d & SrcMask) == SrcSI) | ||
2967 | string_addr_inc(ctxt, seg_override_base(ctxt, c), VCPU_REGS_RSI, | ||
2968 | &c->src); | ||
2969 | |||
2970 | if ((c->d & DstMask) == DstDI) | ||
2971 | string_addr_inc(ctxt, es_base(ctxt), VCPU_REGS_RDI, &c->dst); | ||
2972 | |||
2973 | if (c->rep_prefix && (c->d & String)) { | ||
2974 | struct read_cache *rc = &ctxt->decode.io_read; | ||
2975 | register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1); | ||
2976 | /* | ||
2977 | * Re-enter guest when pio read ahead buffer is empty or, | ||
2978 | * if it is not used, after every 1024 iterations. | ||
2979 | */ | ||
2980 | if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) || | ||
2981 | (rc->end != 0 && rc->end == rc->pos)) | ||
2982 | ctxt->restart = false; | ||
2983 | } | ||
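Editor's note: this is the new re-entry heuristic for restartable string instructions: with no pio read-ahead buffer in play, drop out of the emulation loop every 1024 counts so pending events can be serviced; with a buffer, drop out exactly when it drains. A sketch of the same test, inverted to answer "keep going?":

    #include <stdint.h>
    #include <stdbool.h>

    struct read_cache_s { unsigned pos, end; }; /* end == 0: unused */

    static bool keep_restarting(uint64_t rcx, const struct read_cache_s *rc)
    {
        if (rc->end == 0)
            return (rcx & 0x3ff) != 0;   /* pause on every 1024th count */
        return rc->end != rc->pos;       /* pause once the buffer is empty */
    }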
2984 | |||
2398 | /* Commit shadow register state. */ | 2985 | /* Commit shadow register state. */ |
2399 | memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs); | 2986 | memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs); |
2400 | kvm_rip_write(ctxt->vcpu, c->eip); | 2987 | kvm_rip_write(ctxt->vcpu, c->eip); |
2988 | ops->set_rflags(ctxt->vcpu, ctxt->eflags); | ||
2401 | 2989 | ||
2402 | done: | 2990 | done: |
2403 | if (rc == X86EMUL_UNHANDLEABLE) { | 2991 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; |
2404 | c->eip = saved_eip; | ||
2405 | return -1; | ||
2406 | } | ||
2407 | return 0; | ||
2408 | 2992 | ||
2409 | twobyte_insn: | 2993 | twobyte_insn: |
2410 | switch (c->b) { | 2994 | switch (c->b) { |
@@ -2418,18 +3002,18 @@ twobyte_insn: | |||
2418 | goto cannot_emulate; | 3002 | goto cannot_emulate; |
2419 | 3003 | ||
2420 | rc = kvm_fix_hypercall(ctxt->vcpu); | 3004 | rc = kvm_fix_hypercall(ctxt->vcpu); |
2421 | if (rc) | 3005 | if (rc != X86EMUL_CONTINUE) |
2422 | goto done; | 3006 | goto done; |
2423 | 3007 | ||
2424 | /* Let the processor re-execute the fixed hypercall */ | 3008 | /* Let the processor re-execute the fixed hypercall */ |
2425 | c->eip = kvm_rip_read(ctxt->vcpu); | 3009 | c->eip = ctxt->eip; |
2426 | /* Disable writeback. */ | 3010 | /* Disable writeback. */ |
2427 | c->dst.type = OP_NONE; | 3011 | c->dst.type = OP_NONE; |
2428 | break; | 3012 | break; |
2429 | case 2: /* lgdt */ | 3013 | case 2: /* lgdt */ |
2430 | rc = read_descriptor(ctxt, ops, c->src.ptr, | 3014 | rc = read_descriptor(ctxt, ops, c->src.ptr, |
2431 | &size, &address, c->op_bytes); | 3015 | &size, &address, c->op_bytes); |
2432 | if (rc) | 3016 | if (rc != X86EMUL_CONTINUE) |
2433 | goto done; | 3017 | goto done; |
2434 | realmode_lgdt(ctxt->vcpu, size, address); | 3018 | realmode_lgdt(ctxt->vcpu, size, address); |
2435 | /* Disable writeback. */ | 3019 | /* Disable writeback. */ |
@@ -2440,7 +3024,7 @@ twobyte_insn: | |||
2440 | switch (c->modrm_rm) { | 3024 | switch (c->modrm_rm) { |
2441 | case 1: | 3025 | case 1: |
2442 | rc = kvm_fix_hypercall(ctxt->vcpu); | 3026 | rc = kvm_fix_hypercall(ctxt->vcpu); |
2443 | if (rc) | 3027 | if (rc != X86EMUL_CONTINUE) |
2444 | goto done; | 3028 | goto done; |
2445 | break; | 3029 | break; |
2446 | default: | 3030 | default: |
@@ -2450,7 +3034,7 @@ twobyte_insn: | |||
2450 | rc = read_descriptor(ctxt, ops, c->src.ptr, | 3034 | rc = read_descriptor(ctxt, ops, c->src.ptr, |
2451 | &size, &address, | 3035 | &size, &address, |
2452 | c->op_bytes); | 3036 | c->op_bytes); |
2453 | if (rc) | 3037 | if (rc != X86EMUL_CONTINUE) |
2454 | goto done; | 3038 | goto done; |
2455 | realmode_lidt(ctxt->vcpu, size, address); | 3039 | realmode_lidt(ctxt->vcpu, size, address); |
2456 | } | 3040 | } |
@@ -2459,15 +3043,18 @@ twobyte_insn: | |||
2459 | break; | 3043 | break; |
2460 | case 4: /* smsw */ | 3044 | case 4: /* smsw */ |
2461 | c->dst.bytes = 2; | 3045 | c->dst.bytes = 2; |
2462 | c->dst.val = realmode_get_cr(ctxt->vcpu, 0); | 3046 | c->dst.val = ops->get_cr(0, ctxt->vcpu); |
2463 | break; | 3047 | break; |
2464 | case 6: /* lmsw */ | 3048 | case 6: /* lmsw */ |
2465 | realmode_lmsw(ctxt->vcpu, (u16)c->src.val, | 3049 | ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) | |
2466 | &ctxt->eflags); | 3050 | (c->src.val & 0x0f), ctxt->vcpu); |
2467 | c->dst.type = OP_NONE; | 3051 | c->dst.type = OP_NONE; |
2468 | break; | 3052 | break; |
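Editor's note: smsw/lmsw now go through the get_cr/set_cr callbacks instead of the realmode helpers; as coded above, lmsw replaces only the low four CR0 bits from the source operand and smsw reads CR0 back as a 16-bit value. A sketch of exactly the arithmetic shown:

    #include <stdint.h>

    static uint64_t lmsw(uint64_t cr0, uint16_t src)
    {
        return (cr0 & ~0x0full) | (src & 0x0f); /* PE, MP, EM, TS */
    }

    static uint16_t smsw(uint64_t cr0)
    {
        return (uint16_t)cr0; /* machine status word = low word of CR0 */
    }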
3053 | case 5: /* not defined */ | ||
3054 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | ||
3055 | goto done; | ||
2469 | case 7: /* invlpg */ | 3056 | case 7: /* invlpg */
2470 | emulate_invlpg(ctxt->vcpu, memop); | 3057 | emulate_invlpg(ctxt->vcpu, c->modrm_ea); |
2471 | /* Disable writeback. */ | 3058 | /* Disable writeback. */ |
2472 | c->dst.type = OP_NONE; | 3059 | c->dst.type = OP_NONE; |
2473 | break; | 3060 | break; |
@@ -2493,54 +3080,54 @@ twobyte_insn: | |||
2493 | c->dst.type = OP_NONE; | 3080 | c->dst.type = OP_NONE; |
2494 | break; | 3081 | break; |
2495 | case 0x20: /* mov cr, reg */ | 3082 | case 0x20: /* mov cr, reg */ |
2496 | if (c->modrm_mod != 3) | 3083 | switch (c->modrm_reg) { |
2497 | goto cannot_emulate; | 3084 | case 1: |
2498 | c->regs[c->modrm_rm] = | 3085 | case 5 ... 7: |
2499 | realmode_get_cr(ctxt->vcpu, c->modrm_reg); | 3086 | case 9 ... 15: |
3087 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); | ||
3088 | goto done; | ||
3089 | } | ||
3090 | c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu); | ||
2500 | c->dst.type = OP_NONE; /* no writeback */ | 3091 | c->dst.type = OP_NONE; /* no writeback */ |
2501 | break; | 3092 | break; |
2502 | case 0x21: /* mov from dr to reg */ | 3093 | case 0x21: /* mov from dr to reg */ |
2503 | if (c->modrm_mod != 3) | 3094 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && |
2504 | goto cannot_emulate; | 3095 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { |
2505 | rc = emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]); | 3096 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); |
2506 | if (rc) | 3097 | goto done; |
2507 | goto cannot_emulate; | 3098 | } |
3099 | emulator_get_dr(ctxt, c->modrm_reg, &c->regs[c->modrm_rm]); | ||
2508 | c->dst.type = OP_NONE; /* no writeback */ | 3100 | c->dst.type = OP_NONE; /* no writeback */ |
2509 | break; | 3101 | break; |
2510 | case 0x22: /* mov reg, cr */ | 3102 | case 0x22: /* mov reg, cr */ |
2511 | if (c->modrm_mod != 3) | 3103 | ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu); |
2512 | goto cannot_emulate; | ||
2513 | realmode_set_cr(ctxt->vcpu, | ||
2514 | c->modrm_reg, c->modrm_val, &ctxt->eflags); | ||
2515 | c->dst.type = OP_NONE; | 3104 | c->dst.type = OP_NONE; |
2516 | break; | 3105 | break; |
2517 | case 0x23: /* mov from reg to dr */ | 3106 | case 0x23: /* mov from reg to dr */ |
2518 | if (c->modrm_mod != 3) | 3107 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && |
2519 | goto cannot_emulate; | 3108 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { |
2520 | rc = emulator_set_dr(ctxt, c->modrm_reg, | 3109 | kvm_queue_exception(ctxt->vcpu, UD_VECTOR); |
2521 | c->regs[c->modrm_rm]); | 3110 | goto done; |
2522 | if (rc) | 3111 | } |
2523 | goto cannot_emulate; | 3112 | emulator_set_dr(ctxt, c->modrm_reg, c->regs[c->modrm_rm]); |
2524 | c->dst.type = OP_NONE; /* no writeback */ | 3113 | c->dst.type = OP_NONE; /* no writeback */ |
2525 | break; | 3114 | break; |
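Editor's note: both debug-register moves now implement the CR4.DE rule instead of bailing out: when debug extensions are enabled, the DR4/DR5 encodings raise #UD rather than aliasing to DR6/DR7. The check, as a standalone predicate:

    #include <stdbool.h>
    #include <stdint.h>

    #define X86_CR4_DE (1u << 3)

    static bool dr_access_is_ud(uint64_t cr4, unsigned dr)
    {
        return (cr4 & X86_CR4_DE) && (dr == 4 || dr == 5);
    }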
2526 | case 0x30: | 3115 | case 0x30: |
2527 | /* wrmsr */ | 3116 | /* wrmsr */ |
2528 | msr_data = (u32)c->regs[VCPU_REGS_RAX] | 3117 | msr_data = (u32)c->regs[VCPU_REGS_RAX] |
2529 | | ((u64)c->regs[VCPU_REGS_RDX] << 32); | 3118 | | ((u64)c->regs[VCPU_REGS_RDX] << 32); |
2530 | rc = kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data); | 3119 | if (kvm_set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) { |
2531 | if (rc) { | ||
2532 | kvm_inject_gp(ctxt->vcpu, 0); | 3120 | kvm_inject_gp(ctxt->vcpu, 0); |
2533 | c->eip = kvm_rip_read(ctxt->vcpu); | 3121 | goto done; |
2534 | } | 3122 | } |
2535 | rc = X86EMUL_CONTINUE; | 3123 | rc = X86EMUL_CONTINUE; |
2536 | c->dst.type = OP_NONE; | 3124 | c->dst.type = OP_NONE; |
2537 | break; | 3125 | break; |
2538 | case 0x32: | 3126 | case 0x32: |
2539 | /* rdmsr */ | 3127 | /* rdmsr */ |
2540 | rc = kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data); | 3128 | if (kvm_get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) { |
2541 | if (rc) { | ||
2542 | kvm_inject_gp(ctxt->vcpu, 0); | 3129 | kvm_inject_gp(ctxt->vcpu, 0); |
2543 | c->eip = kvm_rip_read(ctxt->vcpu); | 3130 | goto done; |
2544 | } else { | 3131 | } else { |
2545 | c->regs[VCPU_REGS_RAX] = (u32)msr_data; | 3132 | c->regs[VCPU_REGS_RAX] = (u32)msr_data; |
2546 | c->regs[VCPU_REGS_RDX] = msr_data >> 32; | 3133 | c->regs[VCPU_REGS_RDX] = msr_data >> 32; |
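Editor's note: the WRMSR/RDMSR cases above use the architectural register convention: the 64-bit MSR value travels split across EDX:EAX, and a failed access now injects #GP instead of silently continuing. A sketch of the pack/unpack step:

    #include <stdint.h>

    static uint64_t msr_from_edx_eax(uint64_t rax, uint64_t rdx)
    {
        return (uint32_t)rax | ((uint64_t)(uint32_t)rdx << 32);
    }

    static void msr_to_edx_eax(uint64_t val, uint64_t *rax, uint64_t *rdx)
    {
        *rax = (uint32_t)val;        /* low half in EAX */
        *rdx = val >> 32;            /* high half in EDX */
    }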
@@ -2577,7 +3164,7 @@ twobyte_insn: | |||
2577 | break; | 3164 | break; |
2578 | case 0xa1: /* pop fs */ | 3165 | case 0xa1: /* pop fs */ |
2579 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); | 3166 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); |
2580 | if (rc != 0) | 3167 | if (rc != X86EMUL_CONTINUE) |
2581 | goto done; | 3168 | goto done; |
2582 | break; | 3169 | break; |
2583 | case 0xa3: | 3170 | case 0xa3: |
@@ -2596,7 +3183,7 @@ twobyte_insn: | |||
2596 | break; | 3183 | break; |
2597 | case 0xa9: /* pop gs */ | 3184 | case 0xa9: /* pop gs */ |
2598 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); | 3185 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); |
2599 | if (rc != 0) | 3186 | if (rc != X86EMUL_CONTINUE) |
2600 | goto done; | 3187 | goto done; |
2601 | break; | 3188 | break; |
2602 | case 0xab: | 3189 | case 0xab: |
@@ -2668,16 +3255,14 @@ twobyte_insn: | |||
2668 | (u64) c->src.val; | 3255 | (u64) c->src.val; |
2669 | break; | 3256 | break; |
2670 | case 0xc7: /* Grp9 (cmpxchg8b) */ | 3257 | case 0xc7: /* Grp9 (cmpxchg8b) */ |
2671 | rc = emulate_grp9(ctxt, ops, memop); | 3258 | rc = emulate_grp9(ctxt, ops); |
2672 | if (rc != 0) | 3259 | if (rc != X86EMUL_CONTINUE) |
2673 | goto done; | 3260 | goto done; |
2674 | c->dst.type = OP_NONE; | ||
2675 | break; | 3261 | break; |
2676 | } | 3262 | } |
2677 | goto writeback; | 3263 | goto writeback; |
2678 | 3264 | ||
2679 | cannot_emulate: | 3265 | cannot_emulate: |
2680 | DPRINTF("Cannot emulate %02x\n", c->b); | 3266 | DPRINTF("Cannot emulate %02x\n", c->b); |
2681 | c->eip = saved_eip; | ||
2682 | return -1; | 3267 | return -1; |
2683 | } | 3268 | } |