Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r-- | arch/x86/kvm/emulate.c | 3945 |
1 file changed, 2482 insertions(+), 1463 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 66ca98aafdd6..adc98675cda0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -9,7 +9,7 @@ | |||
9 | * privileged instructions: | 9 | * privileged instructions: |
10 | * | 10 | * |
11 | * Copyright (C) 2006 Qumranet | 11 | * Copyright (C) 2006 Qumranet |
12 | * Copyright 2010 Red Hat, Inc. and/or its affilates. | 12 | * Copyright 2010 Red Hat, Inc. and/or its affiliates. |
13 | * | 13 | * |
14 | * Avi Kivity <avi@qumranet.com> | 14 | * Avi Kivity <avi@qumranet.com> |
15 | * Yaniv Kamay <yaniv@qumranet.com> | 15 | * Yaniv Kamay <yaniv@qumranet.com> |
@@ -20,16 +20,8 @@ | |||
20 | * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 | 20 | * From: xen-unstable 10676:af9809f51f81a3c43f276f00c81a52ef558afda4 |
21 | */ | 21 | */ |
22 | 22 | ||
23 | #ifndef __KERNEL__ | ||
24 | #include <stdio.h> | ||
25 | #include <stdint.h> | ||
26 | #include <public/xen.h> | ||
27 | #define DPRINTF(_f, _a ...) printf(_f , ## _a) | ||
28 | #else | ||
29 | #include <linux/kvm_host.h> | 23 | #include <linux/kvm_host.h> |
30 | #include "kvm_cache_regs.h" | 24 | #include "kvm_cache_regs.h" |
31 | #define DPRINTF(x...) do {} while (0) | ||
32 | #endif | ||
33 | #include <linux/module.h> | 25 | #include <linux/module.h> |
34 | #include <asm/kvm_emulate.h> | 26 | #include <asm/kvm_emulate.h> |
35 | 27 | ||
@@ -51,39 +43,50 @@ | |||
51 | #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */ | 43 | #define ImplicitOps (1<<1) /* Implicit in opcode. No generic decode. */ |
52 | #define DstReg (2<<1) /* Register operand. */ | 44 | #define DstReg (2<<1) /* Register operand. */ |
53 | #define DstMem (3<<1) /* Memory operand. */ | 45 | #define DstMem (3<<1) /* Memory operand. */ |
54 | #define DstAcc (4<<1) /* Destination Accumulator */ | 46 | #define DstAcc (4<<1) /* Destination Accumulator */ |
55 | #define DstDI (5<<1) /* Destination is in ES:(E)DI */ | 47 | #define DstDI (5<<1) /* Destination is in ES:(E)DI */ |
56 | #define DstMem64 (6<<1) /* 64bit memory operand */ | 48 | #define DstMem64 (6<<1) /* 64bit memory operand */ |
57 | #define DstMask (7<<1) | 49 | #define DstImmUByte (7<<1) /* 8-bit unsigned immediate operand */ |
50 | #define DstDX (8<<1) /* Destination is in DX register */ | ||
51 | #define DstMask (0xf<<1) | ||
58 | /* Source operand type. */ | 52 | /* Source operand type. */ |
59 | #define SrcNone (0<<4) /* No source operand. */ | 53 | #define SrcNone (0<<5) /* No source operand. */ |
60 | #define SrcImplicit (0<<4) /* Source operand is implicit in the opcode. */ | 54 | #define SrcReg (1<<5) /* Register operand. */ |
61 | #define SrcReg (1<<4) /* Register operand. */ | 55 | #define SrcMem (2<<5) /* Memory operand. */ |
62 | #define SrcMem (2<<4) /* Memory operand. */ | 56 | #define SrcMem16 (3<<5) /* Memory operand (16-bit). */ |
63 | #define SrcMem16 (3<<4) /* Memory operand (16-bit). */ | 57 | #define SrcMem32 (4<<5) /* Memory operand (32-bit). */ |
64 | #define SrcMem32 (4<<4) /* Memory operand (32-bit). */ | 58 | #define SrcImm (5<<5) /* Immediate operand. */ |
65 | #define SrcImm (5<<4) /* Immediate operand. */ | 59 | #define SrcImmByte (6<<5) /* 8-bit sign-extended immediate operand. */ |
66 | #define SrcImmByte (6<<4) /* 8-bit sign-extended immediate operand. */ | 60 | #define SrcOne (7<<5) /* Implied '1' */ |
67 | #define SrcOne (7<<4) /* Implied '1' */ | 61 | #define SrcImmUByte (8<<5) /* 8-bit unsigned immediate operand. */ |
68 | #define SrcImmUByte (8<<4) /* 8-bit unsigned immediate operand. */ | 62 | #define SrcImmU (9<<5) /* Immediate operand, unsigned */ |
69 | #define SrcImmU (9<<4) /* Immediate operand, unsigned */ | 63 | #define SrcSI (0xa<<5) /* Source is in the DS:RSI */ |
70 | #define SrcSI (0xa<<4) /* Source is in the DS:RSI */ | 64 | #define SrcImmFAddr (0xb<<5) /* Source is immediate far address */ |
71 | #define SrcImmFAddr (0xb<<4) /* Source is immediate far address */ | 65 | #define SrcMemFAddr (0xc<<5) /* Source is far address in memory */ |
72 | #define SrcMemFAddr (0xc<<4) /* Source is far address in memory */ | 66 | #define SrcAcc (0xd<<5) /* Source Accumulator */ |
73 | #define SrcAcc (0xd<<4) /* Source Accumulator */ | 67 | #define SrcImmU16 (0xe<<5) /* Immediate operand, unsigned, 16 bits */ |
74 | #define SrcMask (0xf<<4) | 68 | #define SrcDX (0xf<<5) /* Source is in DX register */ |
69 | #define SrcMask (0xf<<5) | ||
75 | /* Generic ModRM decode. */ | 70 | /* Generic ModRM decode. */ |
76 | #define ModRM (1<<8) | 71 | #define ModRM (1<<9) |
77 | /* Destination is only written; never read. */ | 72 | /* Destination is only written; never read. */ |
78 | #define Mov (1<<9) | 73 | #define Mov (1<<10) |
79 | #define BitOp (1<<10) | 74 | #define BitOp (1<<11) |
80 | #define MemAbs (1<<11) /* Memory operand is absolute displacement */ | 75 | #define MemAbs (1<<12) /* Memory operand is absolute displacement */ |
81 | #define String (1<<12) /* String instruction (rep capable) */ | 76 | #define String (1<<13) /* String instruction (rep capable) */ |
82 | #define Stack (1<<13) /* Stack instruction (push/pop) */ | 77 | #define Stack (1<<14) /* Stack instruction (push/pop) */ |
83 | #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */ | 78 | #define GroupMask (7<<15) /* Opcode uses one of the group mechanisms */ |
84 | #define GroupDual (1<<15) /* Alternate decoding of mod == 3 */ | 79 | #define Group (1<<15) /* Bits 3:5 of modrm byte extend opcode */ |
85 | #define GroupMask 0xff /* Group number stored in bits 0:7 */ | 80 | #define GroupDual (2<<15) /* Alternate decoding of mod == 3 */ |
81 | #define Prefix (3<<15) /* Instruction varies with 66/f2/f3 prefix */ | ||
82 | #define RMExt (4<<15) /* Opcode extension in ModRM r/m if mod == 3 */ | ||
83 | #define Sse (1<<18) /* SSE Vector instruction */ | ||
86 | /* Misc flags */ | 84 | /* Misc flags */ |
85 | #define Prot (1<<21) /* instruction generates #UD if not in prot-mode */ | ||
86 | #define VendorSpecific (1<<22) /* Vendor specific instruction */ | ||
87 | #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */ | ||
88 | #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */ | ||
89 | #define Undefined (1<<25) /* No Such Instruction */ | ||
87 | #define Lock (1<<26) /* lock prefix is allowed for the instruction */ | 90 | #define Lock (1<<26) /* lock prefix is allowed for the instruction */ |
88 | #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ | 91 | #define Priv (1<<27) /* instruction generates #GP if current CPL != 0 */ |
89 | #define No64 (1<<28) | 92 | #define No64 (1<<28) |
@@ -92,285 +95,40 @@ | |||
92 | #define Src2CL (1<<29) | 95 | #define Src2CL (1<<29) |
93 | #define Src2ImmByte (2<<29) | 96 | #define Src2ImmByte (2<<29) |
94 | #define Src2One (3<<29) | 97 | #define Src2One (3<<29) |
98 | #define Src2Imm (4<<29) | ||
95 | #define Src2Mask (7<<29) | 99 | #define Src2Mask (7<<29) |
96 | 100 | ||
97 | enum { | 101 | #define X2(x...) x, x |
98 | Group1_80, Group1_81, Group1_82, Group1_83, | 102 | #define X3(x...) X2(x), x |
99 | Group1A, Group3_Byte, Group3, Group4, Group5, Group7, | 103 | #define X4(x...) X2(x), X2(x) |
100 | Group8, Group9, | 104 | #define X5(x...) X4(x), x |
101 | }; | 105 | #define X6(x...) X4(x), X2(x) |
102 | 106 | #define X7(x...) X4(x), X3(x) | |
103 | static u32 opcode_table[256] = { | 107 | #define X8(x...) X4(x), X4(x) |
104 | /* 0x00 - 0x07 */ | 108 | #define X16(x...) X8(x), X8(x) |
105 | ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, | 109 | |
106 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | 110 | struct opcode { |
107 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, | 111 | u32 flags; |
108 | ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, | 112 | u8 intercept; |
109 | /* 0x08 - 0x0F */ | 113 | union { |
110 | ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, | 114 | int (*execute)(struct x86_emulate_ctxt *ctxt); |
111 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | 115 | struct opcode *group; |
112 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, | 116 | struct group_dual *gdual; |
113 | ImplicitOps | Stack | No64, 0, | 117 | struct gprefix *gprefix; |
114 | /* 0x10 - 0x17 */ | 118 | } u; |
115 | ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, | 119 | int (*check_perm)(struct x86_emulate_ctxt *ctxt); |
116 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | ||
117 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, | ||
118 | ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, | ||
119 | /* 0x18 - 0x1F */ | ||
120 | ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, | ||
121 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | ||
122 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, | ||
123 | ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, | ||
124 | /* 0x20 - 0x27 */ | ||
125 | ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, | ||
126 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | ||
127 | ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0, | ||
128 | /* 0x28 - 0x2F */ | ||
129 | ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, | ||
130 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | ||
131 | ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0, | ||
132 | /* 0x30 - 0x37 */ | ||
133 | ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, | ||
134 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | ||
135 | ByteOp | DstAcc | SrcImmByte, DstAcc | SrcImm, 0, 0, | ||
136 | /* 0x38 - 0x3F */ | ||
137 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, | ||
138 | ByteOp | DstReg | SrcMem | ModRM, DstReg | SrcMem | ModRM, | ||
139 | ByteOp | DstAcc | SrcImm, DstAcc | SrcImm, | ||
140 | 0, 0, | ||
141 | /* 0x40 - 0x47 */ | ||
142 | DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, | ||
143 | /* 0x48 - 0x4F */ | ||
144 | DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, | ||
145 | /* 0x50 - 0x57 */ | ||
146 | SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, | ||
147 | SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, SrcReg | Stack, | ||
148 | /* 0x58 - 0x5F */ | ||
149 | DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack, | ||
150 | DstReg | Stack, DstReg | Stack, DstReg | Stack, DstReg | Stack, | ||
151 | /* 0x60 - 0x67 */ | ||
152 | ImplicitOps | Stack | No64, ImplicitOps | Stack | No64, | ||
153 | 0, DstReg | SrcMem32 | ModRM | Mov /* movsxd (x86/64) */ , | ||
154 | 0, 0, 0, 0, | ||
155 | /* 0x68 - 0x6F */ | ||
156 | SrcImm | Mov | Stack, 0, SrcImmByte | Mov | Stack, 0, | ||
157 | DstDI | ByteOp | Mov | String, DstDI | Mov | String, /* insb, insw/insd */ | ||
158 | SrcSI | ByteOp | ImplicitOps | String, SrcSI | ImplicitOps | String, /* outsb, outsw/outsd */ | ||
159 | /* 0x70 - 0x77 */ | ||
160 | SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte, | ||
161 | SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte, | ||
162 | /* 0x78 - 0x7F */ | ||
163 | SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte, | ||
164 | SrcImmByte, SrcImmByte, SrcImmByte, SrcImmByte, | ||
165 | /* 0x80 - 0x87 */ | ||
166 | Group | Group1_80, Group | Group1_81, | ||
167 | Group | Group1_82, Group | Group1_83, | ||
168 | ByteOp | DstMem | SrcReg | ModRM, DstMem | SrcReg | ModRM, | ||
169 | ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, | ||
170 | /* 0x88 - 0x8F */ | ||
171 | ByteOp | DstMem | SrcReg | ModRM | Mov, DstMem | SrcReg | ModRM | Mov, | ||
172 | ByteOp | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | ||
173 | DstMem | SrcNone | ModRM | Mov, ModRM | DstReg, | ||
174 | ImplicitOps | SrcMem16 | ModRM, Group | Group1A, | ||
175 | /* 0x90 - 0x97 */ | ||
176 | DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, DstReg, | ||
177 | /* 0x98 - 0x9F */ | ||
178 | 0, 0, SrcImmFAddr | No64, 0, | ||
179 | ImplicitOps | Stack, ImplicitOps | Stack, 0, 0, | ||
180 | /* 0xA0 - 0xA7 */ | ||
181 | ByteOp | DstAcc | SrcMem | Mov | MemAbs, DstAcc | SrcMem | Mov | MemAbs, | ||
182 | ByteOp | DstMem | SrcAcc | Mov | MemAbs, DstMem | SrcAcc | Mov | MemAbs, | ||
183 | ByteOp | SrcSI | DstDI | Mov | String, SrcSI | DstDI | Mov | String, | ||
184 | ByteOp | SrcSI | DstDI | String, SrcSI | DstDI | String, | ||
185 | /* 0xA8 - 0xAF */ | ||
186 | DstAcc | SrcImmByte | ByteOp, DstAcc | SrcImm, ByteOp | DstDI | Mov | String, DstDI | Mov | String, | ||
187 | ByteOp | SrcSI | DstAcc | Mov | String, SrcSI | DstAcc | Mov | String, | ||
188 | ByteOp | DstDI | String, DstDI | String, | ||
189 | /* 0xB0 - 0xB7 */ | ||
190 | ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, | ||
191 | ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, | ||
192 | ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, | ||
193 | ByteOp | DstReg | SrcImm | Mov, ByteOp | DstReg | SrcImm | Mov, | ||
194 | /* 0xB8 - 0xBF */ | ||
195 | DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, | ||
196 | DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, | ||
197 | DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, | ||
198 | DstReg | SrcImm | Mov, DstReg | SrcImm | Mov, | ||
199 | /* 0xC0 - 0xC7 */ | ||
200 | ByteOp | DstMem | SrcImm | ModRM, DstMem | SrcImmByte | ModRM, | ||
201 | 0, ImplicitOps | Stack, 0, 0, | ||
202 | ByteOp | DstMem | SrcImm | ModRM | Mov, DstMem | SrcImm | ModRM | Mov, | ||
203 | /* 0xC8 - 0xCF */ | ||
204 | 0, 0, 0, ImplicitOps | Stack, | ||
205 | ImplicitOps, SrcImmByte, ImplicitOps | No64, ImplicitOps, | ||
206 | /* 0xD0 - 0xD7 */ | ||
207 | ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, | ||
208 | ByteOp | DstMem | SrcImplicit | ModRM, DstMem | SrcImplicit | ModRM, | ||
209 | 0, 0, 0, 0, | ||
210 | /* 0xD8 - 0xDF */ | ||
211 | 0, 0, 0, 0, 0, 0, 0, 0, | ||
212 | /* 0xE0 - 0xE7 */ | ||
213 | 0, 0, 0, 0, | ||
214 | ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc, | ||
215 | ByteOp | SrcImmUByte | DstAcc, SrcImmUByte | DstAcc, | ||
216 | /* 0xE8 - 0xEF */ | ||
217 | SrcImm | Stack, SrcImm | ImplicitOps, | ||
218 | SrcImmFAddr | No64, SrcImmByte | ImplicitOps, | ||
219 | SrcNone | ByteOp | DstAcc, SrcNone | DstAcc, | ||
220 | SrcNone | ByteOp | DstAcc, SrcNone | DstAcc, | ||
221 | /* 0xF0 - 0xF7 */ | ||
222 | 0, 0, 0, 0, | ||
223 | ImplicitOps | Priv, ImplicitOps, Group | Group3_Byte, Group | Group3, | ||
224 | /* 0xF8 - 0xFF */ | ||
225 | ImplicitOps, 0, ImplicitOps, ImplicitOps, | ||
226 | ImplicitOps, ImplicitOps, Group | Group4, Group | Group5, | ||
227 | }; | ||
228 | |||
229 | static u32 twobyte_table[256] = { | ||
230 | /* 0x00 - 0x0F */ | ||
231 | 0, Group | GroupDual | Group7, 0, 0, | ||
232 | 0, ImplicitOps, ImplicitOps | Priv, 0, | ||
233 | ImplicitOps | Priv, ImplicitOps | Priv, 0, 0, | ||
234 | 0, ImplicitOps | ModRM, 0, 0, | ||
235 | /* 0x10 - 0x1F */ | ||
236 | 0, 0, 0, 0, 0, 0, 0, 0, ImplicitOps | ModRM, 0, 0, 0, 0, 0, 0, 0, | ||
237 | /* 0x20 - 0x2F */ | ||
238 | ModRM | ImplicitOps | Priv, ModRM | Priv, | ||
239 | ModRM | ImplicitOps | Priv, ModRM | Priv, | ||
240 | 0, 0, 0, 0, | ||
241 | 0, 0, 0, 0, 0, 0, 0, 0, | ||
242 | /* 0x30 - 0x3F */ | ||
243 | ImplicitOps | Priv, 0, ImplicitOps | Priv, 0, | ||
244 | ImplicitOps, ImplicitOps | Priv, 0, 0, | ||
245 | 0, 0, 0, 0, 0, 0, 0, 0, | ||
246 | /* 0x40 - 0x47 */ | ||
247 | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | ||
248 | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | ||
249 | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | ||
250 | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | ||
251 | /* 0x48 - 0x4F */ | ||
252 | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | ||
253 | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | ||
254 | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | ||
255 | DstReg | SrcMem | ModRM | Mov, DstReg | SrcMem | ModRM | Mov, | ||
256 | /* 0x50 - 0x5F */ | ||
257 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
258 | /* 0x60 - 0x6F */ | ||
259 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
260 | /* 0x70 - 0x7F */ | ||
261 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
262 | /* 0x80 - 0x8F */ | ||
263 | SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, | ||
264 | SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, SrcImm, | ||
265 | /* 0x90 - 0x9F */ | ||
266 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
267 | /* 0xA0 - 0xA7 */ | ||
268 | ImplicitOps | Stack, ImplicitOps | Stack, | ||
269 | 0, DstMem | SrcReg | ModRM | BitOp, | ||
270 | DstMem | SrcReg | Src2ImmByte | ModRM, | ||
271 | DstMem | SrcReg | Src2CL | ModRM, 0, 0, | ||
272 | /* 0xA8 - 0xAF */ | ||
273 | ImplicitOps | Stack, ImplicitOps | Stack, | ||
274 | 0, DstMem | SrcReg | ModRM | BitOp | Lock, | ||
275 | DstMem | SrcReg | Src2ImmByte | ModRM, | ||
276 | DstMem | SrcReg | Src2CL | ModRM, | ||
277 | ModRM, 0, | ||
278 | /* 0xB0 - 0xB7 */ | ||
279 | ByteOp | DstMem | SrcReg | ModRM | Lock, DstMem | SrcReg | ModRM | Lock, | ||
280 | 0, DstMem | SrcReg | ModRM | BitOp | Lock, | ||
281 | 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, | ||
282 | DstReg | SrcMem16 | ModRM | Mov, | ||
283 | /* 0xB8 - 0xBF */ | ||
284 | 0, 0, | ||
285 | Group | Group8, DstMem | SrcReg | ModRM | BitOp | Lock, | ||
286 | 0, 0, ByteOp | DstReg | SrcMem | ModRM | Mov, | ||
287 | DstReg | SrcMem16 | ModRM | Mov, | ||
288 | /* 0xC0 - 0xCF */ | ||
289 | 0, 0, 0, DstMem | SrcReg | ModRM | Mov, | ||
290 | 0, 0, 0, Group | GroupDual | Group9, | ||
291 | 0, 0, 0, 0, 0, 0, 0, 0, | ||
292 | /* 0xD0 - 0xDF */ | ||
293 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
294 | /* 0xE0 - 0xEF */ | ||
295 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, | ||
296 | /* 0xF0 - 0xFF */ | ||
297 | 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 | ||
298 | }; | 120 | }; |
299 | 121 | ||
300 | static u32 group_table[] = { | 122 | struct group_dual { |
301 | [Group1_80*8] = | 123 | struct opcode mod012[8]; |
302 | ByteOp | DstMem | SrcImm | ModRM | Lock, | 124 | struct opcode mod3[8]; |
303 | ByteOp | DstMem | SrcImm | ModRM | Lock, | ||
304 | ByteOp | DstMem | SrcImm | ModRM | Lock, | ||
305 | ByteOp | DstMem | SrcImm | ModRM | Lock, | ||
306 | ByteOp | DstMem | SrcImm | ModRM | Lock, | ||
307 | ByteOp | DstMem | SrcImm | ModRM | Lock, | ||
308 | ByteOp | DstMem | SrcImm | ModRM | Lock, | ||
309 | ByteOp | DstMem | SrcImm | ModRM, | ||
310 | [Group1_81*8] = | ||
311 | DstMem | SrcImm | ModRM | Lock, | ||
312 | DstMem | SrcImm | ModRM | Lock, | ||
313 | DstMem | SrcImm | ModRM | Lock, | ||
314 | DstMem | SrcImm | ModRM | Lock, | ||
315 | DstMem | SrcImm | ModRM | Lock, | ||
316 | DstMem | SrcImm | ModRM | Lock, | ||
317 | DstMem | SrcImm | ModRM | Lock, | ||
318 | DstMem | SrcImm | ModRM, | ||
319 | [Group1_82*8] = | ||
320 | ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, | ||
321 | ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, | ||
322 | ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, | ||
323 | ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, | ||
324 | ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, | ||
325 | ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, | ||
326 | ByteOp | DstMem | SrcImm | ModRM | No64 | Lock, | ||
327 | ByteOp | DstMem | SrcImm | ModRM | No64, | ||
328 | [Group1_83*8] = | ||
329 | DstMem | SrcImmByte | ModRM | Lock, | ||
330 | DstMem | SrcImmByte | ModRM | Lock, | ||
331 | DstMem | SrcImmByte | ModRM | Lock, | ||
332 | DstMem | SrcImmByte | ModRM | Lock, | ||
333 | DstMem | SrcImmByte | ModRM | Lock, | ||
334 | DstMem | SrcImmByte | ModRM | Lock, | ||
335 | DstMem | SrcImmByte | ModRM | Lock, | ||
336 | DstMem | SrcImmByte | ModRM, | ||
337 | [Group1A*8] = | ||
338 | DstMem | SrcNone | ModRM | Mov | Stack, 0, 0, 0, 0, 0, 0, 0, | ||
339 | [Group3_Byte*8] = | ||
340 | ByteOp | SrcImm | DstMem | ModRM, ByteOp | SrcImm | DstMem | ModRM, | ||
341 | ByteOp | DstMem | SrcNone | ModRM, ByteOp | DstMem | SrcNone | ModRM, | ||
342 | 0, 0, 0, 0, | ||
343 | [Group3*8] = | ||
344 | DstMem | SrcImm | ModRM, DstMem | SrcImm | ModRM, | ||
345 | DstMem | SrcNone | ModRM, DstMem | SrcNone | ModRM, | ||
346 | 0, 0, 0, 0, | ||
347 | [Group4*8] = | ||
348 | ByteOp | DstMem | SrcNone | ModRM | Lock, ByteOp | DstMem | SrcNone | ModRM | Lock, | ||
349 | 0, 0, 0, 0, 0, 0, | ||
350 | [Group5*8] = | ||
351 | DstMem | SrcNone | ModRM | Lock, DstMem | SrcNone | ModRM | Lock, | ||
352 | SrcMem | ModRM | Stack, 0, | ||
353 | SrcMem | ModRM | Stack, SrcMemFAddr | ModRM | ImplicitOps, | ||
354 | SrcMem | ModRM | Stack, 0, | ||
355 | [Group7*8] = | ||
356 | 0, 0, ModRM | SrcMem | Priv, ModRM | SrcMem | Priv, | ||
357 | SrcNone | ModRM | DstMem | Mov, 0, | ||
358 | SrcMem16 | ModRM | Mov | Priv, SrcMem | ModRM | ByteOp | Priv, | ||
359 | [Group8*8] = | ||
360 | 0, 0, 0, 0, | ||
361 | DstMem | SrcImmByte | ModRM, DstMem | SrcImmByte | ModRM | Lock, | ||
362 | DstMem | SrcImmByte | ModRM | Lock, DstMem | SrcImmByte | ModRM | Lock, | ||
363 | [Group9*8] = | ||
364 | 0, DstMem64 | ModRM | Lock, 0, 0, 0, 0, 0, 0, | ||
365 | }; | 125 | }; |
366 | 126 | ||
367 | static u32 group2_table[] = { | 127 | struct gprefix { |
368 | [Group7*8] = | 128 | struct opcode pfx_no; |
369 | SrcNone | ModRM | Priv, 0, 0, SrcNone | ModRM | Priv, | 129 | struct opcode pfx_66; |
370 | SrcNone | ModRM | DstMem | Mov, 0, | 130 | struct opcode pfx_f2; |
371 | SrcMem16 | ModRM | Mov | Priv, 0, | 131 | struct opcode pfx_f3; |
372 | [Group9*8] = | ||
373 | 0, 0, 0, 0, 0, 0, 0, 0, | ||
374 | }; | 132 | }; |
375 | 133 | ||
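The hunk above replaces the flat u32 opcode_table/twobyte_table/group_table arrays with a table-driven decoder: each struct opcode carries decode flags plus either an execute callback or a pointer to a nested group, group-dual, or prefix table, and the X2..X16 macros generate runs of identical entries. A minimal standalone sketch of the pattern (demo_table, em_inc/em_dec, and struct ctxt are illustrative names, not the kernel's):

	#include <stdio.h>

	/* Repetition helpers in the style of the patch's X2/X4/X8 macros. */
	#define X2(x...) x, x
	#define X4(x...) X2(x), X2(x)
	#define X8(x...) X4(x), X4(x)

	struct ctxt;                            /* opaque emulator state */

	struct opcode {
		unsigned flags;                 /* decode hints (operand types, etc.) */
		int (*execute)(struct ctxt *);  /* handler, or NULL if undefined */
	};

	static int em_inc(struct ctxt *c) { (void)c; return 0; }
	static int em_dec(struct ctxt *c) { (void)c; return 0; }

	/* e.g. 0x40-0x4f: eight INCs then eight DECs, each written once. */
	static const struct opcode demo_table[16] = {
		X8({ .flags = 1, .execute = em_inc }),
		X8({ .flags = 1, .execute = em_dec }),
	};

	int main(void)
	{
		/* Dispatch resembles: opcode = table[c->b]; opcode.execute(ctxt); */
		printf("0x44 handled by %p\n", (void *)demo_table[4].execute);
		return 0;
	}

Compared with the old scheme, the group number no longer has to be packed into the low flag bits; nesting is expressed directly through the union.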
376 | /* EFLAGS bit definitions. */ | 134 | /* EFLAGS bit definitions. */ |
@@ -392,6 +150,9 @@ static u32 group2_table[] = { | |||
392 | #define EFLG_PF (1<<2) | 150 | #define EFLG_PF (1<<2) |
393 | #define EFLG_CF (1<<0) | 151 | #define EFLG_CF (1<<0) |
394 | 152 | ||
153 | #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a | ||
154 | #define EFLG_RESERVED_ONE_MASK 2 | ||
155 | |||
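The two new masks describe EFLAGS bits with architecturally fixed values: every bit in 0xffc0802a must read as zero and bit 1 must read as one. A plausible use (a sketch under that assumption, not the patch's code) is sanitizing a value before loading it into guest EFLAGS, e.g. on POPF or IRET:

	unsigned long sanitize_eflags(unsigned long val)
	{
		val &= ~0xffc0802aUL;   /* EFLG_RESERVED_ZEROS_MASK: force to 0 */
		val |= 2;               /* EFLG_RESERVED_ONE_MASK: bit 1 reads as 1 */
		return val;
	}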
395 | /* | 156 | /* |
396 | * Instruction emulation: | 157 | * Instruction emulation: |
397 | * Most instructions are emulated directly via a fragment of inline assembly | 158 | * Most instructions are emulated directly via a fragment of inline assembly |
@@ -444,13 +205,13 @@ static u32 group2_table[] = { | |||
444 | #define ON64(x) | 205 | #define ON64(x) |
445 | #endif | 206 | #endif |
446 | 207 | ||
447 | #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \ | 208 | #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix, _dsttype) \ |
448 | do { \ | 209 | do { \ |
449 | __asm__ __volatile__ ( \ | 210 | __asm__ __volatile__ ( \ |
450 | _PRE_EFLAGS("0", "4", "2") \ | 211 | _PRE_EFLAGS("0", "4", "2") \ |
451 | _op _suffix " %"_x"3,%1; " \ | 212 | _op _suffix " %"_x"3,%1; " \ |
452 | _POST_EFLAGS("0", "4", "2") \ | 213 | _POST_EFLAGS("0", "4", "2") \ |
453 | : "=m" (_eflags), "=m" ((_dst).val), \ | 214 | : "=m" (_eflags), "+q" (*(_dsttype*)&(_dst).val),\ |
454 | "=&r" (_tmp) \ | 215 | "=&r" (_tmp) \ |
455 | : _y ((_src).val), "i" (EFLAGS_MASK)); \ | 216 | : _y ((_src).val), "i" (EFLAGS_MASK)); \ |
456 | } while (0) | 217 | } while (0) |
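The destination constraint changes from "=m" on (_dst).val to "+q" on *(_dsttype *)&(_dst).val: the operand becomes a read-modify-write register of exactly the operand width, with the byte/word/long/quad variants passing u8/u16/u32/u64. An isolated sketch of the same typed pattern (not the macro itself):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t dst = 0x11223344556677f0ULL;

	int main(void)
	{
		uint8_t src = 0x20;

		/* Byte-sized read-modify-write: "addb" sees only the low
		 * byte, so the upper bytes of dst cannot be clobbered. */
		asm("addb %1, %0"
		    : "+q" (*(uint8_t *)&dst)
		    : "q" (src));
		printf("%llx\n", (unsigned long long)dst); /* ...556677f0 -> ...55667710 */
		return 0;
	}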
@@ -463,13 +224,13 @@ static u32 group2_table[] = { | |||
463 | \ | 224 | \ |
464 | switch ((_dst).bytes) { \ | 225 | switch ((_dst).bytes) { \ |
465 | case 2: \ | 226 | case 2: \ |
466 | ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \ | 227 | ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w",u16);\ |
467 | break; \ | 228 | break; \ |
468 | case 4: \ | 229 | case 4: \ |
469 | ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l"); \ | 230 | ____emulate_2op(_op,_src,_dst,_eflags,_lx,_ly,"l",u32);\ |
470 | break; \ | 231 | break; \ |
471 | case 8: \ | 232 | case 8: \ |
472 | ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q")); \ | 233 | ON64(____emulate_2op(_op,_src,_dst,_eflags,_qx,_qy,"q",u64)); \ |
473 | break; \ | 234 | break; \ |
474 | } \ | 235 | } \ |
475 | } while (0) | 236 | } while (0) |
@@ -479,7 +240,7 @@ static u32 group2_table[] = { | |||
479 | unsigned long _tmp; \ | 240 | unsigned long _tmp; \ |
480 | switch ((_dst).bytes) { \ | 241 | switch ((_dst).bytes) { \ |
481 | case 1: \ | 242 | case 1: \ |
482 | ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \ | 243 | ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b",u8); \ |
483 | break; \ | 244 | break; \ |
484 | default: \ | 245 | default: \ |
485 | __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ | 246 | __emulate_2op_nobyte(_op, _src, _dst, _eflags, \ |
@@ -504,42 +265,42 @@ static u32 group2_table[] = { | |||
504 | "w", "r", _LO32, "r", "", "r") | 265 | "w", "r", _LO32, "r", "", "r") |
505 | 266 | ||
506 | /* Instruction has three operands and one operand is stored in ECX register */ | 267 | /* Instruction has three operands and one operand is stored in ECX register */ |
507 | #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \ | 268 | #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \ |
508 | do { \ | 269 | do { \ |
509 | unsigned long _tmp; \ | 270 | unsigned long _tmp; \ |
510 | _type _clv = (_cl).val; \ | 271 | _type _clv = (_cl).val; \ |
511 | _type _srcv = (_src).val; \ | 272 | _type _srcv = (_src).val; \ |
512 | _type _dstv = (_dst).val; \ | 273 | _type _dstv = (_dst).val; \ |
513 | \ | 274 | \ |
514 | __asm__ __volatile__ ( \ | 275 | __asm__ __volatile__ ( \ |
515 | _PRE_EFLAGS("0", "5", "2") \ | 276 | _PRE_EFLAGS("0", "5", "2") \ |
516 | _op _suffix " %4,%1 \n" \ | 277 | _op _suffix " %4,%1 \n" \ |
517 | _POST_EFLAGS("0", "5", "2") \ | 278 | _POST_EFLAGS("0", "5", "2") \ |
518 | : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \ | 279 | : "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \ |
519 | : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \ | 280 | : "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \ |
520 | ); \ | 281 | ); \ |
521 | \ | 282 | \ |
522 | (_cl).val = (unsigned long) _clv; \ | 283 | (_cl).val = (unsigned long) _clv; \ |
523 | (_src).val = (unsigned long) _srcv; \ | 284 | (_src).val = (unsigned long) _srcv; \ |
524 | (_dst).val = (unsigned long) _dstv; \ | 285 | (_dst).val = (unsigned long) _dstv; \ |
525 | } while (0) | 286 | } while (0) |
526 | 287 | ||
527 | #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \ | 288 | #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \ |
528 | do { \ | 289 | do { \ |
529 | switch ((_dst).bytes) { \ | 290 | switch ((_dst).bytes) { \ |
530 | case 2: \ | 291 | case 2: \ |
531 | __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ | 292 | __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ |
532 | "w", unsigned short); \ | 293 | "w", unsigned short); \ |
533 | break; \ | 294 | break; \ |
534 | case 4: \ | 295 | case 4: \ |
535 | __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ | 296 | __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ |
536 | "l", unsigned int); \ | 297 | "l", unsigned int); \ |
537 | break; \ | 298 | break; \ |
538 | case 8: \ | 299 | case 8: \ |
539 | ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ | 300 | ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \ |
540 | "q", unsigned long)); \ | 301 | "q", unsigned long)); \ |
541 | break; \ | 302 | break; \ |
542 | } \ | 303 | } \ |
543 | } while (0) | 304 | } while (0) |
544 | 305 | ||
545 | #define __emulate_1op(_op, _dst, _eflags, _suffix) \ | 306 | #define __emulate_1op(_op, _dst, _eflags, _suffix) \ |
@@ -566,6 +327,86 @@ static u32 group2_table[] = { | |||
566 | } \ | 327 | } \ |
567 | } while (0) | 328 | } while (0) |
568 | 329 | ||
330 | #define __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, _suffix) \ | ||
331 | do { \ | ||
332 | unsigned long _tmp; \ | ||
333 | \ | ||
334 | __asm__ __volatile__ ( \ | ||
335 | _PRE_EFLAGS("0", "4", "1") \ | ||
336 | _op _suffix " %5; " \ | ||
337 | _POST_EFLAGS("0", "4", "1") \ | ||
338 | : "=m" (_eflags), "=&r" (_tmp), \ | ||
339 | "+a" (_rax), "+d" (_rdx) \ | ||
340 | : "i" (EFLAGS_MASK), "m" ((_src).val), \ | ||
341 | "a" (_rax), "d" (_rdx)); \ | ||
342 | } while (0) | ||
343 | |||
344 | #define __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _suffix, _ex) \ | ||
345 | do { \ | ||
346 | unsigned long _tmp; \ | ||
347 | \ | ||
348 | __asm__ __volatile__ ( \ | ||
349 | _PRE_EFLAGS("0", "5", "1") \ | ||
350 | "1: \n\t" \ | ||
351 | _op _suffix " %6; " \ | ||
352 | "2: \n\t" \ | ||
353 | _POST_EFLAGS("0", "5", "1") \ | ||
354 | ".pushsection .fixup,\"ax\" \n\t" \ | ||
355 | "3: movb $1, %4 \n\t" \ | ||
356 | "jmp 2b \n\t" \ | ||
357 | ".popsection \n\t" \ | ||
358 | _ASM_EXTABLE(1b, 3b) \ | ||
359 | : "=m" (_eflags), "=&r" (_tmp), \ | ||
360 | "+a" (_rax), "+d" (_rdx), "+qm"(_ex) \ | ||
361 | : "i" (EFLAGS_MASK), "m" ((_src).val), \ | ||
362 | "a" (_rax), "d" (_rdx)); \ | ||
363 | } while (0) | ||
364 | |||
365 | /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */ | ||
366 | #define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \ | ||
367 | do { \ | ||
368 | switch((_src).bytes) { \ | ||
369 | case 1: \ | ||
370 | __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \ | ||
371 | _eflags, "b"); \ | ||
372 | break; \ | ||
373 | case 2: \ | ||
374 | __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \ | ||
375 | _eflags, "w"); \ | ||
376 | break; \ | ||
377 | case 4: \ | ||
378 | __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \ | ||
379 | _eflags, "l"); \ | ||
380 | break; \ | ||
381 | case 8: \ | ||
382 | ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \ | ||
383 | _eflags, "q")); \ | ||
384 | break; \ | ||
385 | } \ | ||
386 | } while (0) | ||
387 | |||
388 | #define emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, _eflags, _ex) \ | ||
389 | do { \ | ||
390 | switch((_src).bytes) { \ | ||
391 | case 1: \ | ||
392 | __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \ | ||
393 | _eflags, "b", _ex); \ | ||
394 | break; \ | ||
395 | case 2: \ | ||
396 | __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \ | ||
397 | _eflags, "w", _ex); \ | ||
398 | break; \ | ||
399 | case 4: \ | ||
400 | __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \ | ||
401 | _eflags, "l", _ex); \ | ||
402 | break; \ | ||
403 | case 8: ON64( \ | ||
404 | __emulate_1op_rax_rdx_ex(_op, _src, _rax, _rdx, \ | ||
405 | _eflags, "q", _ex)); \ | ||
406 | break; \ | ||
407 | } \ | ||
408 | } while (0) | ||
409 | |||
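The _ex variant wraps the operation in an exception-table entry: if the host DIV/IDIV faults (divide by zero or quotient overflow), control jumps to the fixup at label 3, which sets _ex instead of oopsing the host, so the emulator can inject #DE into the guest. A hedged usage sketch in the style an instruction handler would presumably take (assumed caller, not verbatim from the patch):

	u8 de = 0;

	emulate_1op_rax_rdx_ex("div", c->src, c->regs[VCPU_REGS_RAX],
			       c->regs[VCPU_REGS_RDX], ctxt->eflags, de);
	if (de)
		return emulate_de(ctxt);   /* reflect the fault into the guest */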
569 | /* Fetch next part of the instruction being emulated. */ | 410 | /* Fetch next part of the instruction being emulated. */ |
570 | #define insn_fetch(_type, _size, _eip) \ | 411 | #define insn_fetch(_type, _size, _eip) \ |
571 | ({ unsigned long _x; \ | 412 | ({ unsigned long _x; \ |
@@ -576,13 +417,33 @@ static u32 group2_table[] = { | |||
576 | (_type)_x; \ | 417 | (_type)_x; \ |
577 | }) | 418 | }) |
578 | 419 | ||
579 | #define insn_fetch_arr(_arr, _size, _eip) \ | 420 | #define insn_fetch_arr(_arr, _size, _eip) \ |
580 | ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \ | 421 | ({ rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \ |
581 | if (rc != X86EMUL_CONTINUE) \ | 422 | if (rc != X86EMUL_CONTINUE) \ |
582 | goto done; \ | 423 | goto done; \ |
583 | (_eip) += (_size); \ | 424 | (_eip) += (_size); \ |
584 | }) | 425 | }) |
585 | 426 | ||
427 | static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt, | ||
428 | enum x86_intercept intercept, | ||
429 | enum x86_intercept_stage stage) | ||
430 | { | ||
431 | struct x86_instruction_info info = { | ||
432 | .intercept = intercept, | ||
433 | .rep_prefix = ctxt->decode.rep_prefix, | ||
434 | .modrm_mod = ctxt->decode.modrm_mod, | ||
435 | .modrm_reg = ctxt->decode.modrm_reg, | ||
436 | .modrm_rm = ctxt->decode.modrm_rm, | ||
437 | .src_val = ctxt->decode.src.val64, | ||
438 | .src_bytes = ctxt->decode.src.bytes, | ||
439 | .dst_bytes = ctxt->decode.dst.bytes, | ||
440 | .ad_bytes = ctxt->decode.ad_bytes, | ||
441 | .next_rip = ctxt->eip, | ||
442 | }; | ||
443 | |||
444 | return ctxt->ops->intercept(ctxt, &info, stage); | ||
445 | } | ||
446 | |||
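emulator_check_intercept() packs the decoded instruction state into a struct x86_instruction_info and forwards it, together with the intercept stage, to the backend through ctxt->ops->intercept; a nested-virtualization backend can then decide whether the instruction should instead exit to the guest's hypervisor. A sketch of a backend hook (hypothetical body; only the signature and the call shape come from the code above):

	static int demo_intercept(struct x86_emulate_ctxt *ctxt,
				  struct x86_instruction_info *info,
				  enum x86_intercept_stage stage)
	{
		/* An SVM/VMX backend would test whether the guest hypervisor
		 * asked to intercept info->intercept at this stage and, if
		 * so, return a code aborting emulation; otherwise: */
		return X86EMUL_CONTINUE;
	}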
586 | static inline unsigned long ad_mask(struct decode_cache *c) | 447 | static inline unsigned long ad_mask(struct decode_cache *c) |
587 | { | 448 | { |
588 | return (1UL << (c->ad_bytes << 3)) - 1; | 449 | return (1UL << (c->ad_bytes << 3)) - 1; |
@@ -599,9 +460,9 @@ address_mask(struct decode_cache *c, unsigned long reg) | |||
599 | } | 460 | } |
600 | 461 | ||
601 | static inline unsigned long | 462 | static inline unsigned long |
602 | register_address(struct decode_cache *c, unsigned long base, unsigned long reg) | 463 | register_address(struct decode_cache *c, unsigned long reg) |
603 | { | 464 | { |
604 | return base + address_mask(c, reg); | 465 | return address_mask(c, reg); |
605 | } | 466 | } |
606 | 467 | ||
607 | static inline void | 468 | static inline void |
@@ -618,6 +479,13 @@ static inline void jmp_rel(struct decode_cache *c, int rel) | |||
618 | register_address_increment(c, &c->eip, rel); | 479 | register_address_increment(c, &c->eip, rel); |
619 | } | 480 | } |
620 | 481 | ||
482 | static u32 desc_limit_scaled(struct desc_struct *desc) | ||
483 | { | ||
484 | u32 limit = get_desc_limit(desc); | ||
485 | |||
486 | return desc->g ? (limit << 12) | 0xfff : limit; | ||
487 | } | ||
488 | |||
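With the granularity bit set, the 20-bit descriptor limit counts 4 KiB pages, so it is shifted left by 12 with the low 12 bits filled with ones. A worked example (sketch, not patch code):

	u32 limit = 0xfffff;                  /* raw 20-bit limit field, g = 1 */
	u32 scaled = (limit << 12) | 0xfff;   /* == 0xffffffff: full 4 GiB - 1 */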
621 | static void set_seg_override(struct decode_cache *c, int seg) | 489 | static void set_seg_override(struct decode_cache *c, int seg) |
622 | { | 490 | { |
623 | c->has_seg_override = true; | 491 | c->has_seg_override = true; |
@@ -630,60 +498,177 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, | |||
630 | if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) | 498 | if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) |
631 | return 0; | 499 | return 0; |
632 | 500 | ||
633 | return ops->get_cached_segment_base(seg, ctxt->vcpu); | 501 | return ops->get_cached_segment_base(ctxt, seg); |
634 | } | 502 | } |
635 | 503 | ||
636 | static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt, | 504 | static unsigned seg_override(struct x86_emulate_ctxt *ctxt, |
637 | struct x86_emulate_ops *ops, | 505 | struct decode_cache *c) |
638 | struct decode_cache *c) | ||
639 | { | 506 | { |
640 | if (!c->has_seg_override) | 507 | if (!c->has_seg_override) |
641 | return 0; | 508 | return 0; |
642 | 509 | ||
643 | return seg_base(ctxt, ops, c->seg_override); | 510 | return c->seg_override; |
644 | } | 511 | } |
645 | 512 | ||
646 | static unsigned long es_base(struct x86_emulate_ctxt *ctxt, | 513 | static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, |
647 | struct x86_emulate_ops *ops) | 514 | u32 error, bool valid) |
648 | { | 515 | { |
649 | return seg_base(ctxt, ops, VCPU_SREG_ES); | 516 | ctxt->exception.vector = vec; |
517 | ctxt->exception.error_code = error; | ||
518 | ctxt->exception.error_code_valid = valid; | ||
519 | return X86EMUL_PROPAGATE_FAULT; | ||
650 | } | 520 | } |
651 | 521 | ||
652 | static unsigned long ss_base(struct x86_emulate_ctxt *ctxt, | 522 | static int emulate_db(struct x86_emulate_ctxt *ctxt) |
653 | struct x86_emulate_ops *ops) | 523 | { |
524 | return emulate_exception(ctxt, DB_VECTOR, 0, false); | ||
525 | } | ||
526 | |||
527 | static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err) | ||
528 | { | ||
529 | return emulate_exception(ctxt, GP_VECTOR, err, true); | ||
530 | } | ||
531 | |||
532 | static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err) | ||
533 | { | ||
534 | return emulate_exception(ctxt, SS_VECTOR, err, true); | ||
535 | } | ||
536 | |||
537 | static int emulate_ud(struct x86_emulate_ctxt *ctxt) | ||
538 | { | ||
539 | return emulate_exception(ctxt, UD_VECTOR, 0, false); | ||
540 | } | ||
541 | |||
542 | static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err) | ||
543 | { | ||
544 | return emulate_exception(ctxt, TS_VECTOR, err, true); | ||
545 | } | ||
546 | |||
547 | static int emulate_de(struct x86_emulate_ctxt *ctxt) | ||
654 | { | 548 | { |
655 | return seg_base(ctxt, ops, VCPU_SREG_SS); | 549 | return emulate_exception(ctxt, DE_VECTOR, 0, false); |
656 | } | 550 | } |
657 | 551 | ||
658 | static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec, | 552 | static int emulate_nm(struct x86_emulate_ctxt *ctxt) |
659 | u32 error, bool valid) | ||
660 | { | 553 | { |
661 | ctxt->exception = vec; | 554 | return emulate_exception(ctxt, NM_VECTOR, 0, false); |
662 | ctxt->error_code = error; | ||
663 | ctxt->error_code_valid = valid; | ||
664 | ctxt->restart = false; | ||
665 | } | 555 | } |
666 | 556 | ||
667 | static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err) | 557 | static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) |
668 | { | 558 | { |
669 | emulate_exception(ctxt, GP_VECTOR, err, true); | 559 | u16 selector; |
560 | struct desc_struct desc; | ||
561 | |||
562 | ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg); | ||
563 | return selector; | ||
670 | } | 564 | } |
671 | 565 | ||
672 | static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr, | 566 | static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector, |
673 | int err) | 567 | unsigned seg) |
674 | { | 568 | { |
675 | ctxt->cr2 = addr; | 569 | u16 dummy; |
676 | emulate_exception(ctxt, PF_VECTOR, err, true); | 570 | u32 base3; |
571 | struct desc_struct desc; | ||
572 | |||
573 | ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg); | ||
574 | ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); | ||
677 | } | 575 | } |
678 | 576 | ||
679 | static void emulate_ud(struct x86_emulate_ctxt *ctxt) | 577 | static int __linearize(struct x86_emulate_ctxt *ctxt, |
578 | struct segmented_address addr, | ||
579 | unsigned size, bool write, bool fetch, | ||
580 | ulong *linear) | ||
680 | { | 581 | { |
681 | emulate_exception(ctxt, UD_VECTOR, 0, false); | 582 | struct decode_cache *c = &ctxt->decode; |
583 | struct desc_struct desc; | ||
584 | bool usable; | ||
585 | ulong la; | ||
586 | u32 lim; | ||
587 | u16 sel; | ||
588 | unsigned cpl, rpl; | ||
589 | |||
590 | la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea; | ||
591 | switch (ctxt->mode) { | ||
592 | case X86EMUL_MODE_REAL: | ||
593 | break; | ||
594 | case X86EMUL_MODE_PROT64: | ||
595 | if (((signed long)la << 16) >> 16 != la) | ||
596 | return emulate_gp(ctxt, 0); | ||
597 | break; | ||
598 | default: | ||
599 | usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, | ||
600 | addr.seg); | ||
601 | if (!usable) | ||
602 | goto bad; | ||
603 | /* code segment or read-only data segment */ | ||
604 | if (((desc.type & 8) || !(desc.type & 2)) && write) | ||
605 | goto bad; | ||
606 | /* unreadable code segment */ | ||
607 | if (!fetch && (desc.type & 8) && !(desc.type & 2)) | ||
608 | goto bad; | ||
609 | lim = desc_limit_scaled(&desc); | ||
610 | if ((desc.type & 8) || !(desc.type & 4)) { | ||
611 | /* expand-up segment */ | ||
612 | if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) | ||
613 | goto bad; | ||
614 | } else { | ||
615 | /* expand-down segment */ | ||
616 | if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim) | ||
617 | goto bad; | ||
618 | lim = desc.d ? 0xffffffff : 0xffff; | ||
619 | if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) | ||
620 | goto bad; | ||
621 | } | ||
622 | cpl = ctxt->ops->cpl(ctxt); | ||
623 | rpl = sel & 3; | ||
624 | cpl = max(cpl, rpl); | ||
625 | if (!(desc.type & 8)) { | ||
626 | /* data segment */ | ||
627 | if (cpl > desc.dpl) | ||
628 | goto bad; | ||
629 | } else if ((desc.type & 8) && !(desc.type & 4)) { | ||
630 | /* nonconforming code segment */ | ||
631 | if (cpl != desc.dpl) | ||
632 | goto bad; | ||
633 | } else if ((desc.type & 8) && (desc.type & 4)) { | ||
634 | /* conforming code segment */ | ||
635 | if (cpl < desc.dpl) | ||
636 | goto bad; | ||
637 | } | ||
638 | break; | ||
639 | } | ||
640 | if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8) | ||
641 | la &= (u32)-1; | ||
642 | *linear = la; | ||
643 | return X86EMUL_CONTINUE; | ||
644 | bad: | ||
645 | if (addr.seg == VCPU_SREG_SS) | ||
646 | return emulate_ss(ctxt, addr.seg); | ||
647 | else | ||
648 | return emulate_gp(ctxt, addr.seg); | ||
682 | } | 649 | } |
683 | 650 | ||
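__linearize() above centralizes address translation: real mode performs no checks, 64-bit mode only demands a canonical address, and protected mode validates segment usability, write/execute permission, the limit (with the test inverted for expand-down data segments), and privilege as max(CPL, RPL) against DPL. The expand-down rule is the least obvious part; a standalone sketch mirroring it:

	/* Sketch of the expand-down limit rule used above: valid offsets
	 * lie strictly above the scaled limit, up to 0xffff or 0xffffffff
	 * depending on the descriptor's d bit. */
	static bool expand_down_ok(u32 ea, u32 size, u32 lim, bool big)
	{
		u32 upper = big ? 0xffffffffu : 0xffffu;

		if (ea <= lim || (u32)(ea + size - 1) <= lim)
			return false;          /* inside the guard region: fault */
		return ea <= upper && (u32)(ea + size - 1) <= upper;
	}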
684 | static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err) | 651 | static int linearize(struct x86_emulate_ctxt *ctxt, |
652 | struct segmented_address addr, | ||
653 | unsigned size, bool write, | ||
654 | ulong *linear) | ||
685 | { | 655 | { |
686 | emulate_exception(ctxt, TS_VECTOR, err, true); | 656 | return __linearize(ctxt, addr, size, write, false, linear); |
657 | } | ||
658 | |||
659 | |||
660 | static int segmented_read_std(struct x86_emulate_ctxt *ctxt, | ||
661 | struct segmented_address addr, | ||
662 | void *data, | ||
663 | unsigned size) | ||
664 | { | ||
665 | int rc; | ||
666 | ulong linear; | ||
667 | |||
668 | rc = linearize(ctxt, addr, size, false, &linear); | ||
669 | if (rc != X86EMUL_CONTINUE) | ||
670 | return rc; | ||
671 | return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception); | ||
687 | } | 672 | } |
688 | 673 | ||
689 | static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, | 674 | static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, |
@@ -695,10 +680,15 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt, | |||
695 | int size, cur_size; | 680 | int size, cur_size; |
696 | 681 | ||
697 | if (eip == fc->end) { | 682 | if (eip == fc->end) { |
683 | unsigned long linear; | ||
684 | struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip}; | ||
698 | cur_size = fc->end - fc->start; | 685 | cur_size = fc->end - fc->start; |
699 | size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip)); | 686 | size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip)); |
700 | rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size, | 687 | rc = __linearize(ctxt, addr, size, false, true, &linear); |
701 | size, ctxt->vcpu, NULL); | 688 | if (rc != X86EMUL_CONTINUE) |
689 | return rc; | ||
690 | rc = ops->fetch(ctxt, linear, fc->data + cur_size, | ||
691 | size, &ctxt->exception); | ||
702 | if (rc != X86EMUL_CONTINUE) | 692 | if (rc != X86EMUL_CONTINUE) |
703 | return rc; | 693 | return rc; |
704 | fc->end += size; | 694 | fc->end += size; |
@@ -741,8 +731,7 @@ static void *decode_register(u8 modrm_reg, unsigned long *regs, | |||
741 | } | 731 | } |
742 | 732 | ||
743 | static int read_descriptor(struct x86_emulate_ctxt *ctxt, | 733 | static int read_descriptor(struct x86_emulate_ctxt *ctxt, |
744 | struct x86_emulate_ops *ops, | 734 | struct segmented_address addr, |
745 | void *ptr, | ||
746 | u16 *size, unsigned long *address, int op_bytes) | 735 | u16 *size, unsigned long *address, int op_bytes) |
747 | { | 736 | { |
748 | int rc; | 737 | int rc; |
@@ -750,12 +739,11 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt, | |||
750 | if (op_bytes == 2) | 739 | if (op_bytes == 2) |
751 | op_bytes = 3; | 740 | op_bytes = 3; |
752 | *address = 0; | 741 | *address = 0; |
753 | rc = ops->read_std((unsigned long)ptr, (unsigned long *)size, 2, | 742 | rc = segmented_read_std(ctxt, addr, size, 2); |
754 | ctxt->vcpu, NULL); | ||
755 | if (rc != X86EMUL_CONTINUE) | 743 | if (rc != X86EMUL_CONTINUE) |
756 | return rc; | 744 | return rc; |
757 | rc = ops->read_std((unsigned long)ptr + 2, address, op_bytes, | 745 | addr.ea += 2; |
758 | ctxt->vcpu, NULL); | 746 | rc = segmented_read_std(ctxt, addr, address, op_bytes); |
759 | return rc; | 747 | return rc; |
760 | } | 748 | } |
761 | 749 | ||
@@ -794,7 +782,81 @@ static int test_cc(unsigned int condition, unsigned int flags) | |||
794 | return (!!rc ^ (condition & 1)); | 782 | return (!!rc ^ (condition & 1)); |
795 | } | 783 | } |
796 | 784 | ||
797 | static void decode_register_operand(struct operand *op, | 785 | static void fetch_register_operand(struct operand *op) |
786 | { | ||
787 | switch (op->bytes) { | ||
788 | case 1: | ||
789 | op->val = *(u8 *)op->addr.reg; | ||
790 | break; | ||
791 | case 2: | ||
792 | op->val = *(u16 *)op->addr.reg; | ||
793 | break; | ||
794 | case 4: | ||
795 | op->val = *(u32 *)op->addr.reg; | ||
796 | break; | ||
797 | case 8: | ||
798 | op->val = *(u64 *)op->addr.reg; | ||
799 | break; | ||
800 | } | ||
801 | } | ||
802 | |||
803 | static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg) | ||
804 | { | ||
805 | ctxt->ops->get_fpu(ctxt); | ||
806 | switch (reg) { | ||
807 | case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break; | ||
808 | case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break; | ||
809 | case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break; | ||
810 | case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break; | ||
811 | case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break; | ||
812 | case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break; | ||
813 | case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break; | ||
814 | case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break; | ||
815 | #ifdef CONFIG_X86_64 | ||
816 | case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break; | ||
817 | case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break; | ||
818 | case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break; | ||
819 | case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break; | ||
820 | case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break; | ||
821 | case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break; | ||
822 | case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break; | ||
823 | case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break; | ||
824 | #endif | ||
825 | default: BUG(); | ||
826 | } | ||
827 | ctxt->ops->put_fpu(ctxt); | ||
828 | } | ||
829 | |||
830 | static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, | ||
831 | int reg) | ||
832 | { | ||
833 | ctxt->ops->get_fpu(ctxt); | ||
834 | switch (reg) { | ||
835 | case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break; | ||
836 | case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break; | ||
837 | case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break; | ||
838 | case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break; | ||
839 | case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break; | ||
840 | case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break; | ||
841 | case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break; | ||
842 | case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break; | ||
843 | #ifdef CONFIG_X86_64 | ||
844 | case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break; | ||
845 | case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break; | ||
846 | case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break; | ||
847 | case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break; | ||
848 | case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break; | ||
849 | case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break; | ||
850 | case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break; | ||
851 | case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break; | ||
852 | #endif | ||
853 | default: BUG(); | ||
854 | } | ||
855 | ctxt->ops->put_fpu(ctxt); | ||
856 | } | ||
857 | |||
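The per-register switch in read_sse_reg()/write_sse_reg() exists because the xmm register number is part of the movdqu instruction encoding, not a runtime operand, so one statically encoded instruction is needed per register; get_fpu()/put_fpu() bracket the copy so the host owns the FPU state while it runs. The boilerplate could presumably be cut with a case-generating macro (hypothetical refactoring, not part of the patch):

	#define READ_XMM(n, data) \
		case n: asm("movdqu %%xmm" #n ", %0" : "=m" (*(data))); break

	/* usage: switch (reg) { READ_XMM(0, data); READ_XMM(1, data); ... } */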
858 | static void decode_register_operand(struct x86_emulate_ctxt *ctxt, | ||
859 | struct operand *op, | ||
798 | struct decode_cache *c, | 860 | struct decode_cache *c, |
799 | int inhibit_bytereg) | 861 | int inhibit_bytereg) |
800 | { | 862 | { |
@@ -803,36 +865,36 @@ static void decode_register_operand(struct operand *op, | |||
803 | 865 | ||
804 | if (!(c->d & ModRM)) | 866 | if (!(c->d & ModRM)) |
805 | reg = (c->b & 7) | ((c->rex_prefix & 1) << 3); | 867 | reg = (c->b & 7) | ((c->rex_prefix & 1) << 3); |
868 | |||
869 | if (c->d & Sse) { | ||
870 | op->type = OP_XMM; | ||
871 | op->bytes = 16; | ||
872 | op->addr.xmm = reg; | ||
873 | read_sse_reg(ctxt, &op->vec_val, reg); | ||
874 | return; | ||
875 | } | ||
876 | |||
806 | op->type = OP_REG; | 877 | op->type = OP_REG; |
807 | if ((c->d & ByteOp) && !inhibit_bytereg) { | 878 | if ((c->d & ByteOp) && !inhibit_bytereg) { |
808 | op->ptr = decode_register(reg, c->regs, highbyte_regs); | 879 | op->addr.reg = decode_register(reg, c->regs, highbyte_regs); |
809 | op->val = *(u8 *)op->ptr; | ||
810 | op->bytes = 1; | 880 | op->bytes = 1; |
811 | } else { | 881 | } else { |
812 | op->ptr = decode_register(reg, c->regs, 0); | 882 | op->addr.reg = decode_register(reg, c->regs, 0); |
813 | op->bytes = c->op_bytes; | 883 | op->bytes = c->op_bytes; |
814 | switch (op->bytes) { | ||
815 | case 2: | ||
816 | op->val = *(u16 *)op->ptr; | ||
817 | break; | ||
818 | case 4: | ||
819 | op->val = *(u32 *)op->ptr; | ||
820 | break; | ||
821 | case 8: | ||
822 | op->val = *(u64 *) op->ptr; | ||
823 | break; | ||
824 | } | ||
825 | } | 884 | } |
885 | fetch_register_operand(op); | ||
826 | op->orig_val = op->val; | 886 | op->orig_val = op->val; |
827 | } | 887 | } |
828 | 888 | ||
829 | static int decode_modrm(struct x86_emulate_ctxt *ctxt, | 889 | static int decode_modrm(struct x86_emulate_ctxt *ctxt, |
830 | struct x86_emulate_ops *ops) | 890 | struct x86_emulate_ops *ops, |
891 | struct operand *op) | ||
831 | { | 892 | { |
832 | struct decode_cache *c = &ctxt->decode; | 893 | struct decode_cache *c = &ctxt->decode; |
833 | u8 sib; | 894 | u8 sib; |
834 | int index_reg = 0, base_reg = 0, scale; | 895 | int index_reg = 0, base_reg = 0, scale; |
835 | int rc = X86EMUL_CONTINUE; | 896 | int rc = X86EMUL_CONTINUE; |
897 | ulong modrm_ea = 0; | ||
836 | 898 | ||
837 | if (c->rex_prefix) { | 899 | if (c->rex_prefix) { |
838 | c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */ | 900 | c->modrm_reg = (c->rex_prefix & 4) << 1; /* REX.R */ |
@@ -844,16 +906,26 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
844 | c->modrm_mod |= (c->modrm & 0xc0) >> 6; | 906 | c->modrm_mod |= (c->modrm & 0xc0) >> 6; |
845 | c->modrm_reg |= (c->modrm & 0x38) >> 3; | 907 | c->modrm_reg |= (c->modrm & 0x38) >> 3; |
846 | c->modrm_rm |= (c->modrm & 0x07); | 908 | c->modrm_rm |= (c->modrm & 0x07); |
847 | c->modrm_ea = 0; | 909 | c->modrm_seg = VCPU_SREG_DS; |
848 | c->use_modrm_ea = 1; | ||
849 | 910 | ||
850 | if (c->modrm_mod == 3) { | 911 | if (c->modrm_mod == 3) { |
851 | c->modrm_ptr = decode_register(c->modrm_rm, | 912 | op->type = OP_REG; |
913 | op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
914 | op->addr.reg = decode_register(c->modrm_rm, | ||
852 | c->regs, c->d & ByteOp); | 915 | c->regs, c->d & ByteOp); |
853 | c->modrm_val = *(unsigned long *)c->modrm_ptr; | 916 | if (c->d & Sse) { |
917 | op->type = OP_XMM; | ||
918 | op->bytes = 16; | ||
919 | op->addr.xmm = c->modrm_rm; | ||
920 | read_sse_reg(ctxt, &op->vec_val, c->modrm_rm); | ||
921 | return rc; | ||
922 | } | ||
923 | fetch_register_operand(op); | ||
854 | return rc; | 924 | return rc; |
855 | } | 925 | } |
856 | 926 | ||
927 | op->type = OP_MEM; | ||
928 | |||
857 | if (c->ad_bytes == 2) { | 929 | if (c->ad_bytes == 2) { |
858 | unsigned bx = c->regs[VCPU_REGS_RBX]; | 930 | unsigned bx = c->regs[VCPU_REGS_RBX]; |
859 | unsigned bp = c->regs[VCPU_REGS_RBP]; | 931 | unsigned bp = c->regs[VCPU_REGS_RBP]; |
@@ -864,47 +936,46 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
864 | switch (c->modrm_mod) { | 936 | switch (c->modrm_mod) { |
865 | case 0: | 937 | case 0: |
866 | if (c->modrm_rm == 6) | 938 | if (c->modrm_rm == 6) |
867 | c->modrm_ea += insn_fetch(u16, 2, c->eip); | 939 | modrm_ea += insn_fetch(u16, 2, c->eip); |
868 | break; | 940 | break; |
869 | case 1: | 941 | case 1: |
870 | c->modrm_ea += insn_fetch(s8, 1, c->eip); | 942 | modrm_ea += insn_fetch(s8, 1, c->eip); |
871 | break; | 943 | break; |
872 | case 2: | 944 | case 2: |
873 | c->modrm_ea += insn_fetch(u16, 2, c->eip); | 945 | modrm_ea += insn_fetch(u16, 2, c->eip); |
874 | break; | 946 | break; |
875 | } | 947 | } |
876 | switch (c->modrm_rm) { | 948 | switch (c->modrm_rm) { |
877 | case 0: | 949 | case 0: |
878 | c->modrm_ea += bx + si; | 950 | modrm_ea += bx + si; |
879 | break; | 951 | break; |
880 | case 1: | 952 | case 1: |
881 | c->modrm_ea += bx + di; | 953 | modrm_ea += bx + di; |
882 | break; | 954 | break; |
883 | case 2: | 955 | case 2: |
884 | c->modrm_ea += bp + si; | 956 | modrm_ea += bp + si; |
885 | break; | 957 | break; |
886 | case 3: | 958 | case 3: |
887 | c->modrm_ea += bp + di; | 959 | modrm_ea += bp + di; |
888 | break; | 960 | break; |
889 | case 4: | 961 | case 4: |
890 | c->modrm_ea += si; | 962 | modrm_ea += si; |
891 | break; | 963 | break; |
892 | case 5: | 964 | case 5: |
893 | c->modrm_ea += di; | 965 | modrm_ea += di; |
894 | break; | 966 | break; |
895 | case 6: | 967 | case 6: |
896 | if (c->modrm_mod != 0) | 968 | if (c->modrm_mod != 0) |
897 | c->modrm_ea += bp; | 969 | modrm_ea += bp; |
898 | break; | 970 | break; |
899 | case 7: | 971 | case 7: |
900 | c->modrm_ea += bx; | 972 | modrm_ea += bx; |
901 | break; | 973 | break; |
902 | } | 974 | } |
903 | if (c->modrm_rm == 2 || c->modrm_rm == 3 || | 975 | if (c->modrm_rm == 2 || c->modrm_rm == 3 || |
904 | (c->modrm_rm == 6 && c->modrm_mod != 0)) | 976 | (c->modrm_rm == 6 && c->modrm_mod != 0)) |
905 | if (!c->has_seg_override) | 977 | c->modrm_seg = VCPU_SREG_SS; |
906 | set_seg_override(c, VCPU_SREG_SS); | 978 | modrm_ea = (u16)modrm_ea; |
907 | c->modrm_ea = (u16)c->modrm_ea; | ||
908 | } else { | 979 | } else { |
909 | /* 32/64-bit ModR/M decode. */ | 980 | /* 32/64-bit ModR/M decode. */ |
910 | if ((c->modrm_rm & 7) == 4) { | 981 | if ((c->modrm_rm & 7) == 4) { |
@@ -914,410 +985,74 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt, | |||
914 | scale = sib >> 6; | 985 | scale = sib >> 6; |
915 | 986 | ||
916 | if ((base_reg & 7) == 5 && c->modrm_mod == 0) | 987 | if ((base_reg & 7) == 5 && c->modrm_mod == 0) |
917 | c->modrm_ea += insn_fetch(s32, 4, c->eip); | 988 | modrm_ea += insn_fetch(s32, 4, c->eip); |
918 | else | 989 | else |
919 | c->modrm_ea += c->regs[base_reg]; | 990 | modrm_ea += c->regs[base_reg]; |
920 | if (index_reg != 4) | 991 | if (index_reg != 4) |
921 | c->modrm_ea += c->regs[index_reg] << scale; | 992 | modrm_ea += c->regs[index_reg] << scale; |
922 | } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) { | 993 | } else if ((c->modrm_rm & 7) == 5 && c->modrm_mod == 0) { |
923 | if (ctxt->mode == X86EMUL_MODE_PROT64) | 994 | if (ctxt->mode == X86EMUL_MODE_PROT64) |
924 | c->rip_relative = 1; | 995 | c->rip_relative = 1; |
925 | } else | 996 | } else |
926 | c->modrm_ea += c->regs[c->modrm_rm]; | 997 | modrm_ea += c->regs[c->modrm_rm]; |
927 | switch (c->modrm_mod) { | 998 | switch (c->modrm_mod) { |
928 | case 0: | 999 | case 0: |
929 | if (c->modrm_rm == 5) | 1000 | if (c->modrm_rm == 5) |
930 | c->modrm_ea += insn_fetch(s32, 4, c->eip); | 1001 | modrm_ea += insn_fetch(s32, 4, c->eip); |
931 | break; | 1002 | break; |
932 | case 1: | 1003 | case 1: |
933 | c->modrm_ea += insn_fetch(s8, 1, c->eip); | 1004 | modrm_ea += insn_fetch(s8, 1, c->eip); |
934 | break; | 1005 | break; |
935 | case 2: | 1006 | case 2: |
936 | c->modrm_ea += insn_fetch(s32, 4, c->eip); | 1007 | modrm_ea += insn_fetch(s32, 4, c->eip); |
937 | break; | 1008 | break; |
938 | } | 1009 | } |
939 | } | 1010 | } |
1011 | op->addr.mem.ea = modrm_ea; | ||
940 | done: | 1012 | done: |
941 | return rc; | 1013 | return rc; |
942 | } | 1014 | } |
943 | 1015 | ||
944 | static int decode_abs(struct x86_emulate_ctxt *ctxt, | 1016 | static int decode_abs(struct x86_emulate_ctxt *ctxt, |
945 | struct x86_emulate_ops *ops) | 1017 | struct x86_emulate_ops *ops, |
1018 | struct operand *op) | ||
946 | { | 1019 | { |
947 | struct decode_cache *c = &ctxt->decode; | 1020 | struct decode_cache *c = &ctxt->decode; |
948 | int rc = X86EMUL_CONTINUE; | 1021 | int rc = X86EMUL_CONTINUE; |
949 | 1022 | ||
1023 | op->type = OP_MEM; | ||
950 | switch (c->ad_bytes) { | 1024 | switch (c->ad_bytes) { |
951 | case 2: | 1025 | case 2: |
952 | c->modrm_ea = insn_fetch(u16, 2, c->eip); | 1026 | op->addr.mem.ea = insn_fetch(u16, 2, c->eip); |
953 | break; | 1027 | break; |
954 | case 4: | 1028 | case 4: |
955 | c->modrm_ea = insn_fetch(u32, 4, c->eip); | 1029 | op->addr.mem.ea = insn_fetch(u32, 4, c->eip); |
956 | break; | 1030 | break; |
957 | case 8: | 1031 | case 8: |
958 | c->modrm_ea = insn_fetch(u64, 8, c->eip); | 1032 | op->addr.mem.ea = insn_fetch(u64, 8, c->eip); |
959 | break; | 1033 | break; |
960 | } | 1034 | } |
961 | done: | 1035 | done: |
962 | return rc; | 1036 | return rc; |
963 | } | 1037 | } |
964 | 1038 | ||
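decode_abs() serves the moffs forms (opcodes 0xA0-0xA3), where the full absolute address follows the opcode as a plain displacement sized by the effective address size, bypassing ModR/M entirely. Example encodings it would decode (assembled by hand for illustration):

	/* 16-bit addressing:  a0 34 12             mov al, [0x1234]
	 * 32-bit addressing:  a0 78 56 34 12       mov al, [0x12345678]
	 * 64-bit (movabs):    a0 + 8-byte moffs    mov al, [imm64]
	 */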
965 | int | 1039 | static void fetch_bit_operand(struct decode_cache *c) |
966 | x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | ||
967 | { | 1040 | { |
968 | struct decode_cache *c = &ctxt->decode; | 1041 | long sv = 0, mask; |
969 | int rc = X86EMUL_CONTINUE; | ||
970 | int mode = ctxt->mode; | ||
971 | int def_op_bytes, def_ad_bytes, group; | ||
972 | |||
973 | |||
974 | /* we cannot decode an insn before the previous rep insn completes */ | ||
975 | WARN_ON(ctxt->restart); | ||
976 | |||
977 | c->eip = ctxt->eip; | ||
978 | c->fetch.start = c->fetch.end = c->eip; | ||
979 | ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS); | ||
980 | |||
981 | switch (mode) { | ||
982 | case X86EMUL_MODE_REAL: | ||
983 | case X86EMUL_MODE_VM86: | ||
984 | case X86EMUL_MODE_PROT16: | ||
985 | def_op_bytes = def_ad_bytes = 2; | ||
986 | break; | ||
987 | case X86EMUL_MODE_PROT32: | ||
988 | def_op_bytes = def_ad_bytes = 4; | ||
989 | break; | ||
990 | #ifdef CONFIG_X86_64 | ||
991 | case X86EMUL_MODE_PROT64: | ||
992 | def_op_bytes = 4; | ||
993 | def_ad_bytes = 8; | ||
994 | break; | ||
995 | #endif | ||
996 | default: | ||
997 | return -1; | ||
998 | } | ||
999 | |||
1000 | c->op_bytes = def_op_bytes; | ||
1001 | c->ad_bytes = def_ad_bytes; | ||
1002 | |||
1003 | /* Legacy prefixes. */ | ||
1004 | for (;;) { | ||
1005 | switch (c->b = insn_fetch(u8, 1, c->eip)) { | ||
1006 | case 0x66: /* operand-size override */ | ||
1007 | /* switch between 2/4 bytes */ | ||
1008 | c->op_bytes = def_op_bytes ^ 6; | ||
1009 | break; | ||
1010 | case 0x67: /* address-size override */ | ||
1011 | if (mode == X86EMUL_MODE_PROT64) | ||
1012 | /* switch between 4/8 bytes */ | ||
1013 | c->ad_bytes = def_ad_bytes ^ 12; | ||
1014 | else | ||
1015 | /* switch between 2/4 bytes */ | ||
1016 | c->ad_bytes = def_ad_bytes ^ 6; | ||
1017 | break; | ||
1018 | case 0x26: /* ES override */ | ||
1019 | case 0x2e: /* CS override */ | ||
1020 | case 0x36: /* SS override */ | ||
1021 | case 0x3e: /* DS override */ | ||
1022 | set_seg_override(c, (c->b >> 3) & 3); | ||
1023 | break; | ||
1024 | case 0x64: /* FS override */ | ||
1025 | case 0x65: /* GS override */ | ||
1026 | set_seg_override(c, c->b & 7); | ||
1027 | break; | ||
1028 | case 0x40 ... 0x4f: /* REX */ | ||
1029 | if (mode != X86EMUL_MODE_PROT64) | ||
1030 | goto done_prefixes; | ||
1031 | c->rex_prefix = c->b; | ||
1032 | continue; | ||
1033 | case 0xf0: /* LOCK */ | ||
1034 | c->lock_prefix = 1; | ||
1035 | break; | ||
1036 | case 0xf2: /* REPNE/REPNZ */ | ||
1037 | c->rep_prefix = REPNE_PREFIX; | ||
1038 | break; | ||
1039 | case 0xf3: /* REP/REPE/REPZ */ | ||
1040 | c->rep_prefix = REPE_PREFIX; | ||
1041 | break; | ||
1042 | default: | ||
1043 | goto done_prefixes; | ||
1044 | } | ||
1045 | |||
1046 | /* Any legacy prefix after a REX prefix nullifies its effect. */ | ||
1047 | 1042 | ||
1048 | c->rex_prefix = 0; | 1043 | if (c->dst.type == OP_MEM && c->src.type == OP_REG) { |
1049 | } | 1044 | mask = ~(c->dst.bytes * 8 - 1); |
1050 | |||
1051 | done_prefixes: | ||
1052 | |||
1053 | /* REX prefix. */ | ||
1054 | if (c->rex_prefix) | ||
1055 | if (c->rex_prefix & 8) | ||
1056 | c->op_bytes = 8; /* REX.W */ | ||
1057 | 1045 | ||
1058 | /* Opcode byte(s). */ | 1046 | if (c->src.bytes == 2) |
1059 | c->d = opcode_table[c->b]; | 1047 | sv = (s16)c->src.val & (s16)mask; |
1060 | if (c->d == 0) { | 1048 | else if (c->src.bytes == 4) |
1061 | /* Two-byte opcode? */ | 1049 | sv = (s32)c->src.val & (s32)mask; |
1062 | if (c->b == 0x0f) { | ||
1063 | c->twobyte = 1; | ||
1064 | c->b = insn_fetch(u8, 1, c->eip); | ||
1065 | c->d = twobyte_table[c->b]; | ||
1066 | } | ||
1067 | } | ||
1068 | |||
1069 | if (c->d & Group) { | ||
1070 | group = c->d & GroupMask; | ||
1071 | c->modrm = insn_fetch(u8, 1, c->eip); | ||
1072 | --c->eip; | ||
1073 | |||
1074 | group = (group << 3) + ((c->modrm >> 3) & 7); | ||
1075 | if ((c->d & GroupDual) && (c->modrm >> 6) == 3) | ||
1076 | c->d = group2_table[group]; | ||
1077 | else | ||
1078 | c->d = group_table[group]; | ||
1079 | } | ||
1080 | 1050 | ||
1081 | /* Unrecognised? */ | 1051 | c->dst.addr.mem.ea += (sv >> 3); |
1082 | if (c->d == 0) { | ||
1083 | DPRINTF("Cannot emulate %02x\n", c->b); | ||
1084 | return -1; | ||
1085 | } | 1052 | } |
1086 | 1053 | ||
1087 | if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack)) | 1054 | /* only subword offset */ |
1088 | c->op_bytes = 8; | 1055 | c->src.val &= (c->dst.bytes << 3) - 1; |
1089 | |||
1090 | /* ModRM and SIB bytes. */ | ||
1091 | if (c->d & ModRM) | ||
1092 | rc = decode_modrm(ctxt, ops); | ||
1093 | else if (c->d & MemAbs) | ||
1094 | rc = decode_abs(ctxt, ops); | ||
1095 | if (rc != X86EMUL_CONTINUE) | ||
1096 | goto done; | ||
1097 | |||
1098 | if (!c->has_seg_override) | ||
1099 | set_seg_override(c, VCPU_SREG_DS); | ||
1100 | |||
1101 | if (!(!c->twobyte && c->b == 0x8d)) | ||
1102 | c->modrm_ea += seg_override_base(ctxt, ops, c); | ||
1103 | |||
1104 | if (c->ad_bytes != 8) | ||
1105 | c->modrm_ea = (u32)c->modrm_ea; | ||
1106 | |||
1107 | if (c->rip_relative) | ||
1108 | c->modrm_ea += c->eip; | ||
1109 | |||
1110 | /* | ||
1111 | * Decode and fetch the source operand: register, memory | ||
1112 | * or immediate. | ||
1113 | */ | ||
1114 | switch (c->d & SrcMask) { | ||
1115 | case SrcNone: | ||
1116 | break; | ||
1117 | case SrcReg: | ||
1118 | decode_register_operand(&c->src, c, 0); | ||
1119 | break; | ||
1120 | case SrcMem16: | ||
1121 | c->src.bytes = 2; | ||
1122 | goto srcmem_common; | ||
1123 | case SrcMem32: | ||
1124 | c->src.bytes = 4; | ||
1125 | goto srcmem_common; | ||
1126 | case SrcMem: | ||
1127 | c->src.bytes = (c->d & ByteOp) ? 1 : | ||
1128 | c->op_bytes; | ||
1129 | /* Don't fetch the address for invlpg: it could be unmapped. */ | ||
1130 | if (c->twobyte && c->b == 0x01 && c->modrm_reg == 7) | ||
1131 | break; | ||
1132 | srcmem_common: | ||
1133 | /* | ||
1134 | * For instructions with a ModR/M byte, switch to register | ||
1135 | * access if Mod = 3. | ||
1136 | */ | ||
1137 | if ((c->d & ModRM) && c->modrm_mod == 3) { | ||
1138 | c->src.type = OP_REG; | ||
1139 | c->src.val = c->modrm_val; | ||
1140 | c->src.ptr = c->modrm_ptr; | ||
1141 | break; | ||
1142 | } | ||
1143 | c->src.type = OP_MEM; | ||
1144 | c->src.ptr = (unsigned long *)c->modrm_ea; | ||
1145 | c->src.val = 0; | ||
1146 | break; | ||
1147 | case SrcImm: | ||
1148 | case SrcImmU: | ||
1149 | c->src.type = OP_IMM; | ||
1150 | c->src.ptr = (unsigned long *)c->eip; | ||
1151 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
1152 | if (c->src.bytes == 8) | ||
1153 | c->src.bytes = 4; | ||
1154 | /* NB. Immediates are sign-extended as necessary. */ | ||
1155 | switch (c->src.bytes) { | ||
1156 | case 1: | ||
1157 | c->src.val = insn_fetch(s8, 1, c->eip); | ||
1158 | break; | ||
1159 | case 2: | ||
1160 | c->src.val = insn_fetch(s16, 2, c->eip); | ||
1161 | break; | ||
1162 | case 4: | ||
1163 | c->src.val = insn_fetch(s32, 4, c->eip); | ||
1164 | break; | ||
1165 | } | ||
1166 | if ((c->d & SrcMask) == SrcImmU) { | ||
1167 | switch (c->src.bytes) { | ||
1168 | case 1: | ||
1169 | c->src.val &= 0xff; | ||
1170 | break; | ||
1171 | case 2: | ||
1172 | c->src.val &= 0xffff; | ||
1173 | break; | ||
1174 | case 4: | ||
1175 | c->src.val &= 0xffffffff; | ||
1176 | break; | ||
1177 | } | ||
1178 | } | ||
1179 | break; | ||
1180 | case SrcImmByte: | ||
1181 | case SrcImmUByte: | ||
1182 | c->src.type = OP_IMM; | ||
1183 | c->src.ptr = (unsigned long *)c->eip; | ||
1184 | c->src.bytes = 1; | ||
1185 | if ((c->d & SrcMask) == SrcImmByte) | ||
1186 | c->src.val = insn_fetch(s8, 1, c->eip); | ||
1187 | else | ||
1188 | c->src.val = insn_fetch(u8, 1, c->eip); | ||
1189 | break; | ||
1190 | case SrcAcc: | ||
1191 | c->src.type = OP_REG; | ||
1192 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
1193 | c->src.ptr = &c->regs[VCPU_REGS_RAX]; | ||
1194 | switch (c->src.bytes) { | ||
1195 | case 1: | ||
1196 | c->src.val = *(u8 *)c->src.ptr; | ||
1197 | break; | ||
1198 | case 2: | ||
1199 | c->src.val = *(u16 *)c->src.ptr; | ||
1200 | break; | ||
1201 | case 4: | ||
1202 | c->src.val = *(u32 *)c->src.ptr; | ||
1203 | break; | ||
1204 | case 8: | ||
1205 | c->src.val = *(u64 *)c->src.ptr; | ||
1206 | break; | ||
1207 | } | ||
1208 | break; | ||
1209 | case SrcOne: | ||
1210 | c->src.bytes = 1; | ||
1211 | c->src.val = 1; | ||
1212 | break; | ||
1213 | case SrcSI: | ||
1214 | c->src.type = OP_MEM; | ||
1215 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
1216 | c->src.ptr = (unsigned long *) | ||
1217 | register_address(c, seg_override_base(ctxt, ops, c), | ||
1218 | c->regs[VCPU_REGS_RSI]); | ||
1219 | c->src.val = 0; | ||
1220 | break; | ||
1221 | case SrcImmFAddr: | ||
1222 | c->src.type = OP_IMM; | ||
1223 | c->src.ptr = (unsigned long *)c->eip; | ||
1224 | c->src.bytes = c->op_bytes + 2; | ||
1225 | insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip); | ||
1226 | break; | ||
1227 | case SrcMemFAddr: | ||
1228 | c->src.type = OP_MEM; | ||
1229 | c->src.ptr = (unsigned long *)c->modrm_ea; | ||
1230 | c->src.bytes = c->op_bytes + 2; | ||
1231 | break; | ||
1232 | } | ||
1233 | |||
1234 | /* | ||
1235 | * Decode and fetch the second source operand: register, memory | ||
1236 | * or immediate. | ||
1237 | */ | ||
1238 | switch (c->d & Src2Mask) { | ||
1239 | case Src2None: | ||
1240 | break; | ||
1241 | case Src2CL: | ||
1242 | c->src2.bytes = 1; | ||
1243 | c->src2.val = c->regs[VCPU_REGS_RCX] & 0x8; | ||
1244 | break; | ||
1245 | case Src2ImmByte: | ||
1246 | c->src2.type = OP_IMM; | ||
1247 | c->src2.ptr = (unsigned long *)c->eip; | ||
1248 | c->src2.bytes = 1; | ||
1249 | c->src2.val = insn_fetch(u8, 1, c->eip); | ||
1250 | break; | ||
1251 | case Src2One: | ||
1252 | c->src2.bytes = 1; | ||
1253 | c->src2.val = 1; | ||
1254 | break; | ||
1255 | } | ||
1256 | |||
1257 | /* Decode and fetch the destination operand: register or memory. */ | ||
1258 | switch (c->d & DstMask) { | ||
1259 | case ImplicitOps: | ||
1260 | /* Special instructions do their own operand decoding. */ | ||
1261 | return 0; | ||
1262 | case DstReg: | ||
1263 | decode_register_operand(&c->dst, c, | ||
1264 | c->twobyte && (c->b == 0xb6 || c->b == 0xb7)); | ||
1265 | break; | ||
1266 | case DstMem: | ||
1267 | case DstMem64: | ||
1268 | if ((c->d & ModRM) && c->modrm_mod == 3) { | ||
1269 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
1270 | c->dst.type = OP_REG; | ||
1271 | c->dst.val = c->dst.orig_val = c->modrm_val; | ||
1272 | c->dst.ptr = c->modrm_ptr; | ||
1273 | break; | ||
1274 | } | ||
1275 | c->dst.type = OP_MEM; | ||
1276 | c->dst.ptr = (unsigned long *)c->modrm_ea; | ||
1277 | if ((c->d & DstMask) == DstMem64) | ||
1278 | c->dst.bytes = 8; | ||
1279 | else | ||
1280 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
1281 | c->dst.val = 0; | ||
1282 | if (c->d & BitOp) { | ||
1283 | unsigned long mask = ~(c->dst.bytes * 8 - 1); | ||
1284 | |||
1285 | c->dst.ptr = (void *)c->dst.ptr + | ||
1286 | (c->src.val & mask) / 8; | ||
1287 | } | ||
1288 | break; | ||
1289 | case DstAcc: | ||
1290 | c->dst.type = OP_REG; | ||
1291 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
1292 | c->dst.ptr = &c->regs[VCPU_REGS_RAX]; | ||
1293 | switch (c->dst.bytes) { | ||
1294 | case 1: | ||
1295 | c->dst.val = *(u8 *)c->dst.ptr; | ||
1296 | break; | ||
1297 | case 2: | ||
1298 | c->dst.val = *(u16 *)c->dst.ptr; | ||
1299 | break; | ||
1300 | case 4: | ||
1301 | c->dst.val = *(u32 *)c->dst.ptr; | ||
1302 | break; | ||
1303 | case 8: | ||
1304 | c->dst.val = *(u64 *)c->dst.ptr; | ||
1305 | break; | ||
1306 | } | ||
1307 | c->dst.orig_val = c->dst.val; | ||
1308 | break; | ||
1309 | case DstDI: | ||
1310 | c->dst.type = OP_MEM; | ||
1311 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
1312 | c->dst.ptr = (unsigned long *) | ||
1313 | register_address(c, es_base(ctxt, ops), | ||
1314 | c->regs[VCPU_REGS_RDI]); | ||
1315 | c->dst.val = 0; | ||
1316 | break; | ||
1317 | } | ||
1318 | |||
1319 | done: | ||
1320 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; | ||
1321 | } | 1056 | } |
1322 | 1057 | ||
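fetch_bit_operand() implements the BT/BTS/BTR/BTC rule that a register bit offset is signed and may address memory far outside the operand: the effective address moves by offset/8 bytes and only the subword offset remains. A self-contained model of the same arithmetic (signature invented; it assumes the bit offset arrives already sign-extended, where the code above truncates via the s16/s32 casts, and it relies on arithmetic right shift for negative offsets, as the kernel does):

#include <stdint.h>

static void adjust_bit_operand(uint64_t *mem_ea, int64_t *bit_off,
			       unsigned op_bytes)
{
	int64_t mask = ~(int64_t)(op_bytes * 8 - 1);
	int64_t sv = *bit_off & mask;	/* whole-word part, keeps sign */

	*mem_ea += sv >> 3;		/* move the address by sv/8 bytes */
	*bit_off &= (op_bytes * 8) - 1;	/* only the subword offset is left */
}

For example, with op_bytes == 2 a bit offset of -1 moves the address back two bytes and tests bit 15 of the word there.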
1323 | static int read_emulated(struct x86_emulate_ctxt *ctxt, | 1058 | static int read_emulated(struct x86_emulate_ctxt *ctxt, |
@@ -1326,7 +1061,6 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt, | |||
1326 | { | 1061 | { |
1327 | int rc; | 1062 | int rc; |
1328 | struct read_cache *mc = &ctxt->decode.mem_read; | 1063 | struct read_cache *mc = &ctxt->decode.mem_read; |
1329 | u32 err; | ||
1330 | 1064 | ||
1331 | while (size) { | 1065 | while (size) { |
1332 | int n = min(size, 8u); | 1066 | int n = min(size, 8u); |
@@ -1334,10 +1068,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt, | |||
1334 | if (mc->pos < mc->end) | 1068 | if (mc->pos < mc->end) |
1335 | goto read_cached; | 1069 | goto read_cached; |
1336 | 1070 | ||
1337 | rc = ops->read_emulated(addr, mc->data + mc->end, n, &err, | 1071 | rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n, |
1338 | ctxt->vcpu); | 1072 | &ctxt->exception); |
1339 | if (rc == X86EMUL_PROPAGATE_FAULT) | ||
1340 | emulate_pf(ctxt, addr, err); | ||
1341 | if (rc != X86EMUL_CONTINUE) | 1073 | if (rc != X86EMUL_CONTINUE) |
1342 | return rc; | 1074 | return rc; |
1343 | mc->end += n; | 1075 | mc->end += n; |
@@ -1351,6 +1083,50 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt, | |||
1351 | return X86EMUL_CONTINUE; | 1083 | return X86EMUL_CONTINUE; |
1352 | } | 1084 | } |
1353 | 1085 | ||
1086 | static int segmented_read(struct x86_emulate_ctxt *ctxt, | ||
1087 | struct segmented_address addr, | ||
1088 | void *data, | ||
1089 | unsigned size) | ||
1090 | { | ||
1091 | int rc; | ||
1092 | ulong linear; | ||
1093 | |||
1094 | rc = linearize(ctxt, addr, size, false, &linear); | ||
1095 | if (rc != X86EMUL_CONTINUE) | ||
1096 | return rc; | ||
1097 | return read_emulated(ctxt, ctxt->ops, linear, data, size); | ||
1098 | } | ||
1099 | |||
1100 | static int segmented_write(struct x86_emulate_ctxt *ctxt, | ||
1101 | struct segmented_address addr, | ||
1102 | const void *data, | ||
1103 | unsigned size) | ||
1104 | { | ||
1105 | int rc; | ||
1106 | ulong linear; | ||
1107 | |||
1108 | rc = linearize(ctxt, addr, size, true, &linear); | ||
1109 | if (rc != X86EMUL_CONTINUE) | ||
1110 | return rc; | ||
1111 | return ctxt->ops->write_emulated(ctxt, linear, data, size, | ||
1112 | &ctxt->exception); | ||
1113 | } | ||
1114 | |||
1115 | static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt, | ||
1116 | struct segmented_address addr, | ||
1117 | const void *orig_data, const void *data, | ||
1118 | unsigned size) | ||
1119 | { | ||
1120 | int rc; | ||
1121 | ulong linear; | ||
1122 | |||
1123 | rc = linearize(ctxt, addr, size, true, &linear); | ||
1124 | if (rc != X86EMUL_CONTINUE) | ||
1125 | return rc; | ||
1126 | return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data, | ||
1127 | size, &ctxt->exception); | ||
1128 | } | ||
1129 | |||
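The three new helpers share one shape: linearize the (segment, offset) pair, propagate any fault, then hand the linear address to the backend with &ctxt->exception as the fault sink. A hypothetical flat-segment model of the linearize step (names invented; the real linearize() also honors access rights, expand-down segments, and canonical-address checks):

#include <stdint.h>

static int linearize_sketch(uint64_t seg_base, uint32_t seg_limit,
			    uint32_t ea, unsigned size, uint64_t *linear)
{
	if ((uint64_t)ea + size - 1 > seg_limit)
		return -1;		/* #GP/#SS in the real code */
	*linear = seg_base + ea;
	return 0;			/* X86EMUL_CONTINUE */
}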
1354 | static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, | 1130 | static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, |
1355 | struct x86_emulate_ops *ops, | 1131 | struct x86_emulate_ops *ops, |
1356 | unsigned int size, unsigned short port, | 1132 | unsigned int size, unsigned short port, |
@@ -1371,7 +1147,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, | |||
1371 | if (n == 0) | 1147 | if (n == 0) |
1372 | n = 1; | 1148 | n = 1; |
1373 | rc->pos = rc->end = 0; | 1149 | rc->pos = rc->end = 0; |
1374 | if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu)) | 1150 | if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n)) |
1375 | return 0; | 1151 | return 0; |
1376 | rc->end = n * size; | 1152 | rc->end = n * size; |
1377 | } | 1153 | } |
@@ -1381,27 +1157,22 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt, | |||
1381 | return 1; | 1157 | return 1; |
1382 | } | 1158 | } |
1383 | 1159 | ||
1384 | static u32 desc_limit_scaled(struct desc_struct *desc) | ||
1385 | { | ||
1386 | u32 limit = get_desc_limit(desc); | ||
1387 | |||
1388 | return desc->g ? (limit << 12) | 0xfff : limit; | ||
1389 | } | ||
1390 | |||
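desc_limit_scaled() is only removed from this spot; later hunks still call it (e.g. on tr_seg below), so it survives earlier in the file. Its arithmetic is worth a worked example: with the granularity bit set, the 20-bit limit field counts 4 KiB pages, so the byte limit gets 0xfff ORed into the low bits. A standalone check of the same expression:

#include <stdint.h>
#include <assert.h>

static uint32_t limit_scaled(uint32_t limit, int g)
{
	return g ? (limit << 12) | 0xfff : limit;
}

int main(void)
{
	assert(limit_scaled(0x0001f, 1) == 0x1ffff); /* 0x20 pages -> 128 KiB - 1 */
	assert(limit_scaled(0x0ffff, 0) == 0x0ffff); /* byte-granular */
	return 0;
}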
1391 | static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, | 1160 | static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, |
1392 | struct x86_emulate_ops *ops, | 1161 | struct x86_emulate_ops *ops, |
1393 | u16 selector, struct desc_ptr *dt) | 1162 | u16 selector, struct desc_ptr *dt) |
1394 | { | 1163 | { |
1395 | if (selector & 1 << 2) { | 1164 | if (selector & 1 << 2) { |
1396 | struct desc_struct desc; | 1165 | struct desc_struct desc; |
1166 | u16 sel; | ||
1167 | |||
1397 | memset (dt, 0, sizeof *dt); | 1168 | memset (dt, 0, sizeof *dt); |
1398 | if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu)) | 1169 | if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR)) |
1399 | return; | 1170 | return; |
1400 | 1171 | ||
1401 | dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ | 1172 | dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ |
1402 | dt->address = get_desc_base(&desc); | 1173 | dt->address = get_desc_base(&desc); |
1403 | } else | 1174 | } else |
1404 | ops->get_gdt(dt, ctxt->vcpu); | 1175 | ops->get_gdt(ctxt, dt); |
1405 | } | 1176 | } |
1406 | 1177 | ||
1407 | /* allowed just for 8-byte segments */ | 1178 | /* allowed just for 8-byte segments */
@@ -1412,19 +1183,14 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1412 | struct desc_ptr dt; | 1183 | struct desc_ptr dt; |
1413 | u16 index = selector >> 3; | 1184 | u16 index = selector >> 3; |
1414 | int ret; | 1185 | int ret; |
1415 | u32 err; | ||
1416 | ulong addr; | 1186 | ulong addr; |
1417 | 1187 | ||
1418 | get_descriptor_table_ptr(ctxt, ops, selector, &dt); | 1188 | get_descriptor_table_ptr(ctxt, ops, selector, &dt); |
1419 | 1189 | ||
1420 | if (dt.size < index * 8 + 7) { | 1190 | if (dt.size < index * 8 + 7) |
1421 | emulate_gp(ctxt, selector & 0xfffc); | 1191 | return emulate_gp(ctxt, selector & 0xfffc); |
1422 | return X86EMUL_PROPAGATE_FAULT; | ||
1423 | } | ||
1424 | addr = dt.address + index * 8; | 1192 | addr = dt.address + index * 8; |
1425 | ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err); | 1193 | ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); |
1426 | if (ret == X86EMUL_PROPAGATE_FAULT) | ||
1427 | emulate_pf(ctxt, addr, err); | ||
1428 | 1194 | ||
1429 | return ret; | 1195 | return ret; |
1430 | } | 1196 | } |
@@ -1436,25 +1202,21 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1436 | { | 1202 | { |
1437 | struct desc_ptr dt; | 1203 | struct desc_ptr dt; |
1438 | u16 index = selector >> 3; | 1204 | u16 index = selector >> 3; |
1439 | u32 err; | ||
1440 | ulong addr; | 1205 | ulong addr; |
1441 | int ret; | 1206 | int ret; |
1442 | 1207 | ||
1443 | get_descriptor_table_ptr(ctxt, ops, selector, &dt); | 1208 | get_descriptor_table_ptr(ctxt, ops, selector, &dt); |
1444 | 1209 | ||
1445 | if (dt.size < index * 8 + 7) { | 1210 | if (dt.size < index * 8 + 7) |
1446 | emulate_gp(ctxt, selector & 0xfffc); | 1211 | return emulate_gp(ctxt, selector & 0xfffc); |
1447 | return X86EMUL_PROPAGATE_FAULT; | ||
1448 | } | ||
1449 | 1212 | ||
1450 | addr = dt.address + index * 8; | 1213 | addr = dt.address + index * 8; |
1451 | ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err); | 1214 | ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception); |
1452 | if (ret == X86EMUL_PROPAGATE_FAULT) | ||
1453 | emulate_pf(ctxt, addr, err); | ||
1454 | 1215 | ||
1455 | return ret; | 1216 | return ret; |
1456 | } | 1217 | } |
1457 | 1218 | ||
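Both descriptor accessors above share the same index arithmetic, which the rewrite leaves untouched: the selector's top 13 bits index 8-byte table entries, and the `index * 8 + 7` comparison ensures the whole entry fits inside the table limit. In isolation, with stand-in types:

#include <stdint.h>

static int descriptor_addr(uint64_t table_base, uint32_t table_limit,
			   uint16_t selector, uint64_t *addr)
{
	uint16_t index = selector >> 3;

	if (table_limit < (uint32_t)index * 8 + 7)
		return -1;	/* #GP(selector & 0xfffc) in the real code */
	*addr = table_base + index * 8;
	return 0;
}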
1219 | /* Does not support long mode */ | ||
1458 | static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, | 1220 | static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, |
1459 | struct x86_emulate_ops *ops, | 1221 | struct x86_emulate_ops *ops, |
1460 | u16 selector, int seg) | 1222 | u16 selector, int seg) |
@@ -1509,7 +1271,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1509 | 1271 | ||
1510 | rpl = selector & 3; | 1272 | rpl = selector & 3; |
1511 | dpl = seg_desc.dpl; | 1273 | dpl = seg_desc.dpl; |
1512 | cpl = ops->cpl(ctxt->vcpu); | 1274 | cpl = ops->cpl(ctxt); |
1513 | 1275 | ||
1514 | switch (seg) { | 1276 | switch (seg) { |
1515 | case VCPU_SREG_SS: | 1277 | case VCPU_SREG_SS: |
@@ -1565,63 +1327,59 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1565 | return ret; | 1327 | return ret; |
1566 | } | 1328 | } |
1567 | load: | 1329 | load: |
1568 | ops->set_segment_selector(selector, seg, ctxt->vcpu); | 1330 | ops->set_segment(ctxt, selector, &seg_desc, 0, seg); |
1569 | ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu); | ||
1570 | return X86EMUL_CONTINUE; | 1331 | return X86EMUL_CONTINUE; |
1571 | exception: | 1332 | exception: |
1572 | emulate_exception(ctxt, err_vec, err_code, true); | 1333 | emulate_exception(ctxt, err_vec, err_code, true); |
1573 | return X86EMUL_PROPAGATE_FAULT; | 1334 | return X86EMUL_PROPAGATE_FAULT; |
1574 | } | 1335 | } |
1575 | 1336 | ||
1576 | static inline int writeback(struct x86_emulate_ctxt *ctxt, | 1337 | static void write_register_operand(struct operand *op) |
1577 | struct x86_emulate_ops *ops) | 1338 | { |
1339 | /* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */ | ||
1340 | switch (op->bytes) { | ||
1341 | case 1: | ||
1342 | *(u8 *)op->addr.reg = (u8)op->val; | ||
1343 | break; | ||
1344 | case 2: | ||
1345 | *(u16 *)op->addr.reg = (u16)op->val; | ||
1346 | break; | ||
1347 | case 4: | ||
1348 | *op->addr.reg = (u32)op->val; | ||
1349 | break; /* 64b: zero-extend */ | ||
1350 | case 8: | ||
1351 | *op->addr.reg = op->val; | ||
1352 | break; | ||
1353 | } | ||
1354 | } | ||
1355 | |||
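The comment in write_register_operand() is worth unpacking: in 64-bit mode a 32-bit register write clears bits 63:32, while 8- and 16-bit writes merge into the low bytes, which the pointer casts above achieve on little-endian. A model of the same semantics on a plain uint64_t, with assertions:

#include <stdint.h>
#include <assert.h>

static void write_gpr(uint64_t *reg, uint64_t val, unsigned bytes)
{
	switch (bytes) {
	case 1: *reg = (*reg & ~0xffull)   | (uint8_t)val;  break;
	case 2: *reg = (*reg & ~0xffffull) | (uint16_t)val; break;
	case 4: *reg = (uint32_t)val; break;	/* zero-extends */
	case 8: *reg = val; break;
	}
}

int main(void)
{
	uint64_t r = 0xffffffffffffffffull;

	write_gpr(&r, 0x12345678, 4);
	assert(r == 0x12345678ull);	/* upper half cleared */
	write_gpr(&r, 0xab, 1);
	assert(r == 0x123456abull);	/* low byte merged */
	return 0;
}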
1356 | static int writeback(struct x86_emulate_ctxt *ctxt) | ||
1578 | { | 1357 | { |
1579 | int rc; | 1358 | int rc; |
1580 | struct decode_cache *c = &ctxt->decode; | 1359 | struct decode_cache *c = &ctxt->decode; |
1581 | u32 err; | ||
1582 | 1360 | ||
1583 | switch (c->dst.type) { | 1361 | switch (c->dst.type) { |
1584 | case OP_REG: | 1362 | case OP_REG: |
1585 | /* The 4-byte case *is* correct: | 1363 | write_register_operand(&c->dst); |
1586 | * in 64-bit mode we zero-extend. | ||
1587 | */ | ||
1588 | switch (c->dst.bytes) { | ||
1589 | case 1: | ||
1590 | *(u8 *)c->dst.ptr = (u8)c->dst.val; | ||
1591 | break; | ||
1592 | case 2: | ||
1593 | *(u16 *)c->dst.ptr = (u16)c->dst.val; | ||
1594 | break; | ||
1595 | case 4: | ||
1596 | *c->dst.ptr = (u32)c->dst.val; | ||
1597 | break; /* 64b: zero-ext */ | ||
1598 | case 8: | ||
1599 | *c->dst.ptr = c->dst.val; | ||
1600 | break; | ||
1601 | } | ||
1602 | break; | 1364 | break; |
1603 | case OP_MEM: | 1365 | case OP_MEM: |
1604 | if (c->lock_prefix) | 1366 | if (c->lock_prefix) |
1605 | rc = ops->cmpxchg_emulated( | 1367 | rc = segmented_cmpxchg(ctxt, |
1606 | (unsigned long)c->dst.ptr, | 1368 | c->dst.addr.mem, |
1607 | &c->dst.orig_val, | 1369 | &c->dst.orig_val, |
1608 | &c->dst.val, | 1370 | &c->dst.val, |
1609 | c->dst.bytes, | 1371 | c->dst.bytes); |
1610 | &err, | ||
1611 | ctxt->vcpu); | ||
1612 | else | 1372 | else |
1613 | rc = ops->write_emulated( | 1373 | rc = segmented_write(ctxt, |
1614 | (unsigned long)c->dst.ptr, | 1374 | c->dst.addr.mem, |
1615 | &c->dst.val, | 1375 | &c->dst.val, |
1616 | c->dst.bytes, | 1376 | c->dst.bytes); |
1617 | &err, | ||
1618 | ctxt->vcpu); | ||
1619 | if (rc == X86EMUL_PROPAGATE_FAULT) | ||
1620 | emulate_pf(ctxt, | ||
1621 | (unsigned long)c->dst.ptr, err); | ||
1622 | if (rc != X86EMUL_CONTINUE) | 1377 | if (rc != X86EMUL_CONTINUE) |
1623 | return rc; | 1378 | return rc; |
1624 | break; | 1379 | break; |
1380 | case OP_XMM: | ||
1381 | write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm); | ||
1382 | break; | ||
1625 | case OP_NONE: | 1383 | case OP_NONE: |
1626 | /* no writeback */ | 1384 | /* no writeback */ |
1627 | break; | 1385 | break; |
@@ -1631,29 +1389,30 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt, | |||
1631 | return X86EMUL_CONTINUE; | 1389 | return X86EMUL_CONTINUE; |
1632 | } | 1390 | } |
1633 | 1391 | ||
1634 | static inline void emulate_push(struct x86_emulate_ctxt *ctxt, | 1392 | static int em_push(struct x86_emulate_ctxt *ctxt) |
1635 | struct x86_emulate_ops *ops) | ||
1636 | { | 1393 | { |
1637 | struct decode_cache *c = &ctxt->decode; | 1394 | struct decode_cache *c = &ctxt->decode; |
1395 | struct segmented_address addr; | ||
1638 | 1396 | ||
1639 | c->dst.type = OP_MEM; | ||
1640 | c->dst.bytes = c->op_bytes; | ||
1641 | c->dst.val = c->src.val; | ||
1642 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes); | 1397 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes); |
1643 | c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops), | 1398 | addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]); |
1644 | c->regs[VCPU_REGS_RSP]); | 1399 | addr.seg = VCPU_SREG_SS; |
1400 | |||
1401 | /* Disable writeback. */ | ||
1402 | c->dst.type = OP_NONE; | ||
1403 | return segmented_write(ctxt, addr, &c->src.val, c->op_bytes); | ||
1645 | } | 1404 | } |
1646 | 1405 | ||
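em_push() keeps the classic ordering: RSP moves down first, then the value is written at SS:RSP, now through segmented_write() so stack-limit faults surface naturally. A 16-bit-stack miniature of that ordering (the `stack` buffer is a stand-in for the SS segment):

#include <stdint.h>

static void push16_sketch(uint16_t *sp, uint8_t stack[65536], uint16_t val)
{
	*sp -= 2;				/* decrement before the write */
	stack[*sp] = (uint8_t)val;		/* little-endian store */
	stack[(uint16_t)(*sp + 1)] = val >> 8;
}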
1647 | static int emulate_pop(struct x86_emulate_ctxt *ctxt, | 1406 | static int emulate_pop(struct x86_emulate_ctxt *ctxt, |
1648 | struct x86_emulate_ops *ops, | ||
1649 | void *dest, int len) | 1407 | void *dest, int len) |
1650 | { | 1408 | { |
1651 | struct decode_cache *c = &ctxt->decode; | 1409 | struct decode_cache *c = &ctxt->decode; |
1652 | int rc; | 1410 | int rc; |
1411 | struct segmented_address addr; | ||
1653 | 1412 | ||
1654 | rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops), | 1413 | addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]); |
1655 | c->regs[VCPU_REGS_RSP]), | 1414 | addr.seg = VCPU_SREG_SS; |
1656 | dest, len); | 1415 | rc = segmented_read(ctxt, addr, dest, len); |
1657 | if (rc != X86EMUL_CONTINUE) | 1416 | if (rc != X86EMUL_CONTINUE) |
1658 | return rc; | 1417 | return rc; |
1659 | 1418 | ||
@@ -1661,6 +1420,13 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt, | |||
1661 | return rc; | 1420 | return rc; |
1662 | } | 1421 | } |
1663 | 1422 | ||
1423 | static int em_pop(struct x86_emulate_ctxt *ctxt) | ||
1424 | { | ||
1425 | struct decode_cache *c = &ctxt->decode; | ||
1426 | |||
1427 | return emulate_pop(ctxt, &c->dst.val, c->op_bytes); | ||
1428 | } | ||
1429 | |||
1664 | static int emulate_popf(struct x86_emulate_ctxt *ctxt, | 1430 | static int emulate_popf(struct x86_emulate_ctxt *ctxt, |
1665 | struct x86_emulate_ops *ops, | 1431 | struct x86_emulate_ops *ops, |
1666 | void *dest, int len) | 1432 | void *dest, int len) |
@@ -1668,9 +1434,9 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, | |||
1668 | int rc; | 1434 | int rc; |
1669 | unsigned long val, change_mask; | 1435 | unsigned long val, change_mask; |
1670 | int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | 1436 | int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; |
1671 | int cpl = ops->cpl(ctxt->vcpu); | 1437 | int cpl = ops->cpl(ctxt); |
1672 | 1438 | ||
1673 | rc = emulate_pop(ctxt, ops, &val, len); | 1439 | rc = emulate_pop(ctxt, &val, len); |
1674 | if (rc != X86EMUL_CONTINUE) | 1440 | if (rc != X86EMUL_CONTINUE) |
1675 | return rc; | 1441 | return rc; |
1676 | 1442 | ||
@@ -1687,10 +1453,8 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, | |||
1687 | change_mask |= EFLG_IF; | 1453 | change_mask |= EFLG_IF; |
1688 | break; | 1454 | break; |
1689 | case X86EMUL_MODE_VM86: | 1455 | case X86EMUL_MODE_VM86: |
1690 | if (iopl < 3) { | 1456 | if (iopl < 3) |
1691 | emulate_gp(ctxt, 0); | 1457 | return emulate_gp(ctxt, 0); |
1692 | return X86EMUL_PROPAGATE_FAULT; | ||
1693 | } | ||
1694 | change_mask |= EFLG_IF; | 1458 | change_mask |= EFLG_IF; |
1695 | break; | 1459 | break; |
1696 | default: /* real mode */ | 1460 | default: /* real mode */ |
@@ -1704,14 +1468,24 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt, | |||
1704 | return rc; | 1468 | return rc; |
1705 | } | 1469 | } |
1706 | 1470 | ||
1707 | static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, | 1471 | static int em_popf(struct x86_emulate_ctxt *ctxt) |
1708 | struct x86_emulate_ops *ops, int seg) | 1472 | { |
1473 | struct decode_cache *c = &ctxt->decode; | ||
1474 | |||
1475 | c->dst.type = OP_REG; | ||
1476 | c->dst.addr.reg = &ctxt->eflags; | ||
1477 | c->dst.bytes = c->op_bytes; | ||
1478 | return emulate_popf(ctxt, ctxt->ops, &c->dst.val, c->op_bytes); | ||
1479 | } | ||
1480 | |||
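The privilege logic in emulate_popf() distills to two rules: only CPL 0 may change IOPL, and IF is writable only when CPL <= IOPL (with VM86 at IOPL < 3 faulting outright, as the hunk above now expresses via the early return). A sketch of the protected-mode mask construction (invented signature; the real/VM86 cases are omitted):

#include <stdint.h>

#define FL_IF	(1u << 9)
#define FL_IOPL	(3u << 12)

static uint32_t popf_change_mask(unsigned cpl, unsigned iopl,
				 uint32_t base_mask)
{
	uint32_t mask = base_mask;

	if (cpl == 0)
		mask |= FL_IOPL;	/* only ring 0 may move IOPL */
	if (cpl <= iopl)
		mask |= FL_IF;		/* IF writable when privileged enough */
	return mask;
}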
1481 | static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, | ||
1482 | struct x86_emulate_ops *ops, int seg) | ||
1709 | { | 1483 | { |
1710 | struct decode_cache *c = &ctxt->decode; | 1484 | struct decode_cache *c = &ctxt->decode; |
1711 | 1485 | ||
1712 | c->src.val = ops->get_segment_selector(seg, ctxt->vcpu); | 1486 | c->src.val = get_segment_selector(ctxt, seg); |
1713 | 1487 | ||
1714 | emulate_push(ctxt, ops); | 1488 | return em_push(ctxt); |
1715 | } | 1489 | } |
1716 | 1490 | ||
1717 | static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, | 1491 | static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, |
@@ -1721,7 +1495,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, | |||
1721 | unsigned long selector; | 1495 | unsigned long selector; |
1722 | int rc; | 1496 | int rc; |
1723 | 1497 | ||
1724 | rc = emulate_pop(ctxt, ops, &selector, c->op_bytes); | 1498 | rc = emulate_pop(ctxt, &selector, c->op_bytes); |
1725 | if (rc != X86EMUL_CONTINUE) | 1499 | if (rc != X86EMUL_CONTINUE) |
1726 | return rc; | 1500 | return rc; |
1727 | 1501 | ||
@@ -1729,8 +1503,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, | |||
1729 | return rc; | 1503 | return rc; |
1730 | } | 1504 | } |
1731 | 1505 | ||
1732 | static int emulate_pusha(struct x86_emulate_ctxt *ctxt, | 1506 | static int em_pusha(struct x86_emulate_ctxt *ctxt) |
1733 | struct x86_emulate_ops *ops) | ||
1734 | { | 1507 | { |
1735 | struct decode_cache *c = &ctxt->decode; | 1508 | struct decode_cache *c = &ctxt->decode; |
1736 | unsigned long old_esp = c->regs[VCPU_REGS_RSP]; | 1509 | unsigned long old_esp = c->regs[VCPU_REGS_RSP]; |
@@ -1741,23 +1514,25 @@ static int emulate_pusha(struct x86_emulate_ctxt *ctxt, | |||
1741 | (reg == VCPU_REGS_RSP) ? | 1514 | (reg == VCPU_REGS_RSP) ? |
1742 | (c->src.val = old_esp) : (c->src.val = c->regs[reg]); | 1515 | (c->src.val = old_esp) : (c->src.val = c->regs[reg]); |
1743 | 1516 | ||
1744 | emulate_push(ctxt, ops); | 1517 | rc = em_push(ctxt); |
1745 | |||
1746 | rc = writeback(ctxt, ops); | ||
1747 | if (rc != X86EMUL_CONTINUE) | 1518 | if (rc != X86EMUL_CONTINUE) |
1748 | return rc; | 1519 | return rc; |
1749 | 1520 | ||
1750 | ++reg; | 1521 | ++reg; |
1751 | } | 1522 | } |
1752 | 1523 | ||
1753 | /* Disable writeback. */ | ||
1754 | c->dst.type = OP_NONE; | ||
1755 | |||
1756 | return rc; | 1524 | return rc; |
1757 | } | 1525 | } |
1758 | 1526 | ||
1759 | static int emulate_popa(struct x86_emulate_ctxt *ctxt, | 1527 | static int em_pushf(struct x86_emulate_ctxt *ctxt) |
1760 | struct x86_emulate_ops *ops) | 1528 | { |
1529 | struct decode_cache *c = &ctxt->decode; | ||
1530 | |||
1531 | c->src.val = (unsigned long)ctxt->eflags; | ||
1532 | return em_push(ctxt); | ||
1533 | } | ||
1534 | |||
1535 | static int em_popa(struct x86_emulate_ctxt *ctxt) | ||
1761 | { | 1536 | { |
1762 | struct decode_cache *c = &ctxt->decode; | 1537 | struct decode_cache *c = &ctxt->decode; |
1763 | int rc = X86EMUL_CONTINUE; | 1538 | int rc = X86EMUL_CONTINUE; |
@@ -1770,7 +1545,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt, | |||
1770 | --reg; | 1545 | --reg; |
1771 | } | 1546 | } |
1772 | 1547 | ||
1773 | rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes); | 1548 | rc = emulate_pop(ctxt, &c->regs[reg], c->op_bytes); |
1774 | if (rc != X86EMUL_CONTINUE) | 1549 | if (rc != X86EMUL_CONTINUE) |
1775 | break; | 1550 | break; |
1776 | --reg; | 1551 | --reg; |
@@ -1778,15 +1553,167 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt, | |||
1778 | return rc; | 1553 | return rc; |
1779 | } | 1554 | } |
1780 | 1555 | ||
1781 | static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt, | 1556 | int emulate_int_real(struct x86_emulate_ctxt *ctxt, |
1782 | struct x86_emulate_ops *ops) | 1557 | struct x86_emulate_ops *ops, int irq) |
1783 | { | 1558 | { |
1784 | struct decode_cache *c = &ctxt->decode; | 1559 | struct decode_cache *c = &ctxt->decode; |
1560 | int rc; | ||
1561 | struct desc_ptr dt; | ||
1562 | gva_t cs_addr; | ||
1563 | gva_t eip_addr; | ||
1564 | u16 cs, eip; | ||
1785 | 1565 | ||
1786 | return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes); | 1566 | /* TODO: Add limit checks */ |
1567 | c->src.val = ctxt->eflags; | ||
1568 | rc = em_push(ctxt); | ||
1569 | if (rc != X86EMUL_CONTINUE) | ||
1570 | return rc; | ||
1571 | |||
1572 | ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); | ||
1573 | |||
1574 | c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS); | ||
1575 | rc = em_push(ctxt); | ||
1576 | if (rc != X86EMUL_CONTINUE) | ||
1577 | return rc; | ||
1578 | |||
1579 | c->src.val = c->eip; | ||
1580 | rc = em_push(ctxt); | ||
1581 | if (rc != X86EMUL_CONTINUE) | ||
1582 | return rc; | ||
1583 | |||
1584 | ops->get_idt(ctxt, &dt); | ||
1585 | |||
1586 | eip_addr = dt.address + (irq << 2); | ||
1587 | cs_addr = dt.address + (irq << 2) + 2; | ||
1588 | |||
1589 | rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception); | ||
1590 | if (rc != X86EMUL_CONTINUE) | ||
1591 | return rc; | ||
1592 | |||
1593 | rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception); | ||
1594 | if (rc != X86EMUL_CONTINUE) | ||
1595 | return rc; | ||
1596 | |||
1597 | rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS); | ||
1598 | if (rc != X86EMUL_CONTINUE) | ||
1599 | return rc; | ||
1600 | |||
1601 | c->eip = eip; | ||
1602 | |||
1603 | return rc; | ||
1787 | } | 1604 | } |
1788 | 1605 | ||
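emulate_int_real() reads the real-mode IVT directly: vector n occupies 4 bytes at the IDT base, offset word first, then segment word, which is exactly the `irq << 2` arithmetic above. As a standalone lookup over a raw byte view of guest memory (stand-in; no limit checks, matching the TODO in the function):

#include <stdint.h>

struct ivt_entry { uint16_t eip; uint16_t cs; };

static struct ivt_entry ivt_lookup(const uint8_t *idt_base, unsigned irq)
{
	struct ivt_entry e;
	unsigned off = irq << 2;

	e.eip = idt_base[off]     | (uint16_t)idt_base[off + 1] << 8;
	e.cs  = idt_base[off + 2] | (uint16_t)idt_base[off + 3] << 8;
	return e;
}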
1789 | static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt) | 1606 | static int emulate_int(struct x86_emulate_ctxt *ctxt, |
1607 | struct x86_emulate_ops *ops, int irq) | ||
1608 | { | ||
1609 | switch(ctxt->mode) { | ||
1610 | case X86EMUL_MODE_REAL: | ||
1611 | return emulate_int_real(ctxt, ops, irq); | ||
1612 | case X86EMUL_MODE_VM86: | ||
1613 | case X86EMUL_MODE_PROT16: | ||
1614 | case X86EMUL_MODE_PROT32: | ||
1615 | case X86EMUL_MODE_PROT64: | ||
1616 | default: | ||
1617 | /* Protected mode interrupts are not implemented yet */ | ||
1618 | return X86EMUL_UNHANDLEABLE; | ||
1619 | } | ||
1620 | } | ||
1621 | |||
1622 | static int emulate_iret_real(struct x86_emulate_ctxt *ctxt, | ||
1623 | struct x86_emulate_ops *ops) | ||
1624 | { | ||
1625 | struct decode_cache *c = &ctxt->decode; | ||
1626 | int rc = X86EMUL_CONTINUE; | ||
1627 | unsigned long temp_eip = 0; | ||
1628 | unsigned long temp_eflags = 0; | ||
1629 | unsigned long cs = 0; | ||
1630 | unsigned long mask = EFLG_CF | EFLG_PF | EFLG_AF | EFLG_ZF | EFLG_SF | EFLG_TF | | ||
1631 | EFLG_IF | EFLG_DF | EFLG_OF | EFLG_IOPL | EFLG_NT | EFLG_RF | | ||
1632 | EFLG_AC | EFLG_ID | (1 << 1); /* Last one is the reserved bit */ | ||
1633 | unsigned long vm86_mask = EFLG_VM | EFLG_VIF | EFLG_VIP; | ||
1634 | |||
1635 | /* TODO: Add stack limit check */ | ||
1636 | |||
1637 | rc = emulate_pop(ctxt, &temp_eip, c->op_bytes); | ||
1638 | |||
1639 | if (rc != X86EMUL_CONTINUE) | ||
1640 | return rc; | ||
1641 | |||
1642 | if (temp_eip & ~0xffff) | ||
1643 | return emulate_gp(ctxt, 0); | ||
1644 | |||
1645 | rc = emulate_pop(ctxt, &cs, c->op_bytes); | ||
1646 | |||
1647 | if (rc != X86EMUL_CONTINUE) | ||
1648 | return rc; | ||
1649 | |||
1650 | rc = emulate_pop(ctxt, &temp_eflags, c->op_bytes); | ||
1651 | |||
1652 | if (rc != X86EMUL_CONTINUE) | ||
1653 | return rc; | ||
1654 | |||
1655 | rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS); | ||
1656 | |||
1657 | if (rc != X86EMUL_CONTINUE) | ||
1658 | return rc; | ||
1659 | |||
1660 | c->eip = temp_eip; | ||
1661 | |||
1662 | |||
1663 | if (c->op_bytes == 4) | ||
1664 | ctxt->eflags = ((temp_eflags & mask) | (ctxt->eflags & vm86_mask)); | ||
1665 | else if (c->op_bytes == 2) { | ||
1666 | ctxt->eflags &= ~0xffff; | ||
1667 | ctxt->eflags |= temp_eflags; | ||
1668 | } | ||
1669 | |||
1670 | ctxt->eflags &= ~EFLG_RESERVED_ZEROS_MASK; /* Clear reserved zeros */ | ||
1671 | ctxt->eflags |= EFLG_RESERVED_ONE_MASK; | ||
1672 | |||
1673 | return rc; | ||
1674 | } | ||
1675 | |||
1676 | static inline int emulate_iret(struct x86_emulate_ctxt *ctxt, | ||
1677 | struct x86_emulate_ops* ops) | ||
1678 | { | ||
1679 | switch(ctxt->mode) { | ||
1680 | case X86EMUL_MODE_REAL: | ||
1681 | return emulate_iret_real(ctxt, ops); | ||
1682 | case X86EMUL_MODE_VM86: | ||
1683 | case X86EMUL_MODE_PROT16: | ||
1684 | case X86EMUL_MODE_PROT32: | ||
1685 | case X86EMUL_MODE_PROT64: | ||
1686 | default: | ||
1687 | /* iret from protected mode unimplemented yet */ | ||
1688 | return X86EMUL_UNHANDLEABLE; | ||
1689 | } | ||
1690 | } | ||
1691 | |||
1692 | static int em_jmp_far(struct x86_emulate_ctxt *ctxt) | ||
1693 | { | ||
1694 | struct decode_cache *c = &ctxt->decode; | ||
1695 | int rc; | ||
1696 | unsigned short sel; | ||
1697 | |||
1698 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); | ||
1699 | |||
1700 | rc = load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS); | ||
1701 | if (rc != X86EMUL_CONTINUE) | ||
1702 | return rc; | ||
1703 | |||
1704 | c->eip = 0; | ||
1705 | memcpy(&c->eip, c->src.valptr, c->op_bytes); | ||
1706 | return X86EMUL_CONTINUE; | ||
1707 | } | ||
1708 | |||
1709 | static int em_grp1a(struct x86_emulate_ctxt *ctxt) | ||
1710 | { | ||
1711 | struct decode_cache *c = &ctxt->decode; | ||
1712 | |||
1713 | return emulate_pop(ctxt, &c->dst.val, c->dst.bytes); | ||
1714 | } | ||
1715 | |||
1716 | static int em_grp2(struct x86_emulate_ctxt *ctxt) | ||
1790 | { | 1717 | { |
1791 | struct decode_cache *c = &ctxt->decode; | 1718 | struct decode_cache *c = &ctxt->decode; |
1792 | switch (c->modrm_reg) { | 1719 | switch (c->modrm_reg) { |
@@ -1813,12 +1740,15 @@ static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt) | |||
1813 | emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags); | 1740 | emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags); |
1814 | break; | 1741 | break; |
1815 | } | 1742 | } |
1743 | return X86EMUL_CONTINUE; | ||
1816 | } | 1744 | } |
1817 | 1745 | ||
1818 | static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt, | 1746 | static int em_grp3(struct x86_emulate_ctxt *ctxt) |
1819 | struct x86_emulate_ops *ops) | ||
1820 | { | 1747 | { |
1821 | struct decode_cache *c = &ctxt->decode; | 1748 | struct decode_cache *c = &ctxt->decode; |
1749 | unsigned long *rax = &c->regs[VCPU_REGS_RAX]; | ||
1750 | unsigned long *rdx = &c->regs[VCPU_REGS_RDX]; | ||
1751 | u8 de = 0; | ||
1822 | 1752 | ||
1823 | switch (c->modrm_reg) { | 1753 | switch (c->modrm_reg) { |
1824 | case 0 ... 1: /* test */ | 1754 | case 0 ... 1: /* test */ |
@@ -1830,16 +1760,32 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt, | |||
1830 | case 3: /* neg */ | 1760 | case 3: /* neg */ |
1831 | emulate_1op("neg", c->dst, ctxt->eflags); | 1761 | emulate_1op("neg", c->dst, ctxt->eflags); |
1832 | break; | 1762 | break; |
1763 | case 4: /* mul */ | ||
1764 | emulate_1op_rax_rdx("mul", c->src, *rax, *rdx, ctxt->eflags); | ||
1765 | break; | ||
1766 | case 5: /* imul */ | ||
1767 | emulate_1op_rax_rdx("imul", c->src, *rax, *rdx, ctxt->eflags); | ||
1768 | break; | ||
1769 | case 6: /* div */ | ||
1770 | emulate_1op_rax_rdx_ex("div", c->src, *rax, *rdx, | ||
1771 | ctxt->eflags, de); | ||
1772 | break; | ||
1773 | case 7: /* idiv */ | ||
1774 | emulate_1op_rax_rdx_ex("idiv", c->src, *rax, *rdx, | ||
1775 | ctxt->eflags, de); | ||
1776 | break; | ||
1833 | default: | 1777 | default: |
1834 | return 0; | 1778 | return X86EMUL_UNHANDLEABLE; |
1835 | } | 1779 | } |
1836 | return 1; | 1780 | if (de) |
1781 | return emulate_de(ctxt); | ||
1782 | return X86EMUL_CONTINUE; | ||
1837 | } | 1783 | } |
1838 | 1784 | ||
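The new `de` flag threaded through emulate_1op_rax_rdx_ex() covers the two #DE conditions of DIV: a zero divisor, and a quotient too wide for the destination. A 32-bit C model of the check (invented helper; in the emulator the division itself happens inside the asm macro and the fault is raised via emulate_de()):

#include <stdint.h>

static int div32_sketch(uint32_t *eax, uint32_t *edx, uint32_t src)
{
	uint64_t dividend = ((uint64_t)*edx << 32) | *eax;

	if (src == 0 || dividend / src > 0xffffffffull)
		return -1;		/* #DE */
	*eax = (uint32_t)(dividend / src);
	*edx = (uint32_t)(dividend % src);
	return 0;
}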
1839 | static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt, | 1785 | static int em_grp45(struct x86_emulate_ctxt *ctxt) |
1840 | struct x86_emulate_ops *ops) | ||
1841 | { | 1786 | { |
1842 | struct decode_cache *c = &ctxt->decode; | 1787 | struct decode_cache *c = &ctxt->decode; |
1788 | int rc = X86EMUL_CONTINUE; | ||
1843 | 1789 | ||
1844 | switch (c->modrm_reg) { | 1790 | switch (c->modrm_reg) { |
1845 | case 0: /* inc */ | 1791 | case 0: /* inc */ |
@@ -1853,21 +1799,23 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt, | |||
1853 | old_eip = c->eip; | 1799 | old_eip = c->eip; |
1854 | c->eip = c->src.val; | 1800 | c->eip = c->src.val; |
1855 | c->src.val = old_eip; | 1801 | c->src.val = old_eip; |
1856 | emulate_push(ctxt, ops); | 1802 | rc = em_push(ctxt); |
1857 | break; | 1803 | break; |
1858 | } | 1804 | } |
1859 | case 4: /* jmp abs */ | 1805 | case 4: /* jmp abs */ |
1860 | c->eip = c->src.val; | 1806 | c->eip = c->src.val; |
1861 | break; | 1807 | break; |
1808 | case 5: /* jmp far */ | ||
1809 | rc = em_jmp_far(ctxt); | ||
1810 | break; | ||
1862 | case 6: /* push */ | 1811 | case 6: /* push */ |
1863 | emulate_push(ctxt, ops); | 1812 | rc = em_push(ctxt); |
1864 | break; | 1813 | break; |
1865 | } | 1814 | } |
1866 | return X86EMUL_CONTINUE; | 1815 | return rc; |
1867 | } | 1816 | } |
1868 | 1817 | ||
1869 | static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt, | 1818 | static int em_grp9(struct x86_emulate_ctxt *ctxt) |
1870 | struct x86_emulate_ops *ops) | ||
1871 | { | 1819 | { |
1872 | struct decode_cache *c = &ctxt->decode; | 1820 | struct decode_cache *c = &ctxt->decode; |
1873 | u64 old = c->dst.orig_val64; | 1821 | u64 old = c->dst.orig_val64; |
@@ -1893,25 +1841,44 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt, | |||
1893 | int rc; | 1841 | int rc; |
1894 | unsigned long cs; | 1842 | unsigned long cs; |
1895 | 1843 | ||
1896 | rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes); | 1844 | rc = emulate_pop(ctxt, &c->eip, c->op_bytes); |
1897 | if (rc != X86EMUL_CONTINUE) | 1845 | if (rc != X86EMUL_CONTINUE) |
1898 | return rc; | 1846 | return rc; |
1899 | if (c->op_bytes == 4) | 1847 | if (c->op_bytes == 4) |
1900 | c->eip = (u32)c->eip; | 1848 | c->eip = (u32)c->eip; |
1901 | rc = emulate_pop(ctxt, ops, &cs, c->op_bytes); | 1849 | rc = emulate_pop(ctxt, &cs, c->op_bytes); |
1902 | if (rc != X86EMUL_CONTINUE) | 1850 | if (rc != X86EMUL_CONTINUE) |
1903 | return rc; | 1851 | return rc; |
1904 | rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS); | 1852 | rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS); |
1905 | return rc; | 1853 | return rc; |
1906 | } | 1854 | } |
1907 | 1855 | ||
1856 | static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, | ||
1857 | struct x86_emulate_ops *ops, int seg) | ||
1858 | { | ||
1859 | struct decode_cache *c = &ctxt->decode; | ||
1860 | unsigned short sel; | ||
1861 | int rc; | ||
1862 | |||
1863 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); | ||
1864 | |||
1865 | rc = load_segment_descriptor(ctxt, ops, sel, seg); | ||
1866 | if (rc != X86EMUL_CONTINUE) | ||
1867 | return rc; | ||
1868 | |||
1869 | c->dst.val = c->src.val; | ||
1870 | return rc; | ||
1871 | } | ||
1872 | |||
1908 | static inline void | 1873 | static inline void |
1909 | setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, | 1874 | setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, |
1910 | struct x86_emulate_ops *ops, struct desc_struct *cs, | 1875 | struct x86_emulate_ops *ops, struct desc_struct *cs, |
1911 | struct desc_struct *ss) | 1876 | struct desc_struct *ss) |
1912 | { | 1877 | { |
1878 | u16 selector; | ||
1879 | |||
1913 | memset(cs, 0, sizeof(struct desc_struct)); | 1880 | memset(cs, 0, sizeof(struct desc_struct)); |
1914 | ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu); | 1881 | ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS); |
1915 | memset(ss, 0, sizeof(struct desc_struct)); | 1882 | memset(ss, 0, sizeof(struct desc_struct)); |
1916 | 1883 | ||
1917 | cs->l = 0; /* will be adjusted later */ | 1884 | cs->l = 0; /* will be adjusted later */ |
@@ -1941,46 +1908,44 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
1941 | struct desc_struct cs, ss; | 1908 | struct desc_struct cs, ss; |
1942 | u64 msr_data; | 1909 | u64 msr_data; |
1943 | u16 cs_sel, ss_sel; | 1910 | u16 cs_sel, ss_sel; |
1911 | u64 efer = 0; | ||
1944 | 1912 | ||
1945 | /* syscall is not available in real mode */ | 1913 | /* syscall is not available in real mode */ |
1946 | if (ctxt->mode == X86EMUL_MODE_REAL || | 1914 | if (ctxt->mode == X86EMUL_MODE_REAL || |
1947 | ctxt->mode == X86EMUL_MODE_VM86) { | 1915 | ctxt->mode == X86EMUL_MODE_VM86) |
1948 | emulate_ud(ctxt); | 1916 | return emulate_ud(ctxt); |
1949 | return X86EMUL_PROPAGATE_FAULT; | ||
1950 | } | ||
1951 | 1917 | ||
1918 | ops->get_msr(ctxt, MSR_EFER, &efer); | ||
1952 | setup_syscalls_segments(ctxt, ops, &cs, &ss); | 1919 | setup_syscalls_segments(ctxt, ops, &cs, &ss); |
1953 | 1920 | ||
1954 | ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data); | 1921 | ops->get_msr(ctxt, MSR_STAR, &msr_data); |
1955 | msr_data >>= 32; | 1922 | msr_data >>= 32; |
1956 | cs_sel = (u16)(msr_data & 0xfffc); | 1923 | cs_sel = (u16)(msr_data & 0xfffc); |
1957 | ss_sel = (u16)(msr_data + 8); | 1924 | ss_sel = (u16)(msr_data + 8); |
1958 | 1925 | ||
1959 | if (is_long_mode(ctxt->vcpu)) { | 1926 | if (efer & EFER_LMA) { |
1960 | cs.d = 0; | 1927 | cs.d = 0; |
1961 | cs.l = 1; | 1928 | cs.l = 1; |
1962 | } | 1929 | } |
1963 | ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu); | 1930 | ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); |
1964 | ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu); | 1931 | ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); |
1965 | ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu); | ||
1966 | ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu); | ||
1967 | 1932 | ||
1968 | c->regs[VCPU_REGS_RCX] = c->eip; | 1933 | c->regs[VCPU_REGS_RCX] = c->eip; |
1969 | if (is_long_mode(ctxt->vcpu)) { | 1934 | if (efer & EFER_LMA) { |
1970 | #ifdef CONFIG_X86_64 | 1935 | #ifdef CONFIG_X86_64 |
1971 | c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF; | 1936 | c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF; |
1972 | 1937 | ||
1973 | ops->get_msr(ctxt->vcpu, | 1938 | ops->get_msr(ctxt, |
1974 | ctxt->mode == X86EMUL_MODE_PROT64 ? | 1939 | ctxt->mode == X86EMUL_MODE_PROT64 ? |
1975 | MSR_LSTAR : MSR_CSTAR, &msr_data); | 1940 | MSR_LSTAR : MSR_CSTAR, &msr_data); |
1976 | c->eip = msr_data; | 1941 | c->eip = msr_data; |
1977 | 1942 | ||
1978 | ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data); | 1943 | ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data); |
1979 | ctxt->eflags &= ~(msr_data | EFLG_RF); | 1944 | ctxt->eflags &= ~(msr_data | EFLG_RF); |
1980 | #endif | 1945 | #endif |
1981 | } else { | 1946 | } else { |
1982 | /* legacy mode */ | 1947 | /* legacy mode */ |
1983 | ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data); | 1948 | ops->get_msr(ctxt, MSR_STAR, &msr_data); |
1984 | c->eip = (u32)msr_data; | 1949 | c->eip = (u32)msr_data; |
1985 | 1950 | ||
1986 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); | 1951 | ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF); |
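The selector derivation in emulate_syscall() comes straight from the MSR_STAR layout: bits 47:32 hold the kernel CS (with the RPL bits forced clear) and SS is the next GDT entry, which is all the `msr_data >>= 32` dance above computes. In isolation:

#include <stdint.h>

static void star_selectors(uint64_t star, uint16_t *cs, uint16_t *ss)
{
	uint32_t hi = (uint32_t)(star >> 32);

	*cs = (uint16_t)(hi & 0xfffc);	/* RPL forced to 0 */
	*ss = (uint16_t)(hi + 8);	/* next GDT entry */
}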
@@ -1996,36 +1961,30 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
1996 | struct desc_struct cs, ss; | 1961 | struct desc_struct cs, ss; |
1997 | u64 msr_data; | 1962 | u64 msr_data; |
1998 | u16 cs_sel, ss_sel; | 1963 | u16 cs_sel, ss_sel; |
1964 | u64 efer = 0; | ||
1999 | 1965 | ||
1966 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | ||
2000 | /* inject #GP if in real mode */ | 1967 | /* inject #GP if in real mode */ |
2001 | if (ctxt->mode == X86EMUL_MODE_REAL) { | 1968 | if (ctxt->mode == X86EMUL_MODE_REAL) |
2002 | emulate_gp(ctxt, 0); | 1969 | return emulate_gp(ctxt, 0); |
2003 | return X86EMUL_PROPAGATE_FAULT; | ||
2004 | } | ||
2005 | 1970 | ||
2006 | /* XXX sysenter/sysexit have not been tested in 64-bit mode. | 1971 | /* XXX sysenter/sysexit have not been tested in 64-bit mode.
2007 | * Therefore, we inject an #UD. | 1972 | * Therefore, we inject an #UD. |
2008 | */ | 1973 | */ |
2009 | if (ctxt->mode == X86EMUL_MODE_PROT64) { | 1974 | if (ctxt->mode == X86EMUL_MODE_PROT64) |
2010 | emulate_ud(ctxt); | 1975 | return emulate_ud(ctxt); |
2011 | return X86EMUL_PROPAGATE_FAULT; | ||
2012 | } | ||
2013 | 1976 | ||
2014 | setup_syscalls_segments(ctxt, ops, &cs, &ss); | 1977 | setup_syscalls_segments(ctxt, ops, &cs, &ss); |
2015 | 1978 | ||
2016 | ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data); | 1979 | ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); |
2017 | switch (ctxt->mode) { | 1980 | switch (ctxt->mode) { |
2018 | case X86EMUL_MODE_PROT32: | 1981 | case X86EMUL_MODE_PROT32: |
2019 | if ((msr_data & 0xfffc) == 0x0) { | 1982 | if ((msr_data & 0xfffc) == 0x0) |
2020 | emulate_gp(ctxt, 0); | 1983 | return emulate_gp(ctxt, 0); |
2021 | return X86EMUL_PROPAGATE_FAULT; | ||
2022 | } | ||
2023 | break; | 1984 | break; |
2024 | case X86EMUL_MODE_PROT64: | 1985 | case X86EMUL_MODE_PROT64: |
2025 | if (msr_data == 0x0) { | 1986 | if (msr_data == 0x0) |
2026 | emulate_gp(ctxt, 0); | 1987 | return emulate_gp(ctxt, 0); |
2027 | return X86EMUL_PROPAGATE_FAULT; | ||
2028 | } | ||
2029 | break; | 1988 | break; |
2030 | } | 1989 | } |
2031 | 1990 | ||
@@ -2034,21 +1993,18 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2034 | cs_sel &= ~SELECTOR_RPL_MASK; | 1993 | cs_sel &= ~SELECTOR_RPL_MASK; |
2035 | ss_sel = cs_sel + 8; | 1994 | ss_sel = cs_sel + 8; |
2036 | ss_sel &= ~SELECTOR_RPL_MASK; | 1995 | ss_sel &= ~SELECTOR_RPL_MASK; |
2037 | if (ctxt->mode == X86EMUL_MODE_PROT64 | 1996 | if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) { |
2038 | || is_long_mode(ctxt->vcpu)) { | ||
2039 | cs.d = 0; | 1997 | cs.d = 0; |
2040 | cs.l = 1; | 1998 | cs.l = 1; |
2041 | } | 1999 | } |
2042 | 2000 | ||
2043 | ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu); | 2001 | ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); |
2044 | ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu); | 2002 | ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); |
2045 | ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu); | ||
2046 | ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu); | ||
2047 | 2003 | ||
2048 | ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data); | 2004 | ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data); |
2049 | c->eip = msr_data; | 2005 | c->eip = msr_data; |
2050 | 2006 | ||
2051 | ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data); | 2007 | ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data); |
2052 | c->regs[VCPU_REGS_RSP] = msr_data; | 2008 | c->regs[VCPU_REGS_RSP] = msr_data; |
2053 | 2009 | ||
2054 | return X86EMUL_CONTINUE; | 2010 | return X86EMUL_CONTINUE; |
@@ -2065,10 +2021,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2065 | 2021 | ||
2066 | /* inject #GP if in real mode or Virtual 8086 mode */ | 2022 | /* inject #GP if in real mode or Virtual 8086 mode */ |
2067 | if (ctxt->mode == X86EMUL_MODE_REAL || | 2023 | if (ctxt->mode == X86EMUL_MODE_REAL || |
2068 | ctxt->mode == X86EMUL_MODE_VM86) { | 2024 | ctxt->mode == X86EMUL_MODE_VM86) |
2069 | emulate_gp(ctxt, 0); | 2025 | return emulate_gp(ctxt, 0); |
2070 | return X86EMUL_PROPAGATE_FAULT; | ||
2071 | } | ||
2072 | 2026 | ||
2073 | setup_syscalls_segments(ctxt, ops, &cs, &ss); | 2027 | setup_syscalls_segments(ctxt, ops, &cs, &ss); |
2074 | 2028 | ||
@@ -2079,22 +2033,18 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2079 | 2033 | ||
2080 | cs.dpl = 3; | 2034 | cs.dpl = 3; |
2081 | ss.dpl = 3; | 2035 | ss.dpl = 3; |
2082 | ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data); | 2036 | ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); |
2083 | switch (usermode) { | 2037 | switch (usermode) { |
2084 | case X86EMUL_MODE_PROT32: | 2038 | case X86EMUL_MODE_PROT32: |
2085 | cs_sel = (u16)(msr_data + 16); | 2039 | cs_sel = (u16)(msr_data + 16); |
2086 | if ((msr_data & 0xfffc) == 0x0) { | 2040 | if ((msr_data & 0xfffc) == 0x0) |
2087 | emulate_gp(ctxt, 0); | 2041 | return emulate_gp(ctxt, 0); |
2088 | return X86EMUL_PROPAGATE_FAULT; | ||
2089 | } | ||
2090 | ss_sel = (u16)(msr_data + 24); | 2042 | ss_sel = (u16)(msr_data + 24); |
2091 | break; | 2043 | break; |
2092 | case X86EMUL_MODE_PROT64: | 2044 | case X86EMUL_MODE_PROT64: |
2093 | cs_sel = (u16)(msr_data + 32); | 2045 | cs_sel = (u16)(msr_data + 32); |
2094 | if (msr_data == 0x0) { | 2046 | if (msr_data == 0x0) |
2095 | emulate_gp(ctxt, 0); | 2047 | return emulate_gp(ctxt, 0); |
2096 | return X86EMUL_PROPAGATE_FAULT; | ||
2097 | } | ||
2098 | ss_sel = cs_sel + 8; | 2048 | ss_sel = cs_sel + 8; |
2099 | cs.d = 0; | 2049 | cs.d = 0; |
2100 | cs.l = 1; | 2050 | cs.l = 1; |
@@ -2103,10 +2053,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2103 | cs_sel |= SELECTOR_RPL_MASK; | 2053 | cs_sel |= SELECTOR_RPL_MASK; |
2104 | ss_sel |= SELECTOR_RPL_MASK; | 2054 | ss_sel |= SELECTOR_RPL_MASK; |
2105 | 2055 | ||
2106 | ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu); | 2056 | ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); |
2107 | ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu); | 2057 | ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); |
2108 | ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu); | ||
2109 | ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu); | ||
2110 | 2058 | ||
2111 | c->eip = c->regs[VCPU_REGS_RDX]; | 2059 | c->eip = c->regs[VCPU_REGS_RDX]; |
2112 | c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX]; | 2060 | c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX]; |
@@ -2123,7 +2071,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt, | |||
2123 | if (ctxt->mode == X86EMUL_MODE_VM86) | 2071 | if (ctxt->mode == X86EMUL_MODE_VM86) |
2124 | return true; | 2072 | return true; |
2125 | iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | 2073 | iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; |
2126 | return ops->cpl(ctxt->vcpu) > iopl; | 2074 | return ops->cpl(ctxt) > iopl; |
2127 | } | 2075 | } |
2128 | 2076 | ||
2129 | static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, | 2077 | static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, |
@@ -2131,24 +2079,27 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, | |||
2131 | u16 port, u16 len) | 2079 | u16 port, u16 len) |
2132 | { | 2080 | { |
2133 | struct desc_struct tr_seg; | 2081 | struct desc_struct tr_seg; |
2082 | u32 base3; | ||
2134 | int r; | 2083 | int r; |
2135 | u16 io_bitmap_ptr; | 2084 | u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; |
2136 | u8 perm, bit_idx = port & 0x7; | ||
2137 | unsigned mask = (1 << len) - 1; | 2085 | unsigned mask = (1 << len) - 1; |
2086 | unsigned long base; | ||
2138 | 2087 | ||
2139 | ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu); | 2088 | ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); |
2140 | if (!tr_seg.p) | 2089 | if (!tr_seg.p) |
2141 | return false; | 2090 | return false; |
2142 | if (desc_limit_scaled(&tr_seg) < 103) | 2091 | if (desc_limit_scaled(&tr_seg) < 103) |
2143 | return false; | 2092 | return false; |
2144 | r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2, | 2093 | base = get_desc_base(&tr_seg); |
2145 | ctxt->vcpu, NULL); | 2094 | #ifdef CONFIG_X86_64 |
2095 | base |= ((u64)base3) << 32; | ||
2096 | #endif | ||
2097 | r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); | ||
2146 | if (r != X86EMUL_CONTINUE) | 2098 | if (r != X86EMUL_CONTINUE) |
2147 | return false; | 2099 | return false; |
2148 | if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) | 2100 | if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) |
2149 | return false; | 2101 | return false; |
2150 | r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8, | 2102 | r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); |
2151 | &perm, 1, ctxt->vcpu, NULL); | ||
2152 | if (r != X86EMUL_CONTINUE) | 2103 | if (r != X86EMUL_CONTINUE) |
2153 | return false; | 2104 | return false; |
2154 | if ((perm >> bit_idx) & mask) | 2105 | if ((perm >> bit_idx) & mask) |
@@ -2160,9 +2111,15 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt, | |||
2160 | struct x86_emulate_ops *ops, | 2111 | struct x86_emulate_ops *ops, |
2161 | u16 port, u16 len) | 2112 | u16 port, u16 len) |
2162 | { | 2113 | { |
2114 | if (ctxt->perm_ok) | ||
2115 | return true; | ||
2116 | |||
2163 | if (emulator_bad_iopl(ctxt, ops)) | 2117 | if (emulator_bad_iopl(ctxt, ops)) |
2164 | if (!emulator_io_port_access_allowed(ctxt, ops, port, len)) | 2118 | if (!emulator_io_port_access_allowed(ctxt, ops, port, len)) |
2165 | return false; | 2119 | return false; |
2120 | |||
2121 | ctxt->perm_ok = true; | ||
2122 | |||
2166 | return true; | 2123 | return true; |
2167 | } | 2124 | } |
2168 | 2125 | ||
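Taken together, the three helpers above implement the protected-mode I/O check: when CPL exceeds IOPL, access is governed by the I/O permission bitmap hanging off the TSS, and the new perm_ok flag caches a positive answer for the rest of the instruction. A minimal sketch of the bitmap test itself, assuming the two bitmap bytes covering the port have already been fetched (names are illustrative, not part of this file):

/* 'perm' holds the two bitmap bytes covering 'port', as read by
 * ops->read_std() above; a set bit denies access, and every bit
 * touched by the (port, len) range must be clear. */
static bool io_bits_clear(u16 perm, u16 port, u16 len)
{
	unsigned bit_idx = port & 0x7;      /* bit offset within the byte */
	unsigned mask = (1 << len) - 1;     /* len consecutive ports      */

	return !((perm >> bit_idx) & mask); /* all clear -> allowed       */
}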
@@ -2183,11 +2140,11 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, | |||
2183 | tss->si = c->regs[VCPU_REGS_RSI]; | 2140 | tss->si = c->regs[VCPU_REGS_RSI]; |
2184 | tss->di = c->regs[VCPU_REGS_RDI]; | 2141 | tss->di = c->regs[VCPU_REGS_RDI]; |
2185 | 2142 | ||
2186 | tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu); | 2143 | tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); |
2187 | tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu); | 2144 | tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); |
2188 | tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu); | 2145 | tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); |
2189 | tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu); | 2146 | tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); |
2190 | tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu); | 2147 | tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); |
2191 | } | 2148 | } |
2192 | 2149 | ||
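save_state_to_tss16() fills a struct tss_segment_16, whose layout follows the SDM's 16-bit TSS image (the definition lives with the emulator interface headers; reproduced here for orientation only):

/* 16-bit TSS image, 44 bytes total (offsets per the SDM). */
struct tss_segment_16 {
	u16 prev_task_link;                 /* 0x00 */
	u16 sp0, ss0, sp1, ss1, sp2, ss2;   /* 0x02-0x0d: ring stacks */
	u16 ip, flag;                       /* 0x0e, 0x10 */
	u16 ax, cx, dx, bx, sp, bp, si, di; /* 0x12-0x21 */
	u16 es, cs, ss, ds;                 /* 0x22-0x29 */
	u16 ldt;                            /* 0x2a */
};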
2193 | static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, | 2150 | static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, |
@@ -2212,11 +2169,11 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, | |||
2212 | * SDM says that segment selectors are loaded before segment | 2169 | * SDM says that segment selectors are loaded before segment |
2213 | * descriptors | 2170 | * descriptors |
2214 | */ | 2171 | */ |
2215 | ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu); | 2172 | set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); |
2216 | ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu); | 2173 | set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); |
2217 | ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu); | 2174 | set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); |
2218 | ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu); | 2175 | set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); |
2219 | ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu); | 2176 | set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); |
2220 | 2177 | ||
2221 | /* | 2178 | /* |
2222 | * Now load segment descriptors. If a fault happens at this stage | 2179 | * Now load segment descriptors. If a fault happens at this stage |
@@ -2248,46 +2205,38 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, | |||
2248 | { | 2205 | { |
2249 | struct tss_segment_16 tss_seg; | 2206 | struct tss_segment_16 tss_seg; |
2250 | int ret; | 2207 | int ret; |
2251 | u32 err, new_tss_base = get_desc_base(new_desc); | 2208 | u32 new_tss_base = get_desc_base(new_desc); |
2252 | 2209 | ||
2253 | ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2210 | ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
2254 | &err); | 2211 | &ctxt->exception); |
2255 | if (ret == X86EMUL_PROPAGATE_FAULT) { | 2212 | if (ret != X86EMUL_CONTINUE) |
2256 | /* FIXME: need to provide precise fault address */ | 2213 | /* FIXME: need to provide precise fault address */ |
2257 | emulate_pf(ctxt, old_tss_base, err); | ||
2258 | return ret; | 2214 | return ret; |
2259 | } | ||
2260 | 2215 | ||
2261 | save_state_to_tss16(ctxt, ops, &tss_seg); | 2216 | save_state_to_tss16(ctxt, ops, &tss_seg); |
2262 | 2217 | ||
2263 | ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2218 | ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
2264 | &err); | 2219 | &ctxt->exception); |
2265 | if (ret == X86EMUL_PROPAGATE_FAULT) { | 2220 | if (ret != X86EMUL_CONTINUE) |
2266 | /* FIXME: need to provide precise fault address */ | 2221 | /* FIXME: need to provide precise fault address */ |
2267 | emulate_pf(ctxt, old_tss_base, err); | ||
2268 | return ret; | 2222 | return ret; |
2269 | } | ||
2270 | 2223 | ||
2271 | ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2224 | ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, |
2272 | &err); | 2225 | &ctxt->exception); |
2273 | if (ret == X86EMUL_PROPAGATE_FAULT) { | 2226 | if (ret != X86EMUL_CONTINUE) |
2274 | /* FIXME: need to provide precise fault address */ | 2227 | /* FIXME: need to provide precise fault address */ |
2275 | emulate_pf(ctxt, new_tss_base, err); | ||
2276 | return ret; | 2228 | return ret; |
2277 | } | ||
2278 | 2229 | ||
2279 | if (old_tss_sel != 0xffff) { | 2230 | if (old_tss_sel != 0xffff) { |
2280 | tss_seg.prev_task_link = old_tss_sel; | 2231 | tss_seg.prev_task_link = old_tss_sel; |
2281 | 2232 | ||
2282 | ret = ops->write_std(new_tss_base, | 2233 | ret = ops->write_std(ctxt, new_tss_base, |
2283 | &tss_seg.prev_task_link, | 2234 | &tss_seg.prev_task_link, |
2284 | sizeof tss_seg.prev_task_link, | 2235 | sizeof tss_seg.prev_task_link, |
2285 | ctxt->vcpu, &err); | 2236 | &ctxt->exception); |
2286 | if (ret == X86EMUL_PROPAGATE_FAULT) { | 2237 | if (ret != X86EMUL_CONTINUE) |
2287 | /* FIXME: need to provide precise fault address */ | 2238 | /* FIXME: need to provide precise fault address */ |
2288 | emulate_pf(ctxt, new_tss_base, err); | ||
2289 | return ret; | 2239 | return ret; |
2290 | } | ||
2291 | } | 2240 | } |
2292 | 2241 | ||
2293 | return load_state_from_tss16(ctxt, ops, &tss_seg); | 2242 | return load_state_from_tss16(ctxt, ops, &tss_seg); |
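The recurring change in this hunk is the fault-propagation model: read_std()/write_std() now take the emulation context and record faults in ctxt->exception themselves, so the four-line re-raise dance around every call collapses to a plain status check. Schematically:

/* Before: the caller decoded the fault and re-injected it. */
ret = ops->read_std(addr, buf, size, ctxt->vcpu, &err);
if (ret == X86EMUL_PROPAGATE_FAULT) {
	emulate_pf(ctxt, addr, err);
	return ret;
}

/* After: the callback fills ctxt->exception on fault; any status
 * other than X86EMUL_CONTINUE simply bubbles up. */
ret = ops->read_std(ctxt, addr, buf, size, &ctxt->exception);
if (ret != X86EMUL_CONTINUE)
	return ret;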
@@ -2299,7 +2248,7 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, | |||
2299 | { | 2248 | { |
2300 | struct decode_cache *c = &ctxt->decode; | 2249 | struct decode_cache *c = &ctxt->decode; |
2301 | 2250 | ||
2302 | tss->cr3 = ops->get_cr(3, ctxt->vcpu); | 2251 | tss->cr3 = ops->get_cr(ctxt, 3); |
2303 | tss->eip = c->eip; | 2252 | tss->eip = c->eip; |
2304 | tss->eflags = ctxt->eflags; | 2253 | tss->eflags = ctxt->eflags; |
2305 | tss->eax = c->regs[VCPU_REGS_RAX]; | 2254 | tss->eax = c->regs[VCPU_REGS_RAX]; |
@@ -2311,13 +2260,13 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, | |||
2311 | tss->esi = c->regs[VCPU_REGS_RSI]; | 2260 | tss->esi = c->regs[VCPU_REGS_RSI]; |
2312 | tss->edi = c->regs[VCPU_REGS_RDI]; | 2261 | tss->edi = c->regs[VCPU_REGS_RDI]; |
2313 | 2262 | ||
2314 | tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu); | 2263 | tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); |
2315 | tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu); | 2264 | tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); |
2316 | tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu); | 2265 | tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); |
2317 | tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu); | 2266 | tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); |
2318 | tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu); | 2267 | tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); |
2319 | tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu); | 2268 | tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); |
2320 | tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu); | 2269 | tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR); |
2321 | } | 2270 | } |
2322 | 2271 | ||
2323 | static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, | 2272 | static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, |
@@ -2327,10 +2276,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, | |||
2327 | struct decode_cache *c = &ctxt->decode; | 2276 | struct decode_cache *c = &ctxt->decode; |
2328 | int ret; | 2277 | int ret; |
2329 | 2278 | ||
2330 | if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) { | 2279 | if (ops->set_cr(ctxt, 3, tss->cr3)) |
2331 | emulate_gp(ctxt, 0); | 2280 | return emulate_gp(ctxt, 0); |
2332 | return X86EMUL_PROPAGATE_FAULT; | ||
2333 | } | ||
2334 | c->eip = tss->eip; | 2281 | c->eip = tss->eip; |
2335 | ctxt->eflags = tss->eflags | 2; | 2282 | ctxt->eflags = tss->eflags | 2; |
2336 | c->regs[VCPU_REGS_RAX] = tss->eax; | 2283 | c->regs[VCPU_REGS_RAX] = tss->eax; |
@@ -2346,13 +2293,13 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, | |||
2346 | * SDM says that segment selectors are loaded before segment | 2293 | * SDM says that segment selectors are loaded before segment |
2347 | * descriptors | 2294 | * descriptors |
2348 | */ | 2295 | */ |
2349 | ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu); | 2296 | set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); |
2350 | ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu); | 2297 | set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); |
2351 | ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu); | 2298 | set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); |
2352 | ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu); | 2299 | set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); |
2353 | ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu); | 2300 | set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); |
2354 | ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu); | 2301 | set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); |
2355 | ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu); | 2302 | set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); |
2356 | 2303 | ||
2357 | /* | 2304 | /* |
2358 | * Now load segment descriptors. If a fault happens at this stage | 2305 | * Now load segment descriptors. If a fault happens at this stage |
@@ -2390,46 +2337,38 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, | |||
2390 | { | 2337 | { |
2391 | struct tss_segment_32 tss_seg; | 2338 | struct tss_segment_32 tss_seg; |
2392 | int ret; | 2339 | int ret; |
2393 | u32 err, new_tss_base = get_desc_base(new_desc); | 2340 | u32 new_tss_base = get_desc_base(new_desc); |
2394 | 2341 | ||
2395 | ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2342 | ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
2396 | &err); | 2343 | &ctxt->exception); |
2397 | if (ret == X86EMUL_PROPAGATE_FAULT) { | 2344 | if (ret != X86EMUL_CONTINUE) |
2398 | /* FIXME: need to provide precise fault address */ | 2345 | /* FIXME: need to provide precise fault address */ |
2399 | emulate_pf(ctxt, old_tss_base, err); | ||
2400 | return ret; | 2346 | return ret; |
2401 | } | ||
2402 | 2347 | ||
2403 | save_state_to_tss32(ctxt, ops, &tss_seg); | 2348 | save_state_to_tss32(ctxt, ops, &tss_seg); |
2404 | 2349 | ||
2405 | ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2350 | ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
2406 | &err); | 2351 | &ctxt->exception); |
2407 | if (ret == X86EMUL_PROPAGATE_FAULT) { | 2352 | if (ret != X86EMUL_CONTINUE) |
2408 | /* FIXME: need to provide precise fault address */ | 2353 | /* FIXME: need to provide precise fault address */ |
2409 | emulate_pf(ctxt, old_tss_base, err); | ||
2410 | return ret; | 2354 | return ret; |
2411 | } | ||
2412 | 2355 | ||
2413 | ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2356 | ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, |
2414 | &err); | 2357 | &ctxt->exception); |
2415 | if (ret == X86EMUL_PROPAGATE_FAULT) { | 2358 | if (ret != X86EMUL_CONTINUE) |
2416 | /* FIXME: need to provide precise fault address */ | 2359 | /* FIXME: need to provide precise fault address */ |
2417 | emulate_pf(ctxt, new_tss_base, err); | ||
2418 | return ret; | 2360 | return ret; |
2419 | } | ||
2420 | 2361 | ||
2421 | if (old_tss_sel != 0xffff) { | 2362 | if (old_tss_sel != 0xffff) { |
2422 | tss_seg.prev_task_link = old_tss_sel; | 2363 | tss_seg.prev_task_link = old_tss_sel; |
2423 | 2364 | ||
2424 | ret = ops->write_std(new_tss_base, | 2365 | ret = ops->write_std(ctxt, new_tss_base, |
2425 | &tss_seg.prev_task_link, | 2366 | &tss_seg.prev_task_link, |
2426 | sizeof tss_seg.prev_task_link, | 2367 | sizeof tss_seg.prev_task_link, |
2427 | ctxt->vcpu, &err); | 2368 | &ctxt->exception); |
2428 | if (ret == X86EMUL_PROPAGATE_FAULT) { | 2369 | if (ret != X86EMUL_CONTINUE) |
2429 | /* FIXME: need to provide precise fault address */ | 2370 | /* FIXME: need to provide precise fault address */ |
2430 | emulate_pf(ctxt, new_tss_base, err); | ||
2431 | return ret; | 2371 | return ret; |
2432 | } | ||
2433 | } | 2372 | } |
2434 | 2373 | ||
2435 | return load_state_from_tss32(ctxt, ops, &tss_seg); | 2374 | return load_state_from_tss32(ctxt, ops, &tss_seg); |
@@ -2442,9 +2381,9 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2442 | { | 2381 | { |
2443 | struct desc_struct curr_tss_desc, next_tss_desc; | 2382 | struct desc_struct curr_tss_desc, next_tss_desc; |
2444 | int ret; | 2383 | int ret; |
2445 | u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu); | 2384 | u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); |
2446 | ulong old_tss_base = | 2385 | ulong old_tss_base = |
2447 | ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu); | 2386 | ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); |
2448 | u32 desc_limit; | 2387 | u32 desc_limit; |
2449 | 2388 | ||
2450 | /* FIXME: old_tss_base == ~0 ? */ | 2389 | /* FIXME: old_tss_base == ~0 ? */ |
@@ -2460,10 +2399,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2460 | 2399 | ||
2461 | if (reason != TASK_SWITCH_IRET) { | 2400 | if (reason != TASK_SWITCH_IRET) { |
2462 | if ((tss_selector & 3) > next_tss_desc.dpl || | 2401 | if ((tss_selector & 3) > next_tss_desc.dpl || |
2463 | ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) { | 2402 | ops->cpl(ctxt) > next_tss_desc.dpl) |
2464 | emulate_gp(ctxt, 0); | 2403 | return emulate_gp(ctxt, 0); |
2465 | return X86EMUL_PROPAGATE_FAULT; | ||
2466 | } | ||
2467 | } | 2404 | } |
2468 | 2405 | ||
2469 | desc_limit = desc_limit_scaled(&next_tss_desc); | 2406 | desc_limit = desc_limit_scaled(&next_tss_desc); |
@@ -2506,9 +2443,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2506 | &next_tss_desc); | 2443 | &next_tss_desc); |
2507 | } | 2444 | } |
2508 | 2445 | ||
2509 | ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu); | 2446 | ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); |
2510 | ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu); | 2447 | ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); |
2511 | ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu); | ||
2512 | 2448 | ||
2513 | if (has_error_code) { | 2449 | if (has_error_code) { |
2514 | struct decode_cache *c = &ctxt->decode; | 2450 | struct decode_cache *c = &ctxt->decode; |
@@ -2516,17 +2452,17 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2516 | c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; | 2452 | c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; |
2517 | c->lock_prefix = 0; | 2453 | c->lock_prefix = 0; |
2518 | c->src.val = (unsigned long) error_code; | 2454 | c->src.val = (unsigned long) error_code; |
2519 | emulate_push(ctxt, ops); | 2455 | ret = em_push(ctxt); |
2520 | } | 2456 | } |
2521 | 2457 | ||
2522 | return ret; | 2458 | return ret; |
2523 | } | 2459 | } |
2524 | 2460 | ||
2525 | int emulator_task_switch(struct x86_emulate_ctxt *ctxt, | 2461 | int emulator_task_switch(struct x86_emulate_ctxt *ctxt, |
2526 | struct x86_emulate_ops *ops, | ||
2527 | u16 tss_selector, int reason, | 2462 | u16 tss_selector, int reason, |
2528 | bool has_error_code, u32 error_code) | 2463 | bool has_error_code, u32 error_code) |
2529 | { | 2464 | { |
2465 | struct x86_emulate_ops *ops = ctxt->ops; | ||
2530 | struct decode_cache *c = &ctxt->decode; | 2466 | struct decode_cache *c = &ctxt->decode; |
2531 | int rc; | 2467 | int rc; |
2532 | 2468 | ||
@@ -2536,91 +2472,1357 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2536 | rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason, | 2472 | rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason, |
2537 | has_error_code, error_code); | 2473 | has_error_code, error_code); |
2538 | 2474 | ||
2539 | if (rc == X86EMUL_CONTINUE) { | 2475 | if (rc == X86EMUL_CONTINUE) |
2540 | rc = writeback(ctxt, ops); | 2476 | ctxt->eip = c->eip; |
2541 | if (rc == X86EMUL_CONTINUE) | ||
2542 | ctxt->eip = c->eip; | ||
2543 | } | ||
2544 | 2477 | ||
2545 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; | 2478 | return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; |
2546 | } | 2479 | } |
2547 | 2480 | ||
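With the ops parameter folded into the context and the intermediate writeback dropped, a caller consumes the new return convention roughly like this (the caller shape is illustrative, not taken from this patch):

/* Sketch: driving a task switch through the emulator. */
rc = emulator_task_switch(ctxt, tss_selector, reason,
			  has_error_code, error_code);
if (rc == EMULATION_FAILED)
	return EMULATE_FAIL;	/* punt to userspace */
/* EMULATION_OK: ctxt->eip was advanced; the caller commits
 * register and segment state back to the vcpu as usual. */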
2548 | static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned long base, | 2481 | static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg, |
2549 | int reg, struct operand *op) | 2482 | int reg, struct operand *op) |
2550 | { | 2483 | { |
2551 | struct decode_cache *c = &ctxt->decode; | 2484 | struct decode_cache *c = &ctxt->decode; |
2552 | int df = (ctxt->eflags & EFLG_DF) ? -1 : 1; | 2485 | int df = (ctxt->eflags & EFLG_DF) ? -1 : 1; |
2553 | 2486 | ||
2554 | register_address_increment(c, &c->regs[reg], df * op->bytes); | 2487 | register_address_increment(c, &c->regs[reg], df * op->bytes); |
2555 | op->ptr = (unsigned long *)register_address(c, base, c->regs[reg]); | 2488 | op->addr.mem.ea = register_address(c, c->regs[reg]); |
2489 | op->addr.mem.seg = seg; | ||
2490 | } | ||
2491 | |||
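string_addr_inc() is the per-iteration address stepper for string instructions: the direction flag selects the sign, and the effective address is re-derived through register_address(), which truncates to the current address size. In effect:

/* EFLAGS.DF = 0:  regs[reg] += op->bytes   (forward, after CLD)
 * EFLAGS.DF = 1:  regs[reg] -= op->bytes   (backward, after STD)
 * op->addr.mem = seg:regs[reg], with the offset masked to
 * c->ad_bytes by register_address(). */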
2492 | static int em_das(struct x86_emulate_ctxt *ctxt) | ||
2493 | { | ||
2494 | struct decode_cache *c = &ctxt->decode; | ||
2495 | u8 al, old_al; | ||
2496 | bool af, cf, old_cf; | ||
2497 | |||
2498 | cf = ctxt->eflags & X86_EFLAGS_CF; | ||
2499 | al = c->dst.val; | ||
2500 | |||
2501 | old_al = al; | ||
2502 | old_cf = cf; | ||
2503 | cf = false; | ||
2504 | af = ctxt->eflags & X86_EFLAGS_AF; | ||
2505 | if ((al & 0x0f) > 9 || af) { | ||
2506 | al -= 6; | ||
2507 | cf = old_cf | (al >= 250); | ||
2508 | af = true; | ||
2509 | } else { | ||
2510 | af = false; | ||
2511 | } | ||
2512 | if (old_al > 0x99 || old_cf) { | ||
2513 | al -= 0x60; | ||
2514 | cf = true; | ||
2515 | } | ||
2516 | |||
2517 | c->dst.val = al; | ||
2518 | /* Set PF, ZF, SF */ | ||
2519 | c->src.type = OP_IMM; | ||
2520 | c->src.val = 0; | ||
2521 | c->src.bytes = 1; | ||
2522 | emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); | ||
2523 | ctxt->eflags &= ~(X86_EFLAGS_AF | X86_EFLAGS_CF); | ||
2524 | if (cf) | ||
2525 | ctxt->eflags |= X86_EFLAGS_CF; | ||
2526 | if (af) | ||
2527 | ctxt->eflags |= X86_EFLAGS_AF; | ||
2528 | return X86EMUL_CONTINUE; | ||
2529 | } | ||
2530 | |||
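em_das() above transcribes the SDM's DAS (decimal adjust AL after subtraction) pseudocode; note that its (al >= 250) test detects, after the fact, the borrow out of "al -= 6". A standalone reference model for cross-checking (stdint/stdbool types assumed):

#include <stdint.h>
#include <stdbool.h>

/* DAS per the SDM; *cf and *af are the carry/adjust flags.
 * 'al < 6' before the subtraction is the same borrow condition
 * as 'al >= 250' after it, which em_das() uses. */
static uint8_t das(uint8_t al, bool *cf, bool *af)
{
	uint8_t old_al = al;
	bool old_cf = *cf;

	*cf = false;
	if ((al & 0x0f) > 9 || *af) {
		*cf = old_cf || (al < 6);
		al -= 6;
		*af = true;
	} else {
		*af = false;
	}
	if (old_al > 0x99 || old_cf) {
		al -= 0x60;
		*cf = true;
	}
	return al;
}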
2531 | static int em_call_far(struct x86_emulate_ctxt *ctxt) | ||
2532 | { | ||
2533 | struct decode_cache *c = &ctxt->decode; | ||
2534 | u16 sel, old_cs; | ||
2535 | ulong old_eip; | ||
2536 | int rc; | ||
2537 | |||
2538 | old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); | ||
2539 | old_eip = c->eip; | ||
2540 | |||
2541 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); | ||
2542 | if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS)) | ||
2543 | return X86EMUL_CONTINUE; | ||
2544 | |||
2545 | c->eip = 0; | ||
2546 | memcpy(&c->eip, c->src.valptr, c->op_bytes); | ||
2547 | |||
2548 | c->src.val = old_cs; | ||
2549 | rc = em_push(ctxt); | ||
2550 | if (rc != X86EMUL_CONTINUE) | ||
2551 | return rc; | ||
2552 | |||
2553 | c->src.val = old_eip; | ||
2554 | return em_push(ctxt); | ||
2555 | } | ||
2556 | |||
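em_call_far() consumes a far-pointer immediate whose offset comes first (op_bytes wide), followed by the 16-bit selector, and pushes the old CS before the old (E)IP. Laid out:

/* Far-pointer immediate, as addressed by the memcpy calls above:
 *
 *   src.valptr:  [ new (E)IP : op_bytes ][ new CS : 2 bytes ]
 *
 * Sequence: load the new CS (a fault here abandons the call),
 * then EIP = new offset, then push old CS, then push old EIP. */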
2557 | static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) | ||
2558 | { | ||
2559 | struct decode_cache *c = &ctxt->decode; | ||
2560 | int rc; | ||
2561 | |||
2562 | c->dst.type = OP_REG; | ||
2563 | c->dst.addr.reg = &c->eip; | ||
2564 | c->dst.bytes = c->op_bytes; | ||
2565 | rc = emulate_pop(ctxt, &c->dst.val, c->op_bytes); | ||
2566 | if (rc != X86EMUL_CONTINUE) | ||
2567 | return rc; | ||
2568 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val); | ||
2569 | return X86EMUL_CONTINUE; | ||
2570 | } | ||
2571 | |||
2572 | static int em_add(struct x86_emulate_ctxt *ctxt) | ||
2573 | { | ||
2574 | struct decode_cache *c = &ctxt->decode; | ||
2575 | |||
2576 | emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); | ||
2577 | return X86EMUL_CONTINUE; | ||
2578 | } | ||
2579 | |||
2580 | static int em_or(struct x86_emulate_ctxt *ctxt) | ||
2581 | { | ||
2582 | struct decode_cache *c = &ctxt->decode; | ||
2583 | |||
2584 | emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); | ||
2585 | return X86EMUL_CONTINUE; | ||
2586 | } | ||
2587 | |||
2588 | static int em_adc(struct x86_emulate_ctxt *ctxt) | ||
2589 | { | ||
2590 | struct decode_cache *c = &ctxt->decode; | ||
2591 | |||
2592 | emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags); | ||
2593 | return X86EMUL_CONTINUE; | ||
2594 | } | ||
2595 | |||
2596 | static int em_sbb(struct x86_emulate_ctxt *ctxt) | ||
2597 | { | ||
2598 | struct decode_cache *c = &ctxt->decode; | ||
2599 | |||
2600 | emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); | ||
2601 | return X86EMUL_CONTINUE; | ||
2602 | } | ||
2603 | |||
2604 | static int em_and(struct x86_emulate_ctxt *ctxt) | ||
2605 | { | ||
2606 | struct decode_cache *c = &ctxt->decode; | ||
2607 | |||
2608 | emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags); | ||
2609 | return X86EMUL_CONTINUE; | ||
2610 | } | ||
2611 | |||
2612 | static int em_sub(struct x86_emulate_ctxt *ctxt) | ||
2613 | { | ||
2614 | struct decode_cache *c = &ctxt->decode; | ||
2615 | |||
2616 | emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags); | ||
2617 | return X86EMUL_CONTINUE; | ||
2618 | } | ||
2619 | |||
2620 | static int em_xor(struct x86_emulate_ctxt *ctxt) | ||
2621 | { | ||
2622 | struct decode_cache *c = &ctxt->decode; | ||
2623 | |||
2624 | emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags); | ||
2625 | return X86EMUL_CONTINUE; | ||
2626 | } | ||
2627 | |||
2628 | static int em_cmp(struct x86_emulate_ctxt *ctxt) | ||
2629 | { | ||
2630 | struct decode_cache *c = &ctxt->decode; | ||
2631 | |||
2632 | emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags); | ||
2633 | /* Disable writeback. */ | ||
2634 | c->dst.type = OP_NONE; | ||
2635 | return X86EMUL_CONTINUE; | ||
2636 | } | ||
2637 | |||
2638 | static int em_imul(struct x86_emulate_ctxt *ctxt) | ||
2639 | { | ||
2640 | struct decode_cache *c = &ctxt->decode; | ||
2641 | |||
2642 | emulate_2op_SrcV_nobyte("imul", c->src, c->dst, ctxt->eflags); | ||
2643 | return X86EMUL_CONTINUE; | ||
2644 | } | ||
2645 | |||
2646 | static int em_imul_3op(struct x86_emulate_ctxt *ctxt) | ||
2647 | { | ||
2648 | struct decode_cache *c = &ctxt->decode; | ||
2649 | |||
2650 | c->dst.val = c->src2.val; | ||
2651 | return em_imul(ctxt); | ||
2652 | } | ||
2653 | |||
2654 | static int em_cwd(struct x86_emulate_ctxt *ctxt) | ||
2655 | { | ||
2656 | struct decode_cache *c = &ctxt->decode; | ||
2657 | |||
2658 | c->dst.type = OP_REG; | ||
2659 | c->dst.bytes = c->src.bytes; | ||
2660 | c->dst.addr.reg = &c->regs[VCPU_REGS_RDX]; | ||
2661 | c->dst.val = ~((c->src.val >> (c->src.bytes * 8 - 1)) - 1); | ||
2662 | |||
2663 | return X86EMUL_CONTINUE; | ||
2664 | } | ||
2665 | |||
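em_cwd() computes the CWD/CDQ/CQO result with a branchless sign-replication trick; working the expression through two inputs makes it obvious:

/* v >> (bytes*8 - 1) isolates the sign bit, giving 0 or 1:
 *
 *   v = 0x8000, bytes = 2:  1;  1 - 1 = 0;    ~0   = 0xffff -> DX
 *   v = 0x1234, bytes = 2:  0;  0 - 1 = ~0;  ~(~0) = 0x0000 -> DX
 *
 * i.e. the destination becomes the source's sign bit smeared
 * across every bit position, which is exactly CWD/CDQ/CQO. */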
2666 | static int em_rdtsc(struct x86_emulate_ctxt *ctxt) | ||
2667 | { | ||
2668 | struct decode_cache *c = &ctxt->decode; | ||
2669 | u64 tsc = 0; | ||
2670 | |||
2671 | ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); | ||
2672 | c->regs[VCPU_REGS_RAX] = (u32)tsc; | ||
2673 | c->regs[VCPU_REGS_RDX] = tsc >> 32; | ||
2674 | return X86EMUL_CONTINUE; | ||
2675 | } | ||
2676 | |||
2677 | static int em_mov(struct x86_emulate_ctxt *ctxt) | ||
2678 | { | ||
2679 | struct decode_cache *c = &ctxt->decode; | ||
2680 | c->dst.val = c->src.val; | ||
2681 | return X86EMUL_CONTINUE; | ||
2682 | } | ||
2683 | |||
2684 | static int em_movdqu(struct x86_emulate_ctxt *ctxt) | ||
2685 | { | ||
2686 | struct decode_cache *c = &ctxt->decode; | ||
2687 | memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes); | ||
2688 | return X86EMUL_CONTINUE; | ||
2689 | } | ||
2690 | |||
2691 | static int em_invlpg(struct x86_emulate_ctxt *ctxt) | ||
2692 | { | ||
2693 | struct decode_cache *c = &ctxt->decode; | ||
2694 | int rc; | ||
2695 | ulong linear; | ||
2696 | |||
2697 | rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear); | ||
2698 | if (rc == X86EMUL_CONTINUE) | ||
2699 | ctxt->ops->invlpg(ctxt, linear); | ||
2700 | /* Disable writeback. */ | ||
2701 | c->dst.type = OP_NONE; | ||
2702 | return X86EMUL_CONTINUE; | ||
2703 | } | ||
2704 | |||
2705 | static int em_clts(struct x86_emulate_ctxt *ctxt) | ||
2706 | { | ||
2707 | ulong cr0; | ||
2708 | |||
2709 | cr0 = ctxt->ops->get_cr(ctxt, 0); | ||
2710 | cr0 &= ~X86_CR0_TS; | ||
2711 | ctxt->ops->set_cr(ctxt, 0, cr0); | ||
2712 | return X86EMUL_CONTINUE; | ||
2713 | } | ||
2714 | |||
2715 | static int em_vmcall(struct x86_emulate_ctxt *ctxt) | ||
2716 | { | ||
2717 | struct decode_cache *c = &ctxt->decode; | ||
2718 | int rc; | ||
2719 | |||
2720 | if (c->modrm_mod != 3 || c->modrm_rm != 1) | ||
2721 | return X86EMUL_UNHANDLEABLE; | ||
2722 | |||
2723 | rc = ctxt->ops->fix_hypercall(ctxt); | ||
2724 | if (rc != X86EMUL_CONTINUE) | ||
2725 | return rc; | ||
2726 | |||
2727 | /* Let the processor re-execute the fixed hypercall */ | ||
2728 | c->eip = ctxt->eip; | ||
2729 | /* Disable writeback. */ | ||
2730 | c->dst.type = OP_NONE; | ||
2731 | return X86EMUL_CONTINUE; | ||
2732 | } | ||
2733 | |||
2734 | static int em_lgdt(struct x86_emulate_ctxt *ctxt) | ||
2735 | { | ||
2736 | struct decode_cache *c = &ctxt->decode; | ||
2737 | struct desc_ptr desc_ptr; | ||
2738 | int rc; | ||
2739 | |||
2740 | rc = read_descriptor(ctxt, c->src.addr.mem, | ||
2741 | &desc_ptr.size, &desc_ptr.address, | ||
2742 | c->op_bytes); | ||
2743 | if (rc != X86EMUL_CONTINUE) | ||
2744 | return rc; | ||
2745 | ctxt->ops->set_gdt(ctxt, &desc_ptr); | ||
2746 | /* Disable writeback. */ | ||
2747 | c->dst.type = OP_NONE; | ||
2748 | return X86EMUL_CONTINUE; | ||
2749 | } | ||
2750 | |||
2751 | static int em_vmmcall(struct x86_emulate_ctxt *ctxt) | ||
2752 | { | ||
2753 | struct decode_cache *c = &ctxt->decode; | ||
2754 | int rc; | ||
2755 | |||
2756 | rc = ctxt->ops->fix_hypercall(ctxt); | ||
2757 | |||
2758 | /* Disable writeback. */ | ||
2759 | c->dst.type = OP_NONE; | ||
2760 | return rc; | ||
2761 | } | ||
2762 | |||
2763 | static int em_lidt(struct x86_emulate_ctxt *ctxt) | ||
2764 | { | ||
2765 | struct decode_cache *c = &ctxt->decode; | ||
2766 | struct desc_ptr desc_ptr; | ||
2767 | int rc; | ||
2768 | |||
2769 | rc = read_descriptor(ctxt, c->src.addr.mem, | ||
2770 | &desc_ptr.size, &desc_ptr.address, | ||
2771 | c->op_bytes); | ||
2772 | if (rc != X86EMUL_CONTINUE) | ||
2773 | return rc; | ||
2774 | ctxt->ops->set_idt(ctxt, &desc_ptr); | ||
2775 | /* Disable writeback. */ | ||
2776 | c->dst.type = OP_NONE; | ||
2777 | return X86EMUL_CONTINUE; | ||
2778 | } | ||
2779 | |||
2780 | static int em_smsw(struct x86_emulate_ctxt *ctxt) | ||
2781 | { | ||
2782 | struct decode_cache *c = &ctxt->decode; | ||
2783 | |||
2784 | c->dst.bytes = 2; | ||
2785 | c->dst.val = ctxt->ops->get_cr(ctxt, 0); | ||
2786 | return X86EMUL_CONTINUE; | ||
2787 | } | ||
2788 | |||
2789 | static int em_lmsw(struct x86_emulate_ctxt *ctxt) | ||
2790 | { | ||
2791 | struct decode_cache *c = &ctxt->decode; | ||
2792 | ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) | ||
2793 | | (c->src.val & 0x0f)); | ||
2794 | c->dst.type = OP_NONE; | ||
2795 | return X86EMUL_CONTINUE; | ||
2796 | } | ||
2797 | |||
2798 | static bool valid_cr(int nr) | ||
2799 | { | ||
2800 | switch (nr) { | ||
2801 | case 0: | ||
2802 | case 2 ... 4: | ||
2803 | case 8: | ||
2804 | return true; | ||
2805 | default: | ||
2806 | return false; | ||
2807 | } | ||
2808 | } | ||
2809 | |||
2810 | static int check_cr_read(struct x86_emulate_ctxt *ctxt) | ||
2811 | { | ||
2812 | struct decode_cache *c = &ctxt->decode; | ||
2813 | |||
2814 | if (!valid_cr(c->modrm_reg)) | ||
2815 | return emulate_ud(ctxt); | ||
2816 | |||
2817 | return X86EMUL_CONTINUE; | ||
2818 | } | ||
2819 | |||
2820 | static int check_cr_write(struct x86_emulate_ctxt *ctxt) | ||
2821 | { | ||
2822 | struct decode_cache *c = &ctxt->decode; | ||
2823 | u64 new_val = c->src.val64; | ||
2824 | int cr = c->modrm_reg; | ||
2825 | u64 efer = 0; | ||
2826 | |||
2827 | static u64 cr_reserved_bits[] = { | ||
2828 | 0xffffffff00000000ULL, | ||
2829 | 0, 0, 0, /* CR3 checked later */ | ||
2830 | CR4_RESERVED_BITS, | ||
2831 | 0, 0, 0, | ||
2832 | CR8_RESERVED_BITS, | ||
2833 | }; | ||
2834 | |||
2835 | if (!valid_cr(cr)) | ||
2836 | return emulate_ud(ctxt); | ||
2837 | |||
2838 | if (new_val & cr_reserved_bits[cr]) | ||
2839 | return emulate_gp(ctxt, 0); | ||
2840 | |||
2841 | switch (cr) { | ||
2842 | case 0: { | ||
2843 | u64 cr4; | ||
2844 | if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || | ||
2845 | ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) | ||
2846 | return emulate_gp(ctxt, 0); | ||
2847 | |||
2848 | cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2849 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | ||
2850 | |||
2851 | if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && | ||
2852 | !(cr4 & X86_CR4_PAE)) | ||
2853 | return emulate_gp(ctxt, 0); | ||
2854 | |||
2855 | break; | ||
2856 | } | ||
2857 | case 3: { | ||
2858 | u64 rsvd = 0; | ||
2859 | |||
2860 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | ||
2861 | if (efer & EFER_LMA) | ||
2862 | rsvd = CR3_L_MODE_RESERVED_BITS; | ||
2863 | else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE) | ||
2864 | rsvd = CR3_PAE_RESERVED_BITS; | ||
2865 | else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG) | ||
2866 | rsvd = CR3_NONPAE_RESERVED_BITS; | ||
2867 | |||
2868 | if (new_val & rsvd) | ||
2869 | return emulate_gp(ctxt, 0); | ||
2870 | |||
2871 | break; | ||
2872 | } | ||
2873 | case 4: { | ||
2874 | u64 cr4; | ||
2875 | |||
2876 | cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2877 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | ||
2878 | |||
2879 | if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) | ||
2880 | return emulate_gp(ctxt, 0); | ||
2881 | |||
2882 | break; | ||
2883 | } | ||
2884 | } | ||
2885 | |||
2886 | return X86EMUL_CONTINUE; | ||
2887 | } | ||
2888 | |||
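check_cr_write() front-loads the architectural #UD/#GP conditions so an illegal value never reaches kvm's CR handling. A worked pass through the reserved-bits table:

/* Guest executes: mov %rax, %cr0  with rax = 0x100000011
 * (PE | ET | bit 32).
 *
 *   cr_reserved_bits[0] = 0xffffffff00000000ULL
 *   new_val & rsvd      = 0x100000000 != 0   ->  #GP(0)
 *
 * With rax = 0x80000011 (PG | PE | ET) instead, the reserved
 * check passes and the PG/PE and PAE-under-LME tests apply. */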
2889 | static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) | ||
2890 | { | ||
2891 | unsigned long dr7; | ||
2892 | |||
2893 | ctxt->ops->get_dr(ctxt, 7, &dr7); | ||
2894 | |||
2895 | /* Check if DR7.GD (general detect) is set */ |||
2896 | return dr7 & (1 << 13); | ||
2897 | } | ||
2898 | |||
2899 | static int check_dr_read(struct x86_emulate_ctxt *ctxt) | ||
2900 | { | ||
2901 | struct decode_cache *c = &ctxt->decode; | ||
2902 | int dr = c->modrm_reg; | ||
2903 | u64 cr4; | ||
2904 | |||
2905 | if (dr > 7) | ||
2906 | return emulate_ud(ctxt); | ||
2907 | |||
2908 | cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2909 | if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) | ||
2910 | return emulate_ud(ctxt); | ||
2911 | |||
2912 | if (check_dr7_gd(ctxt)) | ||
2913 | return emulate_db(ctxt); | ||
2914 | |||
2915 | return X86EMUL_CONTINUE; | ||
2916 | } | ||
2917 | |||
2918 | static int check_dr_write(struct x86_emulate_ctxt *ctxt) | ||
2919 | { | ||
2920 | struct decode_cache *c = &ctxt->decode; | ||
2921 | u64 new_val = c->src.val64; | ||
2922 | int dr = c->modrm_reg; | ||
2923 | |||
2924 | if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) | ||
2925 | return emulate_gp(ctxt, 0); | ||
2926 | |||
2927 | return check_dr_read(ctxt); | ||
2928 | } | ||
2929 | |||
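check_dr_read() enforces two separate faults: #UD for DR4/DR5 when CR4.DE is set (they alias DR6/DR7 only with DE clear), and #DB when DR7.GD is set:

/* DR7, bit of interest here:
 *
 *   bit 13  GD  -- general detect: any mov to/from %dr0-%dr7
 *                  raises #DB before the access completes; the
 *                  CPU clears GD on delivering the #DB so the
 *                  handler itself can touch the registers. */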
2930 | static int check_svme(struct x86_emulate_ctxt *ctxt) | ||
2931 | { | ||
2932 | u64 efer; | ||
2933 | |||
2934 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | ||
2935 | |||
2936 | if (!(efer & EFER_SVME)) | ||
2937 | return emulate_ud(ctxt); | ||
2938 | |||
2939 | return X86EMUL_CONTINUE; | ||
2940 | } | ||
2941 | |||
2942 | static int check_svme_pa(struct x86_emulate_ctxt *ctxt) | ||
2943 | { | ||
2944 | u64 rax = ctxt->decode.regs[VCPU_REGS_RAX]; | ||
2945 | |||
2946 | /* Valid physical address? */ | ||
2947 | if (rax & 0xffff000000000000ULL) | ||
2948 | return emulate_gp(ctxt, 0); | ||
2949 | |||
2950 | return check_svme(ctxt); | ||
2951 | } | ||
2952 | |||
2953 | static int check_rdtsc(struct x86_emulate_ctxt *ctxt) | ||
2954 | { | ||
2955 | u64 cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2956 | |||
2957 | if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) | ||
2958 | return emulate_ud(ctxt); | ||
2959 | |||
2960 | return X86EMUL_CONTINUE; | ||
2961 | } | ||
2962 | |||
2963 | static int check_rdpmc(struct x86_emulate_ctxt *ctxt) | ||
2964 | { | ||
2965 | u64 cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2966 | u64 rcx = ctxt->decode.regs[VCPU_REGS_RCX]; | ||
2967 | |||
2968 | if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || | ||
2969 | (rcx > 3)) | ||
2970 | return emulate_gp(ctxt, 0); | ||
2971 | |||
2972 | return X86EMUL_CONTINUE; | ||
2973 | } | ||
2974 | |||
2975 | static int check_perm_in(struct x86_emulate_ctxt *ctxt) | ||
2976 | { | ||
2977 | struct decode_cache *c = &ctxt->decode; | ||
2978 | |||
2979 | c->dst.bytes = min(c->dst.bytes, 4u); | ||
2980 | if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes)) | ||
2981 | return emulate_gp(ctxt, 0); | ||
2982 | |||
2983 | return X86EMUL_CONTINUE; | ||
2984 | } | ||
2985 | |||
2986 | static int check_perm_out(struct x86_emulate_ctxt *ctxt) | ||
2987 | { | ||
2988 | struct decode_cache *c = &ctxt->decode; | ||
2989 | |||
2990 | c->src.bytes = min(c->src.bytes, 4u); | ||
2991 | if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes)) | ||
2992 | return emulate_gp(ctxt, 0); | ||
2993 | |||
2994 | return X86EMUL_CONTINUE; | ||
2995 | } | ||
2996 | |||
2997 | #define D(_y) { .flags = (_y) } | ||
2998 | #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i } | ||
2999 | #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \ | ||
3000 | .check_perm = (_p) } | ||
3001 | #define N D(0) | ||
3002 | #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } | ||
3003 | #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) } | ||
3004 | #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) } | ||
3005 | #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } | ||
3006 | #define II(_f, _e, _i) \ | ||
3007 | { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i } | ||
3008 | #define IIP(_f, _e, _i, _p) \ | ||
3009 | { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \ | ||
3010 | .check_perm = (_p) } | ||
3011 | #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } | ||
3012 | |||
3013 | #define D2bv(_f) D((_f) | ByteOp), D(_f) | ||
3014 | #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) | ||
3015 | #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) | ||
3016 | |||
3017 | #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \ | ||
3018 | I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ | ||
3019 | I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) | ||
3020 | |||
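The builder macros compress families of table entries; the payoff shows up in opcode_table below, where a single I6ALU() line populates the six classic ALU encodings of one instruction. Expanded by hand, with the ~Lock folding already applied (illustrative):

/* I6ALU(Lock, em_add) expands, via I2bv, to the 0x00-0x05 slots: */
I(ByteOp | DstMem | SrcReg | ModRM | Lock, em_add), /* 0x00 add r/m8, r8 */
I(DstMem | SrcReg | ModRM | Lock, em_add),          /* 0x01 add r/m, r   */
I(ByteOp | DstReg | SrcMem | ModRM, em_add),        /* 0x02 add r8, r/m8 */
I(DstReg | SrcMem | ModRM, em_add),                 /* 0x03 add r, r/m   */
I(ByteOp | DstAcc | SrcImm, em_add),                /* 0x04 add al, imm8 */
I(DstAcc | SrcImm, em_add),                         /* 0x05 add ax, imm  */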
3021 | static struct opcode group7_rm1[] = { | ||
3022 | DI(SrcNone | ModRM | Priv, monitor), | ||
3023 | DI(SrcNone | ModRM | Priv, mwait), | ||
3024 | N, N, N, N, N, N, | ||
3025 | }; | ||
3026 | |||
3027 | static struct opcode group7_rm3[] = { | ||
3028 | DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa), | ||
3029 | II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall), | ||
3030 | DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa), | ||
3031 | DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa), | ||
3032 | DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme), | ||
3033 | DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme), | ||
3034 | DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme), | ||
3035 | DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme), | ||
3036 | }; | ||
3037 | |||
3038 | static struct opcode group7_rm7[] = { | ||
3039 | N, | ||
3040 | DIP(SrcNone | ModRM, rdtscp, check_rdtsc), | ||
3041 | N, N, N, N, N, N, | ||
3042 | }; | ||
3043 | |||
3044 | static struct opcode group1[] = { | ||
3045 | I(Lock, em_add), | ||
3046 | I(Lock, em_or), | ||
3047 | I(Lock, em_adc), | ||
3048 | I(Lock, em_sbb), | ||
3049 | I(Lock, em_and), | ||
3050 | I(Lock, em_sub), | ||
3051 | I(Lock, em_xor), | ||
3052 | I(0, em_cmp), | ||
3053 | }; | ||
3054 | |||
3055 | static struct opcode group1A[] = { | ||
3056 | D(DstMem | SrcNone | ModRM | Mov | Stack), N, N, N, N, N, N, N, | ||
3057 | }; | ||
3058 | |||
3059 | static struct opcode group3[] = { | ||
3060 | D(DstMem | SrcImm | ModRM), D(DstMem | SrcImm | ModRM), | ||
3061 | D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock), | ||
3062 | X4(D(SrcMem | ModRM)), | ||
3063 | }; | ||
3064 | |||
3065 | static struct opcode group4[] = { | ||
3066 | D(ByteOp | DstMem | SrcNone | ModRM | Lock), D(ByteOp | DstMem | SrcNone | ModRM | Lock), | ||
3067 | N, N, N, N, N, N, | ||
3068 | }; | ||
3069 | |||
3070 | static struct opcode group5[] = { | ||
3071 | D(DstMem | SrcNone | ModRM | Lock), D(DstMem | SrcNone | ModRM | Lock), | ||
3072 | D(SrcMem | ModRM | Stack), | ||
3073 | I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far), | ||
3074 | D(SrcMem | ModRM | Stack), D(SrcMemFAddr | ModRM | ImplicitOps), | ||
3075 | D(SrcMem | ModRM | Stack), N, | ||
3076 | }; | ||
3077 | |||
3078 | static struct opcode group6[] = { | ||
3079 | DI(ModRM | Prot, sldt), | ||
3080 | DI(ModRM | Prot, str), | ||
3081 | DI(ModRM | Prot | Priv, lldt), | ||
3082 | DI(ModRM | Prot | Priv, ltr), | ||
3083 | N, N, N, N, | ||
3084 | }; | ||
3085 | |||
3086 | static struct group_dual group7 = { { | ||
3087 | DI(ModRM | Mov | DstMem | Priv, sgdt), | ||
3088 | DI(ModRM | Mov | DstMem | Priv, sidt), | ||
3089 | II(ModRM | SrcMem | Priv, em_lgdt, lgdt), | ||
3090 | II(ModRM | SrcMem | Priv, em_lidt, lidt), | ||
3091 | II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N, | ||
3092 | II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), | ||
3093 | II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg), | ||
3094 | }, { | ||
3095 | I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall), | ||
3096 | EXT(0, group7_rm1), | ||
3097 | N, EXT(0, group7_rm3), | ||
3098 | II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N, | ||
3099 | II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7), | ||
3100 | } }; | ||
3101 | |||
3102 | static struct opcode group8[] = { | ||
3103 | N, N, N, N, | ||
3104 | D(DstMem | SrcImmByte | ModRM), D(DstMem | SrcImmByte | ModRM | Lock), | ||
3105 | D(DstMem | SrcImmByte | ModRM | Lock), D(DstMem | SrcImmByte | ModRM | Lock), | ||
3106 | }; | ||
3107 | |||
3108 | static struct group_dual group9 = { { | ||
3109 | N, D(DstMem64 | ModRM | Lock), N, N, N, N, N, N, | ||
3110 | }, { | ||
3111 | N, N, N, N, N, N, N, N, | ||
3112 | } }; | ||
3113 | |||
3114 | static struct opcode group11[] = { | ||
3115 | I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)), | ||
3116 | }; | ||
3117 | |||
3118 | static struct gprefix pfx_0f_6f_0f_7f = { | ||
3119 | N, N, N, I(Sse, em_movdqu), | ||
3120 | }; | ||
3121 | |||
3122 | static struct opcode opcode_table[256] = { | ||
3123 | /* 0x00 - 0x07 */ | ||
3124 | I6ALU(Lock, em_add), | ||
3125 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), | ||
3126 | /* 0x08 - 0x0F */ | ||
3127 | I6ALU(Lock, em_or), | ||
3128 | D(ImplicitOps | Stack | No64), N, | ||
3129 | /* 0x10 - 0x17 */ | ||
3130 | I6ALU(Lock, em_adc), | ||
3131 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), | ||
3132 | /* 0x18 - 0x1F */ | ||
3133 | I6ALU(Lock, em_sbb), | ||
3134 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), | ||
3135 | /* 0x20 - 0x27 */ | ||
3136 | I6ALU(Lock, em_and), N, N, | ||
3137 | /* 0x28 - 0x2F */ | ||
3138 | I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), | ||
3139 | /* 0x30 - 0x37 */ | ||
3140 | I6ALU(Lock, em_xor), N, N, | ||
3141 | /* 0x38 - 0x3F */ | ||
3142 | I6ALU(0, em_cmp), N, N, | ||
3143 | /* 0x40 - 0x4F */ | ||
3144 | X16(D(DstReg)), | ||
3145 | /* 0x50 - 0x57 */ | ||
3146 | X8(I(SrcReg | Stack, em_push)), | ||
3147 | /* 0x58 - 0x5F */ | ||
3148 | X8(I(DstReg | Stack, em_pop)), | ||
3149 | /* 0x60 - 0x67 */ | ||
3150 | I(ImplicitOps | Stack | No64, em_pusha), | ||
3151 | I(ImplicitOps | Stack | No64, em_popa), | ||
3152 | N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ , | ||
3153 | N, N, N, N, | ||
3154 | /* 0x68 - 0x6F */ | ||
3155 | I(SrcImm | Mov | Stack, em_push), | ||
3156 | I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), | ||
3157 | I(SrcImmByte | Mov | Stack, em_push), | ||
3158 | I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), | ||
3159 | D2bvIP(DstDI | SrcDX | Mov | String, ins, check_perm_in), /* insb, insw/insd */ | ||
3160 | D2bvIP(SrcSI | DstDX | String, outs, check_perm_out), /* outsb, outsw/outsd */ | ||
3161 | /* 0x70 - 0x7F */ | ||
3162 | X16(D(SrcImmByte)), | ||
3163 | /* 0x80 - 0x87 */ | ||
3164 | G(ByteOp | DstMem | SrcImm | ModRM | Group, group1), | ||
3165 | G(DstMem | SrcImm | ModRM | Group, group1), | ||
3166 | G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1), | ||
3167 | G(DstMem | SrcImmByte | ModRM | Group, group1), | ||
3168 | D2bv(DstMem | SrcReg | ModRM), D2bv(DstMem | SrcReg | ModRM | Lock), | ||
3169 | /* 0x88 - 0x8F */ | ||
3170 | I2bv(DstMem | SrcReg | ModRM | Mov, em_mov), | ||
3171 | I2bv(DstReg | SrcMem | ModRM | Mov, em_mov), | ||
3172 | D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg), | ||
3173 | D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A), | ||
3174 | /* 0x90 - 0x97 */ | ||
3175 | DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), | ||
3176 | /* 0x98 - 0x9F */ | ||
3177 | D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), | ||
3178 | I(SrcImmFAddr | No64, em_call_far), N, | ||
3179 | II(ImplicitOps | Stack, em_pushf, pushf), | ||
3180 | II(ImplicitOps | Stack, em_popf, popf), N, N, | ||
3181 | /* 0xA0 - 0xA7 */ | ||
3182 | I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), | ||
3183 | I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov), | ||
3184 | I2bv(SrcSI | DstDI | Mov | String, em_mov), | ||
3185 | I2bv(SrcSI | DstDI | String, em_cmp), | ||
3186 | /* 0xA8 - 0xAF */ | ||
3187 | D2bv(DstAcc | SrcImm), | ||
3188 | I2bv(SrcAcc | DstDI | Mov | String, em_mov), | ||
3189 | I2bv(SrcSI | DstAcc | Mov | String, em_mov), | ||
3190 | I2bv(SrcAcc | DstDI | String, em_cmp), | ||
3191 | /* 0xB0 - 0xB7 */ | ||
3192 | X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), | ||
3193 | /* 0xB8 - 0xBF */ | ||
3194 | X8(I(DstReg | SrcImm | Mov, em_mov)), | ||
3195 | /* 0xC0 - 0xC7 */ | ||
3196 | D2bv(DstMem | SrcImmByte | ModRM), | ||
3197 | I(ImplicitOps | Stack | SrcImmU16, em_ret_near_imm), | ||
3198 | D(ImplicitOps | Stack), | ||
3199 | D(DstReg | SrcMemFAddr | ModRM | No64), D(DstReg | SrcMemFAddr | ModRM | No64), | ||
3200 | G(ByteOp, group11), G(0, group11), | ||
3201 | /* 0xC8 - 0xCF */ | ||
3202 | N, N, N, D(ImplicitOps | Stack), | ||
3203 | D(ImplicitOps), DI(SrcImmByte, intn), | ||
3204 | D(ImplicitOps | No64), DI(ImplicitOps, iret), | ||
3205 | /* 0xD0 - 0xD7 */ | ||
3206 | D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM), | ||
3207 | N, N, N, N, | ||
3208 | /* 0xD8 - 0xDF */ | ||
3209 | N, N, N, N, N, N, N, N, | ||
3210 | /* 0xE0 - 0xE7 */ | ||
3211 | X4(D(SrcImmByte)), | ||
3212 | D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in), | ||
3213 | D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out), | ||
3214 | /* 0xE8 - 0xEF */ | ||
3215 | D(SrcImm | Stack), D(SrcImm | ImplicitOps), | ||
3216 | D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps), | ||
3217 | D2bvIP(SrcDX | DstAcc, in, check_perm_in), | ||
3218 | D2bvIP(SrcAcc | DstDX, out, check_perm_out), | ||
3219 | /* 0xF0 - 0xF7 */ | ||
3220 | N, DI(ImplicitOps, icebp), N, N, | ||
3221 | DI(ImplicitOps | Priv, hlt), D(ImplicitOps), | ||
3222 | G(ByteOp, group3), G(0, group3), | ||
3223 | /* 0xF8 - 0xFF */ | ||
3224 | D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), | ||
3225 | D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), | ||
3226 | }; | ||
3227 | |||
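To trace one entry through the decoder below: opcode 0x50-0x57 (push reg) hits X8(I(SrcReg | Stack, em_push)), so c->d picks up SrcReg | Stack and c->execute becomes em_push; with no ModRM byte, the source register comes from the low three opcode bits, extended by REX.B in long mode. Schematically:

/* Decode of 0x53 (push %rbx / %ebx):
 *
 *   opcode     = opcode_table[0x53]; // I(SrcReg | Stack, em_push)
 *   c->d       = opcode.flags;       // SrcReg | Stack
 *   c->execute = opcode.u.execute;   // em_push
 *   reg        = 0x53 & 7;           // 3 -> RBX (REX.B would add 8)
 */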
3228 | static struct opcode twobyte_table[256] = { | ||
3229 | /* 0x00 - 0x0F */ | ||
3230 | G(0, group6), GD(0, &group7), N, N, | ||
3231 | N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N, | ||
3232 | DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, | ||
3233 | N, D(ImplicitOps | ModRM), N, N, | ||
3234 | /* 0x10 - 0x1F */ | ||
3235 | N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N, | ||
3236 | /* 0x20 - 0x2F */ | ||
3237 | DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), | ||
3238 | DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), | ||
3239 | DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write), | ||
3240 | DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write), | ||
3241 | N, N, N, N, | ||
3242 | N, N, N, N, N, N, N, N, | ||
3243 | /* 0x30 - 0x3F */ | ||
3244 | DI(ImplicitOps | Priv, wrmsr), | ||
3245 | IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), | ||
3246 | DI(ImplicitOps | Priv, rdmsr), | ||
3247 | DIP(ImplicitOps | Priv, rdpmc, check_rdpmc), | ||
3248 | D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific), | ||
3249 | N, N, | ||
3250 | N, N, N, N, N, N, N, N, | ||
3251 | /* 0x40 - 0x4F */ | ||
3252 | X16(D(DstReg | SrcMem | ModRM | Mov)), | ||
3253 | /* 0x50 - 0x5F */ | ||
3254 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, | ||
3255 | /* 0x60 - 0x6F */ | ||
3256 | N, N, N, N, | ||
3257 | N, N, N, N, | ||
3258 | N, N, N, N, | ||
3259 | N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), | ||
3260 | /* 0x70 - 0x7F */ | ||
3261 | N, N, N, N, | ||
3262 | N, N, N, N, | ||
3263 | N, N, N, N, | ||
3264 | N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), | ||
3265 | /* 0x80 - 0x8F */ | ||
3266 | X16(D(SrcImm)), | ||
3267 | /* 0x90 - 0x9F */ | ||
3268 | X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)), |||
3269 | /* 0xA0 - 0xA7 */ | ||
3270 | D(ImplicitOps | Stack), D(ImplicitOps | Stack), | ||
3271 | DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp), | ||
3272 | D(DstMem | SrcReg | Src2ImmByte | ModRM), | ||
3273 | D(DstMem | SrcReg | Src2CL | ModRM), N, N, | ||
3274 | /* 0xA8 - 0xAF */ | ||
3275 | D(ImplicitOps | Stack), D(ImplicitOps | Stack), | ||
3276 | DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock), | ||
3277 | D(DstMem | SrcReg | Src2ImmByte | ModRM), | ||
3278 | D(DstMem | SrcReg | Src2CL | ModRM), | ||
3279 | D(ModRM), I(DstReg | SrcMem | ModRM, em_imul), | ||
3280 | /* 0xB0 - 0xB7 */ | ||
3281 | D2bv(DstMem | SrcReg | ModRM | Lock), | ||
3282 | D(DstReg | SrcMemFAddr | ModRM), D(DstMem | SrcReg | ModRM | BitOp | Lock), | ||
3283 | D(DstReg | SrcMemFAddr | ModRM), D(DstReg | SrcMemFAddr | ModRM), | ||
3284 | D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), | ||
3285 | /* 0xB8 - 0xBF */ | ||
3286 | N, N, | ||
3287 | G(BitOp, group8), D(DstMem | SrcReg | ModRM | BitOp | Lock), | ||
3288 | D(DstReg | SrcMem | ModRM), D(DstReg | SrcMem | ModRM), | ||
3289 | D(ByteOp | DstReg | SrcMem | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov), | ||
3290 | /* 0xC0 - 0xCF */ | ||
3291 | D2bv(DstMem | SrcReg | ModRM | Lock), | ||
3292 | N, D(DstMem | SrcReg | ModRM | Mov), | ||
3293 | N, N, N, GD(0, &group9), | ||
3294 | N, N, N, N, N, N, N, N, | ||
3295 | /* 0xD0 - 0xDF */ | ||
3296 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, | ||
3297 | /* 0xE0 - 0xEF */ | ||
3298 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, | ||
3299 | /* 0xF0 - 0xFF */ | ||
3300 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N | ||
3301 | }; | ||
3302 | |||
3303 | #undef D | ||
3304 | #undef N | ||
3305 | #undef G | ||
3306 | #undef GD | ||
3307 | #undef I | ||
3308 | #undef GP | ||
3309 | #undef EXT | ||
3310 | |||
3311 | #undef D2bv | ||
3312 | #undef D2bvIP | ||
3313 | #undef I2bv | ||
3314 | #undef I6ALU | ||
3315 | |||
3316 | static unsigned imm_size(struct decode_cache *c) | ||
3317 | { | ||
3318 | unsigned size; | ||
3319 | |||
3320 | size = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
3321 | if (size == 8) | ||
3322 | size = 4; | ||
3323 | return size; | ||
3324 | } | ||
3325 | |||
3326 | static int decode_imm(struct x86_emulate_ctxt *ctxt, struct operand *op, | ||
3327 | unsigned size, bool sign_extension) | ||
3328 | { | ||
3329 | struct decode_cache *c = &ctxt->decode; | ||
3330 | struct x86_emulate_ops *ops = ctxt->ops; | ||
3331 | int rc = X86EMUL_CONTINUE; | ||
3332 | |||
3333 | op->type = OP_IMM; | ||
3334 | op->bytes = size; | ||
3335 | op->addr.mem.ea = c->eip; | ||
3336 | /* NB. Immediates are sign-extended as necessary. */ | ||
3337 | switch (op->bytes) { | ||
3338 | case 1: | ||
3339 | op->val = insn_fetch(s8, 1, c->eip); | ||
3340 | break; | ||
3341 | case 2: | ||
3342 | op->val = insn_fetch(s16, 2, c->eip); | ||
3343 | break; | ||
3344 | case 4: | ||
3345 | op->val = insn_fetch(s32, 4, c->eip); | ||
3346 | break; | ||
3347 | } | ||
3348 | if (!sign_extension) { | ||
3349 | switch (op->bytes) { | ||
3350 | case 1: | ||
3351 | op->val &= 0xff; | ||
3352 | break; | ||
3353 | case 2: | ||
3354 | op->val &= 0xffff; | ||
3355 | break; | ||
3356 | case 4: | ||
3357 | op->val &= 0xffffffff; | ||
3358 | break; | ||
3359 | } | ||
3360 | } | ||
3361 | done: | ||
3362 | return rc; | ||
2556 | } | 3363 | } |
2557 | 3364 | ||
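decode_imm() always fetches the immediate through the signed insn_fetch() types and masks afterwards when sign extension isn't wanted, so SrcImmByte and SrcImmUByte differ only in the final mask:

/* Immediate byte 0xf0 through decode_imm():
 *
 *   fetched as s8            -> op->val = ...fffffff0 (sign-extended)
 *   sign_extension == true   -> kept as-is  (SrcImmByte)
 *   sign_extension == false  -> &= 0xff     -> 0xf0 (SrcImmUByte)
 */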
2558 | int | 3365 | int |
2559 | x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | 3366 | x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) |
2560 | { | 3367 | { |
3368 | struct x86_emulate_ops *ops = ctxt->ops; | ||
3369 | struct decode_cache *c = &ctxt->decode; | ||
3370 | int rc = X86EMUL_CONTINUE; | ||
3371 | int mode = ctxt->mode; | ||
3372 | int def_op_bytes, def_ad_bytes, goffset, simd_prefix; | ||
3373 | bool op_prefix = false; | ||
3374 | struct opcode opcode; | ||
3375 | struct operand memop = { .type = OP_NONE }, *memopp = NULL; | ||
3376 | |||
3377 | c->eip = ctxt->eip; | ||
3378 | c->fetch.start = c->eip; | ||
3379 | c->fetch.end = c->fetch.start + insn_len; | ||
3380 | if (insn_len > 0) | ||
3381 | memcpy(c->fetch.data, insn, insn_len); | ||
3382 | |||
3383 | switch (mode) { | ||
3384 | case X86EMUL_MODE_REAL: | ||
3385 | case X86EMUL_MODE_VM86: | ||
3386 | case X86EMUL_MODE_PROT16: | ||
3387 | def_op_bytes = def_ad_bytes = 2; | ||
3388 | break; | ||
3389 | case X86EMUL_MODE_PROT32: | ||
3390 | def_op_bytes = def_ad_bytes = 4; | ||
3391 | break; | ||
3392 | #ifdef CONFIG_X86_64 | ||
3393 | case X86EMUL_MODE_PROT64: | ||
3394 | def_op_bytes = 4; | ||
3395 | def_ad_bytes = 8; | ||
3396 | break; | ||
3397 | #endif | ||
3398 | default: | ||
3399 | return -1; | ||
3400 | } | ||
3401 | |||
3402 | c->op_bytes = def_op_bytes; | ||
3403 | c->ad_bytes = def_ad_bytes; | ||
3404 | |||
3405 | /* Legacy prefixes. */ | ||
3406 | for (;;) { | ||
3407 | switch (c->b = insn_fetch(u8, 1, c->eip)) { | ||
3408 | case 0x66: /* operand-size override */ | ||
3409 | op_prefix = true; | ||
3410 | /* switch between 2/4 bytes */ | ||
3411 | c->op_bytes = def_op_bytes ^ 6; | ||
3412 | break; | ||
3413 | case 0x67: /* address-size override */ | ||
3414 | if (mode == X86EMUL_MODE_PROT64) | ||
3415 | /* switch between 4/8 bytes */ | ||
3416 | c->ad_bytes = def_ad_bytes ^ 12; | ||
3417 | else | ||
3418 | /* switch between 2/4 bytes */ | ||
3419 | c->ad_bytes = def_ad_bytes ^ 6; | ||
3420 | break; | ||
3421 | case 0x26: /* ES override */ | ||
3422 | case 0x2e: /* CS override */ | ||
3423 | case 0x36: /* SS override */ | ||
3424 | case 0x3e: /* DS override */ | ||
3425 | set_seg_override(c, (c->b >> 3) & 3); | ||
3426 | break; | ||
3427 | case 0x64: /* FS override */ | ||
3428 | case 0x65: /* GS override */ | ||
3429 | set_seg_override(c, c->b & 7); | ||
3430 | break; | ||
3431 | case 0x40 ... 0x4f: /* REX */ | ||
3432 | if (mode != X86EMUL_MODE_PROT64) | ||
3433 | goto done_prefixes; | ||
3434 | c->rex_prefix = c->b; | ||
3435 | continue; | ||
3436 | case 0xf0: /* LOCK */ | ||
3437 | c->lock_prefix = 1; | ||
3438 | break; | ||
3439 | case 0xf2: /* REPNE/REPNZ */ | ||
3440 | case 0xf3: /* REP/REPE/REPZ */ | ||
3441 | c->rep_prefix = c->b; | ||
3442 | break; | ||
3443 | default: | ||
3444 | goto done_prefixes; | ||
3445 | } | ||
3446 | |||
3447 | /* Any legacy prefix after a REX prefix nullifies its effect. */ | ||
3448 | |||
3449 | c->rex_prefix = 0; | ||
3450 | } | ||
3451 | |||
3452 | done_prefixes: | ||
3453 | |||
3454 | /* REX prefix. */ | ||
3455 | if (c->rex_prefix & 8) | ||
3456 | c->op_bytes = 8; /* REX.W */ | ||
3457 | |||
3458 | /* Opcode byte(s). */ | ||
3459 | opcode = opcode_table[c->b]; | ||
3460 | /* Two-byte opcode? */ | ||
3461 | if (c->b == 0x0f) { | ||
3462 | c->twobyte = 1; | ||
3463 | c->b = insn_fetch(u8, 1, c->eip); | ||
3464 | opcode = twobyte_table[c->b]; | ||
3465 | } | ||
3466 | c->d = opcode.flags; | ||
3467 | |||
3468 | while (c->d & GroupMask) { | ||
3469 | switch (c->d & GroupMask) { | ||
3470 | case Group: | ||
3471 | c->modrm = insn_fetch(u8, 1, c->eip); | ||
3472 | --c->eip; | ||
3473 | goffset = (c->modrm >> 3) & 7; | ||
3474 | opcode = opcode.u.group[goffset]; | ||
3475 | break; | ||
3476 | case GroupDual: | ||
3477 | c->modrm = insn_fetch(u8, 1, c->eip); | ||
3478 | --c->eip; | ||
3479 | goffset = (c->modrm >> 3) & 7; | ||
3480 | if ((c->modrm >> 6) == 3) | ||
3481 | opcode = opcode.u.gdual->mod3[goffset]; | ||
3482 | else | ||
3483 | opcode = opcode.u.gdual->mod012[goffset]; | ||
3484 | break; | ||
3485 | case RMExt: | ||
3486 | goffset = c->modrm & 7; | ||
3487 | opcode = opcode.u.group[goffset]; | ||
3488 | break; | ||
3489 | case Prefix: | ||
3490 | if (c->rep_prefix && op_prefix) | ||
3491 | return X86EMUL_UNHANDLEABLE; | ||
3492 | simd_prefix = op_prefix ? 0x66 : c->rep_prefix; | ||
3493 | switch (simd_prefix) { | ||
3494 | case 0x00: opcode = opcode.u.gprefix->pfx_no; break; | ||
3495 | case 0x66: opcode = opcode.u.gprefix->pfx_66; break; | ||
3496 | case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; | ||
3497 | case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; | ||
3498 | } | ||
3499 | break; | ||
3500 | default: | ||
3501 | return X86EMUL_UNHANDLEABLE; | ||
3502 | } | ||
3503 | |||
3504 | c->d &= ~GroupMask; | ||
3505 | c->d |= opcode.flags; | ||
3506 | } | ||
3507 | |||
3508 | c->execute = opcode.u.execute; | ||
3509 | c->check_perm = opcode.check_perm; | ||
3510 | c->intercept = opcode.intercept; | ||
3511 | |||
3512 | /* Unrecognised? */ | ||
3513 | if (c->d == 0 || (c->d & Undefined)) | ||
3514 | return -1; | ||
3515 | |||
3516 | if (!(c->d & VendorSpecific) && ctxt->only_vendor_specific_insn) | ||
3517 | return -1; | ||
3518 | |||
3519 | if (mode == X86EMUL_MODE_PROT64 && (c->d & Stack)) | ||
3520 | c->op_bytes = 8; | ||
3521 | |||
3522 | if (c->d & Op3264) { | ||
3523 | if (mode == X86EMUL_MODE_PROT64) | ||
3524 | c->op_bytes = 8; | ||
3525 | else | ||
3526 | c->op_bytes = 4; | ||
3527 | } | ||
3528 | |||
3529 | if (c->d & Sse) | ||
3530 | c->op_bytes = 16; | ||
3531 | |||
3532 | /* ModRM and SIB bytes. */ | ||
3533 | if (c->d & ModRM) { | ||
3534 | rc = decode_modrm(ctxt, ops, &memop); | ||
3535 | if (!c->has_seg_override) | ||
3536 | set_seg_override(c, c->modrm_seg); | ||
3537 | } else if (c->d & MemAbs) | ||
3538 | rc = decode_abs(ctxt, ops, &memop); | ||
3539 | if (rc != X86EMUL_CONTINUE) | ||
3540 | goto done; | ||
3541 | |||
3542 | if (!c->has_seg_override) | ||
3543 | set_seg_override(c, VCPU_SREG_DS); | ||
3544 | |||
3545 | memop.addr.mem.seg = seg_override(ctxt, c); | ||
3546 | |||
3547 | if (memop.type == OP_MEM && c->ad_bytes != 8) | ||
3548 | memop.addr.mem.ea = (u32)memop.addr.mem.ea; | ||
3549 | |||
3550 | /* | ||
3551 | * Decode and fetch the source operand: register, memory | ||
3552 | * or immediate. | ||
3553 | */ | ||
3554 | switch (c->d & SrcMask) { | ||
3555 | case SrcNone: | ||
3556 | break; | ||
3557 | case SrcReg: | ||
3558 | decode_register_operand(ctxt, &c->src, c, 0); | ||
3559 | break; | ||
3560 | case SrcMem16: | ||
3561 | memop.bytes = 2; | ||
3562 | goto srcmem_common; | ||
3563 | case SrcMem32: | ||
3564 | memop.bytes = 4; | ||
3565 | goto srcmem_common; | ||
3566 | case SrcMem: | ||
3567 | memop.bytes = (c->d & ByteOp) ? 1 : | ||
3568 | c->op_bytes; | ||
3569 | srcmem_common: | ||
3570 | c->src = memop; | ||
3571 | memopp = &c->src; | ||
3572 | break; | ||
3573 | case SrcImmU16: | ||
3574 | rc = decode_imm(ctxt, &c->src, 2, false); | ||
3575 | break; | ||
3576 | case SrcImm: | ||
3577 | rc = decode_imm(ctxt, &c->src, imm_size(c), true); | ||
3578 | break; | ||
3579 | case SrcImmU: | ||
3580 | rc = decode_imm(ctxt, &c->src, imm_size(c), false); | ||
3581 | break; | ||
3582 | case SrcImmByte: | ||
3583 | rc = decode_imm(ctxt, &c->src, 1, true); | ||
3584 | break; | ||
3585 | case SrcImmUByte: | ||
3586 | rc = decode_imm(ctxt, &c->src, 1, false); | ||
3587 | break; | ||
3588 | case SrcAcc: | ||
3589 | c->src.type = OP_REG; | ||
3590 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
3591 | c->src.addr.reg = &c->regs[VCPU_REGS_RAX]; | ||
3592 | fetch_register_operand(&c->src); | ||
3593 | break; | ||
3594 | case SrcOne: | ||
3595 | c->src.bytes = 1; | ||
3596 | c->src.val = 1; | ||
3597 | break; | ||
3598 | case SrcSI: | ||
3599 | c->src.type = OP_MEM; | ||
3600 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
3601 | c->src.addr.mem.ea = | ||
3602 | register_address(c, c->regs[VCPU_REGS_RSI]); | ||
3603 | c->src.addr.mem.seg = seg_override(ctxt, c); | ||
3604 | c->src.val = 0; | ||
3605 | break; | ||
3606 | case SrcImmFAddr: | ||
3607 | c->src.type = OP_IMM; | ||
3608 | c->src.addr.mem.ea = c->eip; | ||
3609 | c->src.bytes = c->op_bytes + 2; | ||
3610 | insn_fetch_arr(c->src.valptr, c->src.bytes, c->eip); | ||
3611 | break; | ||
3612 | case SrcMemFAddr: | ||
3613 | memop.bytes = c->op_bytes + 2; | ||
3614 | goto srcmem_common; | ||
3616 | case SrcDX: | ||
3617 | c->src.type = OP_REG; | ||
3618 | c->src.bytes = 2; | ||
3619 | c->src.addr.reg = &c->regs[VCPU_REGS_RDX]; | ||
3620 | fetch_register_operand(&c->src); | ||
3621 | break; | ||
3622 | } | ||
3623 | |||
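The SrcImm* cases above differ only in operand size and in whether decode_imm() sign-extends the fetched bytes. A hedged stand-alone illustration of that extension step (extend_imm() is a hypothetical helper, not one defined in this file):

        /* Hypothetical helper: sign- or zero-extend a fetched immediate
         * the way the SrcImm/SrcImmU/SrcImmByte/SrcImmUByte cases ask. */
        static unsigned long extend_imm(unsigned long raw, unsigned int size,
                                        bool sign_extension)
        {
                if (size >= sizeof(unsigned long))
                        return raw;
                if (!sign_extension)
                        return raw & ((1UL << (size * 8)) - 1);
                switch (size) {
                case 1: return (s8)raw;        /* SrcImmByte: 0xff -> -1 */
                case 2: return (s16)raw;
                case 4: return (s32)raw;
                }
                return raw;
        }
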
3624 | if (rc != X86EMUL_CONTINUE) | ||
3625 | goto done; | ||
3626 | |||
3627 | /* | ||
3628 | * Decode and fetch the second source operand: register, memory | ||
3629 | * or immediate. | ||
3630 | */ | ||
3631 | switch (c->d & Src2Mask) { | ||
3632 | case Src2None: | ||
3633 | break; | ||
3634 | case Src2CL: | ||
3635 | c->src2.bytes = 1; | ||
3636 | c->src2.val = c->regs[VCPU_REGS_RCX] & 0xff; /* count is in CL */ | ||
3637 | break; | ||
3638 | case Src2ImmByte: | ||
3639 | rc = decode_imm(ctxt, &c->src2, 1, true); | ||
3640 | break; | ||
3641 | case Src2One: | ||
3642 | c->src2.bytes = 1; | ||
3643 | c->src2.val = 1; | ||
3644 | break; | ||
3645 | case Src2Imm: | ||
3646 | rc = decode_imm(ctxt, &c->src2, imm_size(c), true); | ||
3647 | break; | ||
3648 | } | ||
3649 | |||
3650 | if (rc != X86EMUL_CONTINUE) | ||
3651 | goto done; | ||
3652 | |||
3653 | /* Decode and fetch the destination operand: register or memory. */ | ||
3654 | switch (c->d & DstMask) { | ||
3655 | case DstReg: | ||
3656 | decode_register_operand(ctxt, &c->dst, c, | ||
3657 | c->twobyte && (c->b == 0xb6 || c->b == 0xb7)); | ||
3658 | break; | ||
3659 | case DstImmUByte: | ||
3660 | c->dst.type = OP_IMM; | ||
3661 | c->dst.addr.mem.ea = c->eip; | ||
3662 | c->dst.bytes = 1; | ||
3663 | c->dst.val = insn_fetch(u8, 1, c->eip); | ||
3664 | break; | ||
3665 | case DstMem: | ||
3666 | case DstMem64: | ||
3667 | c->dst = memop; | ||
3668 | memopp = &c->dst; | ||
3669 | if ((c->d & DstMask) == DstMem64) | ||
3670 | c->dst.bytes = 8; | ||
3671 | else | ||
3672 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
3673 | if (c->d & BitOp) | ||
3674 | fetch_bit_operand(c); | ||
3675 | c->dst.orig_val = c->dst.val; | ||
3676 | break; | ||
3677 | case DstAcc: | ||
3678 | c->dst.type = OP_REG; | ||
3679 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
3680 | c->dst.addr.reg = &c->regs[VCPU_REGS_RAX]; | ||
3681 | fetch_register_operand(&c->dst); | ||
3682 | c->dst.orig_val = c->dst.val; | ||
3683 | break; | ||
3684 | case DstDI: | ||
3685 | c->dst.type = OP_MEM; | ||
3686 | c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | ||
3687 | c->dst.addr.mem.ea = | ||
3688 | register_address(c, c->regs[VCPU_REGS_RDI]); | ||
3689 | c->dst.addr.mem.seg = VCPU_SREG_ES; | ||
3690 | c->dst.val = 0; | ||
3691 | break; | ||
3692 | case DstDX: | ||
3693 | c->dst.type = OP_REG; | ||
3694 | c->dst.bytes = 2; | ||
3695 | c->dst.addr.reg = &c->regs[VCPU_REGS_RDX]; | ||
3696 | fetch_register_operand(&c->dst); | ||
3697 | break; | ||
3698 | case ImplicitOps: | ||
3699 | /* Special instructions do their own operand decoding. */ | ||
3700 | default: | ||
3701 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
3702 | break; | ||
3703 | } | ||
3704 | |||
3705 | done: | ||
3706 | if (memopp && memopp->type == OP_MEM && c->rip_relative) | ||
3707 | memopp->addr.mem.ea += c->eip; | ||
3708 | |||
3709 | return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; | ||
3710 | } | ||
3711 | |||
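The memopp fixup on the done: path above exists because a RIP-relative displacement is relative to the end of the instruction, which is only known once every byte has been fetched. A worked example with assumed values:

        /* Assumed example: "mov rax, [rip+0x10]" decoded at 0x400000 with
         * a 7-byte encoding.  Decode records only the raw displacement;
         * the fixup adds c->eip, which now points past the instruction. */
        unsigned long disp     = 0x10;             /* displacement field   */
        unsigned long next_rip = 0x400007;         /* c->eip after decode  */
        unsigned long ea       = disp + next_rip;  /* 0x400017, the target */
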
3712 | static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) | ||
3713 | { | ||
3714 | struct decode_cache *c = &ctxt->decode; | ||
3715 | |||
3716 | /* The second termination condition applies only to REPE | ||
3717 | * and REPNE. If the repeat string operation prefix is | ||
3718 | * REPE/REPZ or REPNE/REPNZ, check the corresponding | ||
3719 | * termination condition: | ||
3720 | * - if REPE/REPZ and ZF = 0 then done | ||
3721 | * - if REPNE/REPNZ and ZF = 1 then done | ||
3722 | */ | ||
3723 | if (((c->b == 0xa6) || (c->b == 0xa7) || | ||
3724 | (c->b == 0xae) || (c->b == 0xaf)) | ||
3725 | && (((c->rep_prefix == REPE_PREFIX) && | ||
3726 | ((ctxt->eflags & EFLG_ZF) == 0)) | ||
3727 | || ((c->rep_prefix == REPNE_PREFIX) && | ||
3728 | ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)))) | ||
3729 | return true; | ||
3730 | |||
3731 | return false; | ||
3732 | } | ||
3733 | |||
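A hedged usage sketch of string_insn_completed(): the emulator actually runs one iteration per pass and lets the caller restart the instruction, but conceptually the REP loop looks like this (rep_step() is a made-up stand-in for one movs/cmps/scas step):

        while (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0) {
                rep_step(ctxt);            /* hypothetical: one string-op step */
                register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1);
                if (string_insn_completed(ctxt))  /* REPE/REPNE ZF termination */
                        break;
        }
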
3734 | int | ||
3735 | x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | ||
3736 | { | ||
3737 | struct x86_emulate_ops *ops = ctxt->ops; | ||
2561 | u64 msr_data; | 3738 | u64 msr_data; |
2562 | struct decode_cache *c = &ctxt->decode; | 3739 | struct decode_cache *c = &ctxt->decode; |
2563 | int rc = X86EMUL_CONTINUE; | 3740 | int rc = X86EMUL_CONTINUE; |
2564 | int saved_dst_type = c->dst.type; | 3741 | int saved_dst_type = c->dst.type; |
3742 | int irq; /* Used for int 3, int, and into */ | ||
2565 | 3743 | ||
2566 | ctxt->decode.mem_read.pos = 0; | 3744 | ctxt->decode.mem_read.pos = 0; |
2567 | 3745 | ||
2568 | if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) { | 3746 | if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) { |
2569 | emulate_ud(ctxt); | 3747 | rc = emulate_ud(ctxt); |
2570 | goto done; | 3748 | goto done; |
2571 | } | 3749 | } |
2572 | 3750 | ||
2573 | /* LOCK prefix is allowed only with some instructions */ | 3751 | /* LOCK prefix is allowed only with some instructions */ |
2574 | if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) { | 3752 | if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) { |
2575 | emulate_ud(ctxt); | 3753 | rc = emulate_ud(ctxt); |
3754 | goto done; | ||
3755 | } | ||
3756 | |||
3757 | if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) { | ||
3758 | rc = emulate_ud(ctxt); | ||
3759 | goto done; | ||
3760 | } | ||
3761 | |||
3762 | if ((c->d & Sse) | ||
3763 | && ((ops->get_cr(ctxt, 0) & X86_CR0_EM) | ||
3764 | || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { | ||
3765 | rc = emulate_ud(ctxt); | ||
3766 | goto done; | ||
3767 | } | ||
3768 | |||
3769 | if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { | ||
3770 | rc = emulate_nm(ctxt); | ||
2576 | goto done; | 3771 | goto done; |
2577 | } | 3772 | } |
2578 | 3773 | ||
3774 | if (unlikely(ctxt->guest_mode) && c->intercept) { | ||
3775 | rc = emulator_check_intercept(ctxt, c->intercept, | ||
3776 | X86_ICPT_PRE_EXCEPT); | ||
3777 | if (rc != X86EMUL_CONTINUE) | ||
3778 | goto done; | ||
3779 | } | ||
3780 | |||
2579 | /* Privileged instruction can be executed only in CPL=0 */ | 3781 | /* Privileged instruction can be executed only in CPL=0 */ |
2580 | if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) { | 3782 | if ((c->d & Priv) && ops->cpl(ctxt)) { |
2581 | emulate_gp(ctxt, 0); | 3783 | rc = emulate_gp(ctxt, 0); |
2582 | goto done; | 3784 | goto done; |
2583 | } | 3785 | } |
2584 | 3786 | ||
3787 | /* Instruction can only be executed in protected mode */ | ||
3788 | if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) { | ||
3789 | rc = emulate_ud(ctxt); | ||
3790 | goto done; | ||
3791 | } | ||
3792 | |||
3793 | /* Do instruction specific permission checks */ | ||
3794 | if (c->check_perm) { | ||
3795 | rc = c->check_perm(ctxt); | ||
3796 | if (rc != X86EMUL_CONTINUE) | ||
3797 | goto done; | ||
3798 | } | ||
3799 | |||
3800 | if (unlikely(ctxt->guest_mode) && c->intercept) { | ||
3801 | rc = emulator_check_intercept(ctxt, c->intercept, | ||
3802 | X86_ICPT_POST_EXCEPT); | ||
3803 | if (rc != X86EMUL_CONTINUE) | ||
3804 | goto done; | ||
3805 | } | ||
3806 | |||
2585 | if (c->rep_prefix && (c->d & String)) { | 3807 | if (c->rep_prefix && (c->d & String)) { |
2586 | ctxt->restart = true; | ||
2587 | /* All REP prefixes have the same first termination condition */ | 3808 | /* All REP prefixes have the same first termination condition */ |
2588 | if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) { | 3809 | if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) { |
2589 | string_done: | ||
2590 | ctxt->restart = false; | ||
2591 | ctxt->eip = c->eip; | 3810 | ctxt->eip = c->eip; |
2592 | goto done; | 3811 | goto done; |
2593 | } | 3812 | } |
2594 | /* The second termination condition only applies for REPE | ||
2595 | * and REPNE. Test if the repeat string operation prefix is | ||
2596 | * REPE/REPZ or REPNE/REPNZ and if it's the case it tests the | ||
2597 | * corresponding termination condition according to: | ||
2598 | * - if REPE/REPZ and ZF = 0 then done | ||
2599 | * - if REPNE/REPNZ and ZF = 1 then done | ||
2600 | */ | ||
2601 | if ((c->b == 0xa6) || (c->b == 0xa7) || | ||
2602 | (c->b == 0xae) || (c->b == 0xaf)) { | ||
2603 | if ((c->rep_prefix == REPE_PREFIX) && | ||
2604 | ((ctxt->eflags & EFLG_ZF) == 0)) | ||
2605 | goto string_done; | ||
2606 | if ((c->rep_prefix == REPNE_PREFIX) && | ||
2607 | ((ctxt->eflags & EFLG_ZF) == EFLG_ZF)) | ||
2608 | goto string_done; | ||
2609 | } | ||
2610 | c->eip = ctxt->eip; | ||
2611 | } | 3813 | } |
2612 | 3814 | ||
2613 | if (c->src.type == OP_MEM) { | 3815 | if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) { |
2614 | rc = read_emulated(ctxt, ops, (unsigned long)c->src.ptr, | 3816 | rc = segmented_read(ctxt, c->src.addr.mem, |
2615 | c->src.valptr, c->src.bytes); | 3817 | c->src.valptr, c->src.bytes); |
2616 | if (rc != X86EMUL_CONTINUE) | 3818 | if (rc != X86EMUL_CONTINUE) |
2617 | goto done; | 3819 | goto done; |
2618 | c->src.orig_val64 = c->src.val64; | 3820 | c->src.orig_val64 = c->src.val64; |
2619 | } | 3821 | } |
2620 | 3822 | ||
2621 | if (c->src2.type == OP_MEM) { | 3823 | if (c->src2.type == OP_MEM) { |
2622 | rc = read_emulated(ctxt, ops, (unsigned long)c->src2.ptr, | 3824 | rc = segmented_read(ctxt, c->src2.addr.mem, |
2623 | &c->src2.val, c->src2.bytes); | 3825 | &c->src2.val, c->src2.bytes); |
2624 | if (rc != X86EMUL_CONTINUE) | 3826 | if (rc != X86EMUL_CONTINUE) |
2625 | goto done; | 3827 | goto done; |
2626 | } | 3828 | } |
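The segmented_read() calls above replace the old flat-pointer read_emulated() path: each operand now carries a (segment, effective address) pair, and the read path applies segmentation itself. A small sketch of how such an address is populated, using the same fields this diff assigns elsewhere (addr.mem.ea, addr.mem.seg); the snippet is illustrative, not a verbatim excerpt:

        /* Sketch: build the segmented address a SrcSI operand carries. */
        struct segmented_address addr;
        addr.ea  = register_address(c, c->regs[VCPU_REGS_RSI]); /* offset   */
        addr.seg = seg_override(ctxt, c);       /* DS unless overridden     */
        int rc = segmented_read(ctxt, addr, c->src.valptr, c->src.bytes);
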
@@ -2631,7 +3833,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2631 | 3833 | ||
2632 | if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { | 3834 | if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { |
2633 | /* optimisation - avoid slow emulated read if Mov */ | 3835 | /* optimisation - avoid slow emulated read if Mov */ |
2634 | rc = read_emulated(ctxt, ops, (unsigned long)c->dst.ptr, | 3836 | rc = segmented_read(ctxt, c->dst.addr.mem, |
2635 | &c->dst.val, c->dst.bytes); | 3837 | &c->dst.val, c->dst.bytes); |
2636 | if (rc != X86EMUL_CONTINUE) | 3838 | if (rc != X86EMUL_CONTINUE) |
2637 | goto done; | 3839 | goto done; |
@@ -2640,68 +3842,44 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
2640 | 3842 | ||
2641 | special_insn: | 3843 | special_insn: |
2642 | 3844 | ||
3845 | if (unlikely(ctxt->guest_mode) && c->intercept) { | ||
3846 | rc = emulator_check_intercept(ctxt, c->intercept, | ||
3847 | X86_ICPT_POST_MEMACCESS); | ||
3848 | if (rc != X86EMUL_CONTINUE) | ||
3849 | goto done; | ||
3850 | } | ||
3851 | |||
3852 | if (c->execute) { | ||
3853 | rc = c->execute(ctxt); | ||
3854 | if (rc != X86EMUL_CONTINUE) | ||
3855 | goto done; | ||
3856 | goto writeback; | ||
3857 | } | ||
3858 | |||
2643 | if (c->twobyte) | 3859 | if (c->twobyte) |
2644 | goto twobyte_insn; | 3860 | goto twobyte_insn; |
2645 | 3861 | ||
2646 | switch (c->b) { | 3862 | switch (c->b) { |
2647 | case 0x00 ... 0x05: | ||
2648 | add: /* add */ | ||
2649 | emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); | ||
2650 | break; | ||
2651 | case 0x06: /* push es */ | 3863 | case 0x06: /* push es */ |
2652 | emulate_push_sreg(ctxt, ops, VCPU_SREG_ES); | 3864 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES); |
2653 | break; | 3865 | break; |
2654 | case 0x07: /* pop es */ | 3866 | case 0x07: /* pop es */ |
2655 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); | 3867 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); |
2656 | if (rc != X86EMUL_CONTINUE) | ||
2657 | goto done; | ||
2658 | break; | ||
2659 | case 0x08 ... 0x0d: | ||
2660 | or: /* or */ | ||
2661 | emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); | ||
2662 | break; | 3868 | break; |
2663 | case 0x0e: /* push cs */ | 3869 | case 0x0e: /* push cs */ |
2664 | emulate_push_sreg(ctxt, ops, VCPU_SREG_CS); | 3870 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS); |
2665 | break; | ||
2666 | case 0x10 ... 0x15: | ||
2667 | adc: /* adc */ | ||
2668 | emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags); | ||
2669 | break; | 3871 | break; |
2670 | case 0x16: /* push ss */ | 3872 | case 0x16: /* push ss */ |
2671 | emulate_push_sreg(ctxt, ops, VCPU_SREG_SS); | 3873 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS); |
2672 | break; | 3874 | break; |
2673 | case 0x17: /* pop ss */ | 3875 | case 0x17: /* pop ss */ |
2674 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); | 3876 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); |
2675 | if (rc != X86EMUL_CONTINUE) | ||
2676 | goto done; | ||
2677 | break; | ||
2678 | case 0x18 ... 0x1d: | ||
2679 | sbb: /* sbb */ | ||
2680 | emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); | ||
2681 | break; | 3877 | break; |
2682 | case 0x1e: /* push ds */ | 3878 | case 0x1e: /* push ds */ |
2683 | emulate_push_sreg(ctxt, ops, VCPU_SREG_DS); | 3879 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS); |
2684 | break; | 3880 | break; |
2685 | case 0x1f: /* pop ds */ | 3881 | case 0x1f: /* pop ds */ |
2686 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); | 3882 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); |
2687 | if (rc != X86EMUL_CONTINUE) | ||
2688 | goto done; | ||
2689 | break; | ||
2690 | case 0x20 ... 0x25: | ||
2691 | and: /* and */ | ||
2692 | emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags); | ||
2693 | break; | ||
2694 | case 0x28 ... 0x2d: | ||
2695 | sub: /* sub */ | ||
2696 | emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags); | ||
2697 | break; | ||
2698 | case 0x30 ... 0x35: | ||
2699 | xor: /* xor */ | ||
2700 | emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags); | ||
2701 | break; | ||
2702 | case 0x38 ... 0x3d: | ||
2703 | cmp: /* cmp */ | ||
2704 | emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags); | ||
2705 | break; | 3883 | break; |
2706 | case 0x40 ... 0x47: /* inc r16/r32 */ | 3884 | case 0x40 ... 0x47: /* inc r16/r32 */ |
2707 | emulate_1op("inc", c->dst, ctxt->eflags); | 3885 | emulate_1op("inc", c->dst, ctxt->eflags); |
@@ -2709,83 +3887,24 @@ special_insn: | |||
2709 | case 0x48 ... 0x4f: /* dec r16/r32 */ | 3887 | case 0x48 ... 0x4f: /* dec r16/r32 */ |
2710 | emulate_1op("dec", c->dst, ctxt->eflags); | 3888 | emulate_1op("dec", c->dst, ctxt->eflags); |
2711 | break; | 3889 | break; |
2712 | case 0x50 ... 0x57: /* push reg */ | ||
2713 | emulate_push(ctxt, ops); | ||
2714 | break; | ||
2715 | case 0x58 ... 0x5f: /* pop reg */ | ||
2716 | pop_instruction: | ||
2717 | rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes); | ||
2718 | if (rc != X86EMUL_CONTINUE) | ||
2719 | goto done; | ||
2720 | break; | ||
2721 | case 0x60: /* pusha */ | ||
2722 | rc = emulate_pusha(ctxt, ops); | ||
2723 | if (rc != X86EMUL_CONTINUE) | ||
2724 | goto done; | ||
2725 | break; | ||
2726 | case 0x61: /* popa */ | ||
2727 | rc = emulate_popa(ctxt, ops); | ||
2728 | if (rc != X86EMUL_CONTINUE) | ||
2729 | goto done; | ||
2730 | break; | ||
2731 | case 0x63: /* movsxd */ | 3890 | case 0x63: /* movsxd */ |
2732 | if (ctxt->mode != X86EMUL_MODE_PROT64) | 3891 | if (ctxt->mode != X86EMUL_MODE_PROT64) |
2733 | goto cannot_emulate; | 3892 | goto cannot_emulate; |
2734 | c->dst.val = (s32) c->src.val; | 3893 | c->dst.val = (s32) c->src.val; |
2735 | break; | 3894 | break; |
2736 | case 0x68: /* push imm */ | ||
2737 | case 0x6a: /* push imm8 */ | ||
2738 | emulate_push(ctxt, ops); | ||
2739 | break; | ||
2740 | case 0x6c: /* insb */ | 3895 | case 0x6c: /* insb */ |
2741 | case 0x6d: /* insw/insd */ | 3896 | case 0x6d: /* insw/insd */ |
2742 | c->dst.bytes = min(c->dst.bytes, 4u); | 3897 | c->src.val = c->regs[VCPU_REGS_RDX]; |
2743 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], | 3898 | goto do_io_in; |
2744 | c->dst.bytes)) { | ||
2745 | emulate_gp(ctxt, 0); | ||
2746 | goto done; | ||
2747 | } | ||
2748 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, | ||
2749 | c->regs[VCPU_REGS_RDX], &c->dst.val)) | ||
2750 | goto done; /* IO is needed, skip writeback */ | ||
2751 | break; | ||
2752 | case 0x6e: /* outsb */ | 3899 | case 0x6e: /* outsb */ |
2753 | case 0x6f: /* outsw/outsd */ | 3900 | case 0x6f: /* outsw/outsd */ |
2754 | c->src.bytes = min(c->src.bytes, 4u); | 3901 | c->dst.val = c->regs[VCPU_REGS_RDX]; |
2755 | if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX], | 3902 | goto do_io_out; |
2756 | c->src.bytes)) { | ||
2757 | emulate_gp(ctxt, 0); | ||
2758 | goto done; | ||
2759 | } | ||
2760 | ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX], | ||
2761 | &c->src.val, 1, ctxt->vcpu); | ||
2762 | |||
2763 | c->dst.type = OP_NONE; /* nothing to writeback */ | ||
2764 | break; | 3903 | break; |
2765 | case 0x70 ... 0x7f: /* jcc (short) */ | 3904 | case 0x70 ... 0x7f: /* jcc (short) */ |
2766 | if (test_cc(c->b, ctxt->eflags)) | 3905 | if (test_cc(c->b, ctxt->eflags)) |
2767 | jmp_rel(c, c->src.val); | 3906 | jmp_rel(c, c->src.val); |
2768 | break; | 3907 | break; |
2769 | case 0x80 ... 0x83: /* Grp1 */ | ||
2770 | switch (c->modrm_reg) { | ||
2771 | case 0: | ||
2772 | goto add; | ||
2773 | case 1: | ||
2774 | goto or; | ||
2775 | case 2: | ||
2776 | goto adc; | ||
2777 | case 3: | ||
2778 | goto sbb; | ||
2779 | case 4: | ||
2780 | goto and; | ||
2781 | case 5: | ||
2782 | goto sub; | ||
2783 | case 6: | ||
2784 | goto xor; | ||
2785 | case 7: | ||
2786 | goto cmp; | ||
2787 | } | ||
2788 | break; | ||
2789 | case 0x84 ... 0x85: | 3908 | case 0x84 ... 0x85: |
2790 | test: | 3909 | test: |
2791 | emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); | 3910 | emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); |
@@ -2793,38 +3912,24 @@ special_insn: | |||
2793 | case 0x86 ... 0x87: /* xchg */ | 3912 | case 0x86 ... 0x87: /* xchg */ |
2794 | xchg: | 3913 | xchg: |
2795 | /* Write back the register source. */ | 3914 | /* Write back the register source. */ |
2796 | switch (c->dst.bytes) { | 3915 | c->src.val = c->dst.val; |
2797 | case 1: | 3916 | write_register_operand(&c->src); |
2798 | *(u8 *) c->src.ptr = (u8) c->dst.val; | ||
2799 | break; | ||
2800 | case 2: | ||
2801 | *(u16 *) c->src.ptr = (u16) c->dst.val; | ||
2802 | break; | ||
2803 | case 4: | ||
2804 | *c->src.ptr = (u32) c->dst.val; | ||
2805 | break; /* 64b reg: zero-extend */ | ||
2806 | case 8: | ||
2807 | *c->src.ptr = c->dst.val; | ||
2808 | break; | ||
2809 | } | ||
2810 | /* | 3917 | /* |
2811 | * Write back the memory destination with implicit LOCK | 3918 | * Write back the memory destination with implicit LOCK |
2812 | * prefix. | 3919 | * prefix. |
2813 | */ | 3920 | */ |
2814 | c->dst.val = c->src.val; | 3921 | c->dst.val = c->src.orig_val; |
2815 | c->lock_prefix = 1; | 3922 | c->lock_prefix = 1; |
2816 | break; | 3923 | break; |
2817 | case 0x88 ... 0x8b: /* mov */ | ||
2818 | goto mov; | ||
2819 | case 0x8c: /* mov r/m, sreg */ | 3924 | case 0x8c: /* mov r/m, sreg */ |
2820 | if (c->modrm_reg > VCPU_SREG_GS) { | 3925 | if (c->modrm_reg > VCPU_SREG_GS) { |
2821 | emulate_ud(ctxt); | 3926 | rc = emulate_ud(ctxt); |
2822 | goto done; | 3927 | goto done; |
2823 | } | 3928 | } |
2824 | c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu); | 3929 | c->dst.val = get_segment_selector(ctxt, c->modrm_reg); |
2825 | break; | 3930 | break; |
2826 | case 0x8d: /* lea r16/r32, m */ | 3931 | case 0x8d: /* lea r16/r32, m */ |
2827 | c->dst.val = c->modrm_ea; | 3932 | c->dst.val = c->src.addr.mem.ea; |
2828 | break; | 3933 | break; |
2829 | case 0x8e: { /* mov seg, r/m16 */ | 3934 | case 0x8e: { /* mov seg, r/m16 */ |
2830 | uint16_t sel; | 3935 | uint16_t sel; |
@@ -2833,7 +3938,7 @@ special_insn: | |||
2833 | 3938 | ||
2834 | if (c->modrm_reg == VCPU_SREG_CS || | 3939 | if (c->modrm_reg == VCPU_SREG_CS || |
2835 | c->modrm_reg > VCPU_SREG_GS) { | 3940 | c->modrm_reg > VCPU_SREG_GS) { |
2836 | emulate_ud(ctxt); | 3941 | rc = emulate_ud(ctxt); |
2837 | goto done; | 3942 | goto done; |
2838 | } | 3943 | } |
2839 | 3944 | ||
@@ -2846,76 +3951,72 @@ special_insn: | |||
2846 | break; | 3951 | break; |
2847 | } | 3952 | } |
2848 | case 0x8f: /* pop (sole member of Grp1a) */ | 3953 | case 0x8f: /* pop (sole member of Grp1a) */ |
2849 | rc = emulate_grp1a(ctxt, ops); | 3954 | rc = em_grp1a(ctxt); |
2850 | if (rc != X86EMUL_CONTINUE) | ||
2851 | goto done; | ||
2852 | break; | 3955 | break; |
2853 | case 0x90: /* nop / xchg r8,rax */ | 3956 | case 0x90 ... 0x97: /* nop / xchg reg, rax */ |
2854 | if (c->dst.ptr == (unsigned long *)&c->regs[VCPU_REGS_RAX]) { | 3957 | if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX]) |
2855 | c->dst.type = OP_NONE; /* nop */ | ||
2856 | break; | 3958 | break; |
2857 | } | ||
2858 | case 0x91 ... 0x97: /* xchg reg,rax */ | ||
2859 | c->src.type = OP_REG; | ||
2860 | c->src.bytes = c->op_bytes; | ||
2861 | c->src.ptr = (unsigned long *) &c->regs[VCPU_REGS_RAX]; | ||
2862 | c->src.val = *(c->src.ptr); | ||
2863 | goto xchg; | 3959 | goto xchg; |
2864 | case 0x9c: /* pushf */ | 3960 | case 0x98: /* cbw/cwde/cdqe */ |
2865 | c->src.val = (unsigned long) ctxt->eflags; | 3961 | switch (c->op_bytes) { |
2866 | emulate_push(ctxt, ops); | 3962 | case 2: c->dst.val = (s8)c->dst.val; break; |
2867 | break; | 3963 | case 4: c->dst.val = (s16)c->dst.val; break; |
2868 | case 0x9d: /* popf */ | 3964 | case 8: c->dst.val = (s32)c->dst.val; break; |
2869 | c->dst.type = OP_REG; | 3965 | } |
2870 | c->dst.ptr = (unsigned long *) &ctxt->eflags; | ||
2871 | c->dst.bytes = c->op_bytes; | ||
2872 | rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes); | ||
2873 | if (rc != X86EMUL_CONTINUE) | ||
2874 | goto done; | ||
2875 | break; | 3966 | break; |
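The 0x98 handler above is the whole of CBW/CWDE/CDQE: sign-extend the lower half of the accumulator into the current operand size. A concrete worked value for the 16-bit case, with assumed register contents:

        /* cbw with op_bytes == 2: AL = 0x80 (-128) becomes AX = 0xff80. */
        u64 rax = 0x80;
        u16 ax  = (s8)rax;      /* (s8)0x80 == -128, stored as 0xff80 */
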
2876 | case 0xa0 ... 0xa3: /* mov */ | ||
2877 | case 0xa4 ... 0xa5: /* movs */ | ||
2878 | goto mov; | ||
2879 | case 0xa6 ... 0xa7: /* cmps */ | ||
2880 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
2881 | DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); | ||
2882 | goto cmp; | ||
2883 | case 0xa8 ... 0xa9: /* test ax, imm */ | 3967 | case 0xa8 ... 0xa9: /* test ax, imm */ |
2884 | goto test; | 3968 | goto test; |
2885 | case 0xaa ... 0xab: /* stos */ | ||
2886 | c->dst.val = c->regs[VCPU_REGS_RAX]; | ||
2887 | break; | ||
2888 | case 0xac ... 0xad: /* lods */ | ||
2889 | goto mov; | ||
2890 | case 0xae ... 0xaf: /* scas */ | ||
2891 | DPRINTF("Urk! I don't handle SCAS.\n"); | ||
2892 | goto cannot_emulate; | ||
2893 | case 0xb0 ... 0xbf: /* mov r, imm */ | ||
2894 | goto mov; | ||
2895 | case 0xc0 ... 0xc1: | 3969 | case 0xc0 ... 0xc1: |
2896 | emulate_grp2(ctxt); | 3970 | rc = em_grp2(ctxt); |
2897 | break; | 3971 | break; |
2898 | case 0xc3: /* ret */ | 3972 | case 0xc3: /* ret */ |
2899 | c->dst.type = OP_REG; | 3973 | c->dst.type = OP_REG; |
2900 | c->dst.ptr = &c->eip; | 3974 | c->dst.addr.reg = &c->eip; |
2901 | c->dst.bytes = c->op_bytes; | 3975 | c->dst.bytes = c->op_bytes; |
2902 | goto pop_instruction; | 3976 | rc = em_pop(ctxt); |
2903 | case 0xc6 ... 0xc7: /* mov (sole member of Grp11) */ | 3977 | break; |
2904 | mov: | 3978 | case 0xc4: /* les */ |
2905 | c->dst.val = c->src.val; | 3979 | rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES); |
3980 | break; | ||
3981 | case 0xc5: /* lds */ | ||
3982 | rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS); | ||
2906 | break; | 3983 | break; |
2907 | case 0xcb: /* ret far */ | 3984 | case 0xcb: /* ret far */ |
2908 | rc = emulate_ret_far(ctxt, ops); | 3985 | rc = emulate_ret_far(ctxt, ops); |
2909 | if (rc != X86EMUL_CONTINUE) | 3986 | break; |
2910 | goto done; | 3987 | case 0xcc: /* int3 */ |
3988 | irq = 3; | ||
3989 | goto do_interrupt; | ||
3990 | case 0xcd: /* int n */ | ||
3991 | irq = c->src.val; | ||
3992 | do_interrupt: | ||
3993 | rc = emulate_int(ctxt, ops, irq); | ||
3994 | break; | ||
3995 | case 0xce: /* into */ | ||
3996 | if (ctxt->eflags & EFLG_OF) { | ||
3997 | irq = 4; | ||
3998 | goto do_interrupt; | ||
3999 | } | ||
4000 | break; | ||
4001 | case 0xcf: /* iret */ | ||
4002 | rc = emulate_iret(ctxt, ops); | ||
2911 | break; | 4003 | break; |
2912 | case 0xd0 ... 0xd1: /* Grp2 */ | 4004 | case 0xd0 ... 0xd1: /* Grp2 */ |
2913 | c->src.val = 1; | 4005 | rc = em_grp2(ctxt); |
2914 | emulate_grp2(ctxt); | ||
2915 | break; | 4006 | break; |
2916 | case 0xd2 ... 0xd3: /* Grp2 */ | 4007 | case 0xd2 ... 0xd3: /* Grp2 */ |
2917 | c->src.val = c->regs[VCPU_REGS_RCX]; | 4008 | c->src.val = c->regs[VCPU_REGS_RCX]; |
2918 | emulate_grp2(ctxt); | 4009 | rc = em_grp2(ctxt); |
4010 | break; | ||
4011 | case 0xe0 ... 0xe2: /* loop/loopz/loopnz */ | ||
4012 | register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1); | ||
4013 | if (address_mask(c, c->regs[VCPU_REGS_RCX]) != 0 && | ||
4014 | (c->b == 0xe2 || test_cc(c->b ^ 0x5, ctxt->eflags))) | ||
4015 | jmp_rel(c, c->src.val); | ||
4016 | break; | ||
4017 | case 0xe3: /* jcxz/jecxz/jrcxz */ | ||
4018 | if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) | ||
4019 | jmp_rel(c, c->src.val); | ||
2919 | break; | 4020 | break; |
2920 | case 0xe4: /* inb */ | 4021 | case 0xe4: /* inb */ |
2921 | case 0xe5: /* in */ | 4022 | case 0xe5: /* in */ |
@@ -2927,23 +4028,14 @@ special_insn: | |||
2927 | long int rel = c->src.val; | 4028 | long int rel = c->src.val; |
2928 | c->src.val = (unsigned long) c->eip; | 4029 | c->src.val = (unsigned long) c->eip; |
2929 | jmp_rel(c, rel); | 4030 | jmp_rel(c, rel); |
2930 | emulate_push(ctxt, ops); | 4031 | rc = em_push(ctxt); |
2931 | break; | 4032 | break; |
2932 | } | 4033 | } |
2933 | case 0xe9: /* jmp rel */ | 4034 | case 0xe9: /* jmp rel */ |
2934 | goto jmp; | 4035 | goto jmp; |
2935 | case 0xea: { /* jmp far */ | 4036 | case 0xea: /* jmp far */ |
2936 | unsigned short sel; | 4037 | rc = em_jmp_far(ctxt); |
2937 | jump_far: | ||
2938 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); | ||
2939 | |||
2940 | if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS)) | ||
2941 | goto done; | ||
2942 | |||
2943 | c->eip = 0; | ||
2944 | memcpy(&c->eip, c->src.valptr, c->op_bytes); | ||
2945 | break; | 4038 | break; |
2946 | } | ||
2947 | case 0xeb: | 4039 | case 0xeb: |
2948 | jmp: /* jmp rel short */ | 4040 | jmp: /* jmp rel short */ |
2949 | jmp_rel(c, c->src.val); | 4041 | jmp_rel(c, c->src.val); |
@@ -2951,87 +4043,71 @@ special_insn: | |||
2951 | break; | 4043 | break; |
2952 | case 0xec: /* in al,dx */ | 4044 | case 0xec: /* in al,dx */ |
2953 | case 0xed: /* in (e/r)ax,dx */ | 4045 | case 0xed: /* in (e/r)ax,dx */ |
2954 | c->src.val = c->regs[VCPU_REGS_RDX]; | ||
2955 | do_io_in: | 4046 | do_io_in: |
2956 | c->dst.bytes = min(c->dst.bytes, 4u); | ||
2957 | if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { | ||
2958 | emulate_gp(ctxt, 0); | ||
2959 | goto done; | ||
2960 | } | ||
2961 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, | 4047 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, |
2962 | &c->dst.val)) | 4048 | &c->dst.val)) |
2963 | goto done; /* IO is needed */ | 4049 | goto done; /* IO is needed */ |
2964 | break; | 4050 | break; |
2965 | case 0xee: /* out dx,al */ | 4051 | case 0xee: /* out dx,al */ |
2966 | case 0xef: /* out dx,(e/r)ax */ | 4052 | case 0xef: /* out dx,(e/r)ax */ |
2967 | c->src.val = c->regs[VCPU_REGS_RDX]; | ||
2968 | do_io_out: | 4053 | do_io_out: |
2969 | c->dst.bytes = min(c->dst.bytes, 4u); | 4054 | ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val, |
2970 | if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { | 4055 | &c->src.val, 1); |
2971 | emulate_gp(ctxt, 0); | ||
2972 | goto done; | ||
2973 | } | ||
2974 | ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1, | ||
2975 | ctxt->vcpu); | ||
2976 | c->dst.type = OP_NONE; /* Disable writeback. */ | 4056 | c->dst.type = OP_NONE; /* Disable writeback. */ |
2977 | break; | 4057 | break; |
2978 | case 0xf4: /* hlt */ | 4058 | case 0xf4: /* hlt */ |
2979 | ctxt->vcpu->arch.halt_request = 1; | 4059 | ctxt->ops->halt(ctxt); |
2980 | break; | 4060 | break; |
2981 | case 0xf5: /* cmc */ | 4061 | case 0xf5: /* cmc */ |
2982 | /* complement carry flag from eflags reg */ | 4062 | /* complement carry flag from eflags reg */ |
2983 | ctxt->eflags ^= EFLG_CF; | 4063 | ctxt->eflags ^= EFLG_CF; |
2984 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
2985 | break; | 4064 | break; |
2986 | case 0xf6 ... 0xf7: /* Grp3 */ | 4065 | case 0xf6 ... 0xf7: /* Grp3 */ |
2987 | if (!emulate_grp3(ctxt, ops)) | 4066 | rc = em_grp3(ctxt); |
2988 | goto cannot_emulate; | ||
2989 | break; | 4067 | break; |
2990 | case 0xf8: /* clc */ | 4068 | case 0xf8: /* clc */ |
2991 | ctxt->eflags &= ~EFLG_CF; | 4069 | ctxt->eflags &= ~EFLG_CF; |
2992 | c->dst.type = OP_NONE; /* Disable writeback. */ | 4070 | break; |
4071 | case 0xf9: /* stc */ | ||
4072 | ctxt->eflags |= EFLG_CF; | ||
2993 | break; | 4073 | break; |
2994 | case 0xfa: /* cli */ | 4074 | case 0xfa: /* cli */ |
2995 | if (emulator_bad_iopl(ctxt, ops)) { | 4075 | if (emulator_bad_iopl(ctxt, ops)) { |
2996 | emulate_gp(ctxt, 0); | 4076 | rc = emulate_gp(ctxt, 0); |
2997 | goto done; | 4077 | goto done; |
2998 | } else { | 4078 | } else |
2999 | ctxt->eflags &= ~X86_EFLAGS_IF; | 4079 | ctxt->eflags &= ~X86_EFLAGS_IF; |
3000 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
3001 | } | ||
3002 | break; | 4080 | break; |
3003 | case 0xfb: /* sti */ | 4081 | case 0xfb: /* sti */ |
3004 | if (emulator_bad_iopl(ctxt, ops)) { | 4082 | if (emulator_bad_iopl(ctxt, ops)) { |
3005 | emulate_gp(ctxt, 0); | 4083 | rc = emulate_gp(ctxt, 0); |
3006 | goto done; | 4084 | goto done; |
3007 | } else { | 4085 | } else { |
3008 | ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; | 4086 | ctxt->interruptibility = KVM_X86_SHADOW_INT_STI; |
3009 | ctxt->eflags |= X86_EFLAGS_IF; | 4087 | ctxt->eflags |= X86_EFLAGS_IF; |
3010 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
3011 | } | 4088 | } |
3012 | break; | 4089 | break; |
3013 | case 0xfc: /* cld */ | 4090 | case 0xfc: /* cld */ |
3014 | ctxt->eflags &= ~EFLG_DF; | 4091 | ctxt->eflags &= ~EFLG_DF; |
3015 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
3016 | break; | 4092 | break; |
3017 | case 0xfd: /* std */ | 4093 | case 0xfd: /* std */ |
3018 | ctxt->eflags |= EFLG_DF; | 4094 | ctxt->eflags |= EFLG_DF; |
3019 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
3020 | break; | 4095 | break; |
3021 | case 0xfe: /* Grp4 */ | 4096 | case 0xfe: /* Grp4 */ |
3022 | grp45: | 4097 | rc = em_grp45(ctxt); |
3023 | rc = emulate_grp45(ctxt, ops); | ||
3024 | if (rc != X86EMUL_CONTINUE) | ||
3025 | goto done; | ||
3026 | break; | 4098 | break; |
3027 | case 0xff: /* Grp5 */ | 4099 | case 0xff: /* Grp5 */ |
3028 | if (c->modrm_reg == 5) | 4100 | rc = em_grp45(ctxt); |
3029 | goto jump_far; | 4101 | break; |
3030 | goto grp45; | 4102 | default: |
4103 | goto cannot_emulate; | ||
3031 | } | 4104 | } |
3032 | 4105 | ||
4106 | if (rc != X86EMUL_CONTINUE) | ||
4107 | goto done; | ||
4108 | |||
3033 | writeback: | 4109 | writeback: |
3034 | rc = writeback(ctxt, ops); | 4110 | rc = writeback(ctxt); |
3035 | if (rc != X86EMUL_CONTINUE) | 4111 | if (rc != X86EMUL_CONTINUE) |
3036 | goto done; | 4112 | goto done; |
3037 | 4113 | ||
@@ -3042,165 +4118,82 @@ writeback: | |||
3042 | c->dst.type = saved_dst_type; | 4118 | c->dst.type = saved_dst_type; |
3043 | 4119 | ||
3044 | if ((c->d & SrcMask) == SrcSI) | 4120 | if ((c->d & SrcMask) == SrcSI) |
3045 | string_addr_inc(ctxt, seg_override_base(ctxt, ops, c), | 4121 | string_addr_inc(ctxt, seg_override(ctxt, c), |
3046 | VCPU_REGS_RSI, &c->src); | 4122 | VCPU_REGS_RSI, &c->src); |
3047 | 4123 | ||
3048 | if ((c->d & DstMask) == DstDI) | 4124 | if ((c->d & DstMask) == DstDI) |
3049 | string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI, | 4125 | string_addr_inc(ctxt, VCPU_SREG_ES, VCPU_REGS_RDI, |
3050 | &c->dst); | 4126 | &c->dst); |
3051 | 4127 | ||
3052 | if (c->rep_prefix && (c->d & String)) { | 4128 | if (c->rep_prefix && (c->d & String)) { |
3053 | struct read_cache *rc = &ctxt->decode.io_read; | 4129 | struct read_cache *r = &ctxt->decode.io_read; |
3054 | register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1); | 4130 | register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1); |
3055 | /* | 4131 | |
3056 | * Re-enter guest when pio read ahead buffer is empty or, | 4132 | if (!string_insn_completed(ctxt)) { |
3057 | * if it is not used, after every 1024 iterations. | 4133 | /* |
3058 | */ | 4134 | * Re-enter guest when pio read ahead buffer is empty |
3059 | if ((rc->end == 0 && !(c->regs[VCPU_REGS_RCX] & 0x3ff)) || | 4135 | * or, if it is not used, after every 1024 iterations. |
3060 | (rc->end != 0 && rc->end == rc->pos)) | 4136 | */ |
3061 | ctxt->restart = false; | 4137 | if ((r->end != 0 || c->regs[VCPU_REGS_RCX] & 0x3ff) && |
4138 | (r->end == 0 || r->end != r->pos)) { | ||
4139 | /* | ||
4140 | * Reset read cache. Usually happens before | ||
4141 | * decode, but since instruction is restarted | ||
4142 | * we have to do it here. | ||
4143 | */ | ||
4144 | ctxt->decode.mem_read.end = 0; | ||
4145 | return EMULATION_RESTART; | ||
4146 | } | ||
4147 | goto done; /* skip rip writeback */ | ||
4148 | } | ||
3062 | } | 4149 | } |
3063 | /* | 4150 | |
3064 | * reset read cache here in case string instruction is restarted | ||
3065 | * without decoding | ||
3066 | */ | ||
3067 | ctxt->decode.mem_read.end = 0; | ||
3068 | ctxt->eip = c->eip; | 4151 | ctxt->eip = c->eip; |
3069 | 4152 | ||
3070 | done: | 4153 | done: |
3071 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; | 4154 | if (rc == X86EMUL_PROPAGATE_FAULT) |
4155 | ctxt->have_exception = true; | ||
4156 | if (rc == X86EMUL_INTERCEPTED) | ||
4157 | return EMULATION_INTERCEPTED; | ||
4158 | |||
4159 | return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; | ||
3072 | 4160 | ||
3073 | twobyte_insn: | 4161 | twobyte_insn: |
3074 | switch (c->b) { | 4162 | switch (c->b) { |
3075 | case 0x01: /* lgdt, lidt, lmsw */ | ||
3076 | switch (c->modrm_reg) { | ||
3077 | u16 size; | ||
3078 | unsigned long address; | ||
3079 | |||
3080 | case 0: /* vmcall */ | ||
3081 | if (c->modrm_mod != 3 || c->modrm_rm != 1) | ||
3082 | goto cannot_emulate; | ||
3083 | |||
3084 | rc = kvm_fix_hypercall(ctxt->vcpu); | ||
3085 | if (rc != X86EMUL_CONTINUE) | ||
3086 | goto done; | ||
3087 | |||
3088 | /* Let the processor re-execute the fixed hypercall */ | ||
3089 | c->eip = ctxt->eip; | ||
3090 | /* Disable writeback. */ | ||
3091 | c->dst.type = OP_NONE; | ||
3092 | break; | ||
3093 | case 2: /* lgdt */ | ||
3094 | rc = read_descriptor(ctxt, ops, c->src.ptr, | ||
3095 | &size, &address, c->op_bytes); | ||
3096 | if (rc != X86EMUL_CONTINUE) | ||
3097 | goto done; | ||
3098 | realmode_lgdt(ctxt->vcpu, size, address); | ||
3099 | /* Disable writeback. */ | ||
3100 | c->dst.type = OP_NONE; | ||
3101 | break; | ||
3102 | case 3: /* lidt/vmmcall */ | ||
3103 | if (c->modrm_mod == 3) { | ||
3104 | switch (c->modrm_rm) { | ||
3105 | case 1: | ||
3106 | rc = kvm_fix_hypercall(ctxt->vcpu); | ||
3107 | if (rc != X86EMUL_CONTINUE) | ||
3108 | goto done; | ||
3109 | break; | ||
3110 | default: | ||
3111 | goto cannot_emulate; | ||
3112 | } | ||
3113 | } else { | ||
3114 | rc = read_descriptor(ctxt, ops, c->src.ptr, | ||
3115 | &size, &address, | ||
3116 | c->op_bytes); | ||
3117 | if (rc != X86EMUL_CONTINUE) | ||
3118 | goto done; | ||
3119 | realmode_lidt(ctxt->vcpu, size, address); | ||
3120 | } | ||
3121 | /* Disable writeback. */ | ||
3122 | c->dst.type = OP_NONE; | ||
3123 | break; | ||
3124 | case 4: /* smsw */ | ||
3125 | c->dst.bytes = 2; | ||
3126 | c->dst.val = ops->get_cr(0, ctxt->vcpu); | ||
3127 | break; | ||
3128 | case 6: /* lmsw */ | ||
3129 | ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0ful) | | ||
3130 | (c->src.val & 0x0f), ctxt->vcpu); | ||
3131 | c->dst.type = OP_NONE; | ||
3132 | break; | ||
3133 | case 5: /* not defined */ | ||
3134 | emulate_ud(ctxt); | ||
3135 | goto done; | ||
3136 | case 7: /* invlpg*/ | ||
3137 | emulate_invlpg(ctxt->vcpu, c->modrm_ea); | ||
3138 | /* Disable writeback. */ | ||
3139 | c->dst.type = OP_NONE; | ||
3140 | break; | ||
3141 | default: | ||
3142 | goto cannot_emulate; | ||
3143 | } | ||
3144 | break; | ||
3145 | case 0x05: /* syscall */ | 4163 | case 0x05: /* syscall */ |
3146 | rc = emulate_syscall(ctxt, ops); | 4164 | rc = emulate_syscall(ctxt, ops); |
3147 | if (rc != X86EMUL_CONTINUE) | ||
3148 | goto done; | ||
3149 | else | ||
3150 | goto writeback; | ||
3151 | break; | 4165 | break; |
3152 | case 0x06: | 4166 | case 0x06: |
3153 | emulate_clts(ctxt->vcpu); | 4167 | rc = em_clts(ctxt); |
3154 | c->dst.type = OP_NONE; | ||
3155 | break; | 4168 | break; |
3156 | case 0x09: /* wbinvd */ | 4169 | case 0x09: /* wbinvd */ |
3157 | kvm_emulate_wbinvd(ctxt->vcpu); | 4170 | (ctxt->ops->wbinvd)(ctxt); |
3158 | c->dst.type = OP_NONE; | ||
3159 | break; | 4171 | break; |
3160 | case 0x08: /* invd */ | 4172 | case 0x08: /* invd */ |
3161 | case 0x0d: /* GrpP (prefetch) */ | 4173 | case 0x0d: /* GrpP (prefetch) */ |
3162 | case 0x18: /* Grp16 (prefetch/nop) */ | 4174 | case 0x18: /* Grp16 (prefetch/nop) */ |
3163 | c->dst.type = OP_NONE; | ||
3164 | break; | 4175 | break; |
3165 | case 0x20: /* mov cr, reg */ | 4176 | case 0x20: /* mov cr, reg */ |
3166 | switch (c->modrm_reg) { | 4177 | c->dst.val = ops->get_cr(ctxt, c->modrm_reg); |
3167 | case 1: | ||
3168 | case 5 ... 7: | ||
3169 | case 9 ... 15: | ||
3170 | emulate_ud(ctxt); | ||
3171 | goto done; | ||
3172 | } | ||
3173 | c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu); | ||
3174 | c->dst.type = OP_NONE; /* no writeback */ | ||
3175 | break; | 4178 | break; |
3176 | case 0x21: /* mov from dr to reg */ | 4179 | case 0x21: /* mov from dr to reg */ |
3177 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && | 4180 | ops->get_dr(ctxt, c->modrm_reg, &c->dst.val); |
3178 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { | ||
3179 | emulate_ud(ctxt); | ||
3180 | goto done; | ||
3181 | } | ||
3182 | ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu); | ||
3183 | c->dst.type = OP_NONE; /* no writeback */ | ||
3184 | break; | 4181 | break; |
3185 | case 0x22: /* mov reg, cr */ | 4182 | case 0x22: /* mov reg, cr */ |
3186 | if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) { | 4183 | if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) { |
3187 | emulate_gp(ctxt, 0); | 4184 | emulate_gp(ctxt, 0); |
4185 | rc = X86EMUL_PROPAGATE_FAULT; | ||
3188 | goto done; | 4186 | goto done; |
3189 | } | 4187 | } |
3190 | c->dst.type = OP_NONE; | 4188 | c->dst.type = OP_NONE; |
3191 | break; | 4189 | break; |
3192 | case 0x23: /* mov from reg to dr */ | 4190 | case 0x23: /* mov from reg to dr */ |
3193 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && | 4191 | if (ops->set_dr(ctxt, c->modrm_reg, c->src.val & |
3194 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { | ||
3195 | emulate_ud(ctxt); | ||
3196 | goto done; | ||
3197 | } | ||
3198 | |||
3199 | if (ops->set_dr(c->modrm_reg, c->regs[c->modrm_rm] & | ||
3200 | ((ctxt->mode == X86EMUL_MODE_PROT64) ? | 4192 | ((ctxt->mode == X86EMUL_MODE_PROT64) ? |
3201 | ~0ULL : ~0U), ctxt->vcpu) < 0) { | 4193 | ~0ULL : ~0U)) < 0) { |
3202 | /* #UD condition is already handled by the code above */ | 4194 | /* #UD condition is already handled by the code above */ |
3203 | emulate_gp(ctxt, 0); | 4195 | emulate_gp(ctxt, 0); |
4196 | rc = X86EMUL_PROPAGATE_FAULT; | ||
3204 | goto done; | 4197 | goto done; |
3205 | } | 4198 | } |
3206 | 4199 | ||
@@ -3210,38 +4203,30 @@ twobyte_insn: | |||
3210 | /* wrmsr */ | 4203 | /* wrmsr */ |
3211 | msr_data = (u32)c->regs[VCPU_REGS_RAX] | 4204 | msr_data = (u32)c->regs[VCPU_REGS_RAX] |
3212 | | ((u64)c->regs[VCPU_REGS_RDX] << 32); | 4205 | | ((u64)c->regs[VCPU_REGS_RDX] << 32); |
3213 | if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) { | 4206 | if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) { |
3214 | emulate_gp(ctxt, 0); | 4207 | emulate_gp(ctxt, 0); |
4208 | rc = X86EMUL_PROPAGATE_FAULT; | ||
3215 | goto done; | 4209 | goto done; |
3216 | } | 4210 | } |
3217 | rc = X86EMUL_CONTINUE; | 4211 | rc = X86EMUL_CONTINUE; |
3218 | c->dst.type = OP_NONE; | ||
3219 | break; | 4212 | break; |
3220 | case 0x32: | 4213 | case 0x32: |
3221 | /* rdmsr */ | 4214 | /* rdmsr */ |
3222 | if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) { | 4215 | if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) { |
3223 | emulate_gp(ctxt, 0); | 4216 | emulate_gp(ctxt, 0); |
4217 | rc = X86EMUL_PROPAGATE_FAULT; | ||
3224 | goto done; | 4218 | goto done; |
3225 | } else { | 4219 | } else { |
3226 | c->regs[VCPU_REGS_RAX] = (u32)msr_data; | 4220 | c->regs[VCPU_REGS_RAX] = (u32)msr_data; |
3227 | c->regs[VCPU_REGS_RDX] = msr_data >> 32; | 4221 | c->regs[VCPU_REGS_RDX] = msr_data >> 32; |
3228 | } | 4222 | } |
3229 | rc = X86EMUL_CONTINUE; | 4223 | rc = X86EMUL_CONTINUE; |
3230 | c->dst.type = OP_NONE; | ||
3231 | break; | 4224 | break; |
3232 | case 0x34: /* sysenter */ | 4225 | case 0x34: /* sysenter */ |
3233 | rc = emulate_sysenter(ctxt, ops); | 4226 | rc = emulate_sysenter(ctxt, ops); |
3234 | if (rc != X86EMUL_CONTINUE) | ||
3235 | goto done; | ||
3236 | else | ||
3237 | goto writeback; | ||
3238 | break; | 4227 | break; |
3239 | case 0x35: /* sysexit */ | 4228 | case 0x35: /* sysexit */ |
3240 | rc = emulate_sysexit(ctxt, ops); | 4229 | rc = emulate_sysexit(ctxt, ops); |
3241 | if (rc != X86EMUL_CONTINUE) | ||
3242 | goto done; | ||
3243 | else | ||
3244 | goto writeback; | ||
3245 | break; | 4230 | break; |
3246 | case 0x40 ... 0x4f: /* cmov */ | 4231 | case 0x40 ... 0x4f: /* cmov */ |
3247 | c->dst.val = c->dst.orig_val = c->src.val; | 4232 | c->dst.val = c->dst.orig_val = c->src.val; |
@@ -3251,15 +4236,15 @@ twobyte_insn: | |||
3251 | case 0x80 ... 0x8f: /* jnz rel, etc*/ | 4236 | case 0x80 ... 0x8f: /* jnz rel, etc*/ |
3252 | if (test_cc(c->b, ctxt->eflags)) | 4237 | if (test_cc(c->b, ctxt->eflags)) |
3253 | jmp_rel(c, c->src.val); | 4238 | jmp_rel(c, c->src.val); |
3254 | c->dst.type = OP_NONE; | 4239 | break; |
4240 | case 0x90 ... 0x9f: /* setcc r/m8 */ | ||
4241 | c->dst.val = test_cc(c->b, ctxt->eflags); | ||
3255 | break; | 4242 | break; |
3256 | case 0xa0: /* push fs */ | 4243 | case 0xa0: /* push fs */ |
3257 | emulate_push_sreg(ctxt, ops, VCPU_SREG_FS); | 4244 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS); |
3258 | break; | 4245 | break; |
3259 | case 0xa1: /* pop fs */ | 4246 | case 0xa1: /* pop fs */ |
3260 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); | 4247 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); |
3261 | if (rc != X86EMUL_CONTINUE) | ||
3262 | goto done; | ||
3263 | break; | 4248 | break; |
3264 | case 0xa3: | 4249 | case 0xa3: |
3265 | bt: /* bt */ | 4250 | bt: /* bt */ |
@@ -3273,17 +4258,13 @@ twobyte_insn: | |||
3273 | emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); | 4258 | emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); |
3274 | break; | 4259 | break; |
3275 | case 0xa8: /* push gs */ | 4260 | case 0xa8: /* push gs */ |
3276 | emulate_push_sreg(ctxt, ops, VCPU_SREG_GS); | 4261 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS); |
3277 | break; | 4262 | break; |
3278 | case 0xa9: /* pop gs */ | 4263 | case 0xa9: /* pop gs */ |
3279 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); | 4264 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); |
3280 | if (rc != X86EMUL_CONTINUE) | ||
3281 | goto done; | ||
3282 | break; | 4265 | break; |
3283 | case 0xab: | 4266 | case 0xab: |
3284 | bts: /* bts */ | 4267 | bts: /* bts */ |
3285 | /* only subword offset */ | ||
3286 | c->src.val &= (c->dst.bytes << 3) - 1; | ||
3287 | emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags); | 4268 | emulate_2op_SrcV_nobyte("bts", c->src, c->dst, ctxt->eflags); |
3288 | break; | 4269 | break; |
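The "/* only subword offset */" masking deleted above has not disappeared: it moved into fetch_bit_operand(), called from the BitOp path of the destination-operand decode. What the mask computes, with assumed numbers:

        /* For a 32-bit destination, a bit index is reduced modulo the
         * operand width; fetch_bit_operand() folds the remainder of the
         * index into the memory address once, at decode time. */
        unsigned int bit   = 37;                  /* requested bit index */
        unsigned int width = c->dst.bytes << 3;   /* 4 bytes -> 32 bits  */
        unsigned int off   = bit & (width - 1);   /* 37 & 31 == 5        */
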
3289 | case 0xac: /* shrd imm8, r, r/m */ | 4270 | case 0xac: /* shrd imm8, r, r/m */ |
@@ -3306,15 +4287,22 @@ twobyte_insn: | |||
3306 | } else { | 4287 | } else { |
3307 | /* Failure: write the value we saw to EAX. */ | 4288 | /* Failure: write the value we saw to EAX. */ |
3308 | c->dst.type = OP_REG; | 4289 | c->dst.type = OP_REG; |
3309 | c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; | 4290 | c->dst.addr.reg = (unsigned long *)&c->regs[VCPU_REGS_RAX]; |
3310 | } | 4291 | } |
3311 | break; | 4292 | break; |
4293 | case 0xb2: /* lss */ | ||
4294 | rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS); | ||
4295 | break; | ||
3312 | case 0xb3: | 4296 | case 0xb3: |
3313 | btr: /* btr */ | 4297 | btr: /* btr */ |
3314 | /* only subword offset */ | ||
3315 | c->src.val &= (c->dst.bytes << 3) - 1; | ||
3316 | emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags); | 4298 | emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags); |
3317 | break; | 4299 | break; |
4300 | case 0xb4: /* lfs */ | ||
4301 | rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS); | ||
4302 | break; | ||
4303 | case 0xb5: /* lgs */ | ||
4304 | rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS); | ||
4305 | break; | ||
3318 | case 0xb6 ... 0xb7: /* movzx */ | 4306 | case 0xb6 ... 0xb7: /* movzx */ |
3319 | c->dst.bytes = c->op_bytes; | 4307 | c->dst.bytes = c->op_bytes; |
3320 | c->dst.val = (c->d & ByteOp) ? (u8) c->src.val | 4308 | c->dst.val = (c->d & ByteOp) ? (u8) c->src.val |
@@ -3334,29 +4322,60 @@ twobyte_insn: | |||
3334 | break; | 4322 | break; |
3335 | case 0xbb: | 4323 | case 0xbb: |
3336 | btc: /* btc */ | 4324 | btc: /* btc */ |
3337 | /* only subword offset */ | ||
3338 | c->src.val &= (c->dst.bytes << 3) - 1; | ||
3339 | emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags); | 4325 | emulate_2op_SrcV_nobyte("btc", c->src, c->dst, ctxt->eflags); |
3340 | break; | 4326 | break; |
4327 | case 0xbc: { /* bsf */ | ||
4328 | u8 zf; | ||
4329 | __asm__ ("bsf %2, %0; setz %1" | ||
4330 | : "=r"(c->dst.val), "=q"(zf) | ||
4331 | : "r"(c->src.val)); | ||
4332 | ctxt->eflags &= ~X86_EFLAGS_ZF; | ||
4333 | if (zf) { | ||
4334 | ctxt->eflags |= X86_EFLAGS_ZF; | ||
4335 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
4336 | } | ||
4337 | break; | ||
4338 | } | ||
4339 | case 0xbd: { /* bsr */ | ||
4340 | u8 zf; | ||
4341 | __asm__ ("bsr %2, %0; setz %1" | ||
4342 | : "=r"(c->dst.val), "=q"(zf) | ||
4343 | : "r"(c->src.val)); | ||
4344 | ctxt->eflags &= ~X86_EFLAGS_ZF; | ||
4345 | if (zf) { | ||
4346 | ctxt->eflags |= X86_EFLAGS_ZF; | ||
4347 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
4348 | } | ||
4349 | break; | ||
4350 | } | ||
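Both scans above lean on the host CPU: bsf/bsr set ZF when the source is zero, and in that case the destination is architecturally undefined, so the emulator suppresses writeback to leave the guest register untouched. A worked value for the bsf case, using the same inline asm as the handler:

        /* bsf: src = 0x48 (bits 3 and 6 set) gives dst = 3 and zf == 0;
         * src = 0 would set zf and skip writeback, as in the case above. */
        unsigned long src = 0x48, dst;
        u8 zf;
        __asm__ ("bsf %2, %0; setz %1" : "=r"(dst), "=q"(zf) : "r"(src));
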
3341 | case 0xbe ... 0xbf: /* movsx */ | 4351 | case 0xbe ... 0xbf: /* movsx */ |
3342 | c->dst.bytes = c->op_bytes; | 4352 | c->dst.bytes = c->op_bytes; |
3343 | c->dst.val = (c->d & ByteOp) ? (s8) c->src.val : | 4353 | c->dst.val = (c->d & ByteOp) ? (s8) c->src.val : |
3344 | (s16) c->src.val; | 4354 | (s16) c->src.val; |
3345 | break; | 4355 | break; |
4356 | case 0xc0 ... 0xc1: /* xadd */ | ||
4357 | emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); | ||
4358 | /* Write back the register source. */ | ||
4359 | c->src.val = c->dst.orig_val; | ||
4360 | write_register_operand(&c->src); | ||
4361 | break; | ||
3346 | case 0xc3: /* movnti */ | 4362 | case 0xc3: /* movnti */ |
3347 | c->dst.bytes = c->op_bytes; | 4363 | c->dst.bytes = c->op_bytes; |
3348 | c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val : | 4364 | c->dst.val = (c->op_bytes == 4) ? (u32) c->src.val : |
3349 | (u64) c->src.val; | 4365 | (u64) c->src.val; |
3350 | break; | 4366 | break; |
3351 | case 0xc7: /* Grp9 (cmpxchg8b) */ | 4367 | case 0xc7: /* Grp9 (cmpxchg8b) */ |
3352 | rc = emulate_grp9(ctxt, ops); | 4368 | rc = em_grp9(ctxt); |
3353 | if (rc != X86EMUL_CONTINUE) | ||
3354 | goto done; | ||
3355 | break; | 4369 | break; |
4370 | default: | ||
4371 | goto cannot_emulate; | ||
3356 | } | 4372 | } |
4373 | |||
4374 | if (rc != X86EMUL_CONTINUE) | ||
4375 | goto done; | ||
4376 | |||
3357 | goto writeback; | 4377 | goto writeback; |
3358 | 4378 | ||
3359 | cannot_emulate: | 4379 | cannot_emulate: |
3360 | DPRINTF("Cannot emulate %02x\n", c->b); | 4380 | return EMULATION_FAILED; |
3361 | return -1; | ||
3362 | } | 4381 | } |
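For reference, a hedged sketch of how a caller is expected to dispatch on the EMULATION_* results returned above; the handle_*() names are hypothetical stand-ins for what the emulation loop in arch/x86/kvm/x86.c does with each outcome:

        switch (x86_emulate_insn(ctxt)) {
        case EMULATION_OK:              /* writeback done, eip advanced    */
                break;
        case EMULATION_RESTART:         /* string op in progress: re-enter */
                return handle_restart(vcpu);
        case EMULATION_INTERCEPTED:     /* nested-guest intercept took over */
                return handle_intercept(vcpu);
        case EMULATION_FAILED:          /* could not emulate: report it    */
                return handle_failure(vcpu);
        }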