Diffstat (limited to 'arch/x86/kvm/emulate.c'):
 arch/x86/kvm/emulate.c | 273 ++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 219 insertions(+), 54 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index f95d242ee9f7..97d9a9914ba8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -433,11 +433,32 @@ static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
 	return ctxt->ops->intercept(ctxt, &info, stage);
 }
 
+static void assign_masked(ulong *dest, ulong src, ulong mask)
+{
+	*dest = (*dest & ~mask) | (src & mask);
+}
+
 static inline unsigned long ad_mask(struct x86_emulate_ctxt *ctxt)
 {
 	return (1UL << (ctxt->ad_bytes << 3)) - 1;
 }
 
+static ulong stack_mask(struct x86_emulate_ctxt *ctxt)
+{
+	u16 sel;
+	struct desc_struct ss;
+
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		return ~0UL;
+	ctxt->ops->get_segment(ctxt, &sel, &ss, NULL, VCPU_SREG_SS);
+	return ~0U >> ((ss.d ^ 1) * 16);  /* d=0: 0xffff; d=1: 0xffffffff */
+}
+
+static int stack_size(struct x86_emulate_ctxt *ctxt)
+{
+	return (__fls(stack_mask(ctxt)) + 1) >> 3;
+}
+
 /* Access/update address held in a register, based on addressing mode. */
 static inline unsigned long
 address_mask(struct x86_emulate_ctxt *ctxt, unsigned long reg)
@@ -958,6 +979,12 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
 	op->orig_val = op->val;
 }
 
+static void adjust_modrm_seg(struct x86_emulate_ctxt *ctxt, int base_reg)
+{
+	if (base_reg == VCPU_REGS_RSP || base_reg == VCPU_REGS_RBP)
+		ctxt->modrm_seg = VCPU_SREG_SS;
+}
+
 static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 			struct operand *op)
 {
@@ -1061,15 +1088,20 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 
 			if ((base_reg & 7) == 5 && ctxt->modrm_mod == 0)
 				modrm_ea += insn_fetch(s32, ctxt);
-			else
+			else {
 				modrm_ea += ctxt->regs[base_reg];
+				adjust_modrm_seg(ctxt, base_reg);
+			}
 			if (index_reg != 4)
 				modrm_ea += ctxt->regs[index_reg] << scale;
 		} else if ((ctxt->modrm_rm & 7) == 5 && ctxt->modrm_mod == 0) {
 			if (ctxt->mode == X86EMUL_MODE_PROT64)
 				ctxt->rip_relative = 1;
-		} else
-			modrm_ea += ctxt->regs[ctxt->modrm_rm];
+		} else {
+			base_reg = ctxt->modrm_rm;
+			modrm_ea += ctxt->regs[base_reg];
+			adjust_modrm_seg(ctxt, base_reg);
+		}
 		switch (ctxt->modrm_mod) {
 		case 0:
 			if (ctxt->modrm_rm == 5)
@@ -1264,7 +1296,8 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 
 /* allowed just for 8 bytes segments */
 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				   u16 selector, struct desc_struct *desc)
+				   u16 selector, struct desc_struct *desc,
+				   ulong *desc_addr_p)
 {
 	struct desc_ptr dt;
 	u16 index = selector >> 3;
@@ -1275,7 +1308,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (dt.size < index * 8 + 7)
 		return emulate_gp(ctxt, selector & 0xfffc);
 
-	addr = dt.address + index * 8;
+	*desc_addr_p = addr = dt.address + index * 8;
 	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
 				   &ctxt->exception);
 }
@@ -1302,11 +1335,12 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   u16 selector, int seg)
 {
-	struct desc_struct seg_desc;
+	struct desc_struct seg_desc, old_desc;
 	u8 dpl, rpl, cpl;
 	unsigned err_vec = GP_VECTOR;
 	u32 err_code = 0;
 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+	ulong desc_addr;
 	int ret;
 
 	memset(&seg_desc, 0, sizeof seg_desc);
@@ -1324,8 +1358,14 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		goto load;
 	}
 
-	/* NULL selector is not valid for TR, CS and SS */
-	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+	rpl = selector & 3;
+	cpl = ctxt->ops->cpl(ctxt);
+
+	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
+	if ((seg == VCPU_SREG_CS
+	     || (seg == VCPU_SREG_SS
+		 && (ctxt->mode != X86EMUL_MODE_PROT64 || rpl != cpl))
+	     || seg == VCPU_SREG_TR)
 	    && null_selector)
 		goto exception;
 
@@ -1336,7 +1376,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (null_selector) /* for NULL selector skip all following checks */
 		goto load;
 
-	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
+	ret = read_segment_descriptor(ctxt, selector, &seg_desc, &desc_addr);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -1352,9 +1392,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		goto exception;
 	}
 
-	rpl = selector & 3;
 	dpl = seg_desc.dpl;
-	cpl = ctxt->ops->cpl(ctxt);
 
 	switch (seg) {
 	case VCPU_SREG_SS:
@@ -1384,6 +1422,12 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	case VCPU_SREG_TR:
 		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
 			goto exception;
+		old_desc = seg_desc;
+		seg_desc.type |= 2; /* busy */
+		ret = ctxt->ops->cmpxchg_emulated(ctxt, desc_addr, &old_desc, &seg_desc,
+						  sizeof(seg_desc), &ctxt->exception);
+		if (ret != X86EMUL_CONTINUE)
+			return ret;
 		break;
 	case VCPU_SREG_LDTR:
 		if (seg_desc.s || seg_desc.type != 2)
@@ -1474,17 +1518,22 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
-static int em_push(struct x86_emulate_ctxt *ctxt)
+static int push(struct x86_emulate_ctxt *ctxt, void *data, int bytes)
 {
 	struct segmented_address addr;
 
-	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -ctxt->op_bytes);
+	register_address_increment(ctxt, &ctxt->regs[VCPU_REGS_RSP], -bytes);
 	addr.ea = register_address(ctxt, ctxt->regs[VCPU_REGS_RSP]);
 	addr.seg = VCPU_SREG_SS;
 
+	return segmented_write(ctxt, addr, data, bytes);
+}
+
+static int em_push(struct x86_emulate_ctxt *ctxt)
+{
 	/* Disable writeback. */
 	ctxt->dst.type = OP_NONE;
-	return segmented_write(ctxt, addr, &ctxt->src.val, ctxt->op_bytes);
+	return push(ctxt, &ctxt->src.val, ctxt->op_bytes);
 }
 
 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
@@ -1556,6 +1605,33 @@ static int em_popf(struct x86_emulate_ctxt *ctxt)
 	return emulate_popf(ctxt, &ctxt->dst.val, ctxt->op_bytes);
 }
 
+static int em_enter(struct x86_emulate_ctxt *ctxt)
+{
+	int rc;
+	unsigned frame_size = ctxt->src.val;
+	unsigned nesting_level = ctxt->src2.val & 31;
+
+	if (nesting_level)
+		return X86EMUL_UNHANDLEABLE;
+
+	rc = push(ctxt, &ctxt->regs[VCPU_REGS_RBP], stack_size(ctxt));
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	assign_masked(&ctxt->regs[VCPU_REGS_RBP], ctxt->regs[VCPU_REGS_RSP],
+		      stack_mask(ctxt));
+	assign_masked(&ctxt->regs[VCPU_REGS_RSP],
+		      ctxt->regs[VCPU_REGS_RSP] - frame_size,
+		      stack_mask(ctxt));
+	return X86EMUL_CONTINUE;
+}
+
+static int em_leave(struct x86_emulate_ctxt *ctxt)
+{
+	assign_masked(&ctxt->regs[VCPU_REGS_RSP], ctxt->regs[VCPU_REGS_RBP],
+		      stack_mask(ctxt));
+	return emulate_pop(ctxt, &ctxt->regs[VCPU_REGS_RBP], ctxt->op_bytes);
+}
+
 static int em_push_sreg(struct x86_emulate_ctxt *ctxt)
 {
 	int seg = ctxt->src2.val;
@@ -1993,8 +2069,8 @@ static bool vendor_intel(struct x86_emulate_ctxt *ctxt)
 	u32 eax, ebx, ecx, edx;
 
 	eax = ecx = 0;
-	return ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)
-		&& ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
+	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+	return ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx
 		&& ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx
 		&& edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx;
 }
@@ -2013,32 +2089,31 @@ static bool em_syscall_is_enabled(struct x86_emulate_ctxt *ctxt)
 
 	eax = 0x00000000;
 	ecx = 0x00000000;
-	if (ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx)) {
-		/*
-		 * Intel ("GenuineIntel")
-		 * remark: Intel CPUs only support "syscall" in 64bit
-		 * longmode. Also an 64bit guest with a
-		 * 32bit compat-app running will #UD !! While this
-		 * behaviour can be fixed (by emulating) into AMD
-		 * response - CPUs of AMD can't behave like Intel.
-		 */
-		if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
-		    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
-		    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
-			return false;
+	ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+	/*
+	 * Intel ("GenuineIntel")
+	 * remark: Intel CPUs only support "syscall" in 64bit
+	 * longmode. Also an 64bit guest with a
+	 * 32bit compat-app running will #UD !! While this
+	 * behaviour can be fixed (by emulating) into AMD
+	 * response - CPUs of AMD can't behave like Intel.
+	 */
+	if (ebx == X86EMUL_CPUID_VENDOR_GenuineIntel_ebx &&
+	    ecx == X86EMUL_CPUID_VENDOR_GenuineIntel_ecx &&
+	    edx == X86EMUL_CPUID_VENDOR_GenuineIntel_edx)
+		return false;
 
-		/* AMD ("AuthenticAMD") */
-		if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
-		    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
-		    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
-			return true;
+	/* AMD ("AuthenticAMD") */
+	if (ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx &&
+	    ecx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ecx &&
+	    edx == X86EMUL_CPUID_VENDOR_AuthenticAMD_edx)
+		return true;
 
-		/* AMD ("AMDisbetter!") */
-		if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
-		    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
-		    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
-			return true;
-	}
+	/* AMD ("AMDisbetter!") */
+	if (ebx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ebx &&
+	    ecx == X86EMUL_CPUID_VENDOR_AMDisbetterI_ecx &&
+	    edx == X86EMUL_CPUID_VENDOR_AMDisbetterI_edx)
+		return true;
 
 	/* default: (not Intel, not AMD), apply Intel's stricter rules... */
 	return false;
@@ -2547,13 +2622,14 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	ulong old_tss_base =
 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
 	u32 desc_limit;
+	ulong desc_addr;
 
 	/* FIXME: old_tss_base == ~0 ? */
 
-	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
+	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc, &desc_addr);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
+	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc, &desc_addr);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2948,6 +3024,24 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
 	return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
 }
 
+static int em_lldt(struct x86_emulate_ctxt *ctxt)
+{
+	u16 sel = ctxt->src.val;
+
+	/* Disable writeback. */
+	ctxt->dst.type = OP_NONE;
+	return load_segment_descriptor(ctxt, sel, VCPU_SREG_LDTR);
+}
+
+static int em_ltr(struct x86_emulate_ctxt *ctxt)
+{
+	u16 sel = ctxt->src.val;
+
+	/* Disable writeback. */
+	ctxt->dst.type = OP_NONE;
+	return load_segment_descriptor(ctxt, sel, VCPU_SREG_TR);
+}
+
 static int em_invlpg(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
@@ -2989,11 +3083,42 @@ static int em_vmcall(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+static int emulate_store_desc_ptr(struct x86_emulate_ctxt *ctxt,
+				  void (*get)(struct x86_emulate_ctxt *ctxt,
+					      struct desc_ptr *ptr))
+{
+	struct desc_ptr desc_ptr;
+
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		ctxt->op_bytes = 8;
+	get(ctxt, &desc_ptr);
+	if (ctxt->op_bytes == 2) {
+		ctxt->op_bytes = 4;
+		desc_ptr.address &= 0x00ffffff;
+	}
+	/* Disable writeback. */
+	ctxt->dst.type = OP_NONE;
+	return segmented_write(ctxt, ctxt->dst.addr.mem,
+			       &desc_ptr, 2 + ctxt->op_bytes);
+}
+
+static int em_sgdt(struct x86_emulate_ctxt *ctxt)
+{
+	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_gdt);
+}
+
+static int em_sidt(struct x86_emulate_ctxt *ctxt)
+{
+	return emulate_store_desc_ptr(ctxt, ctxt->ops->get_idt);
+}
+
 static int em_lgdt(struct x86_emulate_ctxt *ctxt)
 {
 	struct desc_ptr desc_ptr;
 	int rc;
 
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		ctxt->op_bytes = 8;
 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
 			     &desc_ptr.size, &desc_ptr.address,
 			     ctxt->op_bytes);
@@ -3021,6 +3146,8 @@ static int em_lidt(struct x86_emulate_ctxt *ctxt)
 	struct desc_ptr desc_ptr;
 	int rc;
 
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		ctxt->op_bytes = 8;
 	rc = read_descriptor(ctxt, ctxt->src.addr.mem,
 			     &desc_ptr.size, &desc_ptr.address,
 			     ctxt->op_bytes);
@@ -3143,6 +3270,42 @@ static int em_bsr(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
+static int em_cpuid(struct x86_emulate_ctxt *ctxt)
+{
+	u32 eax, ebx, ecx, edx;
+
+	eax = ctxt->regs[VCPU_REGS_RAX];
+	ecx = ctxt->regs[VCPU_REGS_RCX];
+	ctxt->ops->get_cpuid(ctxt, &eax, &ebx, &ecx, &edx);
+	ctxt->regs[VCPU_REGS_RAX] = eax;
+	ctxt->regs[VCPU_REGS_RBX] = ebx;
+	ctxt->regs[VCPU_REGS_RCX] = ecx;
+	ctxt->regs[VCPU_REGS_RDX] = edx;
+	return X86EMUL_CONTINUE;
+}
+
+static int em_lahf(struct x86_emulate_ctxt *ctxt)
+{
+	ctxt->regs[VCPU_REGS_RAX] &= ~0xff00UL;
+	ctxt->regs[VCPU_REGS_RAX] |= (ctxt->eflags & 0xff) << 8;
+	return X86EMUL_CONTINUE;
+}
+
+static int em_bswap(struct x86_emulate_ctxt *ctxt)
+{
+	switch (ctxt->op_bytes) {
+#ifdef CONFIG_X86_64
+	case 8:
+		asm("bswap %0" : "+r"(ctxt->dst.val));
+		break;
+#endif
+	default:
+		asm("bswap %0" : "+r"(*(u32 *)&ctxt->dst.val));
+		break;
+	}
+	return X86EMUL_CONTINUE;
+}
+
 static bool valid_cr(int nr)
 {
 	switch (nr) {
@@ -3424,14 +3587,14 @@ static struct opcode group5[] = {
 static struct opcode group6[] = {
 	DI(Prot, sldt),
 	DI(Prot, str),
-	DI(Prot | Priv, lldt),
-	DI(Prot | Priv, ltr),
+	II(Prot | Priv | SrcMem16, em_lldt, lldt),
+	II(Prot | Priv | SrcMem16, em_ltr, ltr),
 	N, N, N, N,
 };
 
 static struct group_dual group7 = { {
-	DI(Mov | DstMem | Priv, sgdt),
-	DI(Mov | DstMem | Priv, sidt),
+	II(Mov | DstMem | Priv, em_sgdt, sgdt),
+	II(Mov | DstMem | Priv, em_sidt, sidt),
 	II(SrcMem | Priv, em_lgdt, lgdt),
 	II(SrcMem | Priv, em_lidt, lidt),
 	II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
@@ -3538,7 +3701,7 @@ static struct opcode opcode_table[256] = {
 	D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd),
 	I(SrcImmFAddr | No64, em_call_far), N,
 	II(ImplicitOps | Stack, em_pushf, pushf),
-	II(ImplicitOps | Stack, em_popf, popf), N, N,
+	II(ImplicitOps | Stack, em_popf, popf), N, I(ImplicitOps, em_lahf),
 	/* 0xA0 - 0xA7 */
 	I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov),
 	I2bv(DstMem | SrcAcc | Mov | MemAbs | PageTable, em_mov),
@@ -3561,7 +3724,8 @@ static struct opcode opcode_table[256] = {
 	I(DstReg | SrcMemFAddr | ModRM | No64 | Src2DS, em_lseg),
 	G(ByteOp, group11), G(0, group11),
 	/* 0xC8 - 0xCF */
-	N, N, N, I(ImplicitOps | Stack, em_ret_far),
+	I(Stack | SrcImmU16 | Src2ImmByte, em_enter), I(Stack, em_leave),
+	N, I(ImplicitOps | Stack, em_ret_far),
 	D(ImplicitOps), DI(SrcImmByte, intn),
 	D(ImplicitOps | No64), II(ImplicitOps, em_iret, iret),
 	/* 0xD0 - 0xD7 */
@@ -3635,7 +3799,7 @@ static struct opcode twobyte_table[256] = {
 	X16(D(ByteOp | DstMem | SrcNone | ModRM| Mov)),
 	/* 0xA0 - 0xA7 */
 	I(Stack | Src2FS, em_push_sreg), I(Stack | Src2FS, em_pop_sreg),
-	DI(ImplicitOps, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
+	II(ImplicitOps, em_cpuid, cpuid), I(DstMem | SrcReg | ModRM | BitOp, em_bt),
 	D(DstMem | SrcReg | Src2ImmByte | ModRM),
 	D(DstMem | SrcReg | Src2CL | ModRM), N, N,
 	/* 0xA8 - 0xAF */
@@ -3658,11 +3822,12 @@ static struct opcode twobyte_table[256] = {
 	I(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_btc),
 	I(DstReg | SrcMem | ModRM, em_bsf), I(DstReg | SrcMem | ModRM, em_bsr),
 	D(DstReg | SrcMem8 | ModRM | Mov), D(DstReg | SrcMem16 | ModRM | Mov),
-	/* 0xC0 - 0xCF */
+	/* 0xC0 - 0xC7 */
 	D2bv(DstMem | SrcReg | ModRM | Lock),
 	N, D(DstMem | SrcReg | ModRM | Mov),
 	N, N, N, GD(0, &group9),
-	N, N, N, N, N, N, N, N,
+	/* 0xC8 - 0xCF */
+	X8(I(DstReg, em_bswap)),
 	/* 0xD0 - 0xDF */
 	N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N,
 	/* 0xE0 - 0xEF */
@@ -4426,12 +4591,12 @@ twobyte_insn:
 		break;
 	case 0xb6 ... 0xb7: /* movzx */
 		ctxt->dst.bytes = ctxt->op_bytes;
-		ctxt->dst.val = (ctxt->d & ByteOp) ? (u8) ctxt->src.val
+		ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
 						       : (u16) ctxt->src.val;
 		break;
 	case 0xbe ... 0xbf: /* movsx */
 		ctxt->dst.bytes = ctxt->op_bytes;
-		ctxt->dst.val = (ctxt->d & ByteOp) ? (s8) ctxt->src.val :
+		ctxt->dst.val = (ctxt->src.bytes == 1) ? (s8) ctxt->src.val :
 							(s16) ctxt->src.val;
 		break;
 	case 0xc0 ... 0xc1: /* xadd */
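
For reference, a minimal user-space sketch of the masked stack-pointer update the patch introduces for ENTER/LEAVE. Only assign_masked()'s body is taken from the patch; the rsp variable, the 0xffff mask constant (what stack_mask() would return for a 16-bit stack, ss.d == 0), and the main() driver are illustrative assumptions, not kernel code.

#include <stdio.h>

typedef unsigned long ulong;

/* Same body as the helper added by the patch. */
static void assign_masked(ulong *dest, ulong src, ulong mask)
{
	*dest = (*dest & ~mask) | (src & mask);
}

int main(void)
{
	ulong rsp = 0x12340100;	/* bits 16+ must survive a 16-bit stack op */
	ulong mask16 = 0xffff;	/* hypothetical stack_mask() result, ss.d == 0 */

	/* ENTER with frame_size = 8 on a 16-bit stack: only SP moves. */
	assign_masked(&rsp, rsp - 8, mask16);
	printf("rsp = %#lx\n", rsp);	/* prints 0x123400f8 */
	return 0;
}

This is why em_enter() updates RSP through assign_masked() rather than assigning directly: on a 16-bit stack only the low word of the register may change.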