Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r--  arch/x86/kvm/emulate.c | 1754
1 file changed, 1189 insertions(+), 565 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0ad47b819a8b..d6e2477feb18 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -73,9 +73,14 @@
 #define MemAbs (1<<11) /* Memory operand is absolute displacement */
 #define String (1<<12) /* String instruction (rep capable) */
 #define Stack (1<<13) /* Stack instruction (push/pop) */
+#define GroupMask (7<<14) /* Opcode uses one of the group mechanisms */
 #define Group (1<<14) /* Bits 3:5 of modrm byte extend opcode */
-#define GroupDual (1<<15) /* Alternate decoding of mod == 3 */
+#define GroupDual (2<<14) /* Alternate decoding of mod == 3 */
+#define Prefix (3<<14) /* Instruction varies with 66/f2/f3 prefix */
+#define RMExt (4<<14) /* Opcode extension in ModRM r/m if mod == 3 */
+#define Sse (1<<17) /* SSE Vector instruction */
 /* Misc flags */
+#define Prot (1<<21) /* instruction generates #UD if not in prot-mode */
 #define VendorSpecific (1<<22) /* Vendor specific instruction */
 #define NoAccess (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
 #define Op3264 (1<<24) /* Operand is 64b in long mode, 32b otherwise */
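With GroupMask, the decoder reads the decode mechanism out of bits 14:16 instead of testing one flag per mechanism, which is what makes room for Prefix and RMExt. A minimal sketch of the resulting dispatch (simplified; the raw `c->modrm` byte and the `mod012` table name are assumptions about surrounding decode state not shown in this hunk):

	/* Sketch: group dispatch keyed on the 3-bit field (simplified). */
	if (c->d & GroupMask) {
		int goffset = (c->modrm >> 3) & 7;	/* ModRM reg field */
		switch (c->d & GroupMask) {
		case Group:	/* reg field picks one of 8 opcodes */
			opcode = opcode.u.group[goffset];
			break;
		case GroupDual:	/* mod == 3 selects the alternate table */
			if ((c->modrm >> 6) == 3)
				opcode = opcode.u.gdual->mod3[goffset];
			else
				opcode = opcode.u.gdual->mod012[goffset];
			break;
		}
		c->d |= opcode.flags;
	}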
@@ -102,11 +107,14 @@

 struct opcode {
 	u32 flags;
+	u8 intercept;
 	union {
 		int (*execute)(struct x86_emulate_ctxt *ctxt);
 		struct opcode *group;
 		struct group_dual *gdual;
+		struct gprefix *gprefix;
 	} u;
+	int (*check_perm)(struct x86_emulate_ctxt *ctxt);
 };

 struct group_dual {
@@ -114,6 +122,13 @@ struct group_dual {
 	struct opcode mod3[8];
 };

+struct gprefix {
+	struct opcode pfx_no;
+	struct opcode pfx_66;
+	struct opcode pfx_f2;
+	struct opcode pfx_f3;
+};
+
 /* EFLAGS bit definitions. */
 #define EFLG_ID (1<<21)
 #define EFLG_VIP (1<<20)
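struct gprefix parallels group_dual: one opcode per mandatory SIMD prefix. A hedged sketch of how a Prefix-flagged opcode could be resolved against the last 66/F2/F3 byte seen (the `simd_prefix` variable is an assumption; the real decode path may differ):

	/* Sketch: mandatory-prefix dispatch for a Prefix-flagged opcode. */
	switch (simd_prefix) {	/* 0, 0x66, 0xf2 or 0xf3 */
	case 0x00: opcode = opcode.u.gprefix->pfx_no; break;
	case 0x66: opcode = opcode.u.gprefix->pfx_66; break;
	case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break;
	case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break;
	}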
@@ -248,42 +263,42 @@ struct group_dual {
 		     "w", "r", _LO32, "r", "", "r")

 /* Instruction has three operands and one operand is stored in ECX register */
 #define __emulate_2op_cl(_op, _cl, _src, _dst, _eflags, _suffix, _type) \
 	do { \
 		unsigned long _tmp; \
 		_type _clv = (_cl).val; \
 		_type _srcv = (_src).val; \
 		_type _dstv = (_dst).val; \
 		\
 		__asm__ __volatile__ ( \
 			_PRE_EFLAGS("0", "5", "2") \
 			_op _suffix " %4,%1 \n" \
 			_POST_EFLAGS("0", "5", "2") \
 			: "=m" (_eflags), "+r" (_dstv), "=&r" (_tmp) \
 			: "c" (_clv) , "r" (_srcv), "i" (EFLAGS_MASK) \
 			); \
 		\
 		(_cl).val = (unsigned long) _clv; \
 		(_src).val = (unsigned long) _srcv; \
 		(_dst).val = (unsigned long) _dstv; \
 	} while (0)

 #define emulate_2op_cl(_op, _cl, _src, _dst, _eflags) \
 	do { \
 		switch ((_dst).bytes) { \
 		case 2: \
 			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
 					 "w", unsigned short); \
 			break; \
 		case 4: \
 			__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
 					 "l", unsigned int); \
 			break; \
 		case 8: \
 			ON64(__emulate_2op_cl(_op, _cl, _src, _dst, _eflags, \
 					      "q", unsigned long)); \
 			break; \
 		} \
 	} while (0)

 #define __emulate_1op(_op, _dst, _eflags, _suffix) \
@@ -346,13 +361,25 @@ struct group_dual {
 } while (0)

 /* instruction has only one source operand, destination is implicit (e.g. mul, div, imul, idiv) */
 #define emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags) \
 	do { \
 		switch((_src).bytes) { \
-		case 1: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "b"); break; \
-		case 2: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "w"); break; \
-		case 4: __emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "l"); break; \
-		case 8: ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, _eflags, "q")); break; \
+		case 1: \
+			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
+					      _eflags, "b"); \
+			break; \
+		case 2: \
+			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
+					      _eflags, "w"); \
+			break; \
+		case 4: \
+			__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
+					      _eflags, "l"); \
+			break; \
+		case 8: \
+			ON64(__emulate_1op_rax_rdx(_op, _src, _rax, _rdx, \
+						   _eflags, "q")); \
+			break; \
 		} \
 	} while (0)

@@ -388,13 +415,33 @@ struct group_dual {
 	(_type)_x; \
 })

 #define insn_fetch_arr(_arr, _size, _eip) \
 ({	rc = do_insn_fetch(ctxt, ops, (_eip), _arr, (_size)); \
 	if (rc != X86EMUL_CONTINUE) \
 		goto done; \
 	(_eip) += (_size); \
 })

+static int emulator_check_intercept(struct x86_emulate_ctxt *ctxt,
+				    enum x86_intercept intercept,
+				    enum x86_intercept_stage stage)
+{
+	struct x86_instruction_info info = {
+		.intercept  = intercept,
+		.rep_prefix = ctxt->decode.rep_prefix,
+		.modrm_mod  = ctxt->decode.modrm_mod,
+		.modrm_reg  = ctxt->decode.modrm_reg,
+		.modrm_rm   = ctxt->decode.modrm_rm,
+		.src_val    = ctxt->decode.src.val64,
+		.src_bytes  = ctxt->decode.src.bytes,
+		.dst_bytes  = ctxt->decode.dst.bytes,
+		.ad_bytes   = ctxt->decode.ad_bytes,
+		.next_rip   = ctxt->eip,
+	};
+
+	return ctxt->ops->intercept(ctxt, &info, stage);
+}
+
 static inline unsigned long ad_mask(struct decode_cache *c)
 {
 	return (1UL << (c->ad_bytes << 3)) - 1;
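The intercept hook lets the host refuse emulation of a decoded instruction, which nested virtualization needs when the guest hypervisor has intercepted it. A sketch of the expected call-site shape (the `guest_mode` test and the stage constant are assumptions about callers added elsewhere in this series):

	/* Sketch: ask the host to veto emulation before executing. */
	if (unlikely(ctxt->guest_mode) && c->intercept) {
		rc = emulator_check_intercept(ctxt, c->intercept,
					      X86_ICPT_PRE_EXCEPT);
		if (rc != X86EMUL_CONTINUE)
			goto done;
	}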
@@ -430,6 +477,13 @@ static inline void jmp_rel(struct decode_cache *c, int rel)
 	register_address_increment(c, &c->eip, rel);
 }

+static u32 desc_limit_scaled(struct desc_struct *desc)
+{
+	u32 limit = get_desc_limit(desc);
+
+	return desc->g ? (limit << 12) | 0xfff : limit;
+}
+
 static void set_seg_override(struct decode_cache *c, int seg)
 {
 	c->has_seg_override = true;
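With desc->g set, the 20-bit limit counts 4 KiB pages, so scaling shifts it up and fills the low 12 bits:

	/* Worked example of the scaling above:
	 *   raw limit 0x00003, g=1 -> (0x3 << 12) | 0xfff = 0x3fff
	 *     (four pages: byte offsets 0x0000..0x3fff are valid)
	 *   raw limit 0xfffff, g=1 -> 0xffffffff (full 4 GiB - 1)
	 *   raw limit 0xfffff, g=0 -> 0xfffff    (1 MiB - 1) */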
@@ -442,11 +496,10 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 		return 0;

-	return ops->get_cached_segment_base(seg, ctxt->vcpu);
+	return ops->get_cached_segment_base(ctxt, seg);
 }

 static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops,
 			     struct decode_cache *c)
 {
 	if (!c->has_seg_override)
@@ -455,18 +508,6 @@ static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
 	return c->seg_override;
 }

-static ulong linear(struct x86_emulate_ctxt *ctxt,
-		    struct segmented_address addr)
-{
-	struct decode_cache *c = &ctxt->decode;
-	ulong la;
-
-	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
-	if (c->ad_bytes != 8)
-		la &= (u32)-1;
-	return la;
-}
-
 static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 			     u32 error, bool valid)
 {
@@ -476,11 +517,21 @@ static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 	return X86EMUL_PROPAGATE_FAULT;
 }

+static int emulate_db(struct x86_emulate_ctxt *ctxt)
+{
+	return emulate_exception(ctxt, DB_VECTOR, 0, false);
+}
+
 static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 {
 	return emulate_exception(ctxt, GP_VECTOR, err, true);
 }

+static int emulate_ss(struct x86_emulate_ctxt *ctxt, int err)
+{
+	return emulate_exception(ctxt, SS_VECTOR, err, true);
+}
+
 static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 {
 	return emulate_exception(ctxt, UD_VECTOR, 0, false);
@@ -496,6 +547,128 @@ static int emulate_de(struct x86_emulate_ctxt *ctxt)
 	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 }

+static int emulate_nm(struct x86_emulate_ctxt *ctxt)
+{
+	return emulate_exception(ctxt, NM_VECTOR, 0, false);
+}
+
+static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+{
+	u16 selector;
+	struct desc_struct desc;
+
+	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
+	return selector;
+}
+
+static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
+				 unsigned seg)
+{
+	u16 dummy;
+	u32 base3;
+	struct desc_struct desc;
+
+	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
+	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
+}
+
+static int __linearize(struct x86_emulate_ctxt *ctxt,
+		       struct segmented_address addr,
+		       unsigned size, bool write, bool fetch,
+		       ulong *linear)
+{
+	struct decode_cache *c = &ctxt->decode;
+	struct desc_struct desc;
+	bool usable;
+	ulong la;
+	u32 lim;
+	u16 sel;
+	unsigned cpl, rpl;
+
+	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+	switch (ctxt->mode) {
+	case X86EMUL_MODE_REAL:
+		break;
+	case X86EMUL_MODE_PROT64:
+		if (((signed long)la << 16) >> 16 != la)
+			return emulate_gp(ctxt, 0);
+		break;
+	default:
+		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
+						addr.seg);
+		if (!usable)
+			goto bad;
+		/* code segment or read-only data segment */
+		if (((desc.type & 8) || !(desc.type & 2)) && write)
+			goto bad;
+		/* unreadable code segment */
+		if (!fetch && (desc.type & 8) && !(desc.type & 2))
+			goto bad;
+		lim = desc_limit_scaled(&desc);
+		if ((desc.type & 8) || !(desc.type & 4)) {
+			/* expand-up segment */
+			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+				goto bad;
+		} else {
+			/* expand-down segment */
+			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
+				goto bad;
+			lim = desc.d ? 0xffffffff : 0xffff;
+			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+				goto bad;
+		}
+		cpl = ctxt->ops->cpl(ctxt);
+		rpl = sel & 3;
+		cpl = max(cpl, rpl);
+		if (!(desc.type & 8)) {
+			/* data segment */
+			if (cpl > desc.dpl)
+				goto bad;
+		} else if ((desc.type & 8) && !(desc.type & 4)) {
+			/* nonconforming code segment */
+			if (cpl != desc.dpl)
+				goto bad;
+		} else if ((desc.type & 8) && (desc.type & 4)) {
+			/* conforming code segment */
+			if (cpl < desc.dpl)
+				goto bad;
+		}
+		break;
+	}
+	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8)
+		la &= (u32)-1;
+	*linear = la;
+	return X86EMUL_CONTINUE;
+bad:
+	if (addr.seg == VCPU_SREG_SS)
+		return emulate_ss(ctxt, addr.seg);
+	else
+		return emulate_gp(ctxt, addr.seg);
+}
+
+static int linearize(struct x86_emulate_ctxt *ctxt,
+		     struct segmented_address addr,
+		     unsigned size, bool write,
+		     ulong *linear)
+{
+	return __linearize(ctxt, addr, size, write, false, linear);
+}
+
+
+static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
+			      struct segmented_address addr,
+			      void *data,
+			      unsigned size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, false, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->read_std(ctxt, linear, data, size, &ctxt->exception);
+}
+
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 			      struct x86_emulate_ops *ops,
 			      unsigned long eip, u8 *dest)
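For expand-down segments the valid offsets lie above the limit, which is why both comparisons in __linearize are inverted before the 0xffff/0xffffffff ceiling is applied. A worked example under assumed descriptor values:

	/* Example: 16-bit expand-down stack segment, desc.d = 0,
	 * scaled limit lim = 0x0fff, so valid offsets are 0x1000..0xffff:
	 *   addr.ea = 0x0800 -> ea <= lim            -> goto bad (#SS/#GP)
	 *   addr.ea = 0x2000 -> above lim, <= 0xffff -> accepted */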
@@ -505,10 +678,15 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 	int size, cur_size;

 	if (eip == fc->end) {
+		unsigned long linear;
+		struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
 		cur_size = fc->end - fc->start;
 		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
-		rc = ops->fetch(ctxt->cs_base + eip, fc->data + cur_size,
-				size, ctxt->vcpu, &ctxt->exception);
+		rc = __linearize(ctxt, addr, size, false, true, &linear);
+		if (rc != X86EMUL_CONTINUE)
+			return rc;
+		rc = ops->fetch(ctxt, linear, fc->data + cur_size,
+				size, &ctxt->exception);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		fc->end += size;
@@ -551,7 +729,6 @@ static void *decode_register(u8 modrm_reg, unsigned long *regs,
 }

 static int read_descriptor(struct x86_emulate_ctxt *ctxt,
-			   struct x86_emulate_ops *ops,
 			   struct segmented_address addr,
 			   u16 *size, unsigned long *address, int op_bytes)
 {
@@ -560,13 +737,11 @@ static int read_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (op_bytes == 2)
 		op_bytes = 3;
 	*address = 0;
-	rc = ops->read_std(linear(ctxt, addr), (unsigned long *)size, 2,
-			   ctxt->vcpu, &ctxt->exception);
+	rc = segmented_read_std(ctxt, addr, size, 2);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	addr.ea += 2;
-	rc = ops->read_std(linear(ctxt, addr), address, op_bytes,
-			   ctxt->vcpu, &ctxt->exception);
+	rc = segmented_read_std(ctxt, addr, address, op_bytes);
 	return rc;
 }

@@ -623,7 +798,63 @@ static void fetch_register_operand(struct operand *op)
 	}
 }

-static void decode_register_operand(struct operand *op,
+static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
+{
+	ctxt->ops->get_fpu(ctxt);
+	switch (reg) {
+	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
+	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
+	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
+	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
+	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
+	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
+	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
+	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
+#ifdef CONFIG_X86_64
+	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
+	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
+	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
+	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
+	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
+	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
+	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
+	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
+#endif
+	default: BUG();
+	}
+	ctxt->ops->put_fpu(ctxt);
+}
+
+static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
+			  int reg)
+{
+	ctxt->ops->get_fpu(ctxt);
+	switch (reg) {
+	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
+	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
+	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
+	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
+	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
+	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
+	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
+	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
+#ifdef CONFIG_X86_64
+	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
+	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
+	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
+	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
+	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
+	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
+	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
+	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
+#endif
+	default: BUG();
+	}
+	ctxt->ops->put_fpu(ctxt);
+}
+
+static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
+				    struct operand *op,
 				    struct decode_cache *c,
 				    int inhibit_bytereg)
 {
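x86 inline asm cannot pick an XMM register from a run-time value, so read_sse_reg/write_sse_reg unroll one movdqu per register behind a switch, bracketed by get_fpu/put_fpu so the guest's FPU state is resident while the registers are touched. Usage is symmetric; a small illustration (register numbers are arbitrary):

	/* Illustration: shuttle a 128-bit operand through the helpers. */
	sse128_t val;
	read_sse_reg(ctxt, &val, 3);	/* val <- guest %xmm3 */
	/* ... operate on val ... */
	write_sse_reg(ctxt, &val, 5);	/* guest %xmm5 <- val */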
@@ -632,6 +863,15 @@ static void decode_register_operand(struct operand *op,

 	if (!(c->d & ModRM))
 		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
+
+	if (c->d & Sse) {
+		op->type = OP_XMM;
+		op->bytes = 16;
+		op->addr.xmm = reg;
+		read_sse_reg(ctxt, &op->vec_val, reg);
+		return;
+	}
+
 	op->type = OP_REG;
 	if ((c->d & ByteOp) && !inhibit_bytereg) {
 		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
@@ -671,6 +911,13 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
 		op->addr.reg = decode_register(c->modrm_rm,
 					       c->regs, c->d & ByteOp);
+		if (c->d & Sse) {
+			op->type = OP_XMM;
+			op->bytes = 16;
+			op->addr.xmm = c->modrm_rm;
+			read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
+			return rc;
+		}
 		fetch_register_operand(op);
 		return rc;
 	}
@@ -819,8 +1066,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
 	if (mc->pos < mc->end)
 		goto read_cached;

-	rc = ops->read_emulated(addr, mc->data + mc->end, n,
-				&ctxt->exception, ctxt->vcpu);
+	rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
+				&ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	mc->end += n;
@@ -834,6 +1081,50 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 }

+static int segmented_read(struct x86_emulate_ctxt *ctxt,
+			  struct segmented_address addr,
+			  void *data,
+			  unsigned size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, false, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return read_emulated(ctxt, ctxt->ops, linear, data, size);
+}
+
+static int segmented_write(struct x86_emulate_ctxt *ctxt,
+			   struct segmented_address addr,
+			   const void *data,
+			   unsigned size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, true, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->write_emulated(ctxt, linear, data, size,
+					 &ctxt->exception);
+}
+
+static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
+			     struct segmented_address addr,
+			     const void *orig_data, const void *data,
+			     unsigned size)
+{
+	int rc;
+	ulong linear;
+
+	rc = linearize(ctxt, addr, size, true, &linear);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+	return ctxt->ops->cmpxchg_emulated(ctxt, linear, orig_data, data,
+					   size, &ctxt->exception);
+}
+
 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 			   struct x86_emulate_ops *ops,
 			   unsigned int size, unsigned short port,
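All three helpers share one shape: translate the segment:offset through linearize(), which queues #GP/#SS on failure, and only then touch memory. Condensed, the pattern they instantiate looks like this (hypothetical helper, not in the patch):

	/* Sketch of the shared linearize-then-access pattern. */
	static int segmented_access(struct x86_emulate_ctxt *ctxt,
				    struct segmented_address addr,
				    void *data, unsigned size, bool write)
	{
		ulong linear;
		int rc = linearize(ctxt, addr, size, write, &linear);

		if (rc != X86EMUL_CONTINUE)
			return rc;	/* exception already recorded */
		/* ... read or write at 'linear' ... */
		return X86EMUL_CONTINUE;
	}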
@@ -854,7 +1145,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 		if (n == 0)
 			n = 1;
 		rc->pos = rc->end = 0;
-		if (!ops->pio_in_emulated(size, port, rc->data, n, ctxt->vcpu))
+		if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n))
 			return 0;
 		rc->end = n * size;
 	}
@@ -864,28 +1155,22 @@
 	return 1;
 }

-static u32 desc_limit_scaled(struct desc_struct *desc)
-{
-	u32 limit = get_desc_limit(desc);
-
-	return desc->g ? (limit << 12) | 0xfff : limit;
-}
-
 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 				     struct x86_emulate_ops *ops,
 				     u16 selector, struct desc_ptr *dt)
 {
 	if (selector & 1 << 2) {
 		struct desc_struct desc;
+		u16 sel;
+
 		memset (dt, 0, sizeof *dt);
-		if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
-						ctxt->vcpu))
+		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
 			return;

 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
 		dt->address = get_desc_base(&desc);
 	} else
-		ops->get_gdt(dt, ctxt->vcpu);
+		ops->get_gdt(ctxt, dt);
 }

 /* allowed just for 8 bytes segments */
@@ -903,8 +1188,7 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (dt.size < index * 8 + 7)
 		return emulate_gp(ctxt, selector & 0xfffc);
 	addr = dt.address + index * 8;
-	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
-			    &ctxt->exception);
+	ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);

 	return ret;
 }
@@ -925,8 +1209,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		return emulate_gp(ctxt, selector & 0xfffc);

 	addr = dt.address + index * 8;
-	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
-			     &ctxt->exception);
+	ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);

 	return ret;
 }
@@ -986,7 +1269,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,

 	rpl = selector & 3;
 	dpl = seg_desc.dpl;
-	cpl = ops->cpl(ctxt->vcpu);
+	cpl = ops->cpl(ctxt);

 	switch (seg) {
 	case VCPU_SREG_SS:
@@ -1042,8 +1325,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		return ret;
 	}
 load:
-	ops->set_segment_selector(selector, seg, ctxt->vcpu);
-	ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
+	ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
 	return X86EMUL_CONTINUE;
 exception:
 	emulate_exception(ctxt, err_vec, err_code, true);
@@ -1069,8 +1351,7 @@ static void write_register_operand(struct operand *op)
 	}
 }

-static inline int writeback(struct x86_emulate_ctxt *ctxt,
-			    struct x86_emulate_ops *ops)
+static int writeback(struct x86_emulate_ctxt *ctxt)
 {
 	int rc;
 	struct decode_cache *c = &ctxt->decode;
@@ -1081,23 +1362,22 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 		break;
 	case OP_MEM:
 		if (c->lock_prefix)
-			rc = ops->cmpxchg_emulated(
-					linear(ctxt, c->dst.addr.mem),
-					&c->dst.orig_val,
-					&c->dst.val,
-					c->dst.bytes,
-					&ctxt->exception,
-					ctxt->vcpu);
+			rc = segmented_cmpxchg(ctxt,
+					       c->dst.addr.mem,
+					       &c->dst.orig_val,
+					       &c->dst.val,
+					       c->dst.bytes);
 		else
-			rc = ops->write_emulated(
-					linear(ctxt, c->dst.addr.mem),
-					&c->dst.val,
-					c->dst.bytes,
-					&ctxt->exception,
-					ctxt->vcpu);
+			rc = segmented_write(ctxt,
+					     c->dst.addr.mem,
+					     &c->dst.val,
+					     c->dst.bytes);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		break;
+	case OP_XMM:
+		write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
+		break;
 	case OP_NONE:
 		/* no writeback */
 		break;
@@ -1107,21 +1387,21 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 }

-static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
-				struct x86_emulate_ops *ops)
+static int em_push(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
+	struct segmented_address addr;

-	c->dst.type = OP_MEM;
-	c->dst.bytes = c->op_bytes;
-	c->dst.val = c->src.val;
 	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
-	c->dst.addr.mem.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
-	c->dst.addr.mem.seg = VCPU_SREG_SS;
+	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
+	addr.seg = VCPU_SREG_SS;
+
+	/* Disable writeback. */
+	c->dst.type = OP_NONE;
+	return segmented_write(ctxt, addr, &c->src.val, c->op_bytes);
 }

 static int emulate_pop(struct x86_emulate_ctxt *ctxt,
-		       struct x86_emulate_ops *ops,
 		       void *dest, int len)
 {
 	struct decode_cache *c = &ctxt->decode;
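em_push now performs the store itself instead of staging the value in c->dst, so it must set dst.type to OP_NONE or the common writeback path would store it a second time. Callers load c->src.val and call it directly, as emulate_int_real does later in this patch:

	/* Usage shape (taken from the emulate_int_real hunk below). */
	c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
	rc = em_push(ctxt);
	if (rc != X86EMUL_CONTINUE)
		return rc;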
@@ -1130,7 +1410,7 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,

 	addr.ea = register_address(c, c->regs[VCPU_REGS_RSP]);
 	addr.seg = VCPU_SREG_SS;
-	rc = read_emulated(ctxt, ops, linear(ctxt, addr), dest, len);
+	rc = segmented_read(ctxt, addr, dest, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

@@ -1138,6 +1418,13 @@
 	return rc;
 }

+static int em_pop(struct x86_emulate_ctxt *ctxt)
+{
+	struct decode_cache *c = &ctxt->decode;
+
+	return emulate_pop(ctxt, &c->dst.val, c->op_bytes);
+}
+
 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
 			struct x86_emulate_ops *ops,
 			void *dest, int len)
@@ -1145,9 +1432,9 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	unsigned long val, change_mask;
 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-	int cpl = ops->cpl(ctxt->vcpu);
+	int cpl = ops->cpl(ctxt);

-	rc = emulate_pop(ctxt, ops, &val, len);
+	rc = emulate_pop(ctxt, &val, len);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

@@ -1179,14 +1466,24 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }

-static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
-			      struct x86_emulate_ops *ops, int seg)
+static int em_popf(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;

-	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
+	c->dst.type = OP_REG;
+	c->dst.addr.reg = &ctxt->eflags;
+	c->dst.bytes = c->op_bytes;
+	return emulate_popf(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
+}

-	emulate_push(ctxt, ops);
+static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
+			     struct x86_emulate_ops *ops, int seg)
+{
+	struct decode_cache *c = &ctxt->decode;
+
+	c->src.val = get_segment_selector(ctxt, seg);
+
+	return em_push(ctxt);
 }

 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
@@ -1196,7 +1493,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
 	unsigned long selector;
 	int rc;

-	rc = emulate_pop(ctxt, ops, &selector, c->op_bytes);
+	rc = emulate_pop(ctxt, &selector, c->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

@@ -1204,8 +1501,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }

-static int emulate_pusha(struct x86_emulate_ctxt *ctxt,
-			 struct x86_emulate_ops *ops)
+static int em_pusha(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
 	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
@@ -1216,23 +1512,25 @@
 		(reg == VCPU_REGS_RSP) ?
 		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);

-		emulate_push(ctxt, ops);
-
-		rc = writeback(ctxt, ops);
+		rc = em_push(ctxt);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;

 		++reg;
 	}

-	/* Disable writeback. */
-	c->dst.type = OP_NONE;
-
 	return rc;
 }

-static int emulate_popa(struct x86_emulate_ctxt *ctxt,
-			struct x86_emulate_ops *ops)
+static int em_pushf(struct x86_emulate_ctxt *ctxt)
+{
+	struct decode_cache *c = &ctxt->decode;
+
+	c->src.val = (unsigned long)ctxt->eflags;
+	return em_push(ctxt);
+}
+
+static int em_popa(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc = X86EMUL_CONTINUE;
@@ -1245,7 +1543,7 @@ static int emulate_popa(struct x86_emulate_ctxt *ctxt,
 			--reg;
 		}

-		rc = emulate_pop(ctxt, ops, &c->regs[reg], c->op_bytes);
+		rc = emulate_pop(ctxt, &c->regs[reg], c->op_bytes);
 		if (rc != X86EMUL_CONTINUE)
 			break;
 		--reg;
@@ -1265,37 +1563,32 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,

 	/* TODO: Add limit checks */
 	c->src.val = ctxt->eflags;
-	emulate_push(ctxt, ops);
-	rc = writeback(ctxt, ops);
+	rc = em_push(ctxt);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

 	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);

-	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
-	emulate_push(ctxt, ops);
-	rc = writeback(ctxt, ops);
+	c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
+	rc = em_push(ctxt);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

 	c->src.val = c->eip;
-	emulate_push(ctxt, ops);
-	rc = writeback(ctxt, ops);
+	rc = em_push(ctxt);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

-	c->dst.type = OP_NONE;
-
-	ops->get_idt(&dt, ctxt->vcpu);
+	ops->get_idt(ctxt, &dt);

 	eip_addr = dt.address + (irq << 2);
 	cs_addr = dt.address + (irq << 2) + 2;

-	rc = ops->read_std(cs_addr, &cs, 2, ctxt->vcpu, &ctxt->exception);
+	rc = ops->read_std(ctxt, cs_addr, &cs, 2, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

-	rc = ops->read_std(eip_addr, &eip, 2, ctxt->vcpu, &ctxt->exception);
+	rc = ops->read_std(ctxt, eip_addr, &eip, 2, &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;

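The real-mode IVT is an array of 4-byte entries, 2 bytes of IP followed by 2 bytes of CS, which is where the irq << 2 arithmetic comes from:

	/* Worked example: irq = 8, dt.address = 0 (the reset default):
	 *   eip_addr = 0 + (8 << 2)     = 32  (new IP, 2 bytes)
	 *   cs_addr  = 0 + (8 << 2) + 2 = 34  (new CS, 2 bytes) */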
@@ -1339,7 +1632,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,

 	/* TODO: Add stack limit check */

-	rc = emulate_pop(ctxt, ops, &temp_eip, c->op_bytes);
+	rc = emulate_pop(ctxt, &temp_eip, c->op_bytes);

 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -1347,12 +1640,12 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
 	if (temp_eip & ~0xffff)
 		return emulate_gp(ctxt, 0);

-	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
+	rc = emulate_pop(ctxt, &cs, c->op_bytes);

 	if (rc != X86EMUL_CONTINUE)
 		return rc;

-	rc = emulate_pop(ctxt, ops, &temp_eflags, c->op_bytes);
+	rc = emulate_pop(ctxt, &temp_eflags, c->op_bytes);

 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -1394,15 +1687,31 @@ static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
 	}
 }

-static inline int emulate_grp1a(struct x86_emulate_ctxt *ctxt,
-				struct x86_emulate_ops *ops)
+static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
+{
+	struct decode_cache *c = &ctxt->decode;
+	int rc;
+	unsigned short sel;
+
+	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
+
+	rc = load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS);
+	if (rc != X86EMUL_CONTINUE)
+		return rc;
+
+	c->eip = 0;
+	memcpy(&c->eip, c->src.valptr, c->op_bytes);
+	return X86EMUL_CONTINUE;
+}
+
+static int em_grp1a(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;

-	return emulate_pop(ctxt, ops, &c->dst.val, c->dst.bytes);
+	return emulate_pop(ctxt, &c->dst.val, c->dst.bytes);
 }

-static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
+static int em_grp2(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
 	switch (c->modrm_reg) {
@@ -1429,10 +1738,10 @@ static inline void emulate_grp2(struct x86_emulate_ctxt *ctxt)
 		emulate_2op_SrcB("sar", c->src, c->dst, ctxt->eflags);
 		break;
 	}
+	return X86EMUL_CONTINUE;
 }

-static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
-			       struct x86_emulate_ops *ops)
+static int em_grp3(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
 	unsigned long *rax = &c->regs[VCPU_REGS_RAX];
@@ -1471,10 +1780,10 @@ static inline int emulate_grp3(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 }

-static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
-				struct x86_emulate_ops *ops)
+static int em_grp45(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
+	int rc = X86EMUL_CONTINUE;

 	switch (c->modrm_reg) {
 	case 0: /* inc */
@@ -1488,21 +1797,23 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
 		old_eip = c->eip;
 		c->eip = c->src.val;
 		c->src.val = old_eip;
-		emulate_push(ctxt, ops);
+		rc = em_push(ctxt);
 		break;
 	}
 	case 4: /* jmp abs */
 		c->eip = c->src.val;
 		break;
+	case 5: /* jmp far */
+		rc = em_jmp_far(ctxt);
+		break;
 	case 6: /* push */
-		emulate_push(ctxt, ops);
+		rc = em_push(ctxt);
 		break;
 	}
-	return X86EMUL_CONTINUE;
+	return rc;
 }

-static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
-			       struct x86_emulate_ops *ops)
+static int em_grp9(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
 	u64 old = c->dst.orig_val64;
@@ -1528,12 +1839,12 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	unsigned long cs;

-	rc = emulate_pop(ctxt, ops, &c->eip, c->op_bytes);
+	rc = emulate_pop(ctxt, &c->eip, c->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	if (c->op_bytes == 4)
 		c->eip = (u32)c->eip;
-	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
+	rc = emulate_pop(ctxt, &cs, c->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
@@ -1562,8 +1873,10 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 			struct x86_emulate_ops *ops, struct desc_struct *cs,
 			struct desc_struct *ss)
 {
+	u16 selector;
+
 	memset(cs, 0, sizeof(struct desc_struct));
-	ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
+	ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
 	memset(ss, 0, sizeof(struct desc_struct));

 	cs->l = 0; /* will be adjusted later */
@@ -1593,44 +1906,44 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	struct desc_struct cs, ss;
 	u64 msr_data;
 	u16 cs_sel, ss_sel;
+	u64 efer = 0;

 	/* syscall is not available in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
 	    ctxt->mode == X86EMUL_MODE_VM86)
 		return emulate_ud(ctxt);

+	ops->get_msr(ctxt, MSR_EFER, &efer);
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);

-	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+	ops->get_msr(ctxt, MSR_STAR, &msr_data);
 	msr_data >>= 32;
 	cs_sel = (u16)(msr_data & 0xfffc);
 	ss_sel = (u16)(msr_data + 8);

-	if (is_long_mode(ctxt->vcpu)) {
+	if (efer & EFER_LMA) {
 		cs.d = 0;
 		cs.l = 1;
 	}
-	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
-	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

 	c->regs[VCPU_REGS_RCX] = c->eip;
-	if (is_long_mode(ctxt->vcpu)) {
+	if (efer & EFER_LMA) {
 #ifdef CONFIG_X86_64
 		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;

-		ops->get_msr(ctxt->vcpu,
+		ops->get_msr(ctxt,
 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
 		c->eip = msr_data;

-		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
+		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
 		ctxt->eflags &= ~(msr_data | EFLG_RF);
 #endif
 	} else {
 		/* legacy mode */
-		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+		ops->get_msr(ctxt, MSR_STAR, &msr_data);
 		c->eip = (u32)msr_data;

 		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
@@ -1646,7 +1959,9 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	struct desc_struct cs, ss;
 	u64 msr_data;
 	u16 cs_sel, ss_sel;
+	u64 efer = 0;

+	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 	/* inject #GP if in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL)
 		return emulate_gp(ctxt, 0);
@@ -1659,7 +1974,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)

 	setup_syscalls_segments(ctxt, ops, &cs, &ss);

-	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT32:
 		if ((msr_data & 0xfffc) == 0x0)
@@ -1676,21 +1991,18 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	cs_sel &= ~SELECTOR_RPL_MASK;
 	ss_sel = cs_sel + 8;
 	ss_sel &= ~SELECTOR_RPL_MASK;
-	if (ctxt->mode == X86EMUL_MODE_PROT64
-		|| is_long_mode(ctxt->vcpu)) {
+	if (ctxt->mode == X86EMUL_MODE_PROT64 || (efer & EFER_LMA)) {
 		cs.d = 0;
 		cs.l = 1;
 	}

-	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
-	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

-	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
+	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
 	c->eip = msr_data;

-	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
+	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
 	c->regs[VCPU_REGS_RSP] = msr_data;

 	return X86EMUL_CONTINUE;
@@ -1719,7 +2031,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)

 	cs.dpl = 3;
 	ss.dpl = 3;
-	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (usermode) {
 	case X86EMUL_MODE_PROT32:
 		cs_sel = (u16)(msr_data + 16);
@@ -1739,10 +2051,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	cs_sel |= SELECTOR_RPL_MASK;
 	ss_sel |= SELECTOR_RPL_MASK;

-	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
-	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);

 	c->eip = c->regs[VCPU_REGS_RDX];
 	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
@@ -1759,7 +2069,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt, | |||
1759 | if (ctxt->mode == X86EMUL_MODE_VM86) | 2069 | if (ctxt->mode == X86EMUL_MODE_VM86) |
1760 | return true; | 2070 | return true; |
1761 | iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; | 2071 | iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT; |
1762 | return ops->cpl(ctxt->vcpu) > iopl; | 2072 | return ops->cpl(ctxt) > iopl; |
1763 | } | 2073 | } |
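emulator_bad_iopl() reduces to comparing CPL against the two-bit IOPL field in EFLAGS (bits 13:12); a standalone restatement of the same test:

#include <stdbool.h>
#include <stdio.h>

#define IOPL_SHIFT	12
#define X86_EFLAGS_IOPL	(3UL << IOPL_SHIFT)

static bool bad_iopl(unsigned long eflags, unsigned cpl)
{
	unsigned iopl = (eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;

	return cpl > iopl;	/* VM86 is always "bad" in the real helper */
}

int main(void)
{
	printf("%d\n", bad_iopl(0x3000, 3));	/* IOPL=3, CPL=3 -> 0 (ok) */
	printf("%d\n", bad_iopl(0x0000, 3));	/* IOPL=0, CPL=3 -> 1 (#GP path) */
	return 0;
}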
1764 | 2074 | ||
1765 | static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, | 2075 | static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, |
@@ -1769,11 +2079,11 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, | |||
1769 | struct desc_struct tr_seg; | 2079 | struct desc_struct tr_seg; |
1770 | u32 base3; | 2080 | u32 base3; |
1771 | int r; | 2081 | int r; |
1772 | u16 io_bitmap_ptr, perm, bit_idx = port & 0x7; | 2082 | u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7; |
1773 | unsigned mask = (1 << len) - 1; | 2083 | unsigned mask = (1 << len) - 1; |
1774 | unsigned long base; | 2084 | unsigned long base; |
1775 | 2085 | ||
1776 | ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu); | 2086 | ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR); |
1777 | if (!tr_seg.p) | 2087 | if (!tr_seg.p) |
1778 | return false; | 2088 | return false; |
1779 | if (desc_limit_scaled(&tr_seg) < 103) | 2089 | if (desc_limit_scaled(&tr_seg) < 103) |
@@ -1782,13 +2092,12 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, | |||
1782 | #ifdef CONFIG_X86_64 | 2092 | #ifdef CONFIG_X86_64 |
1783 | base |= ((u64)base3) << 32; | 2093 | base |= ((u64)base3) << 32; |
1784 | #endif | 2094 | #endif |
1785 | r = ops->read_std(base + 102, &io_bitmap_ptr, 2, ctxt->vcpu, NULL); | 2095 | r = ops->read_std(ctxt, base + 102, &io_bitmap_ptr, 2, NULL); |
1786 | if (r != X86EMUL_CONTINUE) | 2096 | if (r != X86EMUL_CONTINUE) |
1787 | return false; | 2097 | return false; |
1788 | if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) | 2098 | if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg)) |
1789 | return false; | 2099 | return false; |
1790 | r = ops->read_std(base + io_bitmap_ptr + port/8, &perm, 2, ctxt->vcpu, | 2100 | r = ops->read_std(ctxt, base + io_bitmap_ptr + port/8, &perm, 2, NULL); |
1791 | NULL); | ||
1792 | if (r != X86EMUL_CONTINUE) | 2101 | if (r != X86EMUL_CONTINUE) |
1793 | return false; | 2102 | return false; |
1794 | if ((perm >> bit_idx) & mask) | 2103 | if ((perm >> bit_idx) & mask) |
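The permission walk above is pure byte/bit arithmetic over the TSS: the 16-bit bitmap offset sits at TSS byte 102, and port N maps to bit (N & 7) of byte N/8 past that offset, with two bytes read so a multi-byte access straddling a byte boundary is still covered. A self-contained sketch over a toy in-memory TSS (flat buffer, layout assumed per the code above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool io_allowed(const uint8_t *tss, uint32_t limit,
		       uint16_t port, int len)
{
	uint16_t io_base, perm;
	unsigned mask = (1u << len) - 1;

	if (limit < 103)
		return false;
	io_base = tss[102] | (uint16_t)tss[103] << 8;
	if (io_base + port / 8 > limit)
		return false;
	perm = tss[io_base + port / 8] |
	       (uint16_t)tss[io_base + port / 8 + 1] << 8;
	return !((perm >> (port & 7)) & mask);
}

int main(void)
{
	static uint8_t tss[256];

	tss[102] = 104;		/* bitmap right after the TSS header */
	printf("%d\n", io_allowed(tss, sizeof(tss) - 1, 0x60, 1)); /* 1 */
	tss[104 + 0x60 / 8] = 1 << (0x60 & 7);
	printf("%d\n", io_allowed(tss, sizeof(tss) - 1, 0x60, 1)); /* 0 */
	return 0;
}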
@@ -1829,11 +2138,11 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, | |||
1829 | tss->si = c->regs[VCPU_REGS_RSI]; | 2138 | tss->si = c->regs[VCPU_REGS_RSI]; |
1830 | tss->di = c->regs[VCPU_REGS_RDI]; | 2139 | tss->di = c->regs[VCPU_REGS_RDI]; |
1831 | 2140 | ||
1832 | tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu); | 2141 | tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); |
1833 | tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu); | 2142 | tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); |
1834 | tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu); | 2143 | tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); |
1835 | tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu); | 2144 | tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); |
1836 | tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu); | 2145 | tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR); |
1837 | } | 2146 | } |
1838 | 2147 | ||
1839 | static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, | 2148 | static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, |
@@ -1858,11 +2167,11 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, | |||
1858 | * SDM says that segment selectors are loaded before segment | 2167 | * SDM says that segment selectors are loaded before segment |
1859 | * descriptors | 2168 | * descriptors |
1860 | */ | 2169 | */ |
1861 | ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu); | 2170 | set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); |
1862 | ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu); | 2171 | set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); |
1863 | ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu); | 2172 | set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); |
1864 | ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu); | 2173 | set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); |
1865 | ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu); | 2174 | set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); |
1866 | 2175 | ||
1867 | /* | 2176 | /* |
1868 | 	 * Now load segment descriptors. If a fault happens at this stage	 | 2177 | 	 * Now load segment descriptors. If a fault happens at this stage |
@@ -1896,7 +2205,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, | |||
1896 | int ret; | 2205 | int ret; |
1897 | u32 new_tss_base = get_desc_base(new_desc); | 2206 | u32 new_tss_base = get_desc_base(new_desc); |
1898 | 2207 | ||
1899 | ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2208 | ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
1900 | &ctxt->exception); | 2209 | &ctxt->exception); |
1901 | if (ret != X86EMUL_CONTINUE) | 2210 | if (ret != X86EMUL_CONTINUE) |
1902 | /* FIXME: need to provide precise fault address */ | 2211 | /* FIXME: need to provide precise fault address */ |
@@ -1904,13 +2213,13 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, | |||
1904 | 2213 | ||
1905 | save_state_to_tss16(ctxt, ops, &tss_seg); | 2214 | save_state_to_tss16(ctxt, ops, &tss_seg); |
1906 | 2215 | ||
1907 | ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2216 | ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
1908 | &ctxt->exception); | 2217 | &ctxt->exception); |
1909 | if (ret != X86EMUL_CONTINUE) | 2218 | if (ret != X86EMUL_CONTINUE) |
1910 | /* FIXME: need to provide precise fault address */ | 2219 | /* FIXME: need to provide precise fault address */ |
1911 | return ret; | 2220 | return ret; |
1912 | 2221 | ||
1913 | ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2222 | ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, |
1914 | &ctxt->exception); | 2223 | &ctxt->exception); |
1915 | if (ret != X86EMUL_CONTINUE) | 2224 | if (ret != X86EMUL_CONTINUE) |
1916 | /* FIXME: need to provide precise fault address */ | 2225 | /* FIXME: need to provide precise fault address */ |
@@ -1919,10 +2228,10 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt, | |||
1919 | if (old_tss_sel != 0xffff) { | 2228 | if (old_tss_sel != 0xffff) { |
1920 | tss_seg.prev_task_link = old_tss_sel; | 2229 | tss_seg.prev_task_link = old_tss_sel; |
1921 | 2230 | ||
1922 | ret = ops->write_std(new_tss_base, | 2231 | ret = ops->write_std(ctxt, new_tss_base, |
1923 | &tss_seg.prev_task_link, | 2232 | &tss_seg.prev_task_link, |
1924 | sizeof tss_seg.prev_task_link, | 2233 | sizeof tss_seg.prev_task_link, |
1925 | ctxt->vcpu, &ctxt->exception); | 2234 | &ctxt->exception); |
1926 | if (ret != X86EMUL_CONTINUE) | 2235 | if (ret != X86EMUL_CONTINUE) |
1927 | /* FIXME: need to provide precise fault address */ | 2236 | /* FIXME: need to provide precise fault address */ |
1928 | return ret; | 2237 | return ret; |
@@ -1937,7 +2246,7 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, | |||
1937 | { | 2246 | { |
1938 | struct decode_cache *c = &ctxt->decode; | 2247 | struct decode_cache *c = &ctxt->decode; |
1939 | 2248 | ||
1940 | tss->cr3 = ops->get_cr(3, ctxt->vcpu); | 2249 | tss->cr3 = ops->get_cr(ctxt, 3); |
1941 | tss->eip = c->eip; | 2250 | tss->eip = c->eip; |
1942 | tss->eflags = ctxt->eflags; | 2251 | tss->eflags = ctxt->eflags; |
1943 | tss->eax = c->regs[VCPU_REGS_RAX]; | 2252 | tss->eax = c->regs[VCPU_REGS_RAX]; |
@@ -1949,13 +2258,13 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, | |||
1949 | tss->esi = c->regs[VCPU_REGS_RSI]; | 2258 | tss->esi = c->regs[VCPU_REGS_RSI]; |
1950 | tss->edi = c->regs[VCPU_REGS_RDI]; | 2259 | tss->edi = c->regs[VCPU_REGS_RDI]; |
1951 | 2260 | ||
1952 | tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu); | 2261 | tss->es = get_segment_selector(ctxt, VCPU_SREG_ES); |
1953 | tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu); | 2262 | tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS); |
1954 | tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu); | 2263 | tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS); |
1955 | tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu); | 2264 | tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS); |
1956 | tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu); | 2265 | tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS); |
1957 | tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu); | 2266 | tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS); |
1958 | tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu); | 2267 | tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR); |
1959 | } | 2268 | } |
1960 | 2269 | ||
1961 | static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, | 2270 | static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, |
@@ -1965,7 +2274,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, | |||
1965 | struct decode_cache *c = &ctxt->decode; | 2274 | struct decode_cache *c = &ctxt->decode; |
1966 | int ret; | 2275 | int ret; |
1967 | 2276 | ||
1968 | if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) | 2277 | if (ops->set_cr(ctxt, 3, tss->cr3)) |
1969 | return emulate_gp(ctxt, 0); | 2278 | return emulate_gp(ctxt, 0); |
1970 | c->eip = tss->eip; | 2279 | c->eip = tss->eip; |
1971 | ctxt->eflags = tss->eflags | 2; | 2280 | ctxt->eflags = tss->eflags | 2; |
@@ -1982,13 +2291,13 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, | |||
1982 | * SDM says that segment selectors are loaded before segment | 2291 | * SDM says that segment selectors are loaded before segment |
1983 | * descriptors | 2292 | * descriptors |
1984 | */ | 2293 | */ |
1985 | ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu); | 2294 | set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); |
1986 | ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu); | 2295 | set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); |
1987 | ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu); | 2296 | set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); |
1988 | ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu); | 2297 | set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); |
1989 | ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu); | 2298 | set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); |
1990 | ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu); | 2299 | set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); |
1991 | ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu); | 2300 | set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); |
1992 | 2301 | ||
1993 | /* | 2302 | /* |
1994 | 	 * Now load segment descriptors. If a fault happens at this stage	 | 2303 | 	 * Now load segment descriptors. If a fault happens at this stage |
@@ -2028,7 +2337,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, | |||
2028 | int ret; | 2337 | int ret; |
2029 | u32 new_tss_base = get_desc_base(new_desc); | 2338 | u32 new_tss_base = get_desc_base(new_desc); |
2030 | 2339 | ||
2031 | ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2340 | ret = ops->read_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
2032 | &ctxt->exception); | 2341 | &ctxt->exception); |
2033 | if (ret != X86EMUL_CONTINUE) | 2342 | if (ret != X86EMUL_CONTINUE) |
2034 | /* FIXME: need to provide precise fault address */ | 2343 | /* FIXME: need to provide precise fault address */ |
@@ -2036,13 +2345,13 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, | |||
2036 | 2345 | ||
2037 | save_state_to_tss32(ctxt, ops, &tss_seg); | 2346 | save_state_to_tss32(ctxt, ops, &tss_seg); |
2038 | 2347 | ||
2039 | ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2348 | ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg, |
2040 | &ctxt->exception); | 2349 | &ctxt->exception); |
2041 | if (ret != X86EMUL_CONTINUE) | 2350 | if (ret != X86EMUL_CONTINUE) |
2042 | /* FIXME: need to provide precise fault address */ | 2351 | /* FIXME: need to provide precise fault address */ |
2043 | return ret; | 2352 | return ret; |
2044 | 2353 | ||
2045 | ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu, | 2354 | ret = ops->read_std(ctxt, new_tss_base, &tss_seg, sizeof tss_seg, |
2046 | &ctxt->exception); | 2355 | &ctxt->exception); |
2047 | if (ret != X86EMUL_CONTINUE) | 2356 | if (ret != X86EMUL_CONTINUE) |
2048 | /* FIXME: need to provide precise fault address */ | 2357 | /* FIXME: need to provide precise fault address */ |
@@ -2051,10 +2360,10 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt, | |||
2051 | if (old_tss_sel != 0xffff) { | 2360 | if (old_tss_sel != 0xffff) { |
2052 | tss_seg.prev_task_link = old_tss_sel; | 2361 | tss_seg.prev_task_link = old_tss_sel; |
2053 | 2362 | ||
2054 | ret = ops->write_std(new_tss_base, | 2363 | ret = ops->write_std(ctxt, new_tss_base, |
2055 | &tss_seg.prev_task_link, | 2364 | &tss_seg.prev_task_link, |
2056 | sizeof tss_seg.prev_task_link, | 2365 | sizeof tss_seg.prev_task_link, |
2057 | ctxt->vcpu, &ctxt->exception); | 2366 | &ctxt->exception); |
2058 | if (ret != X86EMUL_CONTINUE) | 2367 | if (ret != X86EMUL_CONTINUE) |
2059 | /* FIXME: need to provide precise fault address */ | 2368 | /* FIXME: need to provide precise fault address */ |
2060 | return ret; | 2369 | return ret; |
@@ -2070,9 +2379,9 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2070 | { | 2379 | { |
2071 | struct desc_struct curr_tss_desc, next_tss_desc; | 2380 | struct desc_struct curr_tss_desc, next_tss_desc; |
2072 | int ret; | 2381 | int ret; |
2073 | u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu); | 2382 | u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR); |
2074 | ulong old_tss_base = | 2383 | ulong old_tss_base = |
2075 | ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu); | 2384 | ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); |
2076 | u32 desc_limit; | 2385 | u32 desc_limit; |
2077 | 2386 | ||
2078 | /* FIXME: old_tss_base == ~0 ? */ | 2387 | /* FIXME: old_tss_base == ~0 ? */ |
@@ -2088,7 +2397,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2088 | 2397 | ||
2089 | if (reason != TASK_SWITCH_IRET) { | 2398 | if (reason != TASK_SWITCH_IRET) { |
2090 | if ((tss_selector & 3) > next_tss_desc.dpl || | 2399 | if ((tss_selector & 3) > next_tss_desc.dpl || |
2091 | ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) | 2400 | ops->cpl(ctxt) > next_tss_desc.dpl) |
2092 | return emulate_gp(ctxt, 0); | 2401 | return emulate_gp(ctxt, 0); |
2093 | } | 2402 | } |
2094 | 2403 | ||
@@ -2132,9 +2441,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2132 | &next_tss_desc); | 2441 | &next_tss_desc); |
2133 | } | 2442 | } |
2134 | 2443 | ||
2135 | ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu); | 2444 | ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS); |
2136 | ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu); | 2445 | ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR); |
2137 | ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu); | ||
2138 | 2446 | ||
2139 | if (has_error_code) { | 2447 | if (has_error_code) { |
2140 | struct decode_cache *c = &ctxt->decode; | 2448 | struct decode_cache *c = &ctxt->decode; |
@@ -2142,7 +2450,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2142 | c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; | 2450 | c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2; |
2143 | c->lock_prefix = 0; | 2451 | c->lock_prefix = 0; |
2144 | c->src.val = (unsigned long) error_code; | 2452 | c->src.val = (unsigned long) error_code; |
2145 | emulate_push(ctxt, ops); | 2453 | ret = em_push(ctxt); |
2146 | } | 2454 | } |
2147 | 2455 | ||
2148 | return ret; | 2456 | return ret; |
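The error-code push at the tail of this hunk sizes its operand from the incoming TSS descriptor's type field. A tiny illustration of that discriminator (type values per the SDM: 16-bit TSS = 1/3, 32-bit TSS = 9/11, so bit 3 is the 16/32 split):

#include <stdio.h>

static int err_code_bytes(unsigned type)
{
	return (type & 8) ? 4 : 2;	/* matches c->op_bytes above */
}

int main(void)
{
	printf("32-bit TSS (type 9): %d bytes\n", err_code_bytes(9));
	printf("16-bit TSS (type 3): %d bytes\n", err_code_bytes(3));
	return 0;
}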
@@ -2162,13 +2470,10 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2162 | rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason, | 2470 | rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason, |
2163 | has_error_code, error_code); | 2471 | has_error_code, error_code); |
2164 | 2472 | ||
2165 | if (rc == X86EMUL_CONTINUE) { | 2473 | if (rc == X86EMUL_CONTINUE) |
2166 | rc = writeback(ctxt, ops); | 2474 | ctxt->eip = c->eip; |
2167 | if (rc == X86EMUL_CONTINUE) | ||
2168 | ctxt->eip = c->eip; | ||
2169 | } | ||
2170 | 2475 | ||
2171 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; | 2476 | return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; |
2172 | } | 2477 | } |
2173 | 2478 | ||
2174 | static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg, | 2479 | static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg, |
@@ -2182,12 +2487,6 @@ static void string_addr_inc(struct x86_emulate_ctxt *ctxt, unsigned seg, | |||
2182 | op->addr.mem.seg = seg; | 2487 | op->addr.mem.seg = seg; |
2183 | } | 2488 | } |
2184 | 2489 | ||
2185 | static int em_push(struct x86_emulate_ctxt *ctxt) | ||
2186 | { | ||
2187 | emulate_push(ctxt, ctxt->ops); | ||
2188 | return X86EMUL_CONTINUE; | ||
2189 | } | ||
2190 | |||
2191 | static int em_das(struct x86_emulate_ctxt *ctxt) | 2490 | static int em_das(struct x86_emulate_ctxt *ctxt) |
2192 | { | 2491 | { |
2193 | struct decode_cache *c = &ctxt->decode; | 2492 | struct decode_cache *c = &ctxt->decode; |
@@ -2234,7 +2533,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) | |||
2234 | ulong old_eip; | 2533 | ulong old_eip; |
2235 | int rc; | 2534 | int rc; |
2236 | 2535 | ||
2237 | old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu); | 2536 | old_cs = get_segment_selector(ctxt, VCPU_SREG_CS); |
2238 | old_eip = c->eip; | 2537 | old_eip = c->eip; |
2239 | 2538 | ||
2240 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); | 2539 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); |
@@ -2245,20 +2544,12 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) | |||
2245 | memcpy(&c->eip, c->src.valptr, c->op_bytes); | 2544 | memcpy(&c->eip, c->src.valptr, c->op_bytes); |
2246 | 2545 | ||
2247 | c->src.val = old_cs; | 2546 | c->src.val = old_cs; |
2248 | emulate_push(ctxt, ctxt->ops); | 2547 | rc = em_push(ctxt); |
2249 | rc = writeback(ctxt, ctxt->ops); | ||
2250 | if (rc != X86EMUL_CONTINUE) | 2548 | if (rc != X86EMUL_CONTINUE) |
2251 | return rc; | 2549 | return rc; |
2252 | 2550 | ||
2253 | c->src.val = old_eip; | 2551 | c->src.val = old_eip; |
2254 | emulate_push(ctxt, ctxt->ops); | 2552 | return em_push(ctxt); |
2255 | rc = writeback(ctxt, ctxt->ops); | ||
2256 | if (rc != X86EMUL_CONTINUE) | ||
2257 | return rc; | ||
2258 | |||
2259 | c->dst.type = OP_NONE; | ||
2260 | |||
2261 | return X86EMUL_CONTINUE; | ||
2262 | } | 2553 | } |
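With the em_push() refactor, em_call_far() becomes two pushes that each commit immediately and report a status, instead of the old emulate_push()+writeback() pairs. A toy model of the resulting push order (plain array, not the emulator's segmented stack ops):

#include <stdint.h>
#include <stdio.h>

#define X86EMUL_CONTINUE 0

static uint32_t stack[16];
static unsigned rsp = 16;

static int em_push(uint32_t val)	/* toy stand-in for the helper */
{
	if (rsp == 0)
		return -1;		/* stand-in for a faulting write */
	stack[--rsp] = val;
	return X86EMUL_CONTINUE;
}

int main(void)
{
	uint32_t old_cs = 0x10, old_eip = 0x1234;

	if (em_push(old_cs) != X86EMUL_CONTINUE)	/* old CS first */
		return 1;
	if (em_push(old_eip) != X86EMUL_CONTINUE)	/* then old EIP */
		return 1;
	printf("top: eip=%#x, below it: cs=%#x\n", stack[rsp], stack[rsp + 1]);
	return 0;
}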
2263 | 2554 | ||
2264 | static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) | 2555 | static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) |
@@ -2269,13 +2560,79 @@ static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) | |||
2269 | c->dst.type = OP_REG; | 2560 | c->dst.type = OP_REG; |
2270 | c->dst.addr.reg = &c->eip; | 2561 | c->dst.addr.reg = &c->eip; |
2271 | c->dst.bytes = c->op_bytes; | 2562 | c->dst.bytes = c->op_bytes; |
2272 | rc = emulate_pop(ctxt, ctxt->ops, &c->dst.val, c->op_bytes); | 2563 | rc = emulate_pop(ctxt, &c->dst.val, c->op_bytes); |
2273 | if (rc != X86EMUL_CONTINUE) | 2564 | if (rc != X86EMUL_CONTINUE) |
2274 | return rc; | 2565 | return rc; |
2275 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val); | 2566 | register_address_increment(c, &c->regs[VCPU_REGS_RSP], c->src.val); |
2276 | return X86EMUL_CONTINUE; | 2567 | return X86EMUL_CONTINUE; |
2277 | } | 2568 | } |
2278 | 2569 | ||
2570 | static int em_add(struct x86_emulate_ctxt *ctxt) | ||
2571 | { | ||
2572 | struct decode_cache *c = &ctxt->decode; | ||
2573 | |||
2574 | emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); | ||
2575 | return X86EMUL_CONTINUE; | ||
2576 | } | ||
2577 | |||
2578 | static int em_or(struct x86_emulate_ctxt *ctxt) | ||
2579 | { | ||
2580 | struct decode_cache *c = &ctxt->decode; | ||
2581 | |||
2582 | emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); | ||
2583 | return X86EMUL_CONTINUE; | ||
2584 | } | ||
2585 | |||
2586 | static int em_adc(struct x86_emulate_ctxt *ctxt) | ||
2587 | { | ||
2588 | struct decode_cache *c = &ctxt->decode; | ||
2589 | |||
2590 | emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags); | ||
2591 | return X86EMUL_CONTINUE; | ||
2592 | } | ||
2593 | |||
2594 | static int em_sbb(struct x86_emulate_ctxt *ctxt) | ||
2595 | { | ||
2596 | struct decode_cache *c = &ctxt->decode; | ||
2597 | |||
2598 | emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); | ||
2599 | return X86EMUL_CONTINUE; | ||
2600 | } | ||
2601 | |||
2602 | static int em_and(struct x86_emulate_ctxt *ctxt) | ||
2603 | { | ||
2604 | struct decode_cache *c = &ctxt->decode; | ||
2605 | |||
2606 | emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags); | ||
2607 | return X86EMUL_CONTINUE; | ||
2608 | } | ||
2609 | |||
2610 | static int em_sub(struct x86_emulate_ctxt *ctxt) | ||
2611 | { | ||
2612 | struct decode_cache *c = &ctxt->decode; | ||
2613 | |||
2614 | emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags); | ||
2615 | return X86EMUL_CONTINUE; | ||
2616 | } | ||
2617 | |||
2618 | static int em_xor(struct x86_emulate_ctxt *ctxt) | ||
2619 | { | ||
2620 | struct decode_cache *c = &ctxt->decode; | ||
2621 | |||
2622 | emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags); | ||
2623 | return X86EMUL_CONTINUE; | ||
2624 | } | ||
2625 | |||
2626 | static int em_cmp(struct x86_emulate_ctxt *ctxt) | ||
2627 | { | ||
2628 | struct decode_cache *c = &ctxt->decode; | ||
2629 | |||
2630 | emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags); | ||
2631 | /* Disable writeback. */ | ||
2632 | c->dst.type = OP_NONE; | ||
2633 | return X86EMUL_CONTINUE; | ||
2634 | } | ||
2635 | |||
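em_cmp() above runs the same ALU macro as em_sub() but then disables writeback: CMP is a subtraction whose result is discarded, only the flags survive. A user-space illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t dst = 5, src = 7;
	uint8_t diff = dst - src;	/* what SUB would write back */
	int cf = dst < src;		/* borrow -> carry flag */
	int zf = diff == 0;		/* zero flag */

	printf("SUB stores %#x; CMP keeps dst=%u with CF=%d ZF=%d\n",
	       diff, dst, cf, zf);
	return 0;
}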
2279 | static int em_imul(struct x86_emulate_ctxt *ctxt) | 2636 | static int em_imul(struct x86_emulate_ctxt *ctxt) |
2280 | { | 2637 | { |
2281 | struct decode_cache *c = &ctxt->decode; | 2638 | struct decode_cache *c = &ctxt->decode; |
@@ -2306,13 +2663,10 @@ static int em_cwd(struct x86_emulate_ctxt *ctxt) | |||
2306 | 2663 | ||
2307 | static int em_rdtsc(struct x86_emulate_ctxt *ctxt) | 2664 | static int em_rdtsc(struct x86_emulate_ctxt *ctxt) |
2308 | { | 2665 | { |
2309 | unsigned cpl = ctxt->ops->cpl(ctxt->vcpu); | ||
2310 | struct decode_cache *c = &ctxt->decode; | 2666 | struct decode_cache *c = &ctxt->decode; |
2311 | u64 tsc = 0; | 2667 | u64 tsc = 0; |
2312 | 2668 | ||
2313 | if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) | 2669 | ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc); |
2314 | return emulate_gp(ctxt, 0); | ||
2315 | ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc); | ||
2316 | c->regs[VCPU_REGS_RAX] = (u32)tsc; | 2670 | c->regs[VCPU_REGS_RAX] = (u32)tsc; |
2317 | c->regs[VCPU_REGS_RDX] = tsc >> 32; | 2671 | c->regs[VCPU_REGS_RDX] = tsc >> 32; |
2318 | return X86EMUL_CONTINUE; | 2672 | return X86EMUL_CONTINUE; |
@@ -2325,22 +2679,375 @@ static int em_mov(struct x86_emulate_ctxt *ctxt) | |||
2325 | return X86EMUL_CONTINUE; | 2679 | return X86EMUL_CONTINUE; |
2326 | } | 2680 | } |
2327 | 2681 | ||
2682 | static int em_movdqu(struct x86_emulate_ctxt *ctxt) | ||
2683 | { | ||
2684 | struct decode_cache *c = &ctxt->decode; | ||
2685 | memcpy(&c->dst.vec_val, &c->src.vec_val, c->op_bytes); | ||
2686 | return X86EMUL_CONTINUE; | ||
2687 | } | ||
2688 | |||
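em_movdqu() reduces MOVDQU to an unconditional 16-byte copy between vector operands, which is why the decoder later forces op_bytes to 16 for Sse-flagged instructions. A trivial standalone equivalent:

#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char src[16] = "0123456789abcdef";	/* exactly 16 bytes */
	unsigned char dst[16];

	memcpy(dst, src, sizeof(dst));	/* the emulation is just this copy */
	printf("%.16s\n", dst);
	return 0;
}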
2689 | static int em_invlpg(struct x86_emulate_ctxt *ctxt) | ||
2690 | { | ||
2691 | struct decode_cache *c = &ctxt->decode; | ||
2692 | int rc; | ||
2693 | ulong linear; | ||
2694 | |||
2695 | rc = linearize(ctxt, c->src.addr.mem, 1, false, &linear); | ||
2696 | if (rc == X86EMUL_CONTINUE) | ||
2697 | ctxt->ops->invlpg(ctxt, linear); | ||
2698 | /* Disable writeback. */ | ||
2699 | c->dst.type = OP_NONE; | ||
2700 | return X86EMUL_CONTINUE; | ||
2701 | } | ||
2702 | |||
2703 | static int em_clts(struct x86_emulate_ctxt *ctxt) | ||
2704 | { | ||
2705 | ulong cr0; | ||
2706 | |||
2707 | cr0 = ctxt->ops->get_cr(ctxt, 0); | ||
2708 | cr0 &= ~X86_CR0_TS; | ||
2709 | ctxt->ops->set_cr(ctxt, 0, cr0); | ||
2710 | return X86EMUL_CONTINUE; | ||
2711 | } | ||
2712 | |||
2713 | static int em_vmcall(struct x86_emulate_ctxt *ctxt) | ||
2714 | { | ||
2715 | struct decode_cache *c = &ctxt->decode; | ||
2716 | int rc; | ||
2717 | |||
2718 | if (c->modrm_mod != 3 || c->modrm_rm != 1) | ||
2719 | return X86EMUL_UNHANDLEABLE; | ||
2720 | |||
2721 | rc = ctxt->ops->fix_hypercall(ctxt); | ||
2722 | if (rc != X86EMUL_CONTINUE) | ||
2723 | return rc; | ||
2724 | |||
2725 | /* Let the processor re-execute the fixed hypercall */ | ||
2726 | c->eip = ctxt->eip; | ||
2727 | /* Disable writeback. */ | ||
2728 | c->dst.type = OP_NONE; | ||
2729 | return X86EMUL_CONTINUE; | ||
2730 | } | ||
2731 | |||
2732 | static int em_lgdt(struct x86_emulate_ctxt *ctxt) | ||
2733 | { | ||
2734 | struct decode_cache *c = &ctxt->decode; | ||
2735 | struct desc_ptr desc_ptr; | ||
2736 | int rc; | ||
2737 | |||
2738 | rc = read_descriptor(ctxt, c->src.addr.mem, | ||
2739 | &desc_ptr.size, &desc_ptr.address, | ||
2740 | c->op_bytes); | ||
2741 | if (rc != X86EMUL_CONTINUE) | ||
2742 | return rc; | ||
2743 | ctxt->ops->set_gdt(ctxt, &desc_ptr); | ||
2744 | /* Disable writeback. */ | ||
2745 | c->dst.type = OP_NONE; | ||
2746 | return X86EMUL_CONTINUE; | ||
2747 | } | ||
2748 | |||
2749 | static int em_vmmcall(struct x86_emulate_ctxt *ctxt) | ||
2750 | { | ||
2751 | struct decode_cache *c = &ctxt->decode; | ||
2752 | int rc; | ||
2753 | |||
2754 | rc = ctxt->ops->fix_hypercall(ctxt); | ||
2755 | |||
2756 | /* Disable writeback. */ | ||
2757 | c->dst.type = OP_NONE; | ||
2758 | return rc; | ||
2759 | } | ||
2760 | |||
2761 | static int em_lidt(struct x86_emulate_ctxt *ctxt) | ||
2762 | { | ||
2763 | struct decode_cache *c = &ctxt->decode; | ||
2764 | struct desc_ptr desc_ptr; | ||
2765 | int rc; | ||
2766 | |||
2767 | rc = read_descriptor(ctxt, c->src.addr.mem, | ||
2768 | &desc_ptr.size, &desc_ptr.address, | ||
2769 | c->op_bytes); | ||
2770 | if (rc != X86EMUL_CONTINUE) | ||
2771 | return rc; | ||
2772 | ctxt->ops->set_idt(ctxt, &desc_ptr); | ||
2773 | /* Disable writeback. */ | ||
2774 | c->dst.type = OP_NONE; | ||
2775 | return X86EMUL_CONTINUE; | ||
2776 | } | ||
2777 | |||
2778 | static int em_smsw(struct x86_emulate_ctxt *ctxt) | ||
2779 | { | ||
2780 | struct decode_cache *c = &ctxt->decode; | ||
2781 | |||
2782 | c->dst.bytes = 2; | ||
2783 | c->dst.val = ctxt->ops->get_cr(ctxt, 0); | ||
2784 | return X86EMUL_CONTINUE; | ||
2785 | } | ||
2786 | |||
2787 | static int em_lmsw(struct x86_emulate_ctxt *ctxt) | ||
2788 | { | ||
2789 | struct decode_cache *c = &ctxt->decode; | ||
2790 | ctxt->ops->set_cr(ctxt, 0, (ctxt->ops->get_cr(ctxt, 0) & ~0x0eul) | ||
2791 | | (c->src.val & 0x0f)); | ||
2792 | c->dst.type = OP_NONE; | ||
2793 | return X86EMUL_CONTINUE; | ||
2794 | } | ||
2795 | |||
2796 | static bool valid_cr(int nr) | ||
2797 | { | ||
2798 | switch (nr) { | ||
2799 | case 0: | ||
2800 | case 2 ... 4: | ||
2801 | case 8: | ||
2802 | return true; | ||
2803 | default: | ||
2804 | return false; | ||
2805 | } | ||
2806 | } | ||
2807 | |||
2808 | static int check_cr_read(struct x86_emulate_ctxt *ctxt) | ||
2809 | { | ||
2810 | struct decode_cache *c = &ctxt->decode; | ||
2811 | |||
2812 | if (!valid_cr(c->modrm_reg)) | ||
2813 | return emulate_ud(ctxt); | ||
2814 | |||
2815 | return X86EMUL_CONTINUE; | ||
2816 | } | ||
2817 | |||
2818 | static int check_cr_write(struct x86_emulate_ctxt *ctxt) | ||
2819 | { | ||
2820 | struct decode_cache *c = &ctxt->decode; | ||
2821 | u64 new_val = c->src.val64; | ||
2822 | int cr = c->modrm_reg; | ||
2823 | u64 efer = 0; | ||
2824 | |||
2825 | static u64 cr_reserved_bits[] = { | ||
2826 | 0xffffffff00000000ULL, | ||
2827 | 0, 0, 0, /* CR3 checked later */ | ||
2828 | CR4_RESERVED_BITS, | ||
2829 | 0, 0, 0, | ||
2830 | CR8_RESERVED_BITS, | ||
2831 | }; | ||
2832 | |||
2833 | if (!valid_cr(cr)) | ||
2834 | return emulate_ud(ctxt); | ||
2835 | |||
2836 | if (new_val & cr_reserved_bits[cr]) | ||
2837 | return emulate_gp(ctxt, 0); | ||
2838 | |||
2839 | switch (cr) { | ||
2840 | case 0: { | ||
2841 | u64 cr4; | ||
2842 | if (((new_val & X86_CR0_PG) && !(new_val & X86_CR0_PE)) || | ||
2843 | ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD))) | ||
2844 | return emulate_gp(ctxt, 0); | ||
2845 | |||
2846 | cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2847 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | ||
2848 | |||
2849 | if ((new_val & X86_CR0_PG) && (efer & EFER_LME) && | ||
2850 | !(cr4 & X86_CR4_PAE)) | ||
2851 | return emulate_gp(ctxt, 0); | ||
2852 | |||
2853 | break; | ||
2854 | } | ||
2855 | case 3: { | ||
2856 | u64 rsvd = 0; | ||
2857 | |||
2858 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | ||
2859 | if (efer & EFER_LMA) | ||
2860 | rsvd = CR3_L_MODE_RESERVED_BITS; | ||
2861 | else if (ctxt->ops->get_cr(ctxt, 4) & X86_CR4_PAE) | ||
2862 | rsvd = CR3_PAE_RESERVED_BITS; | ||
2863 | else if (ctxt->ops->get_cr(ctxt, 0) & X86_CR0_PG) | ||
2864 | rsvd = CR3_NONPAE_RESERVED_BITS; | ||
2865 | |||
2866 | if (new_val & rsvd) | ||
2867 | return emulate_gp(ctxt, 0); | ||
2868 | |||
2869 | break; | ||
2870 | } | ||
2871 | case 4: { | ||
2872 | u64 cr4; | ||
2873 | |||
2874 | cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2875 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | ||
2876 | |||
2877 | if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE)) | ||
2878 | return emulate_gp(ctxt, 0); | ||
2879 | |||
2880 | break; | ||
2881 | } | ||
2882 | } | ||
2883 | |||
2884 | return X86EMUL_CONTINUE; | ||
2885 | } | ||
2886 | |||
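check_cr_write()'s CR0 case enforces two architectural consistency rules before the write is allowed through; a standalone sketch of just those predicates (bit positions per the x86 definitions):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define X86_CR0_PE (1UL << 0)
#define X86_CR0_NW (1UL << 29)
#define X86_CR0_CD (1UL << 30)
#define X86_CR0_PG (1UL << 31)

static bool cr0_write_faults(uint64_t v)	/* true -> #GP(0) */
{
	return ((v & X86_CR0_PG) && !(v & X86_CR0_PE)) ||
	       ((v & X86_CR0_NW) && !(v & X86_CR0_CD));
}

int main(void)
{
	printf("%d\n", cr0_write_faults(X86_CR0_PG));			/* 1 */
	printf("%d\n", cr0_write_faults(X86_CR0_PG | X86_CR0_PE));	/* 0 */
	return 0;
}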
2887 | static int check_dr7_gd(struct x86_emulate_ctxt *ctxt) | ||
2888 | { | ||
2889 | unsigned long dr7; | ||
2890 | |||
2891 | ctxt->ops->get_dr(ctxt, 7, &dr7); | ||
2892 | |||
2893 | 	/* Check if DR7.GD (general detect) is set */ | ||
2894 | return dr7 & (1 << 13); | ||
2895 | } | ||
2896 | |||
2897 | static int check_dr_read(struct x86_emulate_ctxt *ctxt) | ||
2898 | { | ||
2899 | struct decode_cache *c = &ctxt->decode; | ||
2900 | int dr = c->modrm_reg; | ||
2901 | u64 cr4; | ||
2902 | |||
2903 | if (dr > 7) | ||
2904 | return emulate_ud(ctxt); | ||
2905 | |||
2906 | cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2907 | if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5)) | ||
2908 | return emulate_ud(ctxt); | ||
2909 | |||
2910 | if (check_dr7_gd(ctxt)) | ||
2911 | return emulate_db(ctxt); | ||
2912 | |||
2913 | return X86EMUL_CONTINUE; | ||
2914 | } | ||
2915 | |||
2916 | static int check_dr_write(struct x86_emulate_ctxt *ctxt) | ||
2917 | { | ||
2918 | struct decode_cache *c = &ctxt->decode; | ||
2919 | u64 new_val = c->src.val64; | ||
2920 | int dr = c->modrm_reg; | ||
2921 | |||
2922 | if ((dr == 6 || dr == 7) && (new_val & 0xffffffff00000000ULL)) | ||
2923 | return emulate_gp(ctxt, 0); | ||
2924 | |||
2925 | return check_dr_read(ctxt); | ||
2926 | } | ||
2927 | |||
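The DR hooks above encode three rules: register numbers above 7 are #UD, DR4/DR5 (legacy aliases of DR6/DR7) are #UD once CR4.DE is set, and any access while DR7.GD is set raises #DB. A compact user-space restatement (CR4.DE is bit 3, DR7.GD bit 13):

#include <stdbool.h>
#include <stdio.h>

#define X86_CR4_DE (1UL << 3)

static bool dr_access_ud(unsigned long cr4, int dr)
{
	return dr > 7 || ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5));
}

static bool dr7_gd(unsigned long dr7)
{
	return dr7 & (1UL << 13);	/* general-detect -> #DB */
}

int main(void)
{
	printf("%d %d\n", dr_access_ud(X86_CR4_DE, 4), dr7_gd(1UL << 13));
	return 0;
}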
2928 | static int check_svme(struct x86_emulate_ctxt *ctxt) | ||
2929 | { | ||
2930 | u64 efer; | ||
2931 | |||
2932 | ctxt->ops->get_msr(ctxt, MSR_EFER, &efer); | ||
2933 | |||
2934 | if (!(efer & EFER_SVME)) | ||
2935 | return emulate_ud(ctxt); | ||
2936 | |||
2937 | return X86EMUL_CONTINUE; | ||
2938 | } | ||
2939 | |||
2940 | static int check_svme_pa(struct x86_emulate_ctxt *ctxt) | ||
2941 | { | ||
2942 | u64 rax = ctxt->decode.regs[VCPU_REGS_RAX]; | ||
2943 | |||
2944 | /* Valid physical address? */ | ||
2945 | if (rax & 0xffff000000000000ULL) | ||
2946 | return emulate_gp(ctxt, 0); | ||
2947 | |||
2948 | return check_svme(ctxt); | ||
2949 | } | ||
2950 | |||
2951 | static int check_rdtsc(struct x86_emulate_ctxt *ctxt) | ||
2952 | { | ||
2953 | u64 cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2954 | |||
2955 | if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt)) | ||
2956 | return emulate_ud(ctxt); | ||
2957 | |||
2958 | return X86EMUL_CONTINUE; | ||
2959 | } | ||
2960 | |||
2961 | static int check_rdpmc(struct x86_emulate_ctxt *ctxt) | ||
2962 | { | ||
2963 | u64 cr4 = ctxt->ops->get_cr(ctxt, 4); | ||
2964 | u64 rcx = ctxt->decode.regs[VCPU_REGS_RCX]; | ||
2965 | |||
2966 | if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) || | ||
2967 | (rcx > 3)) | ||
2968 | return emulate_gp(ctxt, 0); | ||
2969 | |||
2970 | return X86EMUL_CONTINUE; | ||
2971 | } | ||
2972 | |||
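check_rdtsc() and check_rdpmc() gate on CR4 bits plus CPL; standalone sketches of the same predicates (note the hunk raises #UD for a forbidden RDTSC and #GP(0) for RDPMC):

#include <stdbool.h>
#include <stdio.h>

#define X86_CR4_TSD (1UL << 2)
#define X86_CR4_PCE (1UL << 8)

static bool rdtsc_faults(unsigned long cr4, unsigned cpl)
{
	return (cr4 & X86_CR4_TSD) && cpl;
}

static bool rdpmc_faults(unsigned long cr4, unsigned cpl, unsigned long rcx)
{
	return (!(cr4 & X86_CR4_PCE) && cpl) || rcx > 3;
}

int main(void)
{
	printf("%d %d\n", rdtsc_faults(X86_CR4_TSD, 3),
	       rdpmc_faults(0, 3, 0));
	return 0;
}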
2973 | static int check_perm_in(struct x86_emulate_ctxt *ctxt) | ||
2974 | { | ||
2975 | struct decode_cache *c = &ctxt->decode; | ||
2976 | |||
2977 | c->dst.bytes = min(c->dst.bytes, 4u); | ||
2978 | if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes)) | ||
2979 | return emulate_gp(ctxt, 0); | ||
2980 | |||
2981 | return X86EMUL_CONTINUE; | ||
2982 | } | ||
2983 | |||
2984 | static int check_perm_out(struct x86_emulate_ctxt *ctxt) | ||
2985 | { | ||
2986 | struct decode_cache *c = &ctxt->decode; | ||
2987 | |||
2988 | c->src.bytes = min(c->src.bytes, 4u); | ||
2989 | if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes)) | ||
2990 | return emulate_gp(ctxt, 0); | ||
2991 | |||
2992 | return X86EMUL_CONTINUE; | ||
2993 | } | ||
2994 | |||
2328 | #define D(_y) { .flags = (_y) } | 2995 | #define D(_y) { .flags = (_y) } |
2996 | #define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i } | ||
2997 | #define DIP(_y, _i, _p) { .flags = (_y), .intercept = x86_intercept_##_i, \ | ||
2998 | .check_perm = (_p) } | ||
2329 | #define N D(0) | 2999 | #define N D(0) |
3000 | #define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } | ||
2330 | #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) } | 3001 | #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) } |
2331 | #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) } | 3002 | #define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) } |
2332 | #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } | 3003 | #define I(_f, _e) { .flags = (_f), .u.execute = (_e) } |
3004 | #define II(_f, _e, _i) \ | ||
3005 | { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i } | ||
3006 | #define IIP(_f, _e, _i, _p) \ | ||
3007 | { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i, \ | ||
3008 | .check_perm = (_p) } | ||
3009 | #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) } | ||
2333 | 3010 | ||
2334 | #define D2bv(_f) D((_f) | ByteOp), D(_f) | 3011 | #define D2bv(_f) D((_f) | ByteOp), D(_f) |
3012 | #define D2bvIP(_f, _i, _p) DIP((_f) | ByteOp, _i, _p), DIP(_f, _i, _p) | ||
2335 | #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) | 3013 | #define I2bv(_f, _e) I((_f) | ByteOp, _e), I(_f, _e) |
2336 | 3014 | ||
2337 | #define D6ALU(_f) D2bv((_f) | DstMem | SrcReg | ModRM), \ | 3015 | #define I6ALU(_f, _e) I2bv((_f) | DstMem | SrcReg | ModRM, _e), \ |
2338 | D2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock), \ | 3016 | I2bv(((_f) | DstReg | SrcMem | ModRM) & ~Lock, _e), \ |
2339 | D2bv(((_f) & ~Lock) | DstAcc | SrcImm) | 3017 | I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) |
2340 | 3018 | ||
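The new DI/DIP/II/IIP/GP macros only fill the extra struct opcode fields (intercept id, permission hook); a toy mirror of the expansion, compilable on its own (names and values here are assumed for illustration, not the kernel's):

#include <stdio.h>

struct opcode {
	unsigned flags;
	int intercept;
	int (*check_perm)(void);
};

enum { x86_intercept_vmrun = 1 };	/* stand-in for the real enum */

#define DIP(_y, _i, _p) \
	{ .flags = (_y), .intercept = x86_intercept_##_i, .check_perm = (_p) }

static int check_svme_pa(void) { return 0; }	/* dummy hook */

int main(void)
{
	struct opcode op = DIP(0x40, vmrun, check_svme_pa);

	printf("flags=%#x intercept=%d has_perm=%d\n",
	       op.flags, op.intercept, op.check_perm != NULL);
	return 0;
}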
3019 | static struct opcode group7_rm1[] = { | ||
3020 | DI(SrcNone | ModRM | Priv, monitor), | ||
3021 | DI(SrcNone | ModRM | Priv, mwait), | ||
3022 | N, N, N, N, N, N, | ||
3023 | }; | ||
3024 | |||
3025 | static struct opcode group7_rm3[] = { | ||
3026 | DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa), | ||
3027 | II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall), | ||
3028 | DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa), | ||
3029 | DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa), | ||
3030 | DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme), | ||
3031 | DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme), | ||
3032 | DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme), | ||
3033 | DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme), | ||
3034 | }; | ||
3035 | |||
3036 | static struct opcode group7_rm7[] = { | ||
3037 | N, | ||
3038 | DIP(SrcNone | ModRM, rdtscp, check_rdtsc), | ||
3039 | N, N, N, N, N, N, | ||
3040 | }; | ||
2341 | 3041 | ||
2342 | static struct opcode group1[] = { | 3042 | static struct opcode group1[] = { |
2343 | X7(D(Lock)), N | 3043 | I(Lock, em_add), |
3044 | I(Lock, em_or), | ||
3045 | I(Lock, em_adc), | ||
3046 | I(Lock, em_sbb), | ||
3047 | I(Lock, em_and), | ||
3048 | I(Lock, em_sub), | ||
3049 | I(Lock, em_xor), | ||
3050 | I(0, em_cmp), | ||
2344 | }; | 3051 | }; |
2345 | 3052 | ||
2346 | static struct opcode group1A[] = { | 3053 | static struct opcode group1A[] = { |
@@ -2366,16 +3073,28 @@ static struct opcode group5[] = { | |||
2366 | D(SrcMem | ModRM | Stack), N, | 3073 | D(SrcMem | ModRM | Stack), N, |
2367 | }; | 3074 | }; |
2368 | 3075 | ||
3076 | static struct opcode group6[] = { | ||
3077 | DI(ModRM | Prot, sldt), | ||
3078 | DI(ModRM | Prot, str), | ||
3079 | DI(ModRM | Prot | Priv, lldt), | ||
3080 | DI(ModRM | Prot | Priv, ltr), | ||
3081 | N, N, N, N, | ||
3082 | }; | ||
3083 | |||
2369 | static struct group_dual group7 = { { | 3084 | static struct group_dual group7 = { { |
2370 | N, N, D(ModRM | SrcMem | Priv), D(ModRM | SrcMem | Priv), | 3085 | DI(ModRM | Mov | DstMem | Priv, sgdt), |
2371 | D(SrcNone | ModRM | DstMem | Mov), N, | 3086 | DI(ModRM | Mov | DstMem | Priv, sidt), |
2372 | D(SrcMem16 | ModRM | Mov | Priv), | 3087 | II(ModRM | SrcMem | Priv, em_lgdt, lgdt), |
2373 | D(SrcMem | ModRM | ByteOp | Priv | NoAccess), | 3088 | II(ModRM | SrcMem | Priv, em_lidt, lidt), |
3089 | II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N, | ||
3090 | II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), | ||
3091 | II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg), | ||
2374 | }, { | 3092 | }, { |
2375 | D(SrcNone | ModRM | Priv | VendorSpecific), N, | 3093 | I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall), |
2376 | N, D(SrcNone | ModRM | Priv | VendorSpecific), | 3094 | EXT(0, group7_rm1), |
2377 | D(SrcNone | ModRM | DstMem | Mov), N, | 3095 | N, EXT(0, group7_rm3), |
2378 | D(SrcMem16 | ModRM | Mov | Priv), N, | 3096 | II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N, |
3097 | II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7), | ||
2379 | } }; | 3098 | } }; |
2380 | 3099 | ||
2381 | static struct opcode group8[] = { | 3100 | static struct opcode group8[] = { |
@@ -2394,35 +3113,40 @@ static struct opcode group11[] = { | |||
2394 | I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)), | 3113 | I(DstMem | SrcImm | ModRM | Mov, em_mov), X7(D(Undefined)), |
2395 | }; | 3114 | }; |
2396 | 3115 | ||
3116 | static struct gprefix pfx_0f_6f_0f_7f = { | ||
3117 | N, N, N, I(Sse, em_movdqu), | ||
3118 | }; | ||
3119 | |||
2397 | static struct opcode opcode_table[256] = { | 3120 | static struct opcode opcode_table[256] = { |
2398 | /* 0x00 - 0x07 */ | 3121 | /* 0x00 - 0x07 */ |
2399 | D6ALU(Lock), | 3122 | I6ALU(Lock, em_add), |
2400 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), | 3123 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), |
2401 | /* 0x08 - 0x0F */ | 3124 | /* 0x08 - 0x0F */ |
2402 | D6ALU(Lock), | 3125 | I6ALU(Lock, em_or), |
2403 | D(ImplicitOps | Stack | No64), N, | 3126 | D(ImplicitOps | Stack | No64), N, |
2404 | /* 0x10 - 0x17 */ | 3127 | /* 0x10 - 0x17 */ |
2405 | D6ALU(Lock), | 3128 | I6ALU(Lock, em_adc), |
2406 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), | 3129 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), |
2407 | /* 0x18 - 0x1F */ | 3130 | /* 0x18 - 0x1F */ |
2408 | D6ALU(Lock), | 3131 | I6ALU(Lock, em_sbb), |
2409 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), | 3132 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), |
2410 | /* 0x20 - 0x27 */ | 3133 | /* 0x20 - 0x27 */ |
2411 | D6ALU(Lock), N, N, | 3134 | I6ALU(Lock, em_and), N, N, |
2412 | /* 0x28 - 0x2F */ | 3135 | /* 0x28 - 0x2F */ |
2413 | D6ALU(Lock), N, I(ByteOp | DstAcc | No64, em_das), | 3136 | I6ALU(Lock, em_sub), N, I(ByteOp | DstAcc | No64, em_das), |
2414 | /* 0x30 - 0x37 */ | 3137 | /* 0x30 - 0x37 */ |
2415 | D6ALU(Lock), N, N, | 3138 | I6ALU(Lock, em_xor), N, N, |
2416 | /* 0x38 - 0x3F */ | 3139 | /* 0x38 - 0x3F */ |
2417 | D6ALU(0), N, N, | 3140 | I6ALU(0, em_cmp), N, N, |
2418 | /* 0x40 - 0x4F */ | 3141 | /* 0x40 - 0x4F */ |
2419 | X16(D(DstReg)), | 3142 | X16(D(DstReg)), |
2420 | /* 0x50 - 0x57 */ | 3143 | /* 0x50 - 0x57 */ |
2421 | X8(I(SrcReg | Stack, em_push)), | 3144 | X8(I(SrcReg | Stack, em_push)), |
2422 | /* 0x58 - 0x5F */ | 3145 | /* 0x58 - 0x5F */ |
2423 | X8(D(DstReg | Stack)), | 3146 | X8(I(DstReg | Stack, em_pop)), |
2424 | /* 0x60 - 0x67 */ | 3147 | /* 0x60 - 0x67 */ |
2425 | D(ImplicitOps | Stack | No64), D(ImplicitOps | Stack | No64), | 3148 | I(ImplicitOps | Stack | No64, em_pusha), |
3149 | I(ImplicitOps | Stack | No64, em_popa), | ||
2426 | N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ , | 3150 | N, D(DstReg | SrcMem32 | ModRM | Mov) /* movsxd (x86/64) */ , |
2427 | N, N, N, N, | 3151 | N, N, N, N, |
2428 | /* 0x68 - 0x6F */ | 3152 | /* 0x68 - 0x6F */ |
@@ -2430,8 +3154,8 @@ static struct opcode opcode_table[256] = { | |||
2430 | I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), | 3154 | I(DstReg | SrcMem | ModRM | Src2Imm, em_imul_3op), |
2431 | I(SrcImmByte | Mov | Stack, em_push), | 3155 | I(SrcImmByte | Mov | Stack, em_push), |
2432 | I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), | 3156 | I(DstReg | SrcMem | ModRM | Src2ImmByte, em_imul_3op), |
2433 | D2bv(DstDI | Mov | String), /* insb, insw/insd */ | 3157 | D2bvIP(DstDI | Mov | String, ins, check_perm_in), /* insb, insw/insd */ |
2434 | D2bv(SrcSI | ImplicitOps | String), /* outsb, outsw/outsd */ | 3158 | D2bvIP(SrcSI | ImplicitOps | String, outs, check_perm_out), /* outsb, outsw/outsd */ |
2435 | /* 0x70 - 0x7F */ | 3159 | /* 0x70 - 0x7F */ |
2436 | X16(D(SrcImmByte)), | 3160 | X16(D(SrcImmByte)), |
2437 | /* 0x80 - 0x87 */ | 3161 | /* 0x80 - 0x87 */ |
@@ -2446,21 +3170,22 @@ static struct opcode opcode_table[256] = { | |||
2446 | D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg), | 3170 | D(DstMem | SrcNone | ModRM | Mov), D(ModRM | SrcMem | NoAccess | DstReg), |
2447 | D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A), | 3171 | D(ImplicitOps | SrcMem16 | ModRM), G(0, group1A), |
2448 | /* 0x90 - 0x97 */ | 3172 | /* 0x90 - 0x97 */ |
2449 | X8(D(SrcAcc | DstReg)), | 3173 | DI(SrcAcc | DstReg, pause), X7(D(SrcAcc | DstReg)), |
2450 | /* 0x98 - 0x9F */ | 3174 | /* 0x98 - 0x9F */ |
2451 | D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), | 3175 | D(DstAcc | SrcNone), I(ImplicitOps | SrcAcc, em_cwd), |
2452 | I(SrcImmFAddr | No64, em_call_far), N, | 3176 | I(SrcImmFAddr | No64, em_call_far), N, |
2453 | D(ImplicitOps | Stack), D(ImplicitOps | Stack), N, N, | 3177 | II(ImplicitOps | Stack, em_pushf, pushf), |
3178 | II(ImplicitOps | Stack, em_popf, popf), N, N, | ||
2454 | /* 0xA0 - 0xA7 */ | 3179 | /* 0xA0 - 0xA7 */ |
2455 | I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), | 3180 | I2bv(DstAcc | SrcMem | Mov | MemAbs, em_mov), |
2456 | I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov), | 3181 | I2bv(DstMem | SrcAcc | Mov | MemAbs, em_mov), |
2457 | I2bv(SrcSI | DstDI | Mov | String, em_mov), | 3182 | I2bv(SrcSI | DstDI | Mov | String, em_mov), |
2458 | D2bv(SrcSI | DstDI | String), | 3183 | I2bv(SrcSI | DstDI | String, em_cmp), |
2459 | /* 0xA8 - 0xAF */ | 3184 | /* 0xA8 - 0xAF */ |
2460 | D2bv(DstAcc | SrcImm), | 3185 | D2bv(DstAcc | SrcImm), |
2461 | I2bv(SrcAcc | DstDI | Mov | String, em_mov), | 3186 | I2bv(SrcAcc | DstDI | Mov | String, em_mov), |
2462 | I2bv(SrcSI | DstAcc | Mov | String, em_mov), | 3187 | I2bv(SrcSI | DstAcc | Mov | String, em_mov), |
2463 | D2bv(SrcAcc | DstDI | String), | 3188 | I2bv(SrcAcc | DstDI | String, em_cmp), |
2464 | /* 0xB0 - 0xB7 */ | 3189 | /* 0xB0 - 0xB7 */ |
2465 | X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), | 3190 | X8(I(ByteOp | DstReg | SrcImm | Mov, em_mov)), |
2466 | /* 0xB8 - 0xBF */ | 3191 | /* 0xB8 - 0xBF */ |
@@ -2473,7 +3198,8 @@ static struct opcode opcode_table[256] = { | |||
2473 | G(ByteOp, group11), G(0, group11), | 3198 | G(ByteOp, group11), G(0, group11), |
2474 | /* 0xC8 - 0xCF */ | 3199 | /* 0xC8 - 0xCF */ |
2475 | N, N, N, D(ImplicitOps | Stack), | 3200 | N, N, N, D(ImplicitOps | Stack), |
2476 | D(ImplicitOps), D(SrcImmByte), D(ImplicitOps | No64), D(ImplicitOps), | 3201 | D(ImplicitOps), DI(SrcImmByte, intn), |
3202 | D(ImplicitOps | No64), DI(ImplicitOps, iret), | ||
2477 | /* 0xD0 - 0xD7 */ | 3203 | /* 0xD0 - 0xD7 */ |
2478 | D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM), | 3204 | D2bv(DstMem | SrcOne | ModRM), D2bv(DstMem | ModRM), |
2479 | N, N, N, N, | 3205 | N, N, N, N, |
@@ -2481,14 +3207,17 @@ static struct opcode opcode_table[256] = { | |||
2481 | N, N, N, N, N, N, N, N, | 3207 | N, N, N, N, N, N, N, N, |
2482 | /* 0xE0 - 0xE7 */ | 3208 | /* 0xE0 - 0xE7 */ |
2483 | X4(D(SrcImmByte)), | 3209 | X4(D(SrcImmByte)), |
2484 | D2bv(SrcImmUByte | DstAcc), D2bv(SrcAcc | DstImmUByte), | 3210 | D2bvIP(SrcImmUByte | DstAcc, in, check_perm_in), |
3211 | D2bvIP(SrcAcc | DstImmUByte, out, check_perm_out), | ||
2485 | /* 0xE8 - 0xEF */ | 3212 | /* 0xE8 - 0xEF */ |
2486 | D(SrcImm | Stack), D(SrcImm | ImplicitOps), | 3213 | D(SrcImm | Stack), D(SrcImm | ImplicitOps), |
2487 | D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps), | 3214 | D(SrcImmFAddr | No64), D(SrcImmByte | ImplicitOps), |
2488 | D2bv(SrcNone | DstAcc), D2bv(SrcAcc | ImplicitOps), | 3215 | D2bvIP(SrcNone | DstAcc, in, check_perm_in), |
3216 | D2bvIP(SrcAcc | ImplicitOps, out, check_perm_out), | ||
2489 | /* 0xF0 - 0xF7 */ | 3217 | /* 0xF0 - 0xF7 */ |
2490 | N, N, N, N, | 3218 | N, DI(ImplicitOps, icebp), N, N, |
2491 | D(ImplicitOps | Priv), D(ImplicitOps), G(ByteOp, group3), G(0, group3), | 3219 | DI(ImplicitOps | Priv, hlt), D(ImplicitOps), |
3220 | G(ByteOp, group3), G(0, group3), | ||
2492 | /* 0xF8 - 0xFF */ | 3221 | /* 0xF8 - 0xFF */ |
2493 | D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), | 3222 | D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), D(ImplicitOps), |
2494 | D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), | 3223 | D(ImplicitOps), D(ImplicitOps), G(0, group4), G(0, group5), |
@@ -2496,20 +3225,24 @@ static struct opcode opcode_table[256] = { | |||
2496 | 3225 | ||
2497 | static struct opcode twobyte_table[256] = { | 3226 | static struct opcode twobyte_table[256] = { |
2498 | /* 0x00 - 0x0F */ | 3227 | /* 0x00 - 0x0F */ |
2499 | N, GD(0, &group7), N, N, | 3228 | G(0, group6), GD(0, &group7), N, N, |
2500 | N, D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv), N, | 3229 | N, D(ImplicitOps | VendorSpecific), DI(ImplicitOps | Priv, clts), N, |
2501 | D(ImplicitOps | Priv), D(ImplicitOps | Priv), N, N, | 3230 | DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, |
2502 | N, D(ImplicitOps | ModRM), N, N, | 3231 | N, D(ImplicitOps | ModRM), N, N, |
2503 | /* 0x10 - 0x1F */ | 3232 | /* 0x10 - 0x1F */ |
2504 | N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N, | 3233 | N, N, N, N, N, N, N, N, D(ImplicitOps | ModRM), N, N, N, N, N, N, N, |
2505 | /* 0x20 - 0x2F */ | 3234 | /* 0x20 - 0x2F */ |
2506 | D(ModRM | DstMem | Priv | Op3264), D(ModRM | DstMem | Priv | Op3264), | 3235 | DIP(ModRM | DstMem | Priv | Op3264, cr_read, check_cr_read), |
2507 | D(ModRM | SrcMem | Priv | Op3264), D(ModRM | SrcMem | Priv | Op3264), | 3236 | DIP(ModRM | DstMem | Priv | Op3264, dr_read, check_dr_read), |
3237 | DIP(ModRM | SrcMem | Priv | Op3264, cr_write, check_cr_write), | ||
3238 | DIP(ModRM | SrcMem | Priv | Op3264, dr_write, check_dr_write), | ||
2508 | N, N, N, N, | 3239 | N, N, N, N, |
2509 | N, N, N, N, N, N, N, N, | 3240 | N, N, N, N, N, N, N, N, |
2510 | /* 0x30 - 0x3F */ | 3241 | /* 0x30 - 0x3F */ |
2511 | D(ImplicitOps | Priv), I(ImplicitOps, em_rdtsc), | 3242 | DI(ImplicitOps | Priv, wrmsr), |
2512 | D(ImplicitOps | Priv), N, | 3243 | IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), |
3244 | DI(ImplicitOps | Priv, rdmsr), | ||
3245 | DIP(ImplicitOps | Priv, rdpmc, check_rdpmc), | ||
2513 | D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific), | 3246 | D(ImplicitOps | VendorSpecific), D(ImplicitOps | Priv | VendorSpecific), |
2514 | N, N, | 3247 | N, N, |
2515 | N, N, N, N, N, N, N, N, | 3248 | N, N, N, N, N, N, N, N, |
@@ -2518,21 +3251,27 @@ static struct opcode twobyte_table[256] = { | |||
2518 | /* 0x50 - 0x5F */ | 3251 | /* 0x50 - 0x5F */ |
2519 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, | 3252 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, |
2520 | /* 0x60 - 0x6F */ | 3253 | /* 0x60 - 0x6F */ |
2521 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, | 3254 | N, N, N, N, |
3255 | N, N, N, N, | ||
3256 | N, N, N, N, | ||
3257 | N, N, N, GP(SrcMem | DstReg | ModRM | Mov, &pfx_0f_6f_0f_7f), | ||
2522 | /* 0x70 - 0x7F */ | 3258 | /* 0x70 - 0x7F */ |
2523 | N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, N, | 3259 | N, N, N, N, |
3260 | N, N, N, N, | ||
3261 | N, N, N, N, | ||
3262 | N, N, N, GP(SrcReg | DstMem | ModRM | Mov, &pfx_0f_6f_0f_7f), | ||
2524 | /* 0x80 - 0x8F */ | 3263 | /* 0x80 - 0x8F */ |
2525 | X16(D(SrcImm)), | 3264 | X16(D(SrcImm)), |
2526 | /* 0x90 - 0x9F */ | 3265 | /* 0x90 - 0x9F */ |
2527 | 	X16(D(ByteOp | DstMem | SrcNone | ModRM | Mov)), | 3266 |
2528 | /* 0xA0 - 0xA7 */ | 3267 | /* 0xA0 - 0xA7 */ |
2529 | D(ImplicitOps | Stack), D(ImplicitOps | Stack), | 3268 | D(ImplicitOps | Stack), D(ImplicitOps | Stack), |
2530 | N, D(DstMem | SrcReg | ModRM | BitOp), | 3269 | DI(ImplicitOps, cpuid), D(DstMem | SrcReg | ModRM | BitOp), |
2531 | D(DstMem | SrcReg | Src2ImmByte | ModRM), | 3270 | D(DstMem | SrcReg | Src2ImmByte | ModRM), |
2532 | D(DstMem | SrcReg | Src2CL | ModRM), N, N, | 3271 | D(DstMem | SrcReg | Src2CL | ModRM), N, N, |
2533 | /* 0xA8 - 0xAF */ | 3272 | /* 0xA8 - 0xAF */ |
2534 | D(ImplicitOps | Stack), D(ImplicitOps | Stack), | 3273 | D(ImplicitOps | Stack), D(ImplicitOps | Stack), |
2535 | N, D(DstMem | SrcReg | ModRM | BitOp | Lock), | 3274 | DI(ImplicitOps, rsm), D(DstMem | SrcReg | ModRM | BitOp | Lock), |
2536 | D(DstMem | SrcReg | Src2ImmByte | ModRM), | 3275 | D(DstMem | SrcReg | Src2ImmByte | ModRM), |
2537 | D(DstMem | SrcReg | Src2CL | ModRM), | 3276 | D(DstMem | SrcReg | Src2CL | ModRM), |
2538 | D(ModRM), I(DstReg | SrcMem | ModRM, em_imul), | 3277 | D(ModRM), I(DstReg | SrcMem | ModRM, em_imul), |
@@ -2564,10 +3303,13 @@ static struct opcode twobyte_table[256] = { | |||
2564 | #undef G | 3303 | #undef G |
2565 | #undef GD | 3304 | #undef GD |
2566 | #undef I | 3305 | #undef I |
3306 | #undef GP | ||
3307 | #undef EXT | ||
2567 | 3308 | ||
2568 | #undef D2bv | 3309 | #undef D2bv |
3310 | #undef D2bvIP | ||
2569 | #undef I2bv | 3311 | #undef I2bv |
2570 | #undef D6ALU | 3312 | #undef I6ALU |
2571 | 3313 | ||
2572 | static unsigned imm_size(struct decode_cache *c) | 3314 | static unsigned imm_size(struct decode_cache *c) |
2573 | { | 3315 | { |
@@ -2625,8 +3367,9 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
2625 | struct decode_cache *c = &ctxt->decode; | 3367 | struct decode_cache *c = &ctxt->decode; |
2626 | int rc = X86EMUL_CONTINUE; | 3368 | int rc = X86EMUL_CONTINUE; |
2627 | int mode = ctxt->mode; | 3369 | int mode = ctxt->mode; |
2628 | int def_op_bytes, def_ad_bytes, dual, goffset; | 3370 | int def_op_bytes, def_ad_bytes, goffset, simd_prefix; |
2629 | struct opcode opcode, *g_mod012, *g_mod3; | 3371 | bool op_prefix = false; |
3372 | struct opcode opcode; | ||
2630 | struct operand memop = { .type = OP_NONE }; | 3373 | struct operand memop = { .type = OP_NONE }; |
2631 | 3374 | ||
2632 | c->eip = ctxt->eip; | 3375 | c->eip = ctxt->eip; |
@@ -2634,7 +3377,6 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
2634 | c->fetch.end = c->fetch.start + insn_len; | 3377 | c->fetch.end = c->fetch.start + insn_len; |
2635 | if (insn_len > 0) | 3378 | if (insn_len > 0) |
2636 | memcpy(c->fetch.data, insn, insn_len); | 3379 | memcpy(c->fetch.data, insn, insn_len); |
2637 | ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS); | ||
2638 | 3380 | ||
2639 | switch (mode) { | 3381 | switch (mode) { |
2640 | case X86EMUL_MODE_REAL: | 3382 | case X86EMUL_MODE_REAL: |
@@ -2662,6 +3404,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
2662 | for (;;) { | 3404 | for (;;) { |
2663 | switch (c->b = insn_fetch(u8, 1, c->eip)) { | 3405 | switch (c->b = insn_fetch(u8, 1, c->eip)) { |
2664 | case 0x66: /* operand-size override */ | 3406 | case 0x66: /* operand-size override */ |
3407 | op_prefix = true; | ||
2665 | /* switch between 2/4 bytes */ | 3408 | /* switch between 2/4 bytes */ |
2666 | c->op_bytes = def_op_bytes ^ 6; | 3409 | c->op_bytes = def_op_bytes ^ 6; |
2667 | break; | 3410 | break; |
@@ -2692,10 +3435,8 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len) | |||
2692 | c->lock_prefix = 1; | 3435 | c->lock_prefix = 1; |
2693 | break; | 3436 | break; |
2694 | case 0xf2: /* REPNE/REPNZ */ | 3437 | case 0xf2: /* REPNE/REPNZ */ |
2695 | c->rep_prefix = REPNE_PREFIX; | ||
2696 | break; | ||
2697 | case 0xf3: /* REP/REPE/REPZ */ | 3438 | case 0xf3: /* REP/REPE/REPZ */ |
2698 | c->rep_prefix = REPE_PREFIX; | 3439 | c->rep_prefix = c->b; |
2699 | break; | 3440 | break; |
2700 | default: | 3441 | default: |
2701 | goto done_prefixes; | 3442 | goto done_prefixes; |
@@ -2722,29 +3463,49 @@ done_prefixes: | |||
2722 | } | 3463 | } |
2723 | c->d = opcode.flags; | 3464 | c->d = opcode.flags; |
2724 | 3465 | ||
2725 | if (c->d & Group) { | 3466 | while (c->d & GroupMask) { |
2726 | dual = c->d & GroupDual; | 3467 | switch (c->d & GroupMask) { |
2727 | c->modrm = insn_fetch(u8, 1, c->eip); | 3468 | case Group: |
2728 | --c->eip; | 3469 | c->modrm = insn_fetch(u8, 1, c->eip); |
2729 | 3470 | --c->eip; | |
2730 | if (c->d & GroupDual) { | 3471 | goffset = (c->modrm >> 3) & 7; |
2731 | g_mod012 = opcode.u.gdual->mod012; | 3472 | opcode = opcode.u.group[goffset]; |
2732 | g_mod3 = opcode.u.gdual->mod3; | 3473 | break; |
2733 | } else | 3474 | case GroupDual: |
2734 | g_mod012 = g_mod3 = opcode.u.group; | 3475 | c->modrm = insn_fetch(u8, 1, c->eip); |
2735 | 3476 | --c->eip; | |
2736 | c->d &= ~(Group | GroupDual); | 3477 | goffset = (c->modrm >> 3) & 7; |
2737 | 3478 | if ((c->modrm >> 6) == 3) | |
2738 | goffset = (c->modrm >> 3) & 7; | 3479 | opcode = opcode.u.gdual->mod3[goffset]; |
3480 | else | ||
3481 | opcode = opcode.u.gdual->mod012[goffset]; | ||
3482 | break; | ||
3483 | case RMExt: | ||
3484 | goffset = c->modrm & 7; | ||
3485 | opcode = opcode.u.group[goffset]; | ||
3486 | break; | ||
3487 | case Prefix: | ||
3488 | if (c->rep_prefix && op_prefix) | ||
3489 | return X86EMUL_UNHANDLEABLE; | ||
3490 | simd_prefix = op_prefix ? 0x66 : c->rep_prefix; | ||
3491 | switch (simd_prefix) { | ||
3492 | case 0x00: opcode = opcode.u.gprefix->pfx_no; break; | ||
3493 | case 0x66: opcode = opcode.u.gprefix->pfx_66; break; | ||
3494 | case 0xf2: opcode = opcode.u.gprefix->pfx_f2; break; | ||
3495 | case 0xf3: opcode = opcode.u.gprefix->pfx_f3; break; | ||
3496 | } | ||
3497 | break; | ||
3498 | default: | ||
3499 | return X86EMUL_UNHANDLEABLE; | ||
3500 | } | ||
2739 | 3501 | ||
2740 | if ((c->modrm >> 6) == 3) | 3502 | c->d &= ~GroupMask; |
2741 | opcode = g_mod3[goffset]; | ||
2742 | else | ||
2743 | opcode = g_mod012[goffset]; | ||
2744 | c->d |= opcode.flags; | 3503 | c->d |= opcode.flags; |
2745 | } | 3504 | } |
2746 | 3505 | ||
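The GroupMask loop above is steered entirely by ModRM bit-fields: bits 7:6 (mod) choose between the two GroupDual tables, bits 5:3 (reg) index the group slot, and bits 2:0 (r/m) index an RMExt table. A standalone sketch of the split, using 0f 01 d8 (VMRUN) as the worked example — reg selects group7 mod3 slot 3, whose EXT entry then uses rm to land on group7_rm3[0]:

#include <stdio.h>

int main(void)
{
	unsigned char modrm = 0xd8;		/* third byte of 0f 01 d8 */

	unsigned mod = modrm >> 6;		/* 3: register form */
	unsigned reg = (modrm >> 3) & 7;	/* 3: group7 slot */
	unsigned rm  = modrm & 7;		/* 0: group7_rm3[0] = vmrun */

	printf("mod=%u reg=%u rm=%u\n", mod, reg, rm);
	return 0;
}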
2747 | c->execute = opcode.u.execute; | 3506 | c->execute = opcode.u.execute; |
3507 | c->check_perm = opcode.check_perm; | ||
3508 | c->intercept = opcode.intercept; | ||
2748 | 3509 | ||
2749 | /* Unrecognised? */ | 3510 | /* Unrecognised? */ |
2750 | if (c->d == 0 || (c->d & Undefined)) | 3511 | if (c->d == 0 || (c->d & Undefined)) |
@@ -2763,6 +3524,9 @@ done_prefixes: | |||
2763 | c->op_bytes = 4; | 3524 | c->op_bytes = 4; |
2764 | } | 3525 | } |
2765 | 3526 | ||
3527 | if (c->d & Sse) | ||
3528 | c->op_bytes = 16; | ||
3529 | |||
2766 | /* ModRM and SIB bytes. */ | 3530 | /* ModRM and SIB bytes. */ |
2767 | if (c->d & ModRM) { | 3531 | if (c->d & ModRM) { |
2768 | rc = decode_modrm(ctxt, ops, &memop); | 3532 | rc = decode_modrm(ctxt, ops, &memop); |
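
Forcing c->op_bytes to 16 reflects that instructions tagged Sse move whole 128-bit XMM registers, overriding the 2/4/8-byte arithmetic above. Roughly, as a sketch (SSE_FLAG stands in for the Sse bit):

    #include <stdint.h>

    #define SSE_FLAG (1u << 17)     /* stand-in for the Sse decode flag */

    /* Illustrative: operand size after decode; the Sse flag wins because
     * XMM registers are 128 bits wide. */
    static unsigned op_size(uint32_t flags, unsigned def_op_bytes)
    {
            return (flags & SSE_FLAG) ? 16 : def_op_bytes;
    }
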
@@ -2776,7 +3540,7 @@ done_prefixes: | |||
2776 | if (!c->has_seg_override) | 3540 | if (!c->has_seg_override) |
2777 | set_seg_override(c, VCPU_SREG_DS); | 3541 | set_seg_override(c, VCPU_SREG_DS); |
2778 | 3542 | ||
2779 | memop.addr.mem.seg = seg_override(ctxt, ops, c); | 3543 | memop.addr.mem.seg = seg_override(ctxt, c); |
2780 | 3544 | ||
2781 | if (memop.type == OP_MEM && c->ad_bytes != 8) | 3545 | if (memop.type == OP_MEM && c->ad_bytes != 8) |
2782 | memop.addr.mem.ea = (u32)memop.addr.mem.ea; | 3546 | memop.addr.mem.ea = (u32)memop.addr.mem.ea; |
@@ -2792,7 +3556,7 @@ done_prefixes: | |||
2792 | case SrcNone: | 3556 | case SrcNone: |
2793 | break; | 3557 | break; |
2794 | case SrcReg: | 3558 | case SrcReg: |
2795 | decode_register_operand(&c->src, c, 0); | 3559 | decode_register_operand(ctxt, &c->src, c, 0); |
2796 | break; | 3560 | break; |
2797 | case SrcMem16: | 3561 | case SrcMem16: |
2798 | memop.bytes = 2; | 3562 | memop.bytes = 2; |
@@ -2836,7 +3600,7 @@ done_prefixes: | |||
2836 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; | 3600 | c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; |
2837 | c->src.addr.mem.ea = | 3601 | c->src.addr.mem.ea = |
2838 | register_address(c, c->regs[VCPU_REGS_RSI]); | 3602 | register_address(c, c->regs[VCPU_REGS_RSI]); |
2839 | c->src.addr.mem.seg = seg_override(ctxt, ops, c), | 3603 | c->src.addr.mem.seg = seg_override(ctxt, c); |
2840 | c->src.val = 0; | 3604 | c->src.val = 0; |
2841 | break; | 3605 | break; |
2842 | case SrcImmFAddr: | 3606 | case SrcImmFAddr: |
@@ -2883,7 +3647,7 @@ done_prefixes: | |||
2883 | /* Decode and fetch the destination operand: register or memory. */ | 3647 | /* Decode and fetch the destination operand: register or memory. */ |
2884 | switch (c->d & DstMask) { | 3648 | switch (c->d & DstMask) { |
2885 | case DstReg: | 3649 | case DstReg: |
2886 | decode_register_operand(&c->dst, c, | 3650 | decode_register_operand(ctxt, &c->dst, c, |
2887 | c->twobyte && (c->b == 0xb6 || c->b == 0xb7)); | 3651 | c->twobyte && (c->b == 0xb6 || c->b == 0xb7)); |
2888 | break; | 3652 | break; |
2889 | case DstImmUByte: | 3653 | case DstImmUByte: |
@@ -2926,7 +3690,7 @@ done_prefixes: | |||
2926 | } | 3690 | } |
2927 | 3691 | ||
2928 | done: | 3692 | done: |
2929 | return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0; | 3693 | return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; |
2930 | } | 3694 | } |
2931 | 3695 | ||
2932 | static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) | 3696 | static bool string_insn_completed(struct x86_emulate_ctxt *ctxt) |
@@ -2979,12 +3743,51 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
2979 | goto done; | 3743 | goto done; |
2980 | } | 3744 | } |
2981 | 3745 | ||
3746 | if ((c->d & Sse) | ||
3747 | && ((ops->get_cr(ctxt, 0) & X86_CR0_EM) | ||
3748 | || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) { | ||
3749 | rc = emulate_ud(ctxt); | ||
3750 | goto done; | ||
3751 | } | ||
3752 | |||
3753 | if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { | ||
3754 | rc = emulate_nm(ctxt); | ||
3755 | goto done; | ||
3756 | } | ||
3757 | |||
3758 | if (unlikely(ctxt->guest_mode) && c->intercept) { | ||
3759 | rc = emulator_check_intercept(ctxt, c->intercept, | ||
3760 | X86_ICPT_PRE_EXCEPT); | ||
3761 | if (rc != X86EMUL_CONTINUE) | ||
3762 | goto done; | ||
3763 | } | ||
3764 | |||
2982 | /* Privileged instruction can be executed only in CPL=0 */ | 3765 | /* Privileged instruction can be executed only in CPL=0 */ |
2983 | if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) { | 3766 | if ((c->d & Priv) && ops->cpl(ctxt)) { |
2984 | rc = emulate_gp(ctxt, 0); | 3767 | rc = emulate_gp(ctxt, 0); |
2985 | goto done; | 3768 | goto done; |
2986 | } | 3769 | } |
2987 | 3770 | ||
3771 | /* Instruction can only be executed in protected mode */ | ||
3772 | if ((c->d & Prot) && !(ctxt->mode & X86EMUL_MODE_PROT)) { | ||
3773 | rc = emulate_ud(ctxt); | ||
3774 | goto done; | ||
3775 | } | ||
3776 | |||
3777 | /* Do instruction specific permission checks */ | ||
3778 | if (c->check_perm) { | ||
3779 | rc = c->check_perm(ctxt); | ||
3780 | if (rc != X86EMUL_CONTINUE) | ||
3781 | goto done; | ||
3782 | } | ||
3783 | |||
3784 | if (unlikely(ctxt->guest_mode) && c->intercept) { | ||
3785 | rc = emulator_check_intercept(ctxt, c->intercept, | ||
3786 | X86_ICPT_POST_EXCEPT); | ||
3787 | if (rc != X86EMUL_CONTINUE) | ||
3788 | goto done; | ||
3789 | } | ||
3790 | |||
2988 | if (c->rep_prefix && (c->d & String)) { | 3791 | if (c->rep_prefix && (c->d & String)) { |
2989 | /* All REP prefixes have the same first termination condition */ | 3792 | /* All REP prefixes have the same first termination condition */ |
2990 | if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) { | 3793 | if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) { |
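
Execution now runs a fixed gauntlet before any operand is touched: #UD if SSE is used while CR0.EM is set or CR4.OSFXSR is clear, #NM if CR0.TS is set (so the host can keep FPU switching lazy), a PRE_EXCEPT intercept for nested guests, the CPL test for Priv, #UD for Prot instructions outside protected mode, the per-opcode check_perm hook, and a POST_EXCEPT intercept. A condensed standalone model of the ordering (intercept stages omitted here, and field names illustrative):

    #include <stdint.h>

    #define CR0_EM     (1u << 2)
    #define CR0_TS     (1u << 3)
    #define CR4_OSFXSR (1u << 9)

    enum fault { OK, UD, NM, GP };

    struct insn {
            int is_sse, is_priv, is_prot_only;
            enum fault (*check_perm)(void);
    };

    static enum fault pre_execute_checks(const struct insn *i, uint32_t cr0,
                                         uint32_t cr4, int cpl, int prot_mode)
    {
            if (i->is_sse && ((cr0 & CR0_EM) || !(cr4 & CR4_OSFXSR)))
                    return UD;              /* SSE disabled or unsupported */
            if (i->is_sse && (cr0 & CR0_TS))
                    return NM;              /* lazy FPU switch pending */
            if (i->is_priv && cpl != 0)
                    return GP;              /* privileged outside ring 0 */
            if (i->is_prot_only && !prot_mode)
                    return UD;              /* protected-mode-only insn */
            if (i->check_perm)
                    return i->check_perm(); /* opcode-specific rules */
            return OK;
    }
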
@@ -2994,16 +3797,16 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
2994 | } | 3797 | } |
2995 | 3798 | ||
2996 | if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) { | 3799 | if ((c->src.type == OP_MEM) && !(c->d & NoAccess)) { |
2997 | rc = read_emulated(ctxt, ops, linear(ctxt, c->src.addr.mem), | 3800 | rc = segmented_read(ctxt, c->src.addr.mem, |
2998 | c->src.valptr, c->src.bytes); | 3801 | c->src.valptr, c->src.bytes); |
2999 | if (rc != X86EMUL_CONTINUE) | 3802 | if (rc != X86EMUL_CONTINUE) |
3000 | goto done; | 3803 | goto done; |
3001 | c->src.orig_val64 = c->src.val64; | 3804 | c->src.orig_val64 = c->src.val64; |
3002 | } | 3805 | } |
3003 | 3806 | ||
3004 | if (c->src2.type == OP_MEM) { | 3807 | if (c->src2.type == OP_MEM) { |
3005 | rc = read_emulated(ctxt, ops, linear(ctxt, c->src2.addr.mem), | 3808 | rc = segmented_read(ctxt, c->src2.addr.mem, |
3006 | &c->src2.val, c->src2.bytes); | 3809 | &c->src2.val, c->src2.bytes); |
3007 | if (rc != X86EMUL_CONTINUE) | 3810 | if (rc != X86EMUL_CONTINUE) |
3008 | goto done; | 3811 | goto done; |
3009 | } | 3812 | } |
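
Operand fetches switch from read_emulated(..., linear(ctxt, addr), ...) to segmented_read(ctxt, addr, ...), pushing the segment-to-linear translation (and whatever limit and permission checks belong with it) inside the helper instead of at every call site. A minimal sketch of that shape, against a flat byte array standing in for guest memory:

    #include <stdint.h>
    #include <stddef.h>
    #include <string.h>

    struct seg_addr {
            uint64_t seg_base;
            uint64_t ea;
    };

    static int linearize(const struct seg_addr *a, uint64_t *lin)
    {
            /* real code also applies segment limits and canonical checks */
            *lin = a->seg_base + a->ea;
            return 0;
    }

    static int segmented_read(const uint8_t *guest_mem, size_t mem_size,
                              const struct seg_addr *addr, void *dst, size_t n)
    {
            uint64_t lin;

            if (linearize(addr, &lin) != 0 || lin + n > mem_size)
                    return -1;                      /* would fault */
            memcpy(dst, guest_mem + lin, n);        /* stand-in for MMU access */
            return 0;
    }
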
@@ -3014,7 +3817,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
3014 | 3817 | ||
3015 | if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { | 3818 | if ((c->dst.type == OP_MEM) && !(c->d & Mov)) { |
3016 | /* optimisation - avoid slow emulated read if Mov */ | 3819 | /* optimisation - avoid slow emulated read if Mov */ |
3017 | rc = read_emulated(ctxt, ops, linear(ctxt, c->dst.addr.mem), | 3820 | rc = segmented_read(ctxt, c->dst.addr.mem, |
3018 | &c->dst.val, c->dst.bytes); | 3821 | &c->dst.val, c->dst.bytes); |
3019 | if (rc != X86EMUL_CONTINUE) | 3822 | if (rc != X86EMUL_CONTINUE) |
3020 | goto done; | 3823 | goto done; |
@@ -3023,6 +3826,13 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt) | |||
3023 | 3826 | ||
3024 | special_insn: | 3827 | special_insn: |
3025 | 3828 | ||
3829 | if (unlikely(ctxt->guest_mode) && c->intercept) { | ||
3830 | rc = emulator_check_intercept(ctxt, c->intercept, | ||
3831 | X86_ICPT_POST_MEMACCESS); | ||
3832 | if (rc != X86EMUL_CONTINUE) | ||
3833 | goto done; | ||
3834 | } | ||
3835 | |||
3026 | if (c->execute) { | 3836 | if (c->execute) { |
3027 | rc = c->execute(ctxt); | 3837 | rc = c->execute(ctxt); |
3028 | if (rc != X86EMUL_CONTINUE) | 3838 | if (rc != X86EMUL_CONTINUE) |
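
This is the third of three intercept checkpoints added for nested virtualization: PRE_EXCEPT before the privilege tests, POST_EXCEPT after them, and POST_MEMACCESS here, once memory operands have been read, mirroring the points at which SVM-style intercepts take effect. Schematically:

    /* Model of the three intercept checkpoints threaded through execution;
     * a nested hypervisor may demand an exit at any stage. */
    enum icpt_stage { ICPT_PRE_EXCEPT, ICPT_POST_EXCEPT, ICPT_POST_MEMACCESS };

    typedef int (*intercept_fn)(int intercept, enum icpt_stage stage);

    static int run_stage(intercept_fn check, int guest_mode, int intercept,
                         enum icpt_stage stage)
    {
            if (!guest_mode || !intercept)
                    return 0;               /* nothing to do outside L2 */
            return check(intercept, stage); /* nonzero: intercepted */
    }
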
@@ -3034,75 +3844,33 @@ special_insn: | |||
3034 | goto twobyte_insn; | 3844 | goto twobyte_insn; |
3035 | 3845 | ||
3036 | switch (c->b) { | 3846 | switch (c->b) { |
3037 | case 0x00 ... 0x05: | ||
3038 | add: /* add */ | ||
3039 | emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags); | ||
3040 | break; | ||
3041 | case 0x06: /* push es */ | 3847 | case 0x06: /* push es */ |
3042 | emulate_push_sreg(ctxt, ops, VCPU_SREG_ES); | 3848 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES); |
3043 | break; | 3849 | break; |
3044 | case 0x07: /* pop es */ | 3850 | case 0x07: /* pop es */ |
3045 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); | 3851 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES); |
3046 | break; | 3852 | break; |
3047 | case 0x08 ... 0x0d: | ||
3048 | or: /* or */ | ||
3049 | emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags); | ||
3050 | break; | ||
3051 | case 0x0e: /* push cs */ | 3853 | case 0x0e: /* push cs */ |
3052 | emulate_push_sreg(ctxt, ops, VCPU_SREG_CS); | 3854 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS); |
3053 | break; | ||
3054 | case 0x10 ... 0x15: | ||
3055 | adc: /* adc */ | ||
3056 | emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags); | ||
3057 | break; | 3855 | break; |
3058 | case 0x16: /* push ss */ | 3856 | case 0x16: /* push ss */ |
3059 | emulate_push_sreg(ctxt, ops, VCPU_SREG_SS); | 3857 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS); |
3060 | break; | 3858 | break; |
3061 | case 0x17: /* pop ss */ | 3859 | case 0x17: /* pop ss */ |
3062 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); | 3860 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS); |
3063 | break; | 3861 | break; |
3064 | case 0x18 ... 0x1d: | ||
3065 | sbb: /* sbb */ | ||
3066 | emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags); | ||
3067 | break; | ||
3068 | case 0x1e: /* push ds */ | 3862 | case 0x1e: /* push ds */ |
3069 | emulate_push_sreg(ctxt, ops, VCPU_SREG_DS); | 3863 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS); |
3070 | break; | 3864 | break; |
3071 | case 0x1f: /* pop ds */ | 3865 | case 0x1f: /* pop ds */ |
3072 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); | 3866 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS); |
3073 | break; | 3867 | break; |
3074 | case 0x20 ... 0x25: | ||
3075 | and: /* and */ | ||
3076 | emulate_2op_SrcV("and", c->src, c->dst, ctxt->eflags); | ||
3077 | break; | ||
3078 | case 0x28 ... 0x2d: | ||
3079 | sub: /* sub */ | ||
3080 | emulate_2op_SrcV("sub", c->src, c->dst, ctxt->eflags); | ||
3081 | break; | ||
3082 | case 0x30 ... 0x35: | ||
3083 | xor: /* xor */ | ||
3084 | emulate_2op_SrcV("xor", c->src, c->dst, ctxt->eflags); | ||
3085 | break; | ||
3086 | case 0x38 ... 0x3d: | ||
3087 | cmp: /* cmp */ | ||
3088 | emulate_2op_SrcV("cmp", c->src, c->dst, ctxt->eflags); | ||
3089 | break; | ||
3090 | case 0x40 ... 0x47: /* inc r16/r32 */ | 3868 | case 0x40 ... 0x47: /* inc r16/r32 */ |
3091 | emulate_1op("inc", c->dst, ctxt->eflags); | 3869 | emulate_1op("inc", c->dst, ctxt->eflags); |
3092 | break; | 3870 | break; |
3093 | case 0x48 ... 0x4f: /* dec r16/r32 */ | 3871 | case 0x48 ... 0x4f: /* dec r16/r32 */ |
3094 | emulate_1op("dec", c->dst, ctxt->eflags); | 3872 | emulate_1op("dec", c->dst, ctxt->eflags); |
3095 | break; | 3873 | break; |
3096 | case 0x58 ... 0x5f: /* pop reg */ | ||
3097 | pop_instruction: | ||
3098 | rc = emulate_pop(ctxt, ops, &c->dst.val, c->op_bytes); | ||
3099 | break; | ||
3100 | case 0x60: /* pusha */ | ||
3101 | rc = emulate_pusha(ctxt, ops); | ||
3102 | break; | ||
3103 | case 0x61: /* popa */ | ||
3104 | rc = emulate_popa(ctxt, ops); | ||
3105 | break; | ||
3106 | case 0x63: /* movsxd */ | 3874 | case 0x63: /* movsxd */ |
3107 | if (ctxt->mode != X86EMUL_MODE_PROT64) | 3875 | if (ctxt->mode != X86EMUL_MODE_PROT64) |
3108 | goto cannot_emulate; | 3876 | goto cannot_emulate; |
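
The add/or/adc/sbb/and/sub/xor/cmp arms (and pop reg, pusha, popa, plus the Grp1 goto chain in the next hunk) vanish from this switch because decode now reaches them through opcode.u.execute. A table entry pairs decode flags with a callback, roughly as in this hedged sketch (the I() macro and em_* names mirror this file's style, but the bodies here are stubs):

    #include <stdint.h>

    struct x86_emulate_ctxt;        /* opaque, as in the real emulator */

    struct opcode_entry {
            uint32_t flags;
            int (*execute)(struct x86_emulate_ctxt *ctxt);
    };

    #define I(_f, _e) { .flags = (_f), .execute = (_e) }

    /* Stub bodies, stand-ins for whatever callbacks the table installs. */
    static int em_add(struct x86_emulate_ctxt *ctxt) { (void)ctxt; return 0; }
    static int em_or(struct x86_emulate_ctxt *ctxt)  { (void)ctxt; return 0; }

    static const struct opcode_entry one_byte_table[] = {
            I(0, em_add),   /* would sit at index 0x00: add r/m8, r8 */
            I(0, em_or),    /* would sit at index 0x08: or r/m8, r8 */
    };
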
@@ -3121,26 +3889,6 @@ special_insn: | |||
3121 | if (test_cc(c->b, ctxt->eflags)) | 3889 | if (test_cc(c->b, ctxt->eflags)) |
3122 | jmp_rel(c, c->src.val); | 3890 | jmp_rel(c, c->src.val); |
3123 | break; | 3891 | break; |
3124 | case 0x80 ... 0x83: /* Grp1 */ | ||
3125 | switch (c->modrm_reg) { | ||
3126 | case 0: | ||
3127 | goto add; | ||
3128 | case 1: | ||
3129 | goto or; | ||
3130 | case 2: | ||
3131 | goto adc; | ||
3132 | case 3: | ||
3133 | goto sbb; | ||
3134 | case 4: | ||
3135 | goto and; | ||
3136 | case 5: | ||
3137 | goto sub; | ||
3138 | case 6: | ||
3139 | goto xor; | ||
3140 | case 7: | ||
3141 | goto cmp; | ||
3142 | } | ||
3143 | break; | ||
3144 | case 0x84 ... 0x85: | 3892 | case 0x84 ... 0x85: |
3145 | test: | 3893 | test: |
3146 | emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); | 3894 | emulate_2op_SrcV("test", c->src, c->dst, ctxt->eflags); |
@@ -3162,7 +3910,7 @@ special_insn: | |||
3162 | rc = emulate_ud(ctxt); | 3910 | rc = emulate_ud(ctxt); |
3163 | goto done; | 3911 | goto done; |
3164 | } | 3912 | } |
3165 | c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu); | 3913 | c->dst.val = get_segment_selector(ctxt, c->modrm_reg); |
3166 | break; | 3914 | break; |
3167 | case 0x8d: /* lea r16/r32, m */ | 3915 | case 0x8d: /* lea r16/r32, m */ |
3168 | c->dst.val = c->src.addr.mem.ea; | 3916 | c->dst.val = c->src.addr.mem.ea; |
@@ -3187,7 +3935,7 @@ special_insn: | |||
3187 | break; | 3935 | break; |
3188 | } | 3936 | } |
3189 | case 0x8f: /* pop (sole member of Grp1a) */ | 3937 | case 0x8f: /* pop (sole member of Grp1a) */ |
3190 | rc = emulate_grp1a(ctxt, ops); | 3938 | rc = em_grp1a(ctxt); |
3191 | break; | 3939 | break; |
3192 | case 0x90 ... 0x97: /* nop / xchg reg, rax */ | 3940 | case 0x90 ... 0x97: /* nop / xchg reg, rax */ |
3193 | if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX]) | 3941 | if (c->dst.addr.reg == &c->regs[VCPU_REGS_RAX]) |
@@ -3200,31 +3948,17 @@ special_insn: | |||
3200 | case 8: c->dst.val = (s32)c->dst.val; break; | 3948 | case 8: c->dst.val = (s32)c->dst.val; break; |
3201 | } | 3949 | } |
3202 | break; | 3950 | break; |
3203 | case 0x9c: /* pushf */ | ||
3204 | c->src.val = (unsigned long) ctxt->eflags; | ||
3205 | emulate_push(ctxt, ops); | ||
3206 | break; | ||
3207 | case 0x9d: /* popf */ | ||
3208 | c->dst.type = OP_REG; | ||
3209 | c->dst.addr.reg = &ctxt->eflags; | ||
3210 | c->dst.bytes = c->op_bytes; | ||
3211 | rc = emulate_popf(ctxt, ops, &c->dst.val, c->op_bytes); | ||
3212 | break; | ||
3213 | case 0xa6 ... 0xa7: /* cmps */ | ||
3214 | c->dst.type = OP_NONE; /* Disable writeback. */ | ||
3215 | goto cmp; | ||
3216 | case 0xa8 ... 0xa9: /* test ax, imm */ | 3951 | case 0xa8 ... 0xa9: /* test ax, imm */ |
3217 | goto test; | 3952 | goto test; |
3218 | case 0xae ... 0xaf: /* scas */ | ||
3219 | goto cmp; | ||
3220 | case 0xc0 ... 0xc1: | 3953 | case 0xc0 ... 0xc1: |
3221 | emulate_grp2(ctxt); | 3954 | rc = em_grp2(ctxt); |
3222 | break; | 3955 | break; |
3223 | case 0xc3: /* ret */ | 3956 | case 0xc3: /* ret */ |
3224 | c->dst.type = OP_REG; | 3957 | c->dst.type = OP_REG; |
3225 | c->dst.addr.reg = &c->eip; | 3958 | c->dst.addr.reg = &c->eip; |
3226 | c->dst.bytes = c->op_bytes; | 3959 | c->dst.bytes = c->op_bytes; |
3227 | goto pop_instruction; | 3960 | rc = em_pop(ctxt); |
3961 | break; | ||
3228 | case 0xc4: /* les */ | 3962 | case 0xc4: /* les */ |
3229 | rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES); | 3963 | rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES); |
3230 | break; | 3964 | break; |
@@ -3252,11 +3986,11 @@ special_insn: | |||
3252 | rc = emulate_iret(ctxt, ops); | 3986 | rc = emulate_iret(ctxt, ops); |
3253 | break; | 3987 | break; |
3254 | case 0xd0 ... 0xd1: /* Grp2 */ | 3988 | case 0xd0 ... 0xd1: /* Grp2 */ |
3255 | emulate_grp2(ctxt); | 3989 | rc = em_grp2(ctxt); |
3256 | break; | 3990 | break; |
3257 | case 0xd2 ... 0xd3: /* Grp2 */ | 3991 | case 0xd2 ... 0xd3: /* Grp2 */ |
3258 | c->src.val = c->regs[VCPU_REGS_RCX]; | 3992 | c->src.val = c->regs[VCPU_REGS_RCX]; |
3259 | emulate_grp2(ctxt); | 3993 | rc = em_grp2(ctxt); |
3260 | break; | 3994 | break; |
3261 | case 0xe0 ... 0xe2: /* loop/loopz/loopnz */ | 3995 | case 0xe0 ... 0xe2: /* loop/loopz/loopnz */ |
3262 | register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1); | 3996 | register_address_increment(c, &c->regs[VCPU_REGS_RCX], -1); |
@@ -3278,23 +4012,14 @@ special_insn: | |||
3278 | long int rel = c->src.val; | 4012 | long int rel = c->src.val; |
3279 | c->src.val = (unsigned long) c->eip; | 4013 | c->src.val = (unsigned long) c->eip; |
3280 | jmp_rel(c, rel); | 4014 | jmp_rel(c, rel); |
3281 | emulate_push(ctxt, ops); | 4015 | rc = em_push(ctxt); |
3282 | break; | 4016 | break; |
3283 | } | 4017 | } |
3284 | case 0xe9: /* jmp rel */ | 4018 | case 0xe9: /* jmp rel */ |
3285 | goto jmp; | 4019 | goto jmp; |
3286 | case 0xea: { /* jmp far */ | 4020 | case 0xea: /* jmp far */ |
3287 | unsigned short sel; | 4021 | rc = em_jmp_far(ctxt); |
3288 | jump_far: | ||
3289 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); | ||
3290 | |||
3291 | if (load_segment_descriptor(ctxt, ops, sel, VCPU_SREG_CS)) | ||
3292 | goto done; | ||
3293 | |||
3294 | c->eip = 0; | ||
3295 | memcpy(&c->eip, c->src.valptr, c->op_bytes); | ||
3296 | break; | 4022 | break; |
3297 | } | ||
3298 | case 0xeb: | 4023 | case 0xeb: |
3299 | jmp: /* jmp rel short */ | 4024 | jmp: /* jmp rel short */ |
3300 | jmp_rel(c, c->src.val); | 4025 | jmp_rel(c, c->src.val); |
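
The inline far-jump body and its jump_far label are factored into em_jmp_far(), which both 0xea and the Grp5 path (see the 0xff case below) now reach: read the 16-bit selector that follows the offset, load CS first, and only then overwrite EIP. A standalone model of that ordering (load_cs() is a stub for the descriptor load; like the original memcpy it assumes a little-endian host):

    #include <stdint.h>
    #include <string.h>

    static int load_cs(uint16_t sel) { (void)sel; return 0; /* stub */ }

    static int em_jmp_far_model(const uint8_t *imm, unsigned op_bytes,
                                uint64_t *eip)
    {
            uint16_t sel;
            uint64_t off = 0;

            memcpy(&sel, imm + op_bytes, sizeof(sel));
            if (load_cs(sel) != 0)
                    return -1;              /* fault: leave EIP untouched */
            memcpy(&off, imm, op_bytes);    /* zero-extended offset */
            *eip = off;
            return 0;
    }
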
@@ -3304,11 +4029,6 @@ special_insn: | |||
3304 | case 0xed: /* in (e/r)ax,dx */ | 4029 | case 0xed: /* in (e/r)ax,dx */ |
3305 | c->src.val = c->regs[VCPU_REGS_RDX]; | 4030 | c->src.val = c->regs[VCPU_REGS_RDX]; |
3306 | do_io_in: | 4031 | do_io_in: |
3307 | c->dst.bytes = min(c->dst.bytes, 4u); | ||
3308 | if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) { | ||
3309 | rc = emulate_gp(ctxt, 0); | ||
3310 | goto done; | ||
3311 | } | ||
3312 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, | 4032 | if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val, |
3313 | &c->dst.val)) | 4033 | &c->dst.val)) |
3314 | goto done; /* IO is needed */ | 4034 | goto done; /* IO is needed */ |
@@ -3317,25 +4037,19 @@ special_insn: | |||
3317 | case 0xef: /* out dx,(e/r)ax */ | 4037 | case 0xef: /* out dx,(e/r)ax */ |
3318 | c->dst.val = c->regs[VCPU_REGS_RDX]; | 4038 | c->dst.val = c->regs[VCPU_REGS_RDX]; |
3319 | do_io_out: | 4039 | do_io_out: |
3320 | c->src.bytes = min(c->src.bytes, 4u); | 4040 | ops->pio_out_emulated(ctxt, c->src.bytes, c->dst.val, |
3321 | if (!emulator_io_permited(ctxt, ops, c->dst.val, | 4041 | &c->src.val, 1); |
3322 | c->src.bytes)) { | ||
3323 | rc = emulate_gp(ctxt, 0); | ||
3324 | goto done; | ||
3325 | } | ||
3326 | ops->pio_out_emulated(c->src.bytes, c->dst.val, | ||
3327 | &c->src.val, 1, ctxt->vcpu); | ||
3328 | c->dst.type = OP_NONE; /* Disable writeback. */ | 4042 | c->dst.type = OP_NONE; /* Disable writeback. */ |
3329 | break; | 4043 | break; |
3330 | case 0xf4: /* hlt */ | 4044 | case 0xf4: /* hlt */ |
3331 | ctxt->vcpu->arch.halt_request = 1; | 4045 | ctxt->ops->halt(ctxt); |
3332 | break; | 4046 | break; |
3333 | case 0xf5: /* cmc */ | 4047 | case 0xf5: /* cmc */ |
3334 | /* complement carry flag from eflags reg */ | 4048 | /* complement carry flag from eflags reg */ |
3335 | ctxt->eflags ^= EFLG_CF; | 4049 | ctxt->eflags ^= EFLG_CF; |
3336 | break; | 4050 | break; |
3337 | case 0xf6 ... 0xf7: /* Grp3 */ | 4051 | case 0xf6 ... 0xf7: /* Grp3 */ |
3338 | rc = emulate_grp3(ctxt, ops); | 4052 | rc = em_grp3(ctxt); |
3339 | break; | 4053 | break; |
3340 | case 0xf8: /* clc */ | 4054 | case 0xf8: /* clc */ |
3341 | ctxt->eflags &= ~EFLG_CF; | 4055 | ctxt->eflags &= ~EFLG_CF; |
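
The inline emulator_io_permited() tests and their #GP injection disappear from the in/out fast path; with the check_perm hook in place, I/O permission (IOPL versus CPL, then the TSS I/O bitmap) can be validated once before execution rather than inside each case. A hedged sketch of such a hook, with assumed helper names:

    #include <stdint.h>

    struct io_ctxt {
            unsigned cpl;
            unsigned eflags_iopl;           /* EFLAGS.IOPL field */
            int (*tss_io_allowed)(uint16_t port, unsigned len);
    };

    static int check_perm_io(struct io_ctxt *c, uint16_t port, unsigned len)
    {
            if (c->cpl <= c->eflags_iopl)
                    return 0;               /* privileged enough */
            if (c->tss_io_allowed && c->tss_io_allowed(port, len))
                    return 0;               /* bitmap grants access */
            return -1;                      /* inject #GP(0) */
    }
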
@@ -3366,13 +4080,11 @@ special_insn: | |||
3366 | ctxt->eflags |= EFLG_DF; | 4080 | ctxt->eflags |= EFLG_DF; |
3367 | break; | 4081 | break; |
3368 | case 0xfe: /* Grp4 */ | 4082 | case 0xfe: /* Grp4 */ |
3369 | grp45: | 4083 | rc = em_grp45(ctxt); |
3370 | rc = emulate_grp45(ctxt, ops); | ||
3371 | break; | 4084 | break; |
3372 | case 0xff: /* Grp5 */ | 4085 | case 0xff: /* Grp5 */ |
3373 | if (c->modrm_reg == 5) | 4086 | rc = em_grp45(ctxt); |
3374 | goto jump_far; | 4087 | break; |
3375 | goto grp45; | ||
3376 | default: | 4088 | default: |
3377 | goto cannot_emulate; | 4089 | goto cannot_emulate; |
3378 | } | 4090 | } |
@@ -3381,7 +4093,7 @@ special_insn: | |||
3381 | goto done; | 4093 | goto done; |
3382 | 4094 | ||
3383 | writeback: | 4095 | writeback: |
3384 | rc = writeback(ctxt, ops); | 4096 | rc = writeback(ctxt); |
3385 | if (rc != X86EMUL_CONTINUE) | 4097 | if (rc != X86EMUL_CONTINUE) |
3386 | goto done; | 4098 | goto done; |
3387 | 4099 | ||
@@ -3392,7 +4104,7 @@ writeback: | |||
3392 | c->dst.type = saved_dst_type; | 4104 | c->dst.type = saved_dst_type; |
3393 | 4105 | ||
3394 | if ((c->d & SrcMask) == SrcSI) | 4106 | if ((c->d & SrcMask) == SrcSI) |
3395 | string_addr_inc(ctxt, seg_override(ctxt, ops, c), | 4107 | string_addr_inc(ctxt, seg_override(ctxt, c), |
3396 | VCPU_REGS_RSI, &c->src); | 4108 | VCPU_REGS_RSI, &c->src); |
3397 | 4109 | ||
3398 | if ((c->d & DstMask) == DstDI) | 4110 | if ((c->d & DstMask) == DstDI) |
@@ -3427,115 +4139,34 @@ writeback: | |||
3427 | done: | 4139 | done: |
3428 | if (rc == X86EMUL_PROPAGATE_FAULT) | 4140 | if (rc == X86EMUL_PROPAGATE_FAULT) |
3429 | ctxt->have_exception = true; | 4141 | ctxt->have_exception = true; |
4142 | if (rc == X86EMUL_INTERCEPTED) | ||
4143 | return EMULATION_INTERCEPTED; | ||
4144 | |||
3430 | return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; | 4145 | return (rc == X86EMUL_UNHANDLEABLE) ? EMULATION_FAILED : EMULATION_OK; |
3431 | 4146 | ||
3432 | twobyte_insn: | 4147 | twobyte_insn: |
3433 | switch (c->b) { | 4148 | switch (c->b) { |
3434 | case 0x01: /* lgdt, lidt, lmsw */ | ||
3435 | switch (c->modrm_reg) { | ||
3436 | u16 size; | ||
3437 | unsigned long address; | ||
3438 | |||
3439 | case 0: /* vmcall */ | ||
3440 | if (c->modrm_mod != 3 || c->modrm_rm != 1) | ||
3441 | goto cannot_emulate; | ||
3442 | |||
3443 | rc = kvm_fix_hypercall(ctxt->vcpu); | ||
3444 | if (rc != X86EMUL_CONTINUE) | ||
3445 | goto done; | ||
3446 | |||
3447 | /* Let the processor re-execute the fixed hypercall */ | ||
3448 | c->eip = ctxt->eip; | ||
3449 | /* Disable writeback. */ | ||
3450 | c->dst.type = OP_NONE; | ||
3451 | break; | ||
3452 | case 2: /* lgdt */ | ||
3453 | rc = read_descriptor(ctxt, ops, c->src.addr.mem, | ||
3454 | &size, &address, c->op_bytes); | ||
3455 | if (rc != X86EMUL_CONTINUE) | ||
3456 | goto done; | ||
3457 | realmode_lgdt(ctxt->vcpu, size, address); | ||
3458 | /* Disable writeback. */ | ||
3459 | c->dst.type = OP_NONE; | ||
3460 | break; | ||
3461 | case 3: /* lidt/vmmcall */ | ||
3462 | if (c->modrm_mod == 3) { | ||
3463 | switch (c->modrm_rm) { | ||
3464 | case 1: | ||
3465 | rc = kvm_fix_hypercall(ctxt->vcpu); | ||
3466 | break; | ||
3467 | default: | ||
3468 | goto cannot_emulate; | ||
3469 | } | ||
3470 | } else { | ||
3471 | rc = read_descriptor(ctxt, ops, c->src.addr.mem, | ||
3472 | &size, &address, | ||
3473 | c->op_bytes); | ||
3474 | if (rc != X86EMUL_CONTINUE) | ||
3475 | goto done; | ||
3476 | realmode_lidt(ctxt->vcpu, size, address); | ||
3477 | } | ||
3478 | /* Disable writeback. */ | ||
3479 | c->dst.type = OP_NONE; | ||
3480 | break; | ||
3481 | case 4: /* smsw */ | ||
3482 | c->dst.bytes = 2; | ||
3483 | c->dst.val = ops->get_cr(0, ctxt->vcpu); | ||
3484 | break; | ||
3485 | case 6: /* lmsw */ | ||
3486 | ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) | | ||
3487 | (c->src.val & 0x0f), ctxt->vcpu); | ||
3488 | c->dst.type = OP_NONE; | ||
3489 | break; | ||
3490 | case 5: /* not defined */ | ||
3491 | emulate_ud(ctxt); | ||
3492 | rc = X86EMUL_PROPAGATE_FAULT; | ||
3493 | goto done; | ||
3494 | case 7: /* invlpg*/ | ||
3495 | emulate_invlpg(ctxt->vcpu, | ||
3496 | linear(ctxt, c->src.addr.mem)); | ||
3497 | /* Disable writeback. */ | ||
3498 | c->dst.type = OP_NONE; | ||
3499 | break; | ||
3500 | default: | ||
3501 | goto cannot_emulate; | ||
3502 | } | ||
3503 | break; | ||
3504 | case 0x05: /* syscall */ | 4149 | case 0x05: /* syscall */ |
3505 | rc = emulate_syscall(ctxt, ops); | 4150 | rc = emulate_syscall(ctxt, ops); |
3506 | break; | 4151 | break; |
3507 | case 0x06: | 4152 | case 0x06: |
3508 | emulate_clts(ctxt->vcpu); | 4153 | rc = em_clts(ctxt); |
3509 | break; | 4154 | break; |
3510 | case 0x09: /* wbinvd */ | 4155 | case 0x09: /* wbinvd */ |
3511 | kvm_emulate_wbinvd(ctxt->vcpu); | 4156 | (ctxt->ops->wbinvd)(ctxt); |
3512 | break; | 4157 | break; |
3513 | case 0x08: /* invd */ | 4158 | case 0x08: /* invd */ |
3514 | case 0x0d: /* GrpP (prefetch) */ | 4159 | case 0x0d: /* GrpP (prefetch) */ |
3515 | case 0x18: /* Grp16 (prefetch/nop) */ | 4160 | case 0x18: /* Grp16 (prefetch/nop) */ |
3516 | break; | 4161 | break; |
3517 | case 0x20: /* mov cr, reg */ | 4162 | case 0x20: /* mov cr, reg */ |
3518 | switch (c->modrm_reg) { | 4163 | c->dst.val = ops->get_cr(ctxt, c->modrm_reg); |
3519 | case 1: | ||
3520 | case 5 ... 7: | ||
3521 | case 9 ... 15: | ||
3522 | emulate_ud(ctxt); | ||
3523 | rc = X86EMUL_PROPAGATE_FAULT; | ||
3524 | goto done; | ||
3525 | } | ||
3526 | c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu); | ||
3527 | break; | 4164 | break; |
3528 | case 0x21: /* mov from dr to reg */ | 4165 | case 0x21: /* mov from dr to reg */ |
3529 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && | 4166 | ops->get_dr(ctxt, c->modrm_reg, &c->dst.val); |
3530 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { | ||
3531 | emulate_ud(ctxt); | ||
3532 | rc = X86EMUL_PROPAGATE_FAULT; | ||
3533 | goto done; | ||
3534 | } | ||
3535 | ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu); | ||
3536 | break; | 4167 | break; |
3537 | case 0x22: /* mov reg, cr */ | 4168 | case 0x22: /* mov reg, cr */ |
3538 | if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) { | 4169 | if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) { |
3539 | emulate_gp(ctxt, 0); | 4170 | emulate_gp(ctxt, 0); |
3540 | rc = X86EMUL_PROPAGATE_FAULT; | 4171 | rc = X86EMUL_PROPAGATE_FAULT; |
3541 | goto done; | 4172 | goto done; |
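
The mov to/from CR and DR cases shed their inline #UD filtering: rejecting reserved control registers and, when CR4.DE is set, the DR4/DR5 aliases becomes a job for per-opcode check_perm callbacks, leaving the execute path as a bare ops->get_cr()/ops->get_dr() call. The conditions themselves are unchanged, roughly:

    #include <stdint.h>

    #define CR4_DE (1u << 3)        /* debugging extensions */

    enum fault { FAULT_NONE, FAULT_UD };

    static enum fault check_cr_access(unsigned cr)
    {
            switch (cr) {
            case 0: case 2: case 3: case 4: case 8:
                    return FAULT_NONE;      /* architecturally valid CRs */
            default:
                    return FAULT_UD;        /* CR1, CR5..CR7, CR9..CR15 */
            }
    }

    static enum fault check_dr_access(unsigned dr, uint32_t cr4)
    {
            if ((dr == 4 || dr == 5) && (cr4 & CR4_DE))
                    return FAULT_UD;        /* DR4/5 aliases disabled */
            return dr <= 7 ? FAULT_NONE : FAULT_UD;
    }
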
@@ -3543,16 +4174,9 @@ twobyte_insn: | |||
3543 | c->dst.type = OP_NONE; | 4174 | c->dst.type = OP_NONE; |
3544 | break; | 4175 | break; |
3545 | case 0x23: /* mov from reg to dr */ | 4176 | case 0x23: /* mov from reg to dr */ |
3546 | if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) && | 4177 | if (ops->set_dr(ctxt, c->modrm_reg, c->src.val & |
3547 | (c->modrm_reg == 4 || c->modrm_reg == 5)) { | ||
3548 | emulate_ud(ctxt); | ||
3549 | rc = X86EMUL_PROPAGATE_FAULT; | ||
3550 | goto done; | ||
3551 | } | ||
3552 | |||
3553 | if (ops->set_dr(c->modrm_reg, c->src.val & | ||
3554 | ((ctxt->mode == X86EMUL_MODE_PROT64) ? | 4178 | ((ctxt->mode == X86EMUL_MODE_PROT64) ? |
3555 | ~0ULL : ~0U), ctxt->vcpu) < 0) { | 4179 | ~0ULL : ~0U)) < 0) { |
3556 | /* #UD condition is already handled by the code above */ | 4180 | /* #UD condition is already handled by the code above */ |
3557 | emulate_gp(ctxt, 0); | 4181 | emulate_gp(ctxt, 0); |
3558 | rc = X86EMUL_PROPAGATE_FAULT; | 4182 | rc = X86EMUL_PROPAGATE_FAULT; |
@@ -3565,7 +4189,7 @@ twobyte_insn: | |||
3565 | /* wrmsr */ | 4189 | /* wrmsr */ |
3566 | msr_data = (u32)c->regs[VCPU_REGS_RAX] | 4190 | msr_data = (u32)c->regs[VCPU_REGS_RAX] |
3567 | | ((u64)c->regs[VCPU_REGS_RDX] << 32); | 4191 | | ((u64)c->regs[VCPU_REGS_RDX] << 32); |
3568 | if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) { | 4192 | if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) { |
3569 | emulate_gp(ctxt, 0); | 4193 | emulate_gp(ctxt, 0); |
3570 | rc = X86EMUL_PROPAGATE_FAULT; | 4194 | rc = X86EMUL_PROPAGATE_FAULT; |
3571 | goto done; | 4195 | goto done; |
@@ -3574,7 +4198,7 @@ twobyte_insn: | |||
3574 | break; | 4198 | break; |
3575 | case 0x32: | 4199 | case 0x32: |
3576 | /* rdmsr */ | 4200 | /* rdmsr */ |
3577 | if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) { | 4201 | if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) { |
3578 | emulate_gp(ctxt, 0); | 4202 | emulate_gp(ctxt, 0); |
3579 | rc = X86EMUL_PROPAGATE_FAULT; | 4203 | rc = X86EMUL_PROPAGATE_FAULT; |
3580 | goto done; | 4204 | goto done; |
@@ -3603,7 +4227,7 @@ twobyte_insn: | |||
3603 | c->dst.val = test_cc(c->b, ctxt->eflags); | 4227 | c->dst.val = test_cc(c->b, ctxt->eflags); |
3604 | break; | 4228 | break; |
3605 | case 0xa0: /* push fs */ | 4229 | case 0xa0: /* push fs */ |
3606 | emulate_push_sreg(ctxt, ops, VCPU_SREG_FS); | 4230 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS); |
3607 | break; | 4231 | break; |
3608 | case 0xa1: /* pop fs */ | 4232 | case 0xa1: /* pop fs */ |
3609 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); | 4233 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS); |
@@ -3620,7 +4244,7 @@ twobyte_insn: | |||
3620 | emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); | 4244 | emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags); |
3621 | break; | 4245 | break; |
3622 | case 0xa8: /* push gs */ | 4246 | case 0xa8: /* push gs */ |
3623 | emulate_push_sreg(ctxt, ops, VCPU_SREG_GS); | 4247 | rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS); |
3624 | break; | 4248 | break; |
3625 | case 0xa9: /* pop gs */ | 4249 | case 0xa9: /* pop gs */ |
3626 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); | 4250 | rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS); |
@@ -3727,7 +4351,7 @@ twobyte_insn: | |||
3727 | (u64) c->src.val; | 4351 | (u64) c->src.val; |
3728 | break; | 4352 | break; |
3729 | case 0xc7: /* Grp9 (cmpxchg8b) */ | 4353 | case 0xc7: /* Grp9 (cmpxchg8b) */ |
3730 | rc = emulate_grp9(ctxt, ops); | 4354 | rc = em_grp9(ctxt); |
3731 | break; | 4355 | break; |
3732 | default: | 4356 | default: |
3733 | goto cannot_emulate; | 4357 | goto cannot_emulate; |
@@ -3739,5 +4363,5 @@ twobyte_insn: | |||
3739 | goto writeback; | 4363 | goto writeback; |
3740 | 4364 | ||
3741 | cannot_emulate: | 4365 | cannot_emulate: |
3742 | return -1; | 4366 | return EMULATION_FAILED; |
3743 | } | 4367 | } |
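
Both exit paths now speak the same vocabulary: EMULATION_FAILED instead of a bare -1, EMULATION_OK for success (including the case where a fault was queued via have_exception), and the new EMULATION_INTERCEPTED when a nested hypervisor claimed the instruction. A summary sketch of the boundary mapping, using the names visible in this diff:

    enum { EMULATION_OK, EMULATION_FAILED, EMULATION_INTERCEPTED };
    enum { X86EMUL_CONTINUE, X86EMUL_UNHANDLEABLE, X86EMUL_PROPAGATE_FAULT,
           X86EMUL_INTERCEPTED };

    static int map_result(int rc)
    {
            if (rc == X86EMUL_INTERCEPTED)
                    return EMULATION_INTERCEPTED;   /* nested exit, no retry */
            if (rc == X86EMUL_UNHANDLEABLE)
                    return EMULATION_FAILED;        /* caller must fall back */
            return EMULATION_OK;                    /* includes queued faults */
    }
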