Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r--  arch/x86/kvm/emulate.c  63
1 file changed, 48 insertions(+), 15 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 749f9fa38254..9f8a2faf5040 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -574,12 +574,14 @@ static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
 	case 4:
 		ctxt->_eip = (u32)dst;
 		break;
+#ifdef CONFIG_X86_64
 	case 8:
 		if ((cs_l && is_noncanonical_address(dst)) ||
-		    (!cs_l && (dst & ~(u32)-1)))
+		    (!cs_l && (dst >> 32) != 0))
 			return emulate_gp(ctxt, 0);
 		ctxt->_eip = dst;
 		break;
+#endif
 	default:
 		WARN(1, "unsupported eip assignment size\n");
 	}
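The compat-mode half of that test is worth spelling out: in the old code the
mask ~(u32)-1 is computed in 32 bits, so it folds to zero and the check could
never fire; (dst >> 32) != 0 tests the high dword directly. The new #ifdef
simply compiles the 8-byte case out of 32-bit hosts, where it is unreachable.
A minimal standalone sketch of the fixed logic, assuming 48 implemented
virtual-address bits (the helper names are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>

    /* Canonical iff bits 63:48 are a sign extension of bit 47; same
     * shift trick as the kernel's test. */
    static bool is_noncanonical_48(uint64_t addr)
    {
        return (uint64_t)(((int64_t)addr << 16) >> 16) != addr;
    }

    /* Mirrors the patched "case 8": may a far branch land at dst? */
    static bool eip_target_ok(uint64_t dst, bool cs_l)
    {
        if (cs_l)                        /* 64-bit code segment */
            return !is_noncanonical_48(dst);
        return (dst >> 32) == 0;         /* 32-bit CS: target must fit in EIP */
    }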
@@ -641,7 +643,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 
 static int __linearize(struct x86_emulate_ctxt *ctxt,
 		       struct segmented_address addr,
-		       unsigned size, bool write, bool fetch,
+		       unsigned *max_size, unsigned size,
+		       bool write, bool fetch,
 		       ulong *linear)
 {
 	struct desc_struct desc;
@@ -652,10 +655,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	unsigned cpl;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
+	*max_size = 0;
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT64:
 		if (((signed long)la << 16) >> 16 != la)
 			return emulate_gp(ctxt, 0);
+
+		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
+		if (size > *max_size)
+			goto bad;
 		break;
 	default:
 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
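In 64-bit mode there is no segment limit, so the only upper bound on an access
at linear address la is the canonical hole that starts at 2^48:
(1ull << 48) - la is the room left below it, and the min_t(u64, ~0u, ...)
clamp keeps the result representable in the 32-bit *max_size. A standalone
sketch of the same computation (not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    /* Bytes usable at canonical address la before the hole at 2^48,
     * clamped to 32 bits as min_t(u64, ~0u, (1ull << 48) - la) does. */
    static uint32_t prot64_max_size(uint64_t la)
    {
        uint64_t room = (1ull << 48) - la;
        return room > 0xffffffffull ? 0xffffffffu : (uint32_t)room;
    }

    int main(void)
    {
        printf("%u\n", (unsigned)prot64_max_size(0x0000fffffffffff0ull)); /* 16 */
        printf("%u\n", (unsigned)prot64_max_size(0x1000ull)); /* 4294967295 (clamped) */
        return 0;
    }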
@@ -673,20 +681,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
 		    (ctxt->d & NoBigReal)) {
 			/* la is between zero and 0xffff */
-			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
+			if (la > 0xffff)
 				goto bad;
+			*max_size = 0x10000 - la;
 		} else if ((desc.type & 8) || !(desc.type & 4)) {
 			/* expand-up segment */
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		} else {
 			/* expand-down segment */
-			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
+			if (addr.ea <= lim)
 				goto bad;
 			lim = desc.d ? 0xffffffff : 0xffff;
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		}
+		if (size > *max_size)
+			goto bad;
 		cpl = ctxt->ops->cpl(ctxt);
 		if (!(desc.type & 8)) {
 			/* data segment */
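The segmented modes follow the same pattern: instead of probing
addr.ea + size - 1 against the limit with arithmetic the old code truncated to
32 bits (and could therefore wrap), each branch records how many bytes are
usable from addr.ea, namely the inclusive limit plus one minus the offset,
computed in 64 bits so a 4 GiB flat segment does not overflow; a single
size > *max_size test then replaces the three old per-branch checks. A
hypothetical expand-up helper showing the shape of the calculation:

    #include <stdint.h>

    /* Bytes usable at offset ea in an expand-up segment with inclusive
     * limit lim; returns 0 where the kernel code would goto bad. */
    static uint32_t expand_up_max_size(uint32_t ea, uint32_t lim)
    {
        if (ea > lim)
            return 0;
        /* 64-bit arithmetic: lim + 1 overflows 32 bits when lim = 0xffffffff. */
        uint64_t room = (uint64_t)lim + 1 - ea;
        return room > 0xffffffffull ? 0xffffffffu : (uint32_t)room;
    }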
@@ -711,9 +724,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 bad:
 	if (addr.seg == VCPU_SREG_SS)
-		return emulate_ss(ctxt, sel);
+		return emulate_ss(ctxt, 0);
 	else
-		return emulate_gp(ctxt, sel);
+		return emulate_gp(ctxt, 0);
 }
 
 static int linearize(struct x86_emulate_ctxt *ctxt,
@@ -721,7 +734,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		     unsigned size, bool write,
 		     ulong *linear)
 {
-	return __linearize(ctxt, addr, size, write, false, linear);
+	unsigned max_size;
+	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
 }
 
 
@@ -746,17 +760,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
 	int rc;
-	unsigned size;
+	unsigned size, max_size;
 	unsigned long linear;
 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = ctxt->eip + cur_size };
 
-	size = 15UL ^ cur_size;
-	rc = __linearize(ctxt, addr, size, false, true, &linear);
+	/*
+	 * We do not know exactly how many bytes will be needed, and
+	 * __linearize is expensive, so fetch as much as possible.  We
+	 * just have to avoid going beyond the 15 byte limit, the end
+	 * of the segment, or the end of the page.
+	 *
+	 * __linearize is called with size 0 so that it does not do any
+	 * boundary check itself.  Instead, we use max_size to check
+	 * against op_size.
+	 */
+	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 
+	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 
 	/*
@@ -766,7 +790,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * still, we must have hit the 15-byte boundary.
 	 */
 	if (unlikely(size < op_size))
-		return X86EMUL_UNHANDLEABLE;
+		return emulate_gp(ctxt, 0);
+
 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 			      size, &ctxt->exception);
 	if (unlikely(rc != X86EMUL_CONTINUE))
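With the boundary checks hoisted into max_size, the fetch path linearizes once
with size 0 and then clips its window to three bounds: the architectural
15-byte instruction-length cap (15UL ^ cur_size equals 15 - cur_size for
cur_size in 0..15, since XOR with 15 just inverts the low four bits), the
segment or canonical bound in max_size, and the end of the current page. Note
also that failing the size < op_size test now injects #GP(0) into the guest,
the architecturally expected fault for a fetch past the limit, instead of
bailing out to the host with X86EMUL_UNHANDLEABLE. A standalone sketch of the
clipping (names and the page constant are illustrative):

    #include <stdint.h>

    #define PAGE_SIZE_BYTES 4096u

    /* How many instruction bytes to fetch at once: never past the 15-byte
     * insn limit, the segment bound, or the end of the current page. */
    static unsigned fetch_window(unsigned cur_size, unsigned max_size,
                                 uint64_t linear)
    {
        unsigned size = 15u ^ cur_size;   /* == 15 - cur_size for 0..15 */
        if (size > max_size)
            size = max_size;
        unsigned in_page = PAGE_SIZE_BYTES -
                           (unsigned)(linear & (PAGE_SIZE_BYTES - 1));
        return size < in_page ? size : in_page;
    }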
@@ -2012,7 +2037,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 
 	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
 	if (rc != X86EMUL_CONTINUE) {
-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		/* assigning eip failed; restore the old cs */
 		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
 		return rc;
@@ -2109,7 +2134,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 		return rc;
 	rc = assign_eip_far(ctxt, eip, new_desc.l);
 	if (rc != X86EMUL_CONTINUE) {
-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
 	}
 	return rc;
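The WARN_ON change in the two hunks above fixes an operator-precedence slip:
!ctxt->mode != X86EMUL_MODE_PROT64 parses as
(!ctxt->mode) != X86EMUL_MODE_PROT64, comparing a 0-or-1 value against an enum
constant. The corrected test warns when assign_eip_far fails outside 64-bit
mode, the only mode whose canonical check can reject a target, so a failure
anywhere else would indicate an emulator bug.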
@@ -4262,6 +4287,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		fetch_register_operand(op);
 		break;
 	case OpCL:
+		op->type = OP_IMM;
 		op->bytes = 1;
 		op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
 		break;
@@ -4269,6 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		rc = decode_imm(ctxt, op, 1, true);
 		break;
 	case OpOne:
+		op->type = OP_IMM;
 		op->bytes = 1;
 		op->val = 1;
 		break;
@@ -4327,21 +4354,27 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
 		ctxt->memop.bytes = ctxt->op_bytes + 2;
 		goto mem_common;
 	case OpES:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_ES;
 		break;
 	case OpCS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_CS;
 		break;
 	case OpSS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_SS;
 		break;
 	case OpDS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_DS;
 		break;
 	case OpFS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_FS;
 		break;
 	case OpGS:
+		op->type = OP_IMM;
 		op->val = VCPU_SREG_GS;
 		break;
 	case OpImplicit:
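Every operand decode_operand produces is later dispatched on op->type (for
fetch and writeback), and these cases previously set only bytes and val,
leaving type to whatever the decode state happened to hold. Tagging them
OP_IMM states explicitly that the value is already in val and needs no
register or memory writeback. A simplified sketch of the invariant (the enum
and helper are illustrative, not the emulator's):

    #include <stdint.h>

    enum op_type { OP_NONE, OP_REG, OP_MEM, OP_IMM };

    struct operand {
        enum op_type type;
        unsigned bytes;
        uint64_t val;
    };

    /* Like the patched OpOne case: a fully described immediate operand.
     * An unset type field would make writeback misinterpret val. */
    static void decode_op_one(struct operand *op)
    {
        op->type = OP_IMM;   /* immediate: nothing to fetch or write back */
        op->bytes = 1;
        op->val = 1;
    }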