author    Paolo Bonzini <pbonzini@redhat.com>  2014-10-27 09:40:39 -0400
committer Paolo Bonzini <pbonzini@redhat.com>  2014-10-29 08:13:48 -0400
commit    fd56e1546a5f734290cbedd2b81c518850736511 (patch)
tree      859bbfa5437caf77f1e53a5b1a43b7ac01fba58b /arch/x86/kvm
parent    3606189fa3da6afcad0cbbc9b91e94f1f158da5a (diff)
KVM: emulator: fix execution close to the segment limit
Emulation of code that is 14 bytes to the segment limit or closer
(e.g. RIP = 0xFFFFFFF2 after reset) is broken because we try to read as
many as 15 bytes from the beginning of the instruction, and __linearize
fails when the passed (address, size) pair reaches out of the segment.

To fix this, let __linearize return the maximum accessible size (clamped
to 2^32-1) for usage in __do_insn_fetch_bytes, and avoid the limit check
by passing zero for the desired size.

For expand-down segments, __linearize is performing a redundant check.
(u32)(addr.ea + size - 1) <= lim can only happen if addr.ea is close to
4GB; in this case, addr.ea + size - 1 will also fail the check against
the upper bound of the segment (which is provided by the D/B bit).
After eliminating the redundant check, it is simple to compute
the *max_size for expand-down segments too.

Now that the limit check is done in __do_insn_fetch_bytes, we want to
inject a general protection fault there if size < op_size (like
__linearize would have done), instead of just aborting.

This fixes booting Tiano Core from emulated flash with EPT disabled.

Cc: stable@vger.kernel.org
Fixes: 719d5a9b2487e0562f178f61e323c3dc18a8b200
Reported-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
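[Editor's illustration, not part of the patch] To make the failure mode concrete, the following minimal standalone sketch uses values modeled on the post-reset case from the commit message: the fetcher asks for 15 bytes when only 14 remain below the CS limit, so the old check rejects a fetch that a real CPU would allow.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t lim  = 0xffff;    /* CS limit, modeled */
        uint32_t ea   = 0xfff2;    /* effective address of the fetch */
        uint32_t size = 15;        /* the fetcher always asked for 15 bytes */

        /* Old __linearize check: any overrun of the limit is fatal,
         * even if the instruction itself fits inside the segment. */
        if (ea > lim || (uint32_t)(ea + size - 1) > lim)
                printf("old check: fetch rejected\n");

        /* New scheme: report how many bytes are accessible and let
         * the caller clamp the fetch instead of failing outright. */
        uint64_t max_size = (uint64_t)lim + 1 - ea;
        printf("new scheme: max_size = %llu bytes\n",
               (unsigned long long)max_size);       /* prints 14 */
        return 0;
}

Compiled standalone, this prints that the old check fails while the new scheme reports max_size = 14, which is exactly what __do_insn_fetch_bytes needs in order to clamp the fetch.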
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/emulate.c | 43
1 file changed, 33 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8aa66068712f..52a96270b560 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -641,7 +641,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 
 static int __linearize(struct x86_emulate_ctxt *ctxt,
                        struct segmented_address addr,
-                       unsigned size, bool write, bool fetch,
+                       unsigned *max_size, unsigned size,
+                       bool write, bool fetch,
                        ulong *linear)
 {
         struct desc_struct desc;
@@ -652,10 +653,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
         unsigned cpl;
 
         la = seg_base(ctxt, addr.seg) + addr.ea;
+        *max_size = 0;
         switch (ctxt->mode) {
         case X86EMUL_MODE_PROT64:
                 if (((signed long)la << 16) >> 16 != la)
                         return emulate_gp(ctxt, 0);
+
+                *max_size = min_t(u64, ~0u, (1ull << 48) - la);
+                if (size > *max_size)
+                        goto bad;
                 break;
         default:
                 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
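[Editor's illustration, not part of the patch] The long-mode branch above caps *max_size at ~0u because the out-parameter is only an unsigned int. A standalone sketch of that clamp follows; prot64_max_size is an illustrative helper, not a kernel function, and it assumes la already passed the canonical-address check above:

#include <stdio.h>
#include <stdint.h>

/* Mirrors *max_size = min_t(u64, ~0u, (1ull << 48) - la). */
static unsigned prot64_max_size(uint64_t la)
{
        uint64_t left = (1ull << 48) - la;   /* bytes below 2^48 */

        return left > 0xffffffffull ? 0xffffffffu : (unsigned)left;
}

int main(void)
{
        printf("%u\n", prot64_max_size(0x10000));          /* clamped to ~0u */
        printf("%u\n", prot64_max_size((1ull << 48) - 5)); /* 5 bytes left */
        return 0;
}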
@@ -673,20 +679,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
                 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
                     (ctxt->d & NoBigReal)) {
                         /* la is between zero and 0xffff */
-                        if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
+                        if (la > 0xffff)
                                 goto bad;
+                        *max_size = 0x10000 - la;
                 } else if ((desc.type & 8) || !(desc.type & 4)) {
                         /* expand-up segment */
-                        if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+                        if (addr.ea > lim)
                                 goto bad;
+                        *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
                 } else {
                         /* expand-down segment */
-                        if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
+                        if (addr.ea <= lim)
                                 goto bad;
                         lim = desc.d ? 0xffffffff : 0xffff;
-                        if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+                        if (addr.ea > lim)
                                 goto bad;
+                        *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
                 }
+                if (size > *max_size)
+                        goto bad;
                 cpl = ctxt->ops->cpl(ctxt);
                 if (!(desc.type & 8)) {
                         /* data segment */
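[Editor's illustration, not part of the patch] For the expand-down case, the hunk above drops the wrap check and instead computes how many bytes remain before the offset runs past the segment's upper bound. A standalone sketch under the same rules; expand_down_max_size is an illustrative helper, not a kernel function:

#include <stdio.h>
#include <stdint.h>

/* For an expand-down segment the valid offsets are lim+1 .. upper,
 * where upper is 0xffffffff (D/B = 1) or 0xffff (D/B = 0). */
static uint64_t expand_down_max_size(uint32_t ea, uint32_t lim, int db)
{
        uint32_t upper = db ? 0xffffffffu : 0xffffu;

        if (ea <= lim || ea > upper)
                return 0;                        /* outside the segment */
        return (uint64_t)upper + 1 - ea;         /* bytes before wrap */
}

int main(void)
{
        /* ea = 0xfffffff8 in a D/B=1 segment with lim = 0xfff: only
         * 8 bytes remain, so a 15-byte request now trips the generic
         * size > *max_size test instead of the old wrap check. */
        printf("max_size = %llu\n", (unsigned long long)
               expand_down_max_size(0xfffffff8u, 0xfff, 1));   /* 8 */
        return 0;
}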
@@ -721,7 +732,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
                      unsigned size, bool write,
                      ulong *linear)
 {
-        return __linearize(ctxt, addr, size, write, false, linear);
+        unsigned max_size;
+        return __linearize(ctxt, addr, &max_size, size, write, false, linear);
 }
 
 
@@ -746,17 +758,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
         int rc;
-        unsigned size;
+        unsigned size, max_size;
         unsigned long linear;
         int cur_size = ctxt->fetch.end - ctxt->fetch.data;
         struct segmented_address addr = { .seg = VCPU_SREG_CS,
                                            .ea = ctxt->eip + cur_size };
 
-        size = 15UL ^ cur_size;
-        rc = __linearize(ctxt, addr, size, false, true, &linear);
+        /*
+         * We do not know exactly how many bytes will be needed, and
+         * __linearize is expensive, so fetch as much as possible.  We
+         * just have to avoid going beyond the 15 byte limit, the end
+         * of the segment, or the end of the page.
+         *
+         * __linearize is called with size 0 so that it does not do any
+         * boundary check itself.  Instead, we use max_size to check
+         * against op_size.
+         */
+        rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
         if (unlikely(rc != X86EMUL_CONTINUE))
                 return rc;
 
+        size = min_t(unsigned, 15UL ^ cur_size, max_size);
         size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 
         /*
@@ -766,7 +788,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
          * still, we must have hit the 15-byte boundary.
          */
         if (unlikely(size < op_size))
-                return X86EMUL_UNHANDLEABLE;
+                return emulate_gp(ctxt, 0);
+
         rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
                               size, &ctxt->exception);
         if (unlikely(rc != X86EMUL_CONTINUE))
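[Editor's illustration, not part of the patch] Taken together, the fetcher now clamps the fetch length three ways: the 15-byte x86 instruction limit, the segment (max_size), and the current page. A standalone sketch of that clamp; fetch_size is an illustrative helper, and PAGE_SIZE/offset_in_page are modeled locally:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define offset_in_page(p) ((p) & (PAGE_SIZE - 1))

static unsigned long fetch_size(unsigned long cur_size,
                                unsigned long max_size,
                                unsigned long linear)
{
        /* 15UL ^ cur_size equals 15 - cur_size for cur_size in [0, 15],
         * i.e. the room left in the 15-byte instruction buffer. */
        unsigned long size = 15UL ^ cur_size;

        if (size > max_size)
                size = max_size;                           /* segment end */
        if (size > PAGE_SIZE - offset_in_page(linear))
                size = PAGE_SIZE - offset_in_page(linear); /* page end */
        return size;
}

int main(void)
{
        /* Post-reset case: 14 bytes to the segment end, empty buffer. */
        printf("%lu\n", fetch_size(0, 14, 0xfffffff2UL));  /* prints 14 */
        return 0;
}

If the clamped size is still smaller than op_size, the instruction genuinely crosses the segment limit, and the emulator now injects #GP, matching what __linearize would previously have done.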