aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/emulate.c
diff options
context:
space:
mode:
authorPaolo Bonzini <pbonzini@redhat.com>2014-05-06 10:33:01 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2014-07-11 03:14:03 -0400
commit17052f16a51af6d8f4b7eee0631af675ac204f65 (patch)
treef2fa2f60c38e38ed175897b419ddb7b2df7901b2 /arch/x86/kvm/emulate.c
parent9506d57de3bc8277a4e306e0d439976862f68c6d (diff)
KVM: emulate: put pointers in the fetch_cache
This simplifies the code a bit, especially the overflow checks. Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r-- arch/x86/kvm/emulate.c | 34
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 02c668aca2b6..c16314807756 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -710,16 +710,15 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
710 */ 710 */
711static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) 711static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
712{ 712{
713 struct fetch_cache *fc = &ctxt->fetch;
714 int rc; 713 int rc;
715 int size, cur_size; 714 int size;
716 unsigned long linear; 715 unsigned long linear;
717 716 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
718 struct segmented_address addr = { .seg = VCPU_SREG_CS, 717 struct segmented_address addr = { .seg = VCPU_SREG_CS,
719 .ea = fc->end }; 718 .ea = ctxt->eip + cur_size };
720 cur_size = fc->end - fc->start; 719
721 size = min(15UL - cur_size, 720 size = min(15UL - cur_size,
722 PAGE_SIZE - offset_in_page(fc->end)); 721 PAGE_SIZE - offset_in_page(addr.ea));
723 722
724 /* 723 /*
725 * One instruction can only straddle two pages, 724 * One instruction can only straddle two pages,
@@ -732,19 +731,18 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
732 rc = __linearize(ctxt, addr, size, false, true, &linear); 731 rc = __linearize(ctxt, addr, size, false, true, &linear);
733 if (unlikely(rc != X86EMUL_CONTINUE)) 732 if (unlikely(rc != X86EMUL_CONTINUE))
734 return rc; 733 return rc;
735 rc = ctxt->ops->fetch(ctxt, linear, fc->data + cur_size, 734 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
736 size, &ctxt->exception); 735 size, &ctxt->exception);
737 if (unlikely(rc != X86EMUL_CONTINUE)) 736 if (unlikely(rc != X86EMUL_CONTINUE))
738 return rc; 737 return rc;
739 fc->end += size; 738 ctxt->fetch.end += size;
740 return X86EMUL_CONTINUE; 739 return X86EMUL_CONTINUE;
741} 740}
742 741
743static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, 742static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
744 unsigned size) 743 unsigned size)
745{ 744{
746 /* We have to be careful about overflow! */ 745 if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size))
747 if (unlikely(ctxt->_eip > ctxt->fetch.end - size))
748 return __do_insn_fetch_bytes(ctxt, size); 746 return __do_insn_fetch_bytes(ctxt, size);
749 else 747 else
750 return X86EMUL_CONTINUE; 748 return X86EMUL_CONTINUE;
@@ -753,26 +751,24 @@ static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
753/* Fetch next part of the instruction being emulated. */ 751/* Fetch next part of the instruction being emulated. */
754#define insn_fetch(_type, _ctxt) \ 752#define insn_fetch(_type, _ctxt) \
755({ _type _x; \ 753({ _type _x; \
756 struct fetch_cache *_fc; \
757 \ 754 \
758 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \ 755 rc = do_insn_fetch_bytes(_ctxt, sizeof(_type)); \
759 if (rc != X86EMUL_CONTINUE) \ 756 if (rc != X86EMUL_CONTINUE) \
760 goto done; \ 757 goto done; \
761 _fc = &ctxt->fetch; \
762 _x = *(_type __aligned(1) *) &_fc->data[ctxt->_eip - _fc->start]; \
763 ctxt->_eip += sizeof(_type); \ 758 ctxt->_eip += sizeof(_type); \
759 _x = *(_type __aligned(1) *) ctxt->fetch.ptr; \
760 ctxt->fetch.ptr += sizeof(_type); \
764 _x; \ 761 _x; \
765}) 762})
766 763
767#define insn_fetch_arr(_arr, _size, _ctxt) \ 764#define insn_fetch_arr(_arr, _size, _ctxt) \
768({ \ 765({ \
769 struct fetch_cache *_fc; \
770 rc = do_insn_fetch_bytes(_ctxt, _size); \ 766 rc = do_insn_fetch_bytes(_ctxt, _size); \
771 if (rc != X86EMUL_CONTINUE) \ 767 if (rc != X86EMUL_CONTINUE) \
772 goto done; \ 768 goto done; \
773 _fc = &ctxt->fetch; \
774 memcpy(_arr, &_fc->data[ctxt->_eip - _fc->start], _size); \
775 ctxt->_eip += (_size); \ 769 ctxt->_eip += (_size); \
770 memcpy(_arr, ctxt->fetch.ptr, _size); \
771 ctxt->fetch.ptr += (_size); \
776}) 772})
777 773
778/* 774/*
@@ -4228,8 +4224,8 @@ int x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
4228 ctxt->memop.type = OP_NONE; 4224 ctxt->memop.type = OP_NONE;
4229 ctxt->memopp = NULL; 4225 ctxt->memopp = NULL;
4230 ctxt->_eip = ctxt->eip; 4226 ctxt->_eip = ctxt->eip;
4231 ctxt->fetch.start = ctxt->_eip; 4227 ctxt->fetch.ptr = ctxt->fetch.data;
4232 ctxt->fetch.end = ctxt->fetch.start + insn_len; 4228 ctxt->fetch.end = ctxt->fetch.data + insn_len;
4233 ctxt->opcode_len = 1; 4229 ctxt->opcode_len = 1;
4234 if (insn_len > 0) 4230 if (insn_len > 0)
4235 memcpy(ctxt->fetch.data, insn, insn_len); 4231 memcpy(ctxt->fetch.data, insn, insn_len);