about summary refs log tree commit diff stats
path: root/arch
diff options
context:
space:
mode:
author	Nelson Elhage <nelhage@ksplice.com>	2011-04-18 12:05:53 -0400
committer	Avi Kivity <avi@redhat.com>	2011-05-11 07:57:10 -0400
commit	3d9b938eefb7d91a1ae13e425931bd5ac103b762 (patch)
tree	96ecffc81c09cc5578a5375cb357895084b552fa /arch
parent	7c4c0f4fd5c3e82234c0ab61c7e7ffdb8f3af07b (diff)
KVM: emulator: Use linearize() when fetching instructions
Since segments need to be handled slightly differently when fetching
instructions, we add a __linearize helper that accepts a new 'fetch' boolean.

[avi: fix oops caused by wrong segmented_address initialization order]

Signed-off-by: Nelson Elhage <nelhage@ksplice.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/include/asm/kvm_emulate.h |  1 -
-rw-r--r--	arch/x86/kvm/emulate.c             | 26 ++++++++++++++++++++--------
2 files changed, 18 insertions(+), 9 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 081844860a3d..9b760c8f2576 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -265,7 +265,6 @@ struct x86_emulate_ctxt {
 	unsigned long eip; /* eip before instruction emulation */
 	/* Emulated execution mode, represented by an X86EMUL_MODE value. */
 	int mode;
-	u32 cs_base;
 
 	/* interruptibility state, as a result of execution of STI or MOV SS */
 	int interruptibility;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 4c5ff22d101a..e1f77de95404 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -540,9 +540,9 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 }
 
-static int linearize(struct x86_emulate_ctxt *ctxt,
+static int __linearize(struct x86_emulate_ctxt *ctxt,
 		     struct segmented_address addr,
-		     unsigned size, bool write,
+		     unsigned size, bool write, bool fetch,
 		     ulong *linear)
 {
 	struct decode_cache *c = &ctxt->decode;
@@ -569,7 +569,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		if (((desc.type & 8) || !(desc.type & 2)) && write)
 			goto bad;
 		/* unreadable code segment */
-		if ((desc.type & 8) && !(desc.type & 2))
+		if (!fetch && (desc.type & 8) && !(desc.type & 2))
 			goto bad;
 		lim = desc_limit_scaled(&desc);
 		if ((desc.type & 8) || !(desc.type & 4)) {
@@ -602,7 +602,7 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
 		}
 		break;
 	}
-	if (c->ad_bytes != 8)
+	if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : c->ad_bytes != 8)
 		la &= (u32)-1;
 	*linear = la;
 	return X86EMUL_CONTINUE;
@@ -613,6 +613,15 @@ bad:
 	return emulate_gp(ctxt, addr.seg);
 }
 
+static int linearize(struct x86_emulate_ctxt *ctxt,
+		     struct segmented_address addr,
+		     unsigned size, bool write,
+		     ulong *linear)
+{
+	return __linearize(ctxt, addr, size, write, false, linear);
+}
+
+
 static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 			      struct segmented_address addr,
 			      void *data,
@@ -637,11 +646,13 @@ static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 	int size, cur_size;
 
 	if (eip == fc->end) {
-		unsigned long linear = eip + ctxt->cs_base;
-		if (ctxt->mode != X86EMUL_MODE_PROT64)
-			linear &= (u32)-1;
+		unsigned long linear;
+		struct segmented_address addr = { .seg=VCPU_SREG_CS, .ea=eip};
 		cur_size = fc->end - fc->start;
 		size = min(15UL - cur_size, PAGE_SIZE - offset_in_page(eip));
+		rc = __linearize(ctxt, addr, size, false, true, &linear);
+		if (rc != X86EMUL_CONTINUE)
+			return rc;
 		rc = ops->fetch(linear, fc->data + cur_size,
 				size, ctxt->vcpu, &ctxt->exception);
 		if (rc != X86EMUL_CONTINUE)
@@ -3127,7 +3138,6 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, void *insn, int insn_len)
 	c->fetch.end = c->fetch.start + insn_len;
 	if (insn_len > 0)
 		memcpy(c->fetch.data, insn, insn_len);
-	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
 
 	switch (mode) {
 	case X86EMUL_MODE_REAL: