about summary refs log tree commit diff stats
path: root/arch/x86
diff options
context:
space:
mode:
author: Avi Kivity <avi@redhat.com> 2012-04-18 12:22:47 -0400
committer: Marcelo Tosatti <mtosatti@redhat.com> 2012-04-19 19:35:07 -0400
commitf78146b0f9230765c6315b2e14f56112513389ad (patch)
tree2e6780f2989c73c1bf214a5728514c1dc8e15f09 /arch/x86
parenteac0556750e727ff39144a9a9e59d5ccf1fc0e2a (diff)
KVM: Fix page-crossing MMIO
MMIO that are split across a page boundary are currently broken - the code does not expect to be aborted by the exit to userspace for the first MMIO fragment. This patch fixes the problem by generalizing the current code for handling 16-byte MMIOs to handle a number of "fragments", and changes the MMIO code to create those fragments. Signed-off-by: Avi Kivity <avi@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r-- arch/x86/kvm/x86.c | 114
1 file changed, 81 insertions(+), 33 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0d9a57875f0b..4de705cdcafd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3718,9 +3718,8 @@ struct read_write_emulator_ops {
3718static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 3718static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
3719{ 3719{
3720 if (vcpu->mmio_read_completed) { 3720 if (vcpu->mmio_read_completed) {
3721 memcpy(val, vcpu->mmio_data, bytes);
3722 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 3721 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3723 vcpu->mmio_phys_addr, *(u64 *)val); 3722 vcpu->mmio_fragments[0].gpa, *(u64 *)val);
3724 vcpu->mmio_read_completed = 0; 3723 vcpu->mmio_read_completed = 0;
3725 return 1; 3724 return 1;
3726 } 3725 }
@@ -3756,8 +3755,9 @@ static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3756static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 3755static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3757 void *val, int bytes) 3756 void *val, int bytes)
3758{ 3757{
3759 memcpy(vcpu->mmio_data, val, bytes); 3758 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
3760 memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8); 3759
3760 memcpy(vcpu->run->mmio.data, frag->data, frag->len);
3761 return X86EMUL_CONTINUE; 3761 return X86EMUL_CONTINUE;
3762} 3762}
3763 3763
@@ -3784,10 +3784,7 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
3784 gpa_t gpa; 3784 gpa_t gpa;
3785 int handled, ret; 3785 int handled, ret;
3786 bool write = ops->write; 3786 bool write = ops->write;
3787 3787 struct kvm_mmio_fragment *frag;
3788 if (ops->read_write_prepare &&
3789 ops->read_write_prepare(vcpu, val, bytes))
3790 return X86EMUL_CONTINUE;
3791 3788
3792 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 3789 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
3793 3790
@@ -3813,15 +3810,19 @@ mmio:
3813 bytes -= handled; 3810 bytes -= handled;
3814 val += handled; 3811 val += handled;
3815 3812
3816 vcpu->mmio_needed = 1; 3813 while (bytes) {
3817 vcpu->run->exit_reason = KVM_EXIT_MMIO; 3814 unsigned now = min(bytes, 8U);
3818 vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3819 vcpu->mmio_size = bytes;
3820 vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
3821 vcpu->run->mmio.is_write = vcpu->mmio_is_write = write;
3822 vcpu->mmio_index = 0;
3823 3815
3824 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 3816 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
3817 frag->gpa = gpa;
3818 frag->data = val;
3819 frag->len = now;
3820
3821 gpa += now;
3822 val += now;
3823 bytes -= now;
3824 }
3825 return X86EMUL_CONTINUE;
3825} 3826}
3826 3827
3827int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, 3828int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
@@ -3830,10 +3831,18 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
3830 struct read_write_emulator_ops *ops) 3831 struct read_write_emulator_ops *ops)
3831{ 3832{
3832 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 3833 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3834 gpa_t gpa;
3835 int rc;
3836
3837 if (ops->read_write_prepare &&
3838 ops->read_write_prepare(vcpu, val, bytes))
3839 return X86EMUL_CONTINUE;
3840
3841 vcpu->mmio_nr_fragments = 0;
3833 3842
3834 /* Crossing a page boundary? */ 3843 /* Crossing a page boundary? */
3835 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 3844 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3836 int rc, now; 3845 int now;
3837 3846
3838 now = -addr & ~PAGE_MASK; 3847 now = -addr & ~PAGE_MASK;
3839 rc = emulator_read_write_onepage(addr, val, now, exception, 3848 rc = emulator_read_write_onepage(addr, val, now, exception,
@@ -3846,8 +3855,25 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
3846 bytes -= now; 3855 bytes -= now;
3847 } 3856 }
3848 3857
3849 return emulator_read_write_onepage(addr, val, bytes, exception, 3858 rc = emulator_read_write_onepage(addr, val, bytes, exception,
3850 vcpu, ops); 3859 vcpu, ops);
3860 if (rc != X86EMUL_CONTINUE)
3861 return rc;
3862
3863 if (!vcpu->mmio_nr_fragments)
3864 return rc;
3865
3866 gpa = vcpu->mmio_fragments[0].gpa;
3867
3868 vcpu->mmio_needed = 1;
3869 vcpu->mmio_cur_fragment = 0;
3870
3871 vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
3872 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
3873 vcpu->run->exit_reason = KVM_EXIT_MMIO;
3874 vcpu->run->mmio.phys_addr = gpa;
3875
3876 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
3851} 3877}
3852 3878
3853static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 3879static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
@@ -5446,33 +5472,55 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
5446 return r; 5472 return r;
5447} 5473}
5448 5474
5475/*
5476 * Implements the following, as a state machine:
5477 *
5478 * read:
5479 * for each fragment
5480 * write gpa, len
5481 * exit
5482 * copy data
5483 * execute insn
5484 *
5485 * write:
5486 * for each fragment
5487 * write gpa, len
5488 * copy data
5489 * exit
5490 */
5449static int complete_mmio(struct kvm_vcpu *vcpu) 5491static int complete_mmio(struct kvm_vcpu *vcpu)
5450{ 5492{
5451 struct kvm_run *run = vcpu->run; 5493 struct kvm_run *run = vcpu->run;
5494 struct kvm_mmio_fragment *frag;
5452 int r; 5495 int r;
5453 5496
5454 if (!(vcpu->arch.pio.count || vcpu->mmio_needed)) 5497 if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
5455 return 1; 5498 return 1;
5456 5499
5457 if (vcpu->mmio_needed) { 5500 if (vcpu->mmio_needed) {
5458 vcpu->mmio_needed = 0; 5501 /* Complete previous fragment */
5502 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
5459 if (!vcpu->mmio_is_write) 5503 if (!vcpu->mmio_is_write)
5460 memcpy(vcpu->mmio_data + vcpu->mmio_index, 5504 memcpy(frag->data, run->mmio.data, frag->len);
5461 run->mmio.data, 8); 5505 if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
5462 vcpu->mmio_index += 8; 5506 vcpu->mmio_needed = 0;
5463 if (vcpu->mmio_index < vcpu->mmio_size) { 5507 if (vcpu->mmio_is_write)
5464 run->exit_reason = KVM_EXIT_MMIO; 5508 return 1;
5465 run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index; 5509 vcpu->mmio_read_completed = 1;
5466 memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8); 5510 goto done;
5467 run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
5468 run->mmio.is_write = vcpu->mmio_is_write;
5469 vcpu->mmio_needed = 1;
5470 return 0;
5471 } 5511 }
5512 /* Initiate next fragment */
5513 ++frag;
5514 run->exit_reason = KVM_EXIT_MMIO;
5515 run->mmio.phys_addr = frag->gpa;
5472 if (vcpu->mmio_is_write) 5516 if (vcpu->mmio_is_write)
5473 return 1; 5517 memcpy(run->mmio.data, frag->data, frag->len);
5474 vcpu->mmio_read_completed = 1; 5518 run->mmio.len = frag->len;
5519 run->mmio.is_write = vcpu->mmio_is_write;
5520 return 0;
5521
5475 } 5522 }
5523done:
5476 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 5524 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5477 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 5525 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
5478 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 5526 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);