aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ia64/kvm
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2012-04-18 12:22:47 -0400
committerMarcelo Tosatti <mtosatti@redhat.com>2012-04-19 19:35:07 -0400
commitf78146b0f9230765c6315b2e14f56112513389ad (patch)
tree2e6780f2989c73c1bf214a5728514c1dc8e15f09 /arch/ia64/kvm
parenteac0556750e727ff39144a9a9e59d5ccf1fc0e2a (diff)
KVM: Fix page-crossing MMIO
MMIO that are split across a page boundary are currently broken - the code does not expect to be aborted by the exit to userspace for the first MMIO fragment.

This patch fixes the problem by generalizing the current code for handling 16-byte MMIOs to handle a number of "fragments", and changes the MMIO code to create those fragments.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/ia64/kvm')
-rw-r--r--arch/ia64/kvm/kvm-ia64.c10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 9d80ff8d9eff..882ab21a8dcd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -232,12 +232,12 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
 		goto mmio;
 	vcpu->mmio_needed = 1;
-	vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr;
-	vcpu->mmio_size = kvm_run->mmio.len = p->size;
+	vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
+	vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
 	vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
 
 	if (vcpu->mmio_is_write)
-		memcpy(vcpu->mmio_data, &p->data, p->size);
+		memcpy(vcpu->arch.mmio_data, &p->data, p->size);
 	memcpy(kvm_run->mmio.data, &p->data, p->size);
 	kvm_run->exit_reason = KVM_EXIT_MMIO;
 	return 0;
@@ -719,7 +719,7 @@ static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
 	struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
 
 	if (!vcpu->mmio_is_write)
-		memcpy(&p->data, vcpu->mmio_data, 8);
+		memcpy(&p->data, vcpu->arch.mmio_data, 8);
 	p->state = STATE_IORESP_READY;
 }
 
@@ -739,7 +739,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	if (vcpu->mmio_needed) {
-		memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8);
+		memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
 		kvm_set_mmio_data(vcpu);
 		vcpu->mmio_read_completed = 1;
 		vcpu->mmio_needed = 0;