author     Andre Przywara <andre.przywara@arm.com>  2015-03-27 21:13:13 -0400
committer  Marc Zyngier <marc.zyngier@arm.com>      2015-03-30 12:07:19 -0400
commit     950324ab81bf006542f30a1d1ab3d65fcf15cbc1 (patch)
tree       07a4b0a29bf056eb3d700eafc4577a10bb2b2972 /virt
parent     fb8f61abab48467ef670ef165ff664cdc94f742e (diff)
KVM: arm/arm64: rework MMIO abort handling to use KVM MMIO bus
Currently we have struct kvm_exit_mmio for encapsulating MMIO abort
data to be passed on from syndrome decoding all the way down to the
VGIC register handlers. Now that we switch the MMIO handling to be
routed through the KVM MMIO bus, it no longer makes sense to use that
structure from the very beginning. So we keep the data in local
variables until we put it into the kvm_io_bus framework. Then we fill
kvm_exit_mmio in the VGIC only, making it a VGIC-private structure.
Along the way we replace the data buffer in that structure with a
pointer to a single location in a local variable, which gets rid of
some copying.

With all of the virtual GIC emulation code now being registered with
the kvm_io_bus, we can remove all of the old MMIO handling code and
its dispatching functionality.

I didn't bother to rename kvm_exit_mmio (to vgic_mmio or something),
because that would touch a lot of code lines without any good reason.

This is based on an original patch by Nikolay.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Cc: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
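[Editor's note] For illustration, a minimal sketch (not the verbatim kernel code) of the call shape this patch moves to: the kvm_io_bus layer hands the VGIC a raw data buffer, and the now VGIC-private struct kvm_exit_mmio simply points at that buffer instead of carrying its own copy. The name vgic_sketch_read() is hypothetical; the real entry points after this patch are vgic_handle_mmio_read()/vgic_handle_mmio_write() in virt/kvm/arm/vgic.c.

/*
 * Hypothetical reduced read callback, assuming the kvm_io_device
 * signature visible elsewhere in this patch (see
 * vgic_handle_mmio_read in the diff below).
 */
static int vgic_sketch_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                            gpa_t addr, int len, void *val)
{
        struct kvm_exit_mmio mmio;

        mmio.phys_addr = addr;
        mmio.len = len;
        mmio.is_write = false;
        mmio.data = val;        /* point at the bus buffer: no memcpy */
        /*
         * ...find the matching vgic_io_range for addr and invoke its
         * handle_mmio() callback, which reads or updates *val in place...
         */
        return 0;
}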
Diffstat (limited to 'virt')
-rw-r--r--  virt/kvm/arm/vgic-v2-emul.c  21
-rw-r--r--  virt/kvm/arm/vgic-v3-emul.c  35
-rw-r--r--  virt/kvm/arm/vgic.c          93
-rw-r--r--  virt/kvm/arm/vgic.h          13
4 files changed, 18 insertions(+), 144 deletions(-)
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
index 7460b376d090..13907970d11c 100644
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -404,24 +404,6 @@ static const struct vgic_io_range vgic_dist_ranges[] = {
 	{}
 };
 
-static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-				struct kvm_exit_mmio *mmio)
-{
-	unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
-
-	if (!is_in_range(mmio->phys_addr, mmio->len, base,
-			 KVM_VGIC_V2_DIST_SIZE))
-		return false;
-
-	/* GICv2 does not support accesses wider than 32 bits */
-	if (mmio->len > 4) {
-		kvm_inject_dabt(vcpu, mmio->phys_addr);
-		return true;
-	}
-
-	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
-}
-
 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -580,7 +562,6 @@ void vgic_v2_init_emulation(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
 	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
 	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
 	dist->vm_ops.init_model = vgic_v2_init_model;
@@ -690,6 +671,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	struct kvm_vcpu *vcpu, *tmp_vcpu;
 	struct vgic_dist *vgic;
 	struct kvm_exit_mmio mmio;
+	u32 data;
 
 	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
 	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
@@ -711,6 +693,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 
 	mmio.len = 4;
 	mmio.is_write = is_write;
+	mmio.data = &data;
 	if (is_write)
 		mmio_data_write(&mmio, ~0, *reg);
 	switch (attr->group) {
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index eb1a797cb9c1..e9c3a7a83833 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -708,40 +708,6 @@ static const struct vgic_io_range vgic_redist_ranges[] = {
 	{},
 };
 
-/*
- * This function splits accesses between the distributor and the two
- * redistributor parts (private/SPI). As each redistributor is accessible
- * from any CPU, we have to determine the affected VCPU by taking the faulting
- * address into account. We then pass this VCPU to the handler function via
- * the private parameter.
- */
-#define SGI_BASE_OFFSET SZ_64K
-static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-				struct kvm_exit_mmio *mmio)
-{
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	unsigned long dbase = dist->vgic_dist_base;
-	unsigned long rdbase = dist->vgic_redist_base;
-	int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
-	int vcpu_id;
-
-	if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
-		return vgic_handle_mmio_range(vcpu, run, mmio,
-					      vgic_v3_dist_ranges, dbase);
-	}
-
-	if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
-			 GIC_V3_REDIST_SIZE * nrcpus))
-		return false;
-
-	vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
-	rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
-	mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);
-
-	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_redist_ranges,
-				      rdbase);
-}
-
 static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
 {
 	if (vgic_queue_irq(vcpu, 0, irq)) {
@@ -861,7 +827,6 @@ void vgic_v3_init_emulation(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
 	dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
 	dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
 	dist->vm_ops.init_model = vgic_v3_init_model;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index e968179e592f..b70174e74868 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -758,7 +758,6 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
 			       unsigned long offset,
 			       const struct vgic_io_range *range)
 {
-	u32 *data32 = (void *)mmio->data;
 	struct kvm_exit_mmio mmio32;
 	bool ret;
 
@@ -775,70 +774,17 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
 	mmio32.private = mmio->private;
 
 	mmio32.phys_addr = mmio->phys_addr + 4;
-	if (mmio->is_write)
-		*(u32 *)mmio32.data = data32[1];
+	mmio32.data = &((u32 *)mmio->data)[1];
 	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
-	if (!mmio->is_write)
-		data32[1] = *(u32 *)mmio32.data;
 
 	mmio32.phys_addr = mmio->phys_addr;
-	if (mmio->is_write)
-		*(u32 *)mmio32.data = data32[0];
+	mmio32.data = &((u32 *)mmio->data)[0];
 	ret |= range->handle_mmio(vcpu, &mmio32, offset);
-	if (!mmio->is_write)
-		data32[0] = *(u32 *)mmio32.data;
 
 	return ret;
 }
 
 /**
- * vgic_handle_mmio_range - handle an in-kernel MMIO access
- * @vcpu: pointer to the vcpu performing the access
- * @run: pointer to the kvm_run structure
- * @mmio: pointer to the data describing the access
- * @ranges: array of MMIO ranges in a given region
- * @mmio_base: base address of that region
- *
- * returns true if the MMIO access could be performed
- */
-bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			    struct kvm_exit_mmio *mmio,
-			    const struct vgic_io_range *ranges,
-			    unsigned long mmio_base)
-{
-	const struct vgic_io_range *range;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	bool updated_state;
-	unsigned long offset;
-
-	offset = mmio->phys_addr - mmio_base;
-	range = vgic_find_range(ranges, mmio->len, offset);
-	if (unlikely(!range || !range->handle_mmio)) {
-		pr_warn("Unhandled access %d %08llx %d\n",
-			mmio->is_write, mmio->phys_addr, mmio->len);
-		return false;
-	}
-
-	spin_lock(&vcpu->kvm->arch.vgic.lock);
-	offset -= range->base;
-	if (vgic_validate_access(dist, range, offset)) {
-		updated_state = call_range_handler(vcpu, mmio, offset, range);
-	} else {
-		if (!mmio->is_write)
-			memset(mmio->data, 0, mmio->len);
-		updated_state = false;
-	}
-	spin_unlock(&vcpu->kvm->arch.vgic.lock);
-	kvm_prepare_mmio(run, mmio);
-	kvm_handle_mmio_return(vcpu, run);
-
-	if (updated_state)
-		vgic_kick_vcpus(vcpu->kvm);
-
-	return true;
-}
-
-/**
  * vgic_handle_mmio_access - handle an in-kernel MMIO access
  * This is called by the read/write KVM IO device wrappers below.
  * @vcpu: pointer to the vcpu performing the access
@@ -873,23 +819,24 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
 	mmio.phys_addr = addr;
 	mmio.len = len;
 	mmio.is_write = is_write;
-	if (is_write)
-		memcpy(mmio.data, val, len);
+	mmio.data = val;
 	mmio.private = iodev->redist_vcpu;
 
 	spin_lock(&dist->lock);
 	offset -= range->base;
 	if (vgic_validate_access(dist, range, offset)) {
 		updated_state = call_range_handler(vcpu, &mmio, offset, range);
-		if (!is_write)
-			memcpy(val, mmio.data, len);
 	} else {
 		if (!is_write)
 			memset(val, 0, len);
 		updated_state = false;
 	}
 	spin_unlock(&dist->lock);
-	kvm_prepare_mmio(run, &mmio);
+	run->mmio.is_write = is_write;
+	run->mmio.len = len;
+	run->mmio.phys_addr = addr;
+	memcpy(run->mmio.data, val, len);
+
 	kvm_handle_mmio_return(vcpu, run);
 
 	if (updated_state)
@@ -898,30 +845,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/**
- * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
- * @vcpu: pointer to the vcpu performing the access
- * @run: pointer to the kvm_run structure
- * @mmio: pointer to the data describing the access
- *
- * returns true if the MMIO access has been performed in kernel space,
- * and false if it needs to be emulated in user space.
- * Calls the actual handling routine for the selected VGIC model.
- */
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		      struct kvm_exit_mmio *mmio)
-{
-	if (!irqchip_in_kernel(vcpu->kvm))
-		return false;
-
-	/*
-	 * This will currently call either vgic_v2_handle_mmio() or
-	 * vgic_v3_handle_mmio(), which in turn will call
-	 * vgic_handle_mmio_range() defined above.
-	 */
-	return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
-}
-
 static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
 				 struct kvm_io_device *this,
 				 gpa_t addr, int len, void *val)
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
index 28fa3aaf6367..0df74cbb6200 100644
--- a/virt/kvm/arm/vgic.h
+++ b/virt/kvm/arm/vgic.h
@@ -59,6 +59,14 @@ void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
 
+struct kvm_exit_mmio {
+	phys_addr_t	phys_addr;
+	void		*data;
+	u32		len;
+	bool		is_write;
+	void		*private;
+};
+
 void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
 		     phys_addr_t offset, int mode);
 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
@@ -99,11 +107,6 @@ const
 struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
 				      int len, gpa_t offset);
 
-bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			    struct kvm_exit_mmio *mmio,
-			    const struct vgic_io_range *ranges,
-			    unsigned long mmio_base);
-
 bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
 			    phys_addr_t offset, int vcpu_id, int access);
 