Diffstat (limited to 'virt/kvm/arm/vgic.c')
-rw-r--r--	virt/kvm/arm/vgic.c	93
1 file changed, 8 insertions(+), 85 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index e968179e592f..b70174e74868 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -758,7 +758,6 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
 			       unsigned long offset,
 			       const struct vgic_io_range *range)
 {
-	u32 *data32 = (void *)mmio->data;
 	struct kvm_exit_mmio mmio32;
 	bool ret;
 
@@ -775,70 +774,17 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
 	mmio32.private = mmio->private;
 
 	mmio32.phys_addr = mmio->phys_addr + 4;
-	if (mmio->is_write)
-		*(u32 *)mmio32.data = data32[1];
+	mmio32.data = &((u32 *)mmio->data)[1];
 	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
-	if (!mmio->is_write)
-		data32[1] = *(u32 *)mmio32.data;
 
 	mmio32.phys_addr = mmio->phys_addr;
-	if (mmio->is_write)
-		*(u32 *)mmio32.data = data32[0];
+	mmio32.data = &((u32 *)mmio->data)[0];
 	ret |= range->handle_mmio(vcpu, &mmio32, offset);
-	if (!mmio->is_write)
-		data32[0] = *(u32 *)mmio32.data;
 
 	return ret;
 }
 
 /**
- * vgic_handle_mmio_range - handle an in-kernel MMIO access
- * @vcpu:	pointer to the vcpu performing the access
- * @run:	pointer to the kvm_run structure
- * @mmio:	pointer to the data describing the access
- * @ranges:	array of MMIO ranges in a given region
- * @mmio_base:	base address of that region
- *
- * returns true if the MMIO access could be performed
- */
-bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			    struct kvm_exit_mmio *mmio,
-			    const struct vgic_io_range *ranges,
-			    unsigned long mmio_base)
-{
-	const struct vgic_io_range *range;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	bool updated_state;
-	unsigned long offset;
-
-	offset = mmio->phys_addr - mmio_base;
-	range = vgic_find_range(ranges, mmio->len, offset);
-	if (unlikely(!range || !range->handle_mmio)) {
-		pr_warn("Unhandled access %d %08llx %d\n",
-			mmio->is_write, mmio->phys_addr, mmio->len);
-		return false;
-	}
-
-	spin_lock(&vcpu->kvm->arch.vgic.lock);
-	offset -= range->base;
-	if (vgic_validate_access(dist, range, offset)) {
-		updated_state = call_range_handler(vcpu, mmio, offset, range);
-	} else {
-		if (!mmio->is_write)
-			memset(mmio->data, 0, mmio->len);
-		updated_state = false;
-	}
-	spin_unlock(&vcpu->kvm->arch.vgic.lock);
-	kvm_prepare_mmio(run, mmio);
-	kvm_handle_mmio_return(vcpu, run);
-
-	if (updated_state)
-		vgic_kick_vcpus(vcpu->kvm);
-
-	return true;
-}
-
-/**
  * vgic_handle_mmio_access - handle an in-kernel MMIO access
  * This is called by the read/write KVM IO device wrappers below.
  * @vcpu:	pointer to the vcpu performing the access
@@ -873,23 +819,24 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
 	mmio.phys_addr = addr;
 	mmio.len = len;
 	mmio.is_write = is_write;
-	if (is_write)
-		memcpy(mmio.data, val, len);
+	mmio.data = val;
 	mmio.private = iodev->redist_vcpu;
 
 	spin_lock(&dist->lock);
 	offset -= range->base;
 	if (vgic_validate_access(dist, range, offset)) {
 		updated_state = call_range_handler(vcpu, &mmio, offset, range);
-		if (!is_write)
-			memcpy(val, mmio.data, len);
 	} else {
 		if (!is_write)
 			memset(val, 0, len);
 		updated_state = false;
 	}
 	spin_unlock(&dist->lock);
-	kvm_prepare_mmio(run, &mmio);
+	run->mmio.is_write	= is_write;
+	run->mmio.len		= len;
+	run->mmio.phys_addr	= addr;
+	memcpy(run->mmio.data, val, len);
+
 	kvm_handle_mmio_return(vcpu, run);
 
 	if (updated_state)
@@ -898,30 +845,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/**
- * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
- * @vcpu:	pointer to the vcpu performing the access
- * @run:	pointer to the kvm_run structure
- * @mmio:	pointer to the data describing the access
- *
- * returns true if the MMIO access has been performed in kernel space,
- * and false if it needs to be emulated in user space.
- * Calls the actual handling routine for the selected VGIC model.
- */
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		      struct kvm_exit_mmio *mmio)
-{
-	if (!irqchip_in_kernel(vcpu->kvm))
-		return false;
-
-	/*
-	 * This will currently call either vgic_v2_handle_mmio() or
-	 * vgic_v3_handle_mmio(), which in turn will call
-	 * vgic_handle_mmio_range() defined above.
-	 */
-	return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
-}
-
 static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
 				 struct kvm_io_device *this,
 				 gpa_t addr, int len, void *val)