author		Andre Przywara <andre.przywara@arm.com>	2015-03-27 21:13:13 -0400
committer	Marc Zyngier <marc.zyngier@arm.com>	2015-03-30 12:07:19 -0400
commit		950324ab81bf006542f30a1d1ab3d65fcf15cbc1 (patch)
tree		07a4b0a29bf056eb3d700eafc4577a10bb2b2972
parent		fb8f61abab48467ef670ef165ff664cdc94f742e (diff)
KVM: arm/arm64: rework MMIO abort handling to use KVM MMIO bus
Currently we have struct kvm_exit_mmio for encapsulating MMIO abort data to be
passed on from syndrome decoding all the way down to the VGIC register
handlers. Now that we switch the MMIO handling to be routed through the KVM
MMIO bus, it does not make sense anymore to use that structure already from
the beginning. So we keep the data in local variables until we put it into the
kvm_io_bus framework. Then we fill kvm_exit_mmio in the VGIC only, making it a
VGIC-private structure. Along the way we replace the data buffer in that
structure with a pointer to a single location in a local variable, which gets
rid of some copying.

With all of the virtual GIC emulation code now registered with the kvm_io_bus,
we can remove all of the old MMIO handling code and its dispatching
functionality.

I didn't bother to rename kvm_exit_mmio (to vgic_mmio or something), because
that would touch a lot of code lines without any good reason.

This is based on an original patch by Nikolay.

Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Cc: Nikolay Nikolaev <n.nikolaev@virtualopensystems.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
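As a quick orientation before the patch body, here is a condensed sketch of the
reworked io_mem_abort() flow that the arch/arm/kvm/mmio.c hunks below
introduce; tracing and the decode error path are trimmed, the name
io_mem_abort_sketch() is only for illustration, and the helpers used
(vcpu_data_guest_to_host(), mmio_write_buf(), kvm_io_bus_write()/read(),
kvm_handle_mmio_return()) are the existing KVM/ARM ones the patch relies on:

/* Condensed sketch of the new flow; see the mmio.c hunks below. */
static int io_mem_abort_sketch(struct kvm_vcpu *vcpu, struct kvm_run *run,
			       phys_addr_t fault_ipa, bool is_write, int len)
{
	unsigned long rt = vcpu->arch.mmio_decode.rt;
	u8 data_buf[8];
	int ret;

	if (is_write) {
		unsigned long data;

		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
		mmio_write_buf(data_buf, len, data);
		/* Let an in-kernel device (e.g. the VGIC) claim the write. */
		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				       data_buf);
	} else {
		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
				      data_buf);
	}

	/* Always stage kvm_run in case user space has to emulate. */
	run->mmio.is_write = is_write;
	run->mmio.phys_addr = fault_ipa;
	run->mmio.len = len;
	memcpy(run->mmio.data, data_buf, len);

	if (!ret) {
		/* An in-kernel device handled the access. */
		kvm_handle_mmio_return(vcpu, run);
		return 1;
	}

	run->exit_reason = KVM_EXIT_MMIO;	/* defer to user space */
	return 0;
}

The decode step itself (decode_hsr()) now only fills is_write and len plus the
register and sign-extension info in vcpu->arch.mmio_decode, instead of a
kvm_exit_mmio.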
-rw-r--r--	arch/arm/include/asm/kvm_mmio.h		22
-rw-r--r--	arch/arm/kvm/mmio.c			64
-rw-r--r--	arch/arm64/include/asm/kvm_mmio.h	22
-rw-r--r--	include/kvm/arm_vgic.h			6
-rw-r--r--	virt/kvm/arm/vgic-v2-emul.c		21
-rw-r--r--	virt/kvm/arm/vgic-v3-emul.c		35
-rw-r--r--	virt/kvm/arm/vgic.c			93
-rw-r--r--	virt/kvm/arm/vgic.h			13
8 files changed, 55 insertions, 221 deletions
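The central data-structure change is easiest to see on its own: kvm_exit_mmio
moves into virt/kvm/arm/vgic.h (last file below) and its embedded u8 data[8]
copy becomes a pointer into the caller's buffer. A minimal sketch, with
handle_u32_example() being a hypothetical handler for illustration only:

/* The now VGIC-private structure, as added to virt/kvm/arm/vgic.h below. */
struct kvm_exit_mmio {
	phys_addr_t	phys_addr;
	void		*data;		/* points into the caller's buffer */
	u32		len;
	bool		is_write;
	void		*private;
};

/*
 * Hypothetical handler: it reads and writes through mmio->data in place,
 * so no copy into or out of the structure is needed any more.
 */
static void handle_u32_example(struct kvm_exit_mmio *mmio, u32 *reg)
{
	if (mmio->is_write)
		*reg = *(u32 *)mmio->data;
	else
		*(u32 *)mmio->data = *reg;
}

vgic_attr_regs_access() in vgic-v2-emul.c and vgic_handle_mmio_access() in
vgic.c below do exactly this, pointing mmio.data at a local u32 or at the
kvm_io_bus val buffer.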
diff --git a/arch/arm/include/asm/kvm_mmio.h b/arch/arm/include/asm/kvm_mmio.h
index 3f83db2f6cf0..d8e90c8cb5fa 100644
--- a/arch/arm/include/asm/kvm_mmio.h
+++ b/arch/arm/include/asm/kvm_mmio.h
@@ -28,28 +28,6 @@ struct kvm_decode {
 	bool sign_extend;
 };
 
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
-	phys_addr_t	phys_addr;
-	u8		data[8];
-	u32		len;
-	bool		is_write;
-	void		*private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
-				    struct kvm_exit_mmio *mmio)
-{
-	run->mmio.phys_addr = mmio->phys_addr;
-	run->mmio.len = mmio->len;
-	run->mmio.is_write = mmio->is_write;
-	memcpy(run->mmio.data, mmio->data, mmio->len);
-	run->exit_reason = KVM_EXIT_MMIO;
-}
-
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 phys_addr_t fault_ipa);
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 5d3bfc0eb3f0..974b1c606d04 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -121,12 +121,11 @@ int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
 	return 0;
 }
 
-static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
-		      struct kvm_exit_mmio *mmio)
+static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
 {
 	unsigned long rt;
-	int len;
-	bool is_write, sign_extend;
+	int access_size;
+	bool sign_extend;
 
 	if (kvm_vcpu_dabt_isextabt(vcpu)) {
 		/* cache operation on I/O addr, tell guest unsupported */
@@ -140,17 +139,15 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 		return 1;
 	}
 
-	len = kvm_vcpu_dabt_get_as(vcpu);
-	if (unlikely(len < 0))
-		return len;
+	access_size = kvm_vcpu_dabt_get_as(vcpu);
+	if (unlikely(access_size < 0))
+		return access_size;
 
-	is_write = kvm_vcpu_dabt_iswrite(vcpu);
+	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
 	sign_extend = kvm_vcpu_dabt_issext(vcpu);
 	rt = kvm_vcpu_dabt_get_rd(vcpu);
 
-	mmio->is_write = is_write;
-	mmio->phys_addr = fault_ipa;
-	mmio->len = len;
+	*len = access_size;
 	vcpu->arch.mmio_decode.sign_extend = sign_extend;
 	vcpu->arch.mmio_decode.rt = rt;
 
@@ -165,20 +162,20 @@ static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 phys_addr_t fault_ipa)
 {
-	struct kvm_exit_mmio mmio;
 	unsigned long data;
 	unsigned long rt;
 	int ret;
+	bool is_write;
+	int len;
+	u8 data_buf[8];
 
 	/*
-	 * Prepare MMIO operation. First stash it in a private
-	 * structure that we can use for in-kernel emulation. If the
-	 * kernel can't handle it, copy it into run->mmio and let user
-	 * space do its magic.
+	 * Prepare MMIO operation. First decode the syndrome data we get
+	 * from the CPU. Then try if some in-kernel emulation feels
+	 * responsible, otherwise let user space do its magic.
 	 */
-
 	if (kvm_vcpu_dabt_isvalid(vcpu)) {
-		ret = decode_hsr(vcpu, fault_ipa, &mmio);
+		ret = decode_hsr(vcpu, &is_write, &len);
 		if (ret)
 			return ret;
 	} else {
@@ -188,21 +185,34 @@ int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 
 	rt = vcpu->arch.mmio_decode.rt;
 
-	if (mmio.is_write) {
-		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt),
-					       mmio.len);
+	if (is_write) {
+		data = vcpu_data_guest_to_host(vcpu, *vcpu_reg(vcpu, rt), len);
+
+		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, len, fault_ipa, data);
+		mmio_write_buf(data_buf, len, data);
 
-		trace_kvm_mmio(KVM_TRACE_MMIO_WRITE, mmio.len,
-			       fault_ipa, data);
-		mmio_write_buf(mmio.data, mmio.len, data);
+		ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+				       data_buf);
 	} else {
-		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, mmio.len,
+		trace_kvm_mmio(KVM_TRACE_MMIO_READ_UNSATISFIED, len,
 			       fault_ipa, 0);
+
+		ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, fault_ipa, len,
+				      data_buf);
 	}
 
-	if (vgic_handle_mmio(vcpu, run, &mmio))
+	/* Now prepare kvm_run for the potential return to userland. */
+	run->mmio.is_write = is_write;
+	run->mmio.phys_addr = fault_ipa;
+	run->mmio.len = len;
+	memcpy(run->mmio.data, data_buf, len);
+
+	if (!ret) {
+		/* We handled the access successfully in the kernel. */
+		kvm_handle_mmio_return(vcpu, run);
 		return 1;
+	}
 
-	kvm_prepare_mmio(run, &mmio);
+	run->exit_reason = KVM_EXIT_MMIO;
 	return 0;
 }
diff --git a/arch/arm64/include/asm/kvm_mmio.h b/arch/arm64/include/asm/kvm_mmio.h
index 9f52beb7cb13..889c908ee631 100644
--- a/arch/arm64/include/asm/kvm_mmio.h
+++ b/arch/arm64/include/asm/kvm_mmio.h
@@ -31,28 +31,6 @@ struct kvm_decode {
 	bool sign_extend;
 };
 
-/*
- * The in-kernel MMIO emulation code wants to use a copy of run->mmio,
- * which is an anonymous type. Use our own type instead.
- */
-struct kvm_exit_mmio {
-	phys_addr_t	phys_addr;
-	u8		data[8];
-	u32		len;
-	bool		is_write;
-	void		*private;
-};
-
-static inline void kvm_prepare_mmio(struct kvm_run *run,
-				    struct kvm_exit_mmio *mmio)
-{
-	run->mmio.phys_addr = mmio->phys_addr;
-	run->mmio.len = mmio->len;
-	run->mmio.is_write = mmio->is_write;
-	memcpy(run->mmio.data, mmio->data, mmio->len);
-	run->exit_reason = KVM_EXIT_MMIO;
-}
-
 int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
 int io_mem_abort(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		 phys_addr_t fault_ipa);
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index d6705f447c28..16ec2c8b784d 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -140,8 +140,6 @@ struct vgic_params {
 };
 
 struct vgic_vm_ops {
-	bool (*handle_mmio)(struct kvm_vcpu *, struct kvm_run *,
-			    struct kvm_exit_mmio *);
 	bool (*queue_sgi)(struct kvm_vcpu *, int irq);
 	void (*add_sgi_source)(struct kvm_vcpu *, int irq, int source);
 	int (*init_model)(struct kvm *);
@@ -313,8 +311,6 @@ struct vgic_cpu {
 
 struct kvm;
 struct kvm_vcpu;
-struct kvm_run;
-struct kvm_exit_mmio;
 
 int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
 int kvm_vgic_hyp_init(void);
@@ -330,8 +326,6 @@ int kvm_vgic_inject_irq(struct kvm *kvm, int cpuid, unsigned int irq_num,
 void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
 int kvm_vgic_vcpu_active_irq(struct kvm_vcpu *vcpu);
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		      struct kvm_exit_mmio *mmio);
 
 #define irqchip_in_kernel(k)	(!!((k)->arch.vgic.in_kernel))
 #define vgic_initialized(k)	(!!((k)->arch.vgic.nr_cpus))
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
index 7460b376d090..13907970d11c 100644
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -404,24 +404,6 @@ static const struct vgic_io_range vgic_dist_ranges[] = {
 	{}
 };
 
-static bool vgic_v2_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-				struct kvm_exit_mmio *mmio)
-{
-	unsigned long base = vcpu->kvm->arch.vgic.vgic_dist_base;
-
-	if (!is_in_range(mmio->phys_addr, mmio->len, base,
-			 KVM_VGIC_V2_DIST_SIZE))
-		return false;
-
-	/* GICv2 does not support accesses wider than 32 bits */
-	if (mmio->len > 4) {
-		kvm_inject_dabt(vcpu, mmio->phys_addr);
-		return true;
-	}
-
-	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_dist_ranges, base);
-}
-
 static void vgic_dispatch_sgi(struct kvm_vcpu *vcpu, u32 reg)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -580,7 +562,6 @@ void vgic_v2_init_emulation(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	dist->vm_ops.handle_mmio = vgic_v2_handle_mmio;
 	dist->vm_ops.queue_sgi = vgic_v2_queue_sgi;
 	dist->vm_ops.add_sgi_source = vgic_v2_add_sgi_source;
 	dist->vm_ops.init_model = vgic_v2_init_model;
@@ -690,6 +671,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 	struct kvm_vcpu *vcpu, *tmp_vcpu;
 	struct vgic_dist *vgic;
 	struct kvm_exit_mmio mmio;
+	u32 data;
 
 	offset = attr->attr & KVM_DEV_ARM_VGIC_OFFSET_MASK;
 	cpuid = (attr->attr & KVM_DEV_ARM_VGIC_CPUID_MASK) >>
@@ -711,6 +693,7 @@ static int vgic_attr_regs_access(struct kvm_device *dev,
 
 	mmio.len = 4;
 	mmio.is_write = is_write;
+	mmio.data = &data;
 	if (is_write)
 		mmio_data_write(&mmio, ~0, *reg);
 	switch (attr->group) {
diff --git a/virt/kvm/arm/vgic-v3-emul.c b/virt/kvm/arm/vgic-v3-emul.c
index eb1a797cb9c1..e9c3a7a83833 100644
--- a/virt/kvm/arm/vgic-v3-emul.c
+++ b/virt/kvm/arm/vgic-v3-emul.c
@@ -708,40 +708,6 @@ static const struct vgic_io_range vgic_redist_ranges[] = {
 	{},
 };
 
-/*
- * This function splits accesses between the distributor and the two
- * redistributor parts (private/SPI). As each redistributor is accessible
- * from any CPU, we have to determine the affected VCPU by taking the faulting
- * address into account. We then pass this VCPU to the handler function via
- * the private parameter.
- */
-#define SGI_BASE_OFFSET SZ_64K
-static bool vgic_v3_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-				struct kvm_exit_mmio *mmio)
-{
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	unsigned long dbase = dist->vgic_dist_base;
-	unsigned long rdbase = dist->vgic_redist_base;
-	int nrcpus = atomic_read(&vcpu->kvm->online_vcpus);
-	int vcpu_id;
-
-	if (is_in_range(mmio->phys_addr, mmio->len, dbase, GIC_V3_DIST_SIZE)) {
-		return vgic_handle_mmio_range(vcpu, run, mmio,
-					      vgic_v3_dist_ranges, dbase);
-	}
-
-	if (!is_in_range(mmio->phys_addr, mmio->len, rdbase,
-			 GIC_V3_REDIST_SIZE * nrcpus))
-		return false;
-
-	vcpu_id = (mmio->phys_addr - rdbase) / GIC_V3_REDIST_SIZE;
-	rdbase += (vcpu_id * GIC_V3_REDIST_SIZE);
-	mmio->private = kvm_get_vcpu(vcpu->kvm, vcpu_id);
-
-	return vgic_handle_mmio_range(vcpu, run, mmio, vgic_redist_ranges,
-				      rdbase);
-}
-
 static bool vgic_v3_queue_sgi(struct kvm_vcpu *vcpu, int irq)
 {
 	if (vgic_queue_irq(vcpu, 0, irq)) {
@@ -861,7 +827,6 @@ void vgic_v3_init_emulation(struct kvm *kvm)
 {
 	struct vgic_dist *dist = &kvm->arch.vgic;
 
-	dist->vm_ops.handle_mmio = vgic_v3_handle_mmio;
 	dist->vm_ops.queue_sgi = vgic_v3_queue_sgi;
 	dist->vm_ops.add_sgi_source = vgic_v3_add_sgi_source;
 	dist->vm_ops.init_model = vgic_v3_init_model;
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index e968179e592f..b70174e74868 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -758,7 +758,6 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
 			       unsigned long offset,
 			       const struct vgic_io_range *range)
 {
-	u32 *data32 = (void *)mmio->data;
 	struct kvm_exit_mmio mmio32;
 	bool ret;
 
@@ -775,70 +774,17 @@ static bool call_range_handler(struct kvm_vcpu *vcpu,
 	mmio32.private = mmio->private;
 
 	mmio32.phys_addr = mmio->phys_addr + 4;
-	if (mmio->is_write)
-		*(u32 *)mmio32.data = data32[1];
+	mmio32.data = &((u32 *)mmio->data)[1];
 	ret = range->handle_mmio(vcpu, &mmio32, offset + 4);
-	if (!mmio->is_write)
-		data32[1] = *(u32 *)mmio32.data;
 
 	mmio32.phys_addr = mmio->phys_addr;
-	if (mmio->is_write)
-		*(u32 *)mmio32.data = data32[0];
+	mmio32.data = &((u32 *)mmio->data)[0];
 	ret |= range->handle_mmio(vcpu, &mmio32, offset);
-	if (!mmio->is_write)
-		data32[0] = *(u32 *)mmio32.data;
 
 	return ret;
 }
 
 /**
- * vgic_handle_mmio_range - handle an in-kernel MMIO access
- * @vcpu: pointer to the vcpu performing the access
- * @run: pointer to the kvm_run structure
- * @mmio: pointer to the data describing the access
- * @ranges: array of MMIO ranges in a given region
- * @mmio_base: base address of that region
- *
- * returns true if the MMIO access could be performed
- */
-bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			    struct kvm_exit_mmio *mmio,
-			    const struct vgic_io_range *ranges,
-			    unsigned long mmio_base)
-{
-	const struct vgic_io_range *range;
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-	bool updated_state;
-	unsigned long offset;
-
-	offset = mmio->phys_addr - mmio_base;
-	range = vgic_find_range(ranges, mmio->len, offset);
-	if (unlikely(!range || !range->handle_mmio)) {
-		pr_warn("Unhandled access %d %08llx %d\n",
-			mmio->is_write, mmio->phys_addr, mmio->len);
-		return false;
-	}
-
-	spin_lock(&vcpu->kvm->arch.vgic.lock);
-	offset -= range->base;
-	if (vgic_validate_access(dist, range, offset)) {
-		updated_state = call_range_handler(vcpu, mmio, offset, range);
-	} else {
-		if (!mmio->is_write)
-			memset(mmio->data, 0, mmio->len);
-		updated_state = false;
-	}
-	spin_unlock(&vcpu->kvm->arch.vgic.lock);
-	kvm_prepare_mmio(run, mmio);
-	kvm_handle_mmio_return(vcpu, run);
-
-	if (updated_state)
-		vgic_kick_vcpus(vcpu->kvm);
-
-	return true;
-}
-
-/**
  * vgic_handle_mmio_access - handle an in-kernel MMIO access
  * This is called by the read/write KVM IO device wrappers below.
  * @vcpu: pointer to the vcpu performing the access
@@ -873,23 +819,24 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
 	mmio.phys_addr = addr;
 	mmio.len = len;
 	mmio.is_write = is_write;
-	if (is_write)
-		memcpy(mmio.data, val, len);
+	mmio.data = val;
 	mmio.private = iodev->redist_vcpu;
 
 	spin_lock(&dist->lock);
 	offset -= range->base;
 	if (vgic_validate_access(dist, range, offset)) {
 		updated_state = call_range_handler(vcpu, &mmio, offset, range);
-		if (!is_write)
-			memcpy(val, mmio.data, len);
 	} else {
 		if (!is_write)
 			memset(val, 0, len);
 		updated_state = false;
 	}
 	spin_unlock(&dist->lock);
-	kvm_prepare_mmio(run, &mmio);
+	run->mmio.is_write = is_write;
+	run->mmio.len = len;
+	run->mmio.phys_addr = addr;
+	memcpy(run->mmio.data, val, len);
+
 	kvm_handle_mmio_return(vcpu, run);
 
 	if (updated_state)
@@ -898,30 +845,6 @@ static int vgic_handle_mmio_access(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-/**
- * vgic_handle_mmio - handle an in-kernel MMIO access for the GIC emulation
- * @vcpu: pointer to the vcpu performing the access
- * @run: pointer to the kvm_run structure
- * @mmio: pointer to the data describing the access
- *
- * returns true if the MMIO access has been performed in kernel space,
- * and false if it needs to be emulated in user space.
- * Calls the actual handling routine for the selected VGIC model.
- */
-bool vgic_handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *run,
-		      struct kvm_exit_mmio *mmio)
-{
-	if (!irqchip_in_kernel(vcpu->kvm))
-		return false;
-
-	/*
-	 * This will currently call either vgic_v2_handle_mmio() or
-	 * vgic_v3_handle_mmio(), which in turn will call
-	 * vgic_handle_mmio_range() defined above.
-	 */
-	return vcpu->kvm->arch.vgic.vm_ops.handle_mmio(vcpu, run, mmio);
-}
-
 static int vgic_handle_mmio_read(struct kvm_vcpu *vcpu,
 				 struct kvm_io_device *this,
 				 gpa_t addr, int len, void *val)
diff --git a/virt/kvm/arm/vgic.h b/virt/kvm/arm/vgic.h
index 28fa3aaf6367..0df74cbb6200 100644
--- a/virt/kvm/arm/vgic.h
+++ b/virt/kvm/arm/vgic.h
@@ -59,6 +59,14 @@ void vgic_set_vmcr(struct kvm_vcpu *vcpu, struct vgic_vmcr *vmcr);
 bool vgic_queue_irq(struct kvm_vcpu *vcpu, u8 sgi_source_id, int irq);
 void vgic_unqueue_irqs(struct kvm_vcpu *vcpu);
 
+struct kvm_exit_mmio {
+	phys_addr_t	phys_addr;
+	void		*data;
+	u32		len;
+	bool		is_write;
+	void		*private;
+};
+
 void vgic_reg_access(struct kvm_exit_mmio *mmio, u32 *reg,
 		     phys_addr_t offset, int mode);
 bool handle_mmio_raz_wi(struct kvm_vcpu *vcpu, struct kvm_exit_mmio *mmio,
@@ -99,11 +107,6 @@ const
 struct vgic_io_range *vgic_find_range(const struct vgic_io_range *ranges,
 				      int len, gpa_t offset);
 
-bool vgic_handle_mmio_range(struct kvm_vcpu *vcpu, struct kvm_run *run,
-			    struct kvm_exit_mmio *mmio,
-			    const struct vgic_io_range *ranges,
-			    unsigned long mmio_base);
-
 bool vgic_handle_enable_reg(struct kvm *kvm, struct kvm_exit_mmio *mmio,
 			    phys_addr_t offset, int vcpu_id, int access);
 