aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMarc Zyngier <marc.zyngier@arm.com>2017-10-27 10:28:44 -0400
committerChristoffer Dall <christoffer.dall@linaro.org>2017-11-10 03:29:38 -0500
commitff9c114394aaed2061df112b75be9835a1394be4 (patch)
tree106d67893f397b9a96fd90426bc42a1efc9b1b2d
parentfb0cada604fcda82813f654b0d95963ee165770f (diff)
KVM: arm/arm64: GICv4: Handle MOVALL applied to a vPE
The current implementation of MOVALL doesn't allow us to call into the core ITS code as we hold a number of spinlocks. Let's try a method used in other parts of the code, where we copy the intids of the candidate interrupts, and then do whatever we need to do with them outside of the critical section. This allows us to move the interrupts one by one, at the expense of a bit of CPU time. Who cares? MOVALL is such a stupid command anyway... Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org> Signed-off-by: Marc Zyngier <marc.zyngier@arm.com> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
-rw-r--r--virt/kvm/arm/vgic/vgic-its.c19
1 file changed, 10 insertions, 9 deletions
diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
index 15e79285380d..78d11aed1e17 100644
--- a/virt/kvm/arm/vgic/vgic-its.c
+++ b/virt/kvm/arm/vgic/vgic-its.c
@@ -1169,11 +1169,12 @@ static int vgic_its_cmd_handle_invall(struct kvm *kvm, struct vgic_its *its,
1169static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its, 1169static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
1170 u64 *its_cmd) 1170 u64 *its_cmd)
1171{ 1171{
1172 struct vgic_dist *dist = &kvm->arch.vgic;
1173 u32 target1_addr = its_cmd_get_target_addr(its_cmd); 1172 u32 target1_addr = its_cmd_get_target_addr(its_cmd);
1174 u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32); 1173 u32 target2_addr = its_cmd_mask_field(its_cmd, 3, 16, 32);
1175 struct kvm_vcpu *vcpu1, *vcpu2; 1174 struct kvm_vcpu *vcpu1, *vcpu2;
1176 struct vgic_irq *irq; 1175 struct vgic_irq *irq;
1176 u32 *intids;
1177 int irq_count, i;
1177 1178
1178 if (target1_addr >= atomic_read(&kvm->online_vcpus) || 1179 if (target1_addr >= atomic_read(&kvm->online_vcpus) ||
1179 target2_addr >= atomic_read(&kvm->online_vcpus)) 1180 target2_addr >= atomic_read(&kvm->online_vcpus))
@@ -1185,19 +1186,19 @@ static int vgic_its_cmd_handle_movall(struct kvm *kvm, struct vgic_its *its,
1185 vcpu1 = kvm_get_vcpu(kvm, target1_addr); 1186 vcpu1 = kvm_get_vcpu(kvm, target1_addr);
1186 vcpu2 = kvm_get_vcpu(kvm, target2_addr); 1187 vcpu2 = kvm_get_vcpu(kvm, target2_addr);
1187 1188
1188 spin_lock(&dist->lpi_list_lock); 1189 irq_count = vgic_copy_lpi_list(vcpu1, &intids);
1190 if (irq_count < 0)
1191 return irq_count;
1189 1192
1190 list_for_each_entry(irq, &dist->lpi_list_head, lpi_list) { 1193 for (i = 0; i < irq_count; i++) {
1191 spin_lock(&irq->irq_lock); 1194 irq = vgic_get_irq(kvm, NULL, intids[i]);
1192 1195
1193 if (irq->target_vcpu == vcpu1) 1196 update_affinity(irq, vcpu2);
1194 irq->target_vcpu = vcpu2;
1195 1197
1196 spin_unlock(&irq->irq_lock); 1198 vgic_put_irq(kvm, irq);
1197 } 1199 }
1198 1200
1199 spin_unlock(&dist->lpi_list_lock); 1201 kfree(intids);
1200
1201 return 0; 1202 return 0;
1202} 1203}
1203 1204