author	Julien Thierry <julien.thierry@arm.com>	2018-11-26 13:26:44 -0500
committer	Marc Zyngier <marc.zyngier@arm.com>	2018-12-18 10:14:49 -0500
commit	2e2f6c3c0b08eed3fcf7de3c7684c940451bdeb1 (patch)
tree	e790bdc796515059a8976644e056df0b2f9029d1 /virt
parent	b8e0ba7c8bea994011aff3b4c35256b180fab874 (diff)
KVM: arm/arm64: vgic: Do not cond_resched_lock() with IRQs disabled
To change the active state of an interrupt via MMIO, a halt is requested
for all vcpus of the affected guest before modifying the IRQ state. The
waiting for this was done by calling cond_resched_lock() in
vgic_mmio_change_active(). However, interrupts are disabled at this
point and we cannot reschedule a vcpu.

We actually don't need any of this, as kvm_arm_halt_guest ensures that
all the other vcpus are out of the guest. Let's just drop that useless
code.

Signed-off-by: Julien Thierry <julien.thierry@arm.com>
Suggested-by: Christoffer Dall <christoffer.dall@arm.com>
Cc: stable@vger.kernel.org
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
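A minimal sketch of the ordering the commit message relies on may help;
it is illustrative only and not code from the patch. example_set_active()
is a hypothetical wrapper written for this log, and the only helpers
assumed are kvm_arm_halt_guest() and kvm_arm_resume_guest(), which the
active-state path (vgic_change_active_prepare(), per the removed comment
in the diff below) uses to keep every vcpu out of the guest around the
update.

#include <linux/kvm_host.h>
#include <linux/spinlock.h>

/*
 * Hypothetical illustration of the flow described in the commit
 * message; not the kernel's actual implementation.
 */
static void example_set_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			       bool active)
{
	unsigned long flags;

	/* Kick every vcpu of this guest out of the guest and keep it out. */
	kvm_arm_halt_guest(vcpu->kvm);

	spin_lock_irqsave(&irq->irq_lock, flags);
	/*
	 * Interrupts are disabled here, so nothing in this region may
	 * sleep or reschedule. Because the guest is halted, no list
	 * register can still hold stale state for this IRQ, and the
	 * active state can be updated directly.
	 */
	irq->active = active;
	spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* Let the guest run again once the new state is committed. */
	kvm_arm_resume_guest(vcpu->kvm);
}

With every vcpu parked like this, irq->vcpu->cpu cannot refer to a
running vcpu, so the while loop removed below had nothing left to wait
for, and calling cond_resched_lock() under spin_lock_irqsave() could not
have rescheduled anyway.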
Diffstat (limited to 'virt')
-rw-r--r--	virt/kvm/arm/vgic/vgic-mmio.c	| 21 ---------------------
1 file changed, 0 insertions(+), 21 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index 2b450d49a046..7c2231950c33 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -313,27 +313,6 @@ static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
 
 	spin_lock_irqsave(&irq->irq_lock, flags);
 
-	/*
-	 * If this virtual IRQ was written into a list register, we
-	 * have to make sure the CPU that runs the VCPU thread has
-	 * synced back the LR state to the struct vgic_irq.
-	 *
-	 * As long as the conditions below are true, we know the VCPU thread
-	 * may be on its way back from the guest (we kicked the VCPU thread in
-	 * vgic_change_active_prepare) and still has to sync back this IRQ,
-	 * so we release and re-acquire the spin_lock to let the other thread
-	 * sync back the IRQ.
-	 *
-	 * When accessing VGIC state from user space, requester_vcpu is
-	 * NULL, which is fine, because we guarantee that no VCPUs are running
-	 * when accessing VGIC state from user space so irq->vcpu->cpu is
-	 * always -1.
-	 */
-	while (irq->vcpu && /* IRQ may have state in an LR somewhere */
-	       irq->vcpu != requester_vcpu && /* Current thread is not the VCPU thread */
-	       irq->vcpu->cpu != -1) /* VCPU thread is running */
-		cond_resched_lock(&irq->irq_lock);
-
 	if (irq->hw) {
 		vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
 	} else {