path: root/virt/kvm
author    Eric Auger <eric.auger@linaro.org>  2015-03-04 05:14:35 -0500
committer Christoffer Dall <christoffer.dall@linaro.org>  2015-03-12 10:15:33 -0400
commit    649cf73994e8ac69dfe3e7a35fba9acf051e7fe6 (patch)
tree      b77d15b99f731e35ac0a5a8682cadf9e07db8a61 /virt/kvm
parent    c1426e4c5add09042840013dfa5565e6be6d412e (diff)
KVM: arm/arm64: remove coarse grain dist locking at kvm_vgic_sync_hwstate
To prepare for the irqfd addition, coarse grain locking is removed at the kvm_vgic_sync_hwstate level and finer grain locking is introduced in vgic_process_maintenance only.

Signed-off-by: Eric Auger <eric.auger@linaro.org>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Acked-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
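In outline, the patch narrows the critical section: instead of kvm_vgic_sync_hwstate() wrapping the entire hardware-state sync in the distributor lock, only the per-list-register EOI handling inside vgic_process_maintenance() that touches distributor-visible state now takes the lock. A minimal, hypothetical userspace sketch of the before/after locking pattern (pthread mutexes stand in for the kernel spinlock; fake_dist and the update_* helpers are illustrative stand-ins, not kernel APIs):

/*
 * Hypothetical sketch of the locking change; not kernel code.
 * A pthread mutex plays the role of dist->lock.
 */
#include <pthread.h>

struct fake_dist {
	pthread_mutex_t lock;		/* stands in for the distributor lock */
	int level_pending[64];		/* shared, distributor-visible state */
};

/* Shared state touched while handling an EOI (illustrative). */
static void update_shared_eoi_state(struct fake_dist *d, int lr)
{
	d->level_pending[lr] = 0;
}

/* Per-CPU list-register bookkeeping that needs no shared lock (illustrative). */
static void update_private_lr_state(int lr)
{
	(void)lr;
}

/* Before: coarse grain -- the caller holds the lock across the whole sync. */
static void sync_hwstate_coarse(struct fake_dist *d, int nr_lr)
{
	pthread_mutex_lock(&d->lock);
	for (int lr = 0; lr < nr_lr; lr++) {
		update_shared_eoi_state(d, lr);
		update_private_lr_state(lr);
	}
	pthread_mutex_unlock(&d->lock);
}

/* After: fine grain -- only the shared-state update per LR is locked. */
static void sync_hwstate_fine(struct fake_dist *d, int nr_lr)
{
	for (int lr = 0; lr < nr_lr; lr++) {
		pthread_mutex_lock(&d->lock);
		update_shared_eoi_state(d, lr);
		pthread_mutex_unlock(&d->lock);
		update_private_lr_state(lr);
	}
}

The design point is simply to shrink the window during which dist->lock is held, which the changelog above motivates as preparation for irqfd support.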
Diffstat (limited to 'virt/kvm')
-rw-r--r--  virt/kvm/arm/vgic.c  | 13
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 86cec7924611..897c849305db 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1081,6 +1081,7 @@ epilog:
 static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 {
 	u32 status = vgic_get_interrupt_status(vcpu);
+	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
 	bool level_pending = false;
 
 	kvm_debug("STATUS = %08x\n", status);
@@ -1098,6 +1099,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 			struct vgic_lr vlr = vgic_get_lr(vcpu, lr);
 			WARN_ON(vgic_irq_is_edge(vcpu, vlr.irq));
 
+			spin_lock(&dist->lock);
 			vgic_irq_clear_queued(vcpu, vlr.irq);
 			WARN_ON(vlr.state & LR_STATE_MASK);
 			vlr.state = 0;
@@ -1125,6 +1127,8 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 				vgic_cpu_irq_clear(vcpu, vlr.irq);
 			}
 
+			spin_unlock(&dist->lock);
+
 			/*
 			 * Despite being EOIed, the LR may not have
 			 * been marked as empty.
@@ -1139,10 +1143,7 @@ static bool vgic_process_maintenance(struct kvm_vcpu *vcpu)
 	return level_pending;
 }
 
-/*
- * Sync back the VGIC state after a guest run. The distributor lock is
- * needed so we don't get preempted in the middle of the state processing.
- */
+/* Sync back the VGIC state after a guest run */
 static void __kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
@@ -1189,14 +1190,10 @@ void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
 
 void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
 {
-	struct vgic_dist *dist = &vcpu->kvm->arch.vgic;
-
 	if (!irqchip_in_kernel(vcpu->kvm))
 		return;
 
-	spin_lock(&dist->lock);
 	__kvm_vgic_sync_hwstate(vcpu);
-	spin_unlock(&dist->lock);
 }
 
 int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)