author	Lance Roy <ldr709@gmail.com>	2018-10-05 02:45:50 -0400
committer	Paul E. McKenney <paulmck@linux.ibm.com>	2018-11-12 12:06:22 -0500
commit	d4d592a6eeda1e381f38f398e7a0474a599c11ed (patch)
tree	52b092e47fde5c929488eadbdc20a4147e6ce26e /virt
parent	35f3aa39f243e8c95e12a2b2d05b1d2e62ac58a4 (diff)
KVM: arm/arm64: vgic: Replace spin_is_locked() with lockdep
lockdep_assert_held() is better suited to checking locking requirements,
since it only checks if the current thread holds the lock regardless of
whether someone else does. This is also a step towards possibly removing
spin_is_locked().

Signed-off-by: Lance Roy <ldr709@gmail.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Eric Auger <eric.auger@redhat.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: <kvmarm@lists.cs.columbia.edu>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
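Not part of the patch itself, but a minimal kernel-style sketch of the difference
between the two checks; the lock and helper names below are hypothetical.
spin_is_locked() only reports that *some* context holds the lock, whereas
lockdep_assert_held() complains under CONFIG_LOCKDEP unless the *current*
context holds it, and compiles away when lockdep is disabled.

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	/* hypothetical lock and helper, for illustration only */
	static DEFINE_SPINLOCK(example_lock);

	static void example_locked_helper(void)
	{
		/* old-style check: passes even if another CPU holds the lock */
		WARN_ON_ONCE(!spin_is_locked(&example_lock));

		/* new-style check: asserts that this context holds the lock */
		lockdep_assert_held(&example_lock);
	}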
Diffstat (limited to 'virt')
-rw-r--r-- virt/kvm/arm/vgic/vgic.c | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 7cfdfbc910e0..50e25438fb3c 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
  */
 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
 {
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 	/* If the interrupt is active, it must stay on the current vcpu */
 	if (irq->active)
@@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
 }
@@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
311{ 311{
312 struct kvm_vcpu *vcpu; 312 struct kvm_vcpu *vcpu;
313 313
314 DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); 314 lockdep_assert_held(&irq->irq_lock);
315 315
316retry: 316retry:
317 vcpu = vgic_target_oracle(irq); 317 vcpu = vgic_target_oracle(irq);
@@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
 static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
 				    struct vgic_irq *irq, int lr)
 {
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 	if (kvm_vgic_global_state.type == VGIC_V2)
 		vgic_v2_populate_lr(vcpu, irq, lr);
@@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 
 	*multi_sgi = false;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		int w;
@@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	bool multi_sgi;
 	u8 prio = 0xff;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	count = compute_ap_list_depth(vcpu, &multi_sgi);
 	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)