diff options
author | Ingo Molnar <mingo@kernel.org> | 2018-12-04 01:52:30 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2018-12-04 01:52:30 -0500 |
commit | 4bbfd7467cfc7d42e18d3008fa6a28ffd56e901a (patch) | |
tree | 3b6d27e740976d0393fd13ae675ae6a0e07812a9 /virt | |
parent | 2595646791c319cadfdbf271563aac97d0843dc7 (diff) | |
parent | 5ac7cdc29897e5fc3f5e214f3f8c8b03ef8d7029 (diff) |
Merge branch 'for-mingo' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu into core/rcu
Pull RCU changes from Paul E. McKenney:
- Convert RCU's BUG_ON() and similar calls to WARN_ON() and similar.
- Replace calls to RCU-bh and RCU-sched update-side functions
with their vanilla RCU counterparts. This series is a step
towards complete removal of the RCU-bh and RCU-sched update-side
functions.
( Note that some of these conversions are going upstream via their
respective maintainers. )
- Documentation updates, including a number of flavor-consolidation
updates from Joel Fernandes.
- Miscellaneous fixes.
- Automate generation of the initrd filesystem used for
rcutorture testing.
- Convert spin_is_locked() assertions to instead use lockdep.
( Note that some of these conversions are going upstream via their
respective maintainers. )
- SRCU updates, especially including a fix from Dennis Krein
for a bag-on-head-class bug.
- RCU torture-test updates.
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'virt')
-rw-r--r-- | virt/kvm/arm/vgic/vgic.c | 12 |
1 file changed, 6 insertions, 6 deletions
diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c index 7cfdfbc910e0..50e25438fb3c 100644 --- a/virt/kvm/arm/vgic/vgic.c +++ b/virt/kvm/arm/vgic/vgic.c | |||
@@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active) | |||
196 | */ | 196 | */ |
197 | static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) | 197 | static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) |
198 | { | 198 | { |
199 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); | 199 | lockdep_assert_held(&irq->irq_lock); |
200 | 200 | ||
201 | /* If the interrupt is active, it must stay on the current vcpu */ | 201 | /* If the interrupt is active, it must stay on the current vcpu */ |
202 | if (irq->active) | 202 | if (irq->active) |
@@ -273,7 +273,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu) | |||
273 | { | 273 | { |
274 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; | 274 | struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu; |
275 | 275 | ||
276 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | 276 | lockdep_assert_held(&vgic_cpu->ap_list_lock); |
277 | 277 | ||
278 | list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp); | 278 | list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp); |
279 | } | 279 | } |
@@ -311,7 +311,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, | |||
311 | { | 311 | { |
312 | struct kvm_vcpu *vcpu; | 312 | struct kvm_vcpu *vcpu; |
313 | 313 | ||
314 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); | 314 | lockdep_assert_held(&irq->irq_lock); |
315 | 315 | ||
316 | retry: | 316 | retry: |
317 | vcpu = vgic_target_oracle(irq); | 317 | vcpu = vgic_target_oracle(irq); |
@@ -702,7 +702,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu) | |||
702 | static inline void vgic_populate_lr(struct kvm_vcpu *vcpu, | 702 | static inline void vgic_populate_lr(struct kvm_vcpu *vcpu, |
703 | struct vgic_irq *irq, int lr) | 703 | struct vgic_irq *irq, int lr) |
704 | { | 704 | { |
705 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock)); | 705 | lockdep_assert_held(&irq->irq_lock); |
706 | 706 | ||
707 | if (kvm_vgic_global_state.type == VGIC_V2) | 707 | if (kvm_vgic_global_state.type == VGIC_V2) |
708 | vgic_v2_populate_lr(vcpu, irq, lr); | 708 | vgic_v2_populate_lr(vcpu, irq, lr); |
@@ -736,7 +736,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu, | |||
736 | 736 | ||
737 | *multi_sgi = false; | 737 | *multi_sgi = false; |
738 | 738 | ||
739 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | 739 | lockdep_assert_held(&vgic_cpu->ap_list_lock); |
740 | 740 | ||
741 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { | 741 | list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { |
742 | int w; | 742 | int w; |
@@ -761,7 +761,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu) | |||
761 | bool multi_sgi; | 761 | bool multi_sgi; |
762 | u8 prio = 0xff; | 762 | u8 prio = 0xff; |
763 | 763 | ||
764 | DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock)); | 764 | lockdep_assert_held(&vgic_cpu->ap_list_lock); |
765 | 765 | ||
766 | count = compute_ap_list_depth(vcpu, &multi_sgi); | 766 | count = compute_ap_list_depth(vcpu, &multi_sgi); |
767 | if (count > kvm_vgic_global_state.nr_lr || multi_sgi) | 767 | if (count > kvm_vgic_global_state.nr_lr || multi_sgi) |