summaryrefslogtreecommitdiffstats
path: root/kernel/locking/lockdep.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--  kernel/locking/lockdep.c | 29
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 34cdcbedda49..e16766ff184b 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4689,8 +4689,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 
 	/* closed head */
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4702,8 +4702,8 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	 */
 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 }
 
@@ -4744,21 +4744,17 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	int locked;
 
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
-	locked = graph_lock();
-	if (!locked)
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
 	call_rcu_zapped(pf);
-
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/*
@@ -4911,9 +4907,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
+	current->lockdep_recursion = 1;
 	pf = get_pending_free();
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
 		if (k == key) {
@@ -4925,8 +4920,8 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	WARN_ON_ONCE(!found);
 	__lockdep_free_key_range(pf, key, 1);
 	call_rcu_zapped(pf);
-	graph_unlock();
-out_irq:
+	current->lockdep_recursion = 0;
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */