author		Michel Lespinasse <walken@google.com>	2013-07-08 17:23:51 -0400
committer	Ingo Molnar <mingo@kernel.org>	2013-07-12 07:51:19 -0400
commit		c4be9cb4f19cbd534a6c4c334cd48d8bb483e17a (patch)
tree		936e4c08a620b1614394b4bdff72d2d3b524b43d /kernel/lglock.c
parent		a51805efae5dda0da66f79268ffcf0715f9dbea4 (diff)
lglock: Update lockdep annotations to report recursive local locks
Oleg Nesterov recently noticed that the lockdep annotations in lglock.c
are not sufficient to detect some obvious deadlocks, such as
lg_local_lock(LOCK) + lg_local_lock(LOCK) or spin_lock(X) +
lg_local_lock(Y) vs lg_local_lock(Y) + spin_lock(X).

Both issues are easily fixed by indicating to lockdep that lglock's
local locks are not recursive. We shouldn't use the rwlock
acquire/release functions here, as lglock doesn't share the same
semantics. Instead we can base our lockdep annotations on the
lock_acquire_shared (for local lglock) and lock_acquire_exclusive
(for global lglock) helpers.

I am not proposing new lglock specific helpers as I don't see the
point of the existing second level of helpers :)

Noticed-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
Cc: "Srivatsa S. Bhat" <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "Paul E. McKenney" <paulmck@us.ibm.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/20130708212352.1769031C15E@corp2gmr1-1.hot.corp.google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
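To make the two deadlock patterns concrete, a minimal sketch follows; my_lg and other_lock are hypothetical names introduced for illustration and are not part of this patch. Under the old rwlock_acquire_read() annotation (recursive-read semantics) lockdep stays silent on both patterns; with the non-recursive lock_acquire_shared() annotation it can report them:

	/* Illustrative only: my_lg and other_lock are hypothetical names. */
	#include <linux/lglock.h>
	#include <linux/spinlock.h>

	DEFINE_STATIC_LGLOCK(my_lg);
	static DEFINE_SPINLOCK(other_lock);

	/* Pattern 1: lg_local_lock(L) + lg_local_lock(L).  The second call
	 * spins on the per-CPU lock this CPU already holds: self-deadlock. */
	static void pattern_recursive(void)
	{
		lg_local_lock(&my_lg);
		lg_local_lock(&my_lg);		/* never returns */
		lg_local_unlock(&my_lg);
		lg_local_unlock(&my_lg);
	}

	/* Pattern 2: spin_lock(X) + lg_local_lock(Y) in one path versus
	 * lg_local_lock(Y) + spin_lock(X) in another (AB-BA inversion). */
	static void pattern_abba_a(void)
	{
		spin_lock(&other_lock);
		lg_local_lock(&my_lg);
		lg_local_unlock(&my_lg);
		spin_unlock(&other_lock);
	}

	static void pattern_abba_b(void)
	{
		lg_local_lock(&my_lg);
		spin_lock(&other_lock);
		spin_unlock(&other_lock);
		lg_local_unlock(&my_lg);
	}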
Diffstat (limited to 'kernel/lglock.c')
-rw-r--r--	kernel/lglock.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/lglock.c b/kernel/lglock.c
index 6535a667a5a7..86ae2aebf004 100644
--- a/kernel/lglock.c
+++ b/kernel/lglock.c
@@ -21,7 +21,7 @@ void lg_local_lock(struct lglock *lg)
 	arch_spinlock_t *lock;
 
 	preempt_disable();
-	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
 	arch_spin_lock(lock);
 }
@@ -31,7 +31,7 @@ void lg_local_unlock(struct lglock *lg)
 {
 	arch_spinlock_t *lock;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = this_cpu_ptr(lg->lock);
 	arch_spin_unlock(lock);
 	preempt_enable();
@@ -43,7 +43,7 @@ void lg_local_lock_cpu(struct lglock *lg, int cpu)
 	arch_spinlock_t *lock;
 
 	preempt_disable();
-	rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
 	arch_spin_lock(lock);
 }
@@ -53,7 +53,7 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 {
 	arch_spinlock_t *lock;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	lock = per_cpu_ptr(lg->lock, cpu);
 	arch_spin_unlock(lock);
 	preempt_enable();
@@ -65,7 +65,7 @@ void lg_global_lock(struct lglock *lg)
 	int i;
 
 	preempt_disable();
-	rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
+	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
 	for_each_possible_cpu(i) {
 		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
@@ -78,7 +78,7 @@ void lg_global_unlock(struct lglock *lg)
 {
 	int i;
 
-	rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
+	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
 	for_each_possible_cpu(i) {
 		arch_spinlock_t *lock;
 		lock = per_cpu_ptr(lg->lock, i);
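For reference, the semantic difference the patch leans on: rwlock_acquire_read() annotates a recursive-read acquisition, while the helpers used here (introduced by the parent commit a51805efae5d) annotate non-recursive shared and exclusive acquisitions. A sketch of the relevant include/linux/lockdep.h definitions as they stood at the time; the fourth lock_acquire() argument is 'read' (0 = exclusive, 1 = shared, 2 = shared-recursive):

	#define lock_acquire_exclusive(l, s, t, n, i)		lock_acquire(l, s, t, 0, 2, n, i)
	#define lock_acquire_shared(l, s, t, n, i)		lock_acquire(l, s, t, 1, 2, n, i)
	#define lock_acquire_shared_recursive(l, s, t, n, i)	lock_acquire(l, s, t, 2, 2, n, i)

	#define rwlock_acquire(l, s, t, i)		lock_acquire_exclusive(l, s, t, NULL, i)
	#define rwlock_acquire_read(l, s, t, i)		lock_acquire_shared_recursive(l, s, t, NULL, i)

Because a recursive-read acquisition (read == 2) may validly nest inside itself, the old annotation hid the lg_local_lock(LOCK) + lg_local_lock(LOCK) self-deadlock; with read == 1, lockdep treats a second acquisition of the same lock_dep_map as a potential deadlock and reports it.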