aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/locking/lockdep.c
diff options
context:
space:
mode:
authorAlfredo Alvarez Fernandez <alfredoalvarezfernandez@gmail.com>2016-02-10 18:33:32 -0500
committerIngo Molnar <mingo@kernel.org>2016-02-29 04:32:29 -0500
commit5f18ab5c6bdba735b31a1e7c2618f48eae6b1b5c (patch)
treec9769ebfc3fb1f3aa11483b369db64e93bb0d3b5 /kernel/locking/lockdep.c
parent013e379a3094ff2898f8d33cfbff1573d471ee14 (diff)
locking/lockdep: Prevent chain_key collisions
The chain_key hashing macro iterate_chain_key(key1, key2) does not generate a new different value if both key1 and key2 are 0. In that case the generated value is again 0. This can lead to collisions which can result in lockdep not detecting deadlocks or circular dependencies. Avoid the problem by using class_idx (1-based) instead of class id (0-based) as an input for the hashing macro 'key2' in iterate_chain_key(key1, key2). The use of class id created collisions in cases like the following: 1.- Consider an initial state in which no class has been acquired yet. Under these circumstances an AA deadlock will not be detected by lockdep: lock [key1,key2]->new key (key1=old chain_key, key2=id) -------------------------- A [0,0]->0 A [0,0]->0 (collision) The newly generated chain_key collides with the one used before and as a result the check for a deadlock is skipped A simple test using liblockdep and a pthread mutex confirms the problem: (omitting stack traces) new class 0xe15038: 0x7ffc64950f20 acquire class [0xe15038] 0x7ffc64950f20 acquire class [0xe15038] 0x7ffc64950f20 hash chain already cached, key: 0000000000000000 tail class: [0xe15038] 0x7ffc64950f20 2.- Consider an ABBA in 2 different tasks and no class yet acquired. T1 [key1,key2]->new key T2[key1,key2]->new key -- -- A [0,0]->0 B [0,1]->1 B [0,1]->1 (collision) A In this case the collision prevents lockdep from creating the new dependency A->B. This in turn results in lockdep not detecting the circular dependency when T2 acquires A. Signed-off-by: Alfredo Alvarez Fernandez <alfredoalvarezernandez@gmail.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Paul E. 
McKenney <paulmck@linux.vnet.ibm.com> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: sasha.levin@oracle.com Link: http://lkml.kernel.org/r/1455147212-2389-4-git-send-email-alfredoalvarezernandez@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--kernel/locking/lockdep.c14
1 file changed, 6 insertions, 8 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 3261214323fa..6f446eb3c742 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2143,7 +2143,7 @@ static void check_chain_key(struct task_struct *curr)
2143{ 2143{
2144#ifdef CONFIG_DEBUG_LOCKDEP 2144#ifdef CONFIG_DEBUG_LOCKDEP
2145 struct held_lock *hlock, *prev_hlock = NULL; 2145 struct held_lock *hlock, *prev_hlock = NULL;
2146 unsigned int i, id; 2146 unsigned int i;
2147 u64 chain_key = 0; 2147 u64 chain_key = 0;
2148 2148
2149 for (i = 0; i < curr->lockdep_depth; i++) { 2149 for (i = 0; i < curr->lockdep_depth; i++) {
@@ -2160,17 +2160,16 @@ static void check_chain_key(struct task_struct *curr)
2160 (unsigned long long)hlock->prev_chain_key); 2160 (unsigned long long)hlock->prev_chain_key);
2161 return; 2161 return;
2162 } 2162 }
2163 id = hlock->class_idx - 1;
2164 /* 2163 /*
2165 * Whoops ran out of static storage again? 2164 * Whoops ran out of static storage again?
2166 */ 2165 */
2167 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) 2166 if (DEBUG_LOCKS_WARN_ON(hlock->class_idx > MAX_LOCKDEP_KEYS))
2168 return; 2167 return;
2169 2168
2170 if (prev_hlock && (prev_hlock->irq_context != 2169 if (prev_hlock && (prev_hlock->irq_context !=
2171 hlock->irq_context)) 2170 hlock->irq_context))
2172 chain_key = 0; 2171 chain_key = 0;
2173 chain_key = iterate_chain_key(chain_key, id); 2172 chain_key = iterate_chain_key(chain_key, hlock->class_idx);
2174 prev_hlock = hlock; 2173 prev_hlock = hlock;
2175 } 2174 }
2176 if (chain_key != curr->curr_chain_key) { 2175 if (chain_key != curr->curr_chain_key) {
@@ -3048,7 +3047,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3048 struct task_struct *curr = current; 3047 struct task_struct *curr = current;
3049 struct lock_class *class = NULL; 3048 struct lock_class *class = NULL;
3050 struct held_lock *hlock; 3049 struct held_lock *hlock;
3051 unsigned int depth, id; 3050 unsigned int depth;
3052 int chain_head = 0; 3051 int chain_head = 0;
3053 int class_idx; 3052 int class_idx;
3054 u64 chain_key; 3053 u64 chain_key;
@@ -3151,11 +3150,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3151 * The 'key ID' is what is the most compact key value to drive 3150 * The 'key ID' is what is the most compact key value to drive
3152 * the hash, not class->key. 3151 * the hash, not class->key.
3153 */ 3152 */
3154 id = class - lock_classes;
3155 /* 3153 /*
3156 * Whoops, we did it again.. ran straight out of our static allocation. 3154 * Whoops, we did it again.. ran straight out of our static allocation.
3157 */ 3155 */
3158 if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS)) 3156 if (DEBUG_LOCKS_WARN_ON(class_idx > MAX_LOCKDEP_KEYS))
3159 return 0; 3157 return 0;
3160 3158
3161 chain_key = curr->curr_chain_key; 3159 chain_key = curr->curr_chain_key;
@@ -3173,7 +3171,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
3173 chain_key = 0; 3171 chain_key = 0;
3174 chain_head = 1; 3172 chain_head = 1;
3175 } 3173 }
3176 chain_key = iterate_chain_key(chain_key, id); 3174 chain_key = iterate_chain_key(chain_key, class_idx);
3177 3175
3178 if (nest_lock && !__lock_is_held(nest_lock)) 3176 if (nest_lock && !__lock_is_held(nest_lock))
3179 return print_lock_nested_lock_not_held(curr, hlock, ip); 3177 return print_lock_nested_lock_not_held(curr, hlock, ip);