path: root/kernel/locking/lockdep.c
author		Ingo Molnar <mingo@kernel.org>	2016-02-29 04:03:58 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-02-29 04:32:29 -0500
commit		9e4e7554e755aaad8df0e26268787438735b4b76 (patch)
tree		df7a17c7b998c32e9c5a5dbd99b391aa25e7aa9b /kernel/locking/lockdep.c
parent		5f18ab5c6bdba735b31a1e7c2618f48eae6b1b5c (diff)
locking/lockdep: Detect chain_key collisions
Add detection for chain_key collision under CONFIG_DEBUG_LOCKDEP.
When a collision is detected the problem is reported and all lock
debugging is turned off.

Tested using liblockdep and the added tests before and after
applying the fix, confirming both that the code added for the
detection correctly reports the problem and that the fix actually
fixes it.

Tested tweaking lockdep to generate false collisions and verified
that the problem is reported and that lock debugging is turned off.

Also tested with lockdep's test suite after applying the patch:

    [    0.000000] Good, all 253 testcases passed! |

Signed-off-by: Alfredo Alvarez Fernandez <alfredoalvarezernandez@gmail.com>
Cc: Alfredo Alvarez Fernandez <alfredoalvarezfernandez@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: sasha.levin@oracle.com
Link: http://lkml.kernel.org/r/1455864533-7536-4-git-send-email-alfredoalvarezernandez@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r--	kernel/locking/lockdep.c	59
1 file changed, 51 insertions(+), 8 deletions(-)
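
Before the diff: the failure mode being guarded against can be modeled outside the kernel. lockdep caches each dependency chain under a 64-bit chain_key folded from the sequence of held lock-class indices, and two different sequences can in principle fold to the same key, leaving a cached chain that no longer describes the locks actually held. The user-space sketch below is illustrative only: toy_chain_key() and toy_check_no_collision() are hypothetical names, and the deliberately weak shift-xor mix merely stands in for lockdep's real iterate_chain_key().

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for lockdep's key mixing; the real
 * iterate_chain_key() in kernel/locking/lockdep.c differs. */
static uint64_t toy_chain_key(const int *class_idx, int depth)
{
	uint64_t key = 0;
	int i;

	for (i = 0; i < depth; i++)
		key = (key << 7) ^ (uint64_t)class_idx[i]; /* weak on purpose */
	return key;
}

/* Models the idea of the patch: on a hash hit, re-compare the cached
 * chain's depth and per-entry class indices against the live stack. */
static int toy_check_no_collision(const int *cached, int cached_depth,
				  const int *held, int held_depth)
{
	int i;

	if (cached_depth != held_depth)
		return 0;			/* depth mismatch */
	for (i = 0; i < cached_depth; i++)
		if (cached[i] != held[i])
			return 0;		/* content mismatch */
	return 1;
}

int main(void)
{
	int a[2] = { 1, 256 };	/* two different lock sequences that */
	int b[2] = { 3, 0 };	/* this weak mix folds to the same key */

	printf("key(a)=%llu key(b)=%llu\n",
	       (unsigned long long)toy_chain_key(a, 2),
	       (unsigned long long)toy_chain_key(b, 2));
	printf("consistent: %d\n", toy_check_no_collision(a, 2, b, 2));
	return 0;
}

Both sequences fold to 384 here, yet the content check returns 0: a hash hit whose cached chain does not match the held locks, which is exactly the condition the patch reports under CONFIG_DEBUG_LOCKDEP before turning lock debugging off.
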
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 6f446eb3c742..f894a2cd9b2a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1982,6 +1982,53 @@ struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i)
 }
 
 /*
+ * Returns the index of the first held_lock of the current chain
+ */
+static inline int get_first_held_lock(struct task_struct *curr,
+					struct held_lock *hlock)
+{
+	int i;
+	struct held_lock *hlock_curr;
+
+	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
+		hlock_curr = curr->held_locks + i;
+		if (hlock_curr->irq_context != hlock->irq_context)
+			break;
+
+	}
+
+	return ++i;
+}
+
+/*
+ * Checks whether the chain and the current held locks are consistent
+ * in depth and also in content. If they are not it most likely means
+ * that there was a collision during the calculation of the chain_key.
+ * Returns: 0 not passed, 1 passed
+ */
+static int check_no_collision(struct task_struct *curr,
+			struct held_lock *hlock,
+			struct lock_chain *chain)
+{
+#ifdef CONFIG_DEBUG_LOCKDEP
+	int i, j, id;
+
+	i = get_first_held_lock(curr, hlock);
+
+	if (DEBUG_LOCKS_WARN_ON(chain->depth != curr->lockdep_depth - (i - 1)))
+		return 0;
+
+	for (j = 0; j < chain->depth - 1; j++, i++) {
+		id = curr->held_locks[i].class_idx - 1;
+
+		if (DEBUG_LOCKS_WARN_ON(chain_hlocks[chain->base + j] != id))
+			return 0;
+	}
+#endif
+	return 1;
+}
+
+/*
  * Look up a dependency chain. If the key is not present yet then
  * add it and return 1 - in this case the new dependency chain is
  * validated. If the key is already hashed, return 0.
@@ -1994,7 +2041,6 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	struct lock_class *class = hlock_class(hlock);
 	struct hlist_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
-	struct held_lock *hlock_curr;
 	int i, j;
 
 	/*
@@ -2012,6 +2058,9 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 	if (chain->chain_key == chain_key) {
 cache_hit:
 		debug_atomic_inc(chain_lookup_hits);
+		if (!check_no_collision(curr, hlock, chain))
+			return 0;
+
 		if (very_verbose(class))
 			printk("\nhash chain already cached, key: "
 					"%016Lx tail class: [%p] %s\n",
@@ -2049,13 +2098,7 @@ cache_hit:
 	chain = lock_chains + nr_lock_chains++;
 	chain->chain_key = chain_key;
 	chain->irq_context = hlock->irq_context;
-	/* Find the first held_lock of current chain */
-	for (i = curr->lockdep_depth - 1; i >= 0; i--) {
-		hlock_curr = curr->held_locks + i;
-		if (hlock_curr->irq_context != hlock->irq_context)
-			break;
-	}
-	i++;
+	i = get_first_held_lock(curr, hlock);
 	chain->depth = curr->lockdep_depth + 1 - i;
 	if (likely(nr_chain_hlocks + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
 		chain->base = nr_chain_hlocks;
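
A note on the depth arithmetic the two paths share: curr->lockdep_depth counts the locks already in held_locks[], not the one currently being acquired. With hypothetical values, if lockdep_depth is 4 and get_first_held_lock() returns i = 2, the chain covers held_locks[2..3] plus the incoming lock, so chain->depth = 4 + 1 - 2 = 3; check_no_collision() tests the same identity written as curr->lockdep_depth - (i - 1). A minimal standalone check of that invariant:

#include <assert.h>

int main(void)
{
	int lockdep_depth = 4;	/* held_locks[0..3] already populated */
	int i = 2;		/* first held_lock index in this irq_context */

	/* Depth stored when the chain is first cached: entries
	 * i..lockdep_depth-1 plus the lock being acquired. */
	int depth = lockdep_depth + 1 - i;

	/* The cache-hit consistency check uses the same identity. */
	assert(depth == lockdep_depth - (i - 1));
	return 0;
}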