author	Dmitry Vyukov <dvyukov@google.com>	2016-02-04 08:40:40 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-02-09 05:06:08 -0500
commit	8a5fd56431fe1682e870bd6ab0c276e74befbeb9 (patch)
tree	bc2047bcdb5f7a29656deeaee113b8223b5246db
parent	765bdb406de4b6132e349c5d4e077866536a9cc0 (diff)
locking/lockdep: Fix stack trace caching logic
check_prev_add() caches the saved stack trace in a static trace variable to avoid duplicate save_trace() calls in dependencies involving trylocks. But that caching logic contains a bug: we may not save the trace on the first iteration due to an early return from check_prev_add(). Then, on the second iteration, when we actually need the trace, we don't save it because we think we have already saved it. Let check_prev_add() itself control when the stack is saved.

There is another bug: the trace variable is protected by the graph lock, but we can temporarily release the graph lock while printing. Fix this by invalidating the cached stack trace whenever we release the graph lock.

Signed-off-by: Dmitry Vyukov <dvyukov@google.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: glider@google.com
Cc: kcc@google.com
Cc: peter@hurleysoftware.com
Cc: sasha.levin@oracle.com
Link: http://lkml.kernel.org/r/1454593240-121647-1-git-send-email-dvyukov@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
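For illustration, here is a minimal standalone sketch of the caching pattern the patch adopts: the caller owns the "saved" flag, the callee sets it only after a successful save, and the flag is invalidated whenever the protecting lock is dropped. This is not the kernel code; names such as do_check(), save_expensive() and the pthread mutex are hypothetical stand-ins for check_prev_add(), save_trace() and lockdep's graph lock.

/*
 * Sketch of caller-controlled, lazily saved, lock-protected caching.
 * Build with: gcc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;
static int cached_trace;		/* protected by graph_lock */

static int save_expensive(void)
{
	cached_trace = 42;		/* stands in for save_trace() */
	return 1;			/* 0 would mean "out of space" */
}

/* Mirrors check_prev_add(): may bail out before the trace is needed. */
static int do_check(int need_trace, int *saved)
{
	if (!need_trace)
		return 1;		/* early return: nothing cached yet */

	if (!*saved) {			/* save lazily, exactly when needed */
		if (!save_expensive())
			return 0;
		*saved = 1;
	}

	printf("using cached trace %d\n", cached_trace);

	/* If we drop the lock, another thread may clobber the cache. */
	*saved = 0;
	pthread_mutex_unlock(&graph_lock);
	pthread_mutex_lock(&graph_lock);

	return 1;
}

int main(void)
{
	int saved = 0;			/* caller-owned, like stack_saved */

	pthread_mutex_lock(&graph_lock);
	do_check(0, &saved);		/* early return: must not mark saved */
	do_check(1, &saved);		/* first real use: saves the trace */
	do_check(1, &saved);		/* lock was dropped: saves again */
	pthread_mutex_unlock(&graph_lock);
	return 0;
}

Note how the early-return path never touches the flag, which is exactly what the buggy trylock_loop logic got wrong: it marked the trace as saved based on loop position rather than on whether save_trace() had actually run.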
-rw-r--r--	kernel/locking/lockdep.c	16
1 file changed, 10 insertions(+), 6 deletions(-)
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 60ace56618f6..c7710e4092ef 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1822,7 +1822,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, int trylock_loop)
+	       struct held_lock *next, int distance, int *stack_saved)
 {
 	struct lock_list *entry;
 	int ret;
@@ -1883,8 +1883,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 		}
 	}
 
-	if (!trylock_loop && !save_trace(&trace))
-		return 0;
+	if (!*stack_saved) {
+		if (!save_trace(&trace))
+			return 0;
+		*stack_saved = 1;
+	}
 
1889 /* 1892 /*
1890 * Ok, all validations passed, add the new lock 1893 * Ok, all validations passed, add the new lock
@@ -1907,6 +1910,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * Debugging printouts:
 	 */
 	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
+		/* We drop graph lock, so another thread can overwrite trace. */
+		*stack_saved = 0;
 		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(hlock_class(prev));
@@ -1929,7 +1934,7 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
 	int depth = curr->lockdep_depth;
-	int trylock_loop = 0;
+	int stack_saved = 0;
 	struct held_lock *hlock;
 
 	/*
@@ -1956,7 +1961,7 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 */
 		if (hlock->read != 2 && hlock->check) {
 			if (!check_prev_add(curr, hlock, next,
-					distance, trylock_loop))
+					distance, &stack_saved))
 				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
@@ -1979,7 +1984,6 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		if (curr->held_locks[depth].irq_context !=
 		    curr->held_locks[depth-1].irq_context)
 			break;
-		trylock_loop = 1;
 	}
 	return 1;
 out_bug: