author	Byungchul Park <byungchul.park@lge.com>	2017-08-07 03:12:51 -0400
committer	Ingo Molnar <mingo@kernel.org>	2017-08-10 06:29:06 -0400
commit	ce07a9415f266e181a0a33033a5f7138760240a4 (patch)
tree	29cb6ed4f40bd89847c0884c2178766704a68fdc
parent	70911fdc9576f4eeb3986689a1c9a778a4a4aacb (diff)
locking/lockdep: Make check_prev_add() able to handle external stack_trace
Currently, the storage for the stack_trace is pinned as a static variable
inside check_prev_add(), which makes it impossible to use an external
stack_trace. The simplest way to allow one would be to pass an external
stack_trace as an argument. A more flexible solution is to additionally
pass a callback along with the stack_trace, so that callers can decide
how to save it, or whether to save it at all. In fact, crossrelease needs
to do more than just save a stack_trace. So pass both a stack_trace and a
callback to handle it into check_prev_add().

Signed-off-by: Byungchul Park <byungchul.park@lge.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: akpm@linux-foundation.org
Cc: boqun.feng@gmail.com
Cc: kernel-team@lge.com
Cc: kirill@shutemov.name
Cc: npiggin@gmail.com
Cc: walken@google.com
Cc: willy@infradead.org
Link: http://lkml.kernel.org/r/1502089981-21272-5-git-send-email-byungchul.park@lge.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	kernel/locking/lockdep.c	40
1 file changed, 19 insertions(+), 21 deletions(-)
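To make the new calling convention concrete before reading the diff, here is a
minimal, self-contained sketch of the pattern in plain userspace C. All names
below (save_trace_stub, add_dependency) are illustrative stand-ins rather than
the kernel's, and the save-once policy in the loop mirrors what the patch adds
to check_prevs_add():

#include <stdio.h>

/* Simplified stand-in for the kernel's struct stack_trace. */
struct stack_trace {
	unsigned int nr_entries;
	unsigned long *entries;
};

/* One possible save policy: capture entries into static storage. */
static int save_trace_stub(struct stack_trace *trace)
{
	static unsigned long buf[] = { 0x1000, 0x2000, 0x3000 };

	trace->entries = buf;
	trace->nr_entries = 3;
	return 1;			/* non-zero means success */
}

/*
 * Callee in the new shape: it neither owns the trace storage nor
 * decides the save policy; both come from the caller.  A NULL
 * callback means "use *trace as-is, do not save".
 */
static int add_dependency(struct stack_trace *trace,
			  int (*save)(struct stack_trace *trace))
{
	if (save && !save(trace))
		return 0;		/* saving failed */

	/* ...record the new dependency, referencing *trace... */
	return 2;			/* success; *trace is now in use */
}

int main(void)
{
	struct stack_trace trace;
	int (*save)(struct stack_trace *trace) = save_trace_stub;
	int i;

	/* Save on the first successful add, then reuse the same trace. */
	for (i = 0; i < 3; i++) {
		int ret = add_dependency(&trace, save);

		if (!ret)
			return 1;
		if (save && ret == 2)
			save = NULL;
	}
	printf("trace saved once (%u entries), reused twice\n",
	       trace.nr_entries);
	return 0;
}

The design point is that the save-once optimization, previously hidden behind
a static variable and the *stack_saved flag, now lives entirely in the caller,
which is what allows a different caller to substitute its own storage and save
policy, or none at all.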
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 7cf02fab1725..841828ba35b9 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1824,20 +1824,13 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next, int distance, int *stack_saved)
+	       struct held_lock *next, int distance, struct stack_trace *trace,
+	       int (*save)(struct stack_trace *trace))
 {
 	struct lock_list *entry;
 	int ret;
 	struct lock_list this;
 	struct lock_list *uninitialized_var(target_entry);
-	/*
-	 * Static variable, serialized by the graph_lock().
-	 *
-	 * We use this static variable to save the stack trace in case
-	 * we call into this function multiple times due to encountering
-	 * trylocks in the held lock stack.
-	 */
-	static struct stack_trace trace;
 
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
@@ -1899,11 +1892,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 			return print_bfs_bug(ret);
 
 
-	if (!*stack_saved) {
-		if (!save_trace(&trace))
-			return 0;
-		*stack_saved = 1;
-	}
+	if (save && !save(trace))
+		return 0;
 
 	/*
 	 * Ok, all validations passed, add the new lock
@@ -1911,14 +1901,14 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	ret = add_lock_to_list(hlock_class(next),
 			       &hlock_class(prev)->locks_after,
-			       next->acquire_ip, distance, &trace);
+			       next->acquire_ip, distance, trace);
 
 	if (!ret)
 		return 0;
 
 	ret = add_lock_to_list(hlock_class(prev),
 			       &hlock_class(next)->locks_before,
-			       next->acquire_ip, distance, &trace);
+			       next->acquire_ip, distance, trace);
 	if (!ret)
 		return 0;
 
@@ -1926,8 +1916,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * Debugging printouts:
 	 */
 	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
-		/* We drop graph lock, so another thread can overwrite trace. */
-		*stack_saved = 0;
 		graph_unlock();
 		printk("\n new dependency: ");
 		print_lock_name(hlock_class(prev));
@@ -1951,8 +1939,9 @@ static int
 check_prevs_add(struct task_struct *curr, struct held_lock *next)
 {
 	int depth = curr->lockdep_depth;
-	int stack_saved = 0;
 	struct held_lock *hlock;
+	struct stack_trace trace;
+	int (*save)(struct stack_trace *trace) = save_trace;
 
 	/*
 	 * Debugging checks.
@@ -1977,9 +1966,18 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2 && hlock->check) {
-			if (!check_prev_add(curr, hlock, next,
-						distance, &stack_saved))
+			int ret = check_prev_add(curr, hlock, next,
+						distance, &trace, save);
+			if (!ret)
 				return 0;
+
+			/*
+			 * Stop saving stack_trace if save_trace() was
+			 * called at least once:
+			 */
+			if (save && ret == 2)
+				save = NULL;
+
 			/*
 			 * Stop after the first non-trylock entry,
 			 * as non-trylock entries have added their
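For context, a hedged sketch of the kind of external caller this change
enables. The function below is hypothetical (crossrelease's real caller is
introduced by a later patch in this series) and the distance value is assumed
purely for illustration:

/*
 * Hypothetical caller: the trace was captured or copied elsewhere, so
 * no save callback is passed and check_prev_add() uses the caller's
 * trace as-is instead of overwriting it.
 */
static int commit_saved_dependency(struct task_struct *curr,
				   struct held_lock *prev,
				   struct held_lock *next,
				   struct stack_trace *trace)
{
	return check_prev_add(curr, prev, next, 1 /* distance, assumed */,
			      trace, NULL);
}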