diff options
Diffstat (limited to 'kernel/locking/lockdep.c')
-rw-r--r-- | kernel/locking/lockdep.c | 48 |
1 file changed, 20 insertions, 28 deletions
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c index 44c8d0d17170..e36e652d996f 100644 --- a/kernel/locking/lockdep.c +++ b/kernel/locking/lockdep.c | |||
@@ -1873,10 +1873,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1873 | struct held_lock *next, int distance, struct stack_trace *trace, | 1873 | struct held_lock *next, int distance, struct stack_trace *trace, |
1874 | int (*save)(struct stack_trace *trace)) | 1874 | int (*save)(struct stack_trace *trace)) |
1875 | { | 1875 | { |
1876 | struct lock_list *uninitialized_var(target_entry); | ||
1876 | struct lock_list *entry; | 1877 | struct lock_list *entry; |
1877 | int ret; | ||
1878 | struct lock_list this; | 1878 | struct lock_list this; |
1879 | struct lock_list *uninitialized_var(target_entry); | 1879 | int ret; |
1880 | 1880 | ||
1881 | /* | 1881 | /* |
1882 | * Prove that the new <prev> -> <next> dependency would not | 1882 | * Prove that the new <prev> -> <next> dependency would not |
@@ -1890,8 +1890,17 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1890 | this.class = hlock_class(next); | 1890 | this.class = hlock_class(next); |
1891 | this.parent = NULL; | 1891 | this.parent = NULL; |
1892 | ret = check_noncircular(&this, hlock_class(prev), &target_entry); | 1892 | ret = check_noncircular(&this, hlock_class(prev), &target_entry); |
1893 | if (unlikely(!ret)) | 1893 | if (unlikely(!ret)) { |
1894 | if (!trace->entries) { | ||
1895 | /* | ||
1896 | * If @save fails here, the printing might trigger | ||
1897 | * a WARN but because of the !nr_entries it should | ||
1898 | * not do bad things. | ||
1899 | */ | ||
1900 | save(trace); | ||
1901 | } | ||
1894 | return print_circular_bug(&this, target_entry, next, prev, trace); | 1902 | return print_circular_bug(&this, target_entry, next, prev, trace); |
1903 | } | ||
1895 | else if (unlikely(ret < 0)) | 1904 | else if (unlikely(ret < 0)) |
1896 | return print_bfs_bug(ret); | 1905 | return print_bfs_bug(ret); |
1897 | 1906 | ||
@@ -1938,7 +1947,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1938 | return print_bfs_bug(ret); | 1947 | return print_bfs_bug(ret); |
1939 | 1948 | ||
1940 | 1949 | ||
1941 | if (save && !save(trace)) | 1950 | if (!trace->entries && !save(trace)) |
1942 | return 0; | 1951 | return 0; |
1943 | 1952 | ||
1944 | /* | 1953 | /* |
@@ -1958,20 +1967,6 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev, | |||
1958 | if (!ret) | 1967 | if (!ret) |
1959 | return 0; | 1968 | return 0; |
1960 | 1969 | ||
1961 | /* | ||
1962 | * Debugging printouts: | ||
1963 | */ | ||
1964 | if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) { | ||
1965 | graph_unlock(); | ||
1966 | printk("\n new dependency: "); | ||
1967 | print_lock_name(hlock_class(prev)); | ||
1968 | printk(KERN_CONT " => "); | ||
1969 | print_lock_name(hlock_class(next)); | ||
1970 | printk(KERN_CONT "\n"); | ||
1971 | dump_stack(); | ||
1972 | if (!graph_lock()) | ||
1973 | return 0; | ||
1974 | } | ||
1975 | return 2; | 1970 | return 2; |
1976 | } | 1971 | } |
1977 | 1972 | ||
@@ -1986,8 +1981,12 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
1986 | { | 1981 | { |
1987 | int depth = curr->lockdep_depth; | 1982 | int depth = curr->lockdep_depth; |
1988 | struct held_lock *hlock; | 1983 | struct held_lock *hlock; |
1989 | struct stack_trace trace; | 1984 | struct stack_trace trace = { |
1990 | int (*save)(struct stack_trace *trace) = save_trace; | 1985 | .nr_entries = 0, |
1986 | .max_entries = 0, | ||
1987 | .entries = NULL, | ||
1988 | .skip = 0, | ||
1989 | }; | ||
1991 | 1990 | ||
1992 | /* | 1991 | /* |
1993 | * Debugging checks. | 1992 | * Debugging checks. |
@@ -2018,18 +2017,11 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next) | |||
2018 | */ | 2017 | */ |
2019 | if (hlock->read != 2 && hlock->check) { | 2018 | if (hlock->read != 2 && hlock->check) { |
2020 | int ret = check_prev_add(curr, hlock, next, | 2019 | int ret = check_prev_add(curr, hlock, next, |
2021 | distance, &trace, save); | 2020 | distance, &trace, save_trace); |
2022 | if (!ret) | 2021 | if (!ret) |
2023 | return 0; | 2022 | return 0; |
2024 | 2023 | ||
2025 | /* | 2024 | /* |
2026 | * Stop saving stack_trace if save_trace() was | ||
2027 | * called at least once: | ||
2028 | */ | ||
2029 | if (save && ret == 2) | ||
2030 | save = NULL; | ||
2031 | |||
2032 | /* | ||
2033 | * Stop after the first non-trylock entry, | 2025 | * Stop after the first non-trylock entry, |
2034 | * as non-trylock entries have added their | 2026 | * as non-trylock entries have added their |
2035 | * own direct dependencies already, so this | 2027 | * own direct dependencies already, so this |