diff options
| author | Ming Lei <tom.leiming@gmail.com> | 2009-07-16 09:44:29 -0400 |
|---|---|---|
| committer | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2009-07-24 04:49:54 -0400 |
| commit | 24208ca76707581a097c01a73fd63781e73d3404 (patch) | |
| tree | 46d2e44f11dd0b3dda782d6e4467c700f6d03149 /kernel | |
| parent | d7aaba140a09b7a2295aec061629c880f088de3d (diff) | |
lockdep: Introduce print_shortest_lock_dependencies
Since the shortest lock-dependency path can be obtained via BFS,
we print that shortest path using print_shortest_lock_dependencies().
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-7-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/lockdep.c | 93 | ||||
| -rw-r--r-- | kernel/lockdep_internals.h | 4 |
2 files changed, 69 insertions, 28 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c index b3ade507c6ce..938dc500f1aa 100644 --- a/kernel/lockdep.c +++ b/kernel/lockdep.c | |||
| @@ -576,6 +576,36 @@ static void print_lock_class_header(struct lock_class *class, int depth) | |||
| 576 | } | 576 | } |
| 577 | 577 | ||
| 578 | /* | 578 | /* |
| 579 | * printk the shortest lock dependencies from @start to @end in reverse order: | ||
| 580 | */ | ||
| 581 | static void __used | ||
| 582 | print_shortest_lock_dependencies(struct lock_list *leaf, | ||
| 583 | struct lock_list *root) | ||
| 584 | { | ||
| 585 | struct lock_list *entry = leaf; | ||
| 586 | int depth; | ||
| 587 | |||
| 588 | /*compute depth from generated tree by BFS*/ | ||
| 589 | depth = get_lock_depth(leaf); | ||
| 590 | |||
| 591 | do { | ||
| 592 | print_lock_class_header(entry->class, depth); | ||
| 593 | printk("%*s ... acquired at:\n", depth, ""); | ||
| 594 | print_stack_trace(&entry->trace, 2); | ||
| 595 | printk("\n"); | ||
| 596 | |||
| 597 | if (depth == 0 && (entry != root)) { | ||
| 598 | printk("lockdep:%s bad BFS generated tree\n", __func__); | ||
| 599 | break; | ||
| 600 | } | ||
| 601 | |||
| 602 | entry = get_lock_parent(entry); | ||
| 603 | depth--; | ||
| 604 | } while (entry && (depth >= 0)); | ||
| 605 | |||
| 606 | return; | ||
| 607 | } | ||
| 608 | /* | ||
| 579 | * printk all lock dependencies starting at <entry>: | 609 | * printk all lock dependencies starting at <entry>: |
| 580 | */ | 610 | */ |
| 581 | static void __used | 611 | static void __used |
| @@ -992,7 +1022,7 @@ static inline int __bfs_backwards(struct lock_list *src_entry, | |||
| 992 | * has been detected): | 1022 | * has been detected): |
| 993 | */ | 1023 | */ |
| 994 | static noinline int | 1024 | static noinline int |
| 995 | print_circular_bug_entry(struct lock_list *target, unsigned int depth) | 1025 | print_circular_bug_entry(struct lock_list *target, int depth) |
| 996 | { | 1026 | { |
| 997 | if (debug_locks_silent) | 1027 | if (debug_locks_silent) |
| 998 | return 0; | 1028 | return 0; |
| @@ -1047,7 +1077,7 @@ static noinline int print_circular_bug(struct lock_list *this, | |||
| 1047 | { | 1077 | { |
| 1048 | struct task_struct *curr = current; | 1078 | struct task_struct *curr = current; |
| 1049 | struct lock_list *parent; | 1079 | struct lock_list *parent; |
| 1050 | unsigned long depth; | 1080 | int depth; |
| 1051 | 1081 | ||
| 1052 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) | 1082 | if (!debug_locks_off_graph_unlock() || debug_locks_silent) |
| 1053 | return 0; | 1083 | return 0; |
| @@ -1169,7 +1199,6 @@ check_noncircular(struct lock_list *root, struct lock_class *target, | |||
| 1169 | * proving that two subgraphs can be connected by a new dependency | 1199 | * proving that two subgraphs can be connected by a new dependency |
| 1170 | * without creating any illegal irq-safe -> irq-unsafe lock dependency. | 1200 | * without creating any illegal irq-safe -> irq-unsafe lock dependency. |
| 1171 | */ | 1201 | */ |
| 1172 | static struct lock_class *forwards_match, *backwards_match; | ||
| 1173 | 1202 | ||
| 1174 | 1203 | ||
| 1175 | #define BFS_PROCESS_RET(ret) do { \ | 1204 | #define BFS_PROCESS_RET(ret) do { \ |
| @@ -1235,6 +1264,10 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit, | |||
| 1235 | 1264 | ||
| 1236 | static int | 1265 | static int |
| 1237 | print_bad_irq_dependency(struct task_struct *curr, | 1266 | print_bad_irq_dependency(struct task_struct *curr, |
| 1267 | struct lock_list *prev_root, | ||
| 1268 | struct lock_list *next_root, | ||
| 1269 | struct lock_list *backwards_entry, | ||
| 1270 | struct lock_list *forwards_entry, | ||
| 1238 | struct held_lock *prev, | 1271 | struct held_lock *prev, |
| 1239 | struct held_lock *next, | 1272 | struct held_lock *next, |
| 1240 | enum lock_usage_bit bit1, | 1273 | enum lock_usage_bit bit1, |
| @@ -1267,26 +1300,32 @@ print_bad_irq_dependency(struct task_struct *curr, | |||
| 1267 | 1300 | ||
| 1268 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", | 1301 | printk("\nbut this new dependency connects a %s-irq-safe lock:\n", |
| 1269 | irqclass); | 1302 | irqclass); |
| 1270 | print_lock_name(backwards_match); | 1303 | print_lock_name(backwards_entry->class); |
| 1271 | printk("\n... which became %s-irq-safe at:\n", irqclass); | 1304 | printk("\n... which became %s-irq-safe at:\n", irqclass); |
| 1272 | 1305 | ||
| 1273 | print_stack_trace(backwards_match->usage_traces + bit1, 1); | 1306 | print_stack_trace(backwards_entry->class->usage_traces + bit1, 1); |
| 1274 | 1307 | ||
| 1275 | printk("\nto a %s-irq-unsafe lock:\n", irqclass); | 1308 | printk("\nto a %s-irq-unsafe lock:\n", irqclass); |
| 1276 | print_lock_name(forwards_match); | 1309 | print_lock_name(forwards_entry->class); |
| 1277 | printk("\n... which became %s-irq-unsafe at:\n", irqclass); | 1310 | printk("\n... which became %s-irq-unsafe at:\n", irqclass); |
| 1278 | printk("..."); | 1311 | printk("..."); |
| 1279 | 1312 | ||
| 1280 | print_stack_trace(forwards_match->usage_traces + bit2, 1); | 1313 | print_stack_trace(forwards_entry->class->usage_traces + bit2, 1); |
| 1281 | 1314 | ||
| 1282 | printk("\nother info that might help us debug this:\n\n"); | 1315 | printk("\nother info that might help us debug this:\n\n"); |
| 1283 | lockdep_print_held_locks(curr); | 1316 | lockdep_print_held_locks(curr); |
| 1284 | 1317 | ||
| 1285 | printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass); | 1318 | printk("\nthe dependencies between %s-irq-safe lock", irqclass); |
| 1286 | print_lock_dependencies(backwards_match, 0); | 1319 | printk(" and the holding lock:\n"); |
| 1320 | if (!save_trace(&prev_root->trace)) | ||
| 1321 | return 0; | ||
| 1322 | print_shortest_lock_dependencies(backwards_entry, prev_root); | ||
| 1287 | 1323 | ||
| 1288 | printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass); | 1324 | printk("\nthe dependencies between the lock to be acquired"); |
| 1289 | print_lock_dependencies(forwards_match, 0); | 1325 | printk(" and %s-irq-unsafe lock:\n", irqclass); |
| 1326 | if (!save_trace(&next_root->trace)) | ||
| 1327 | return 0; | ||
| 1328 | print_shortest_lock_dependencies(forwards_entry, next_root); | ||
| 1290 | 1329 | ||
| 1291 | printk("\nstack backtrace:\n"); | 1330 | printk("\nstack backtrace:\n"); |
| 1292 | dump_stack(); | 1331 | dump_stack(); |
| @@ -1300,22 +1339,24 @@ check_usage(struct task_struct *curr, struct held_lock *prev, | |||
| 1300 | enum lock_usage_bit bit_forwards, const char *irqclass) | 1339 | enum lock_usage_bit bit_forwards, const char *irqclass) |
| 1301 | { | 1340 | { |
| 1302 | int ret; | 1341 | int ret; |
| 1303 | struct lock_list this; | 1342 | struct lock_list this, that; |
| 1304 | struct lock_list *uninitialized_var(target_entry); | 1343 | struct lock_list *uninitialized_var(target_entry); |
| 1344 | struct lock_list *uninitialized_var(target_entry1); | ||
| 1305 | 1345 | ||
| 1306 | this.parent = NULL; | 1346 | this.parent = NULL; |
| 1307 | 1347 | ||
| 1308 | this.class = hlock_class(prev); | 1348 | this.class = hlock_class(prev); |
| 1309 | ret = find_usage_backwards(&this, bit_backwards, &target_entry); | 1349 | ret = find_usage_backwards(&this, bit_backwards, &target_entry); |
| 1310 | BFS_PROCESS_RET(ret); | 1350 | BFS_PROCESS_RET(ret); |
| 1311 | backwards_match = target_entry->class; | ||
| 1312 | 1351 | ||
| 1313 | this.class = hlock_class(next); | 1352 | that.parent = NULL; |
| 1314 | ret = find_usage_forwards(&this, bit_forwards, &target_entry); | 1353 | that.class = hlock_class(next); |
| 1354 | ret = find_usage_forwards(&that, bit_forwards, &target_entry1); | ||
| 1315 | BFS_PROCESS_RET(ret); | 1355 | BFS_PROCESS_RET(ret); |
| 1316 | forwards_match = target_entry->class; | ||
| 1317 | 1356 | ||
| 1318 | return print_bad_irq_dependency(curr, prev, next, | 1357 | return print_bad_irq_dependency(curr, &this, &that, |
| 1358 | target_entry, target_entry1, | ||
| 1359 | prev, next, | ||
| 1319 | bit_backwards, bit_forwards, irqclass); | 1360 | bit_backwards, bit_forwards, irqclass); |
| 1320 | } | 1361 | } |
| 1321 | 1362 | ||
| @@ -1944,7 +1985,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this, | |||
| 1944 | * print irq inversion bug: | 1985 | * print irq inversion bug: |
| 1945 | */ | 1986 | */ |
| 1946 | static int | 1987 | static int |
| 1947 | print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | 1988 | print_irq_inversion_bug(struct task_struct *curr, |
| 1989 | struct lock_list *root, struct lock_list *other, | ||
| 1948 | struct held_lock *this, int forwards, | 1990 | struct held_lock *this, int forwards, |
| 1949 | const char *irqclass) | 1991 | const char *irqclass) |
| 1950 | { | 1992 | { |
| @@ -1962,17 +2004,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, | |||
| 1962 | printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); | 2004 | printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); |
| 1963 | else | 2005 | else |
| 1964 | printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); | 2006 | printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); |
| 1965 | print_lock_name(other); | 2007 | print_lock_name(other->class); |
| 1966 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); | 2008 | printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); |
| 1967 | 2009 | ||
| 1968 | printk("\nother info that might help us debug this:\n"); | 2010 | printk("\nother info that might help us debug this:\n"); |
| 1969 | lockdep_print_held_locks(curr); | 2011 | lockdep_print_held_locks(curr); |
| 1970 | 2012 | ||
| 1971 | printk("\nthe first lock's dependencies:\n"); | 2013 | printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n"); |
| 1972 | print_lock_dependencies(hlock_class(this), 0); | 2014 | if (!save_trace(&root->trace)) |
| 1973 | 2015 | return 0; | |
| 1974 | printk("\nthe second lock's dependencies:\n"); | 2016 | print_shortest_lock_dependencies(other, root); |
| 1975 | print_lock_dependencies(other, 0); | ||
| 1976 | 2017 | ||
| 1977 | printk("\nstack backtrace:\n"); | 2018 | printk("\nstack backtrace:\n"); |
| 1978 | dump_stack(); | 2019 | dump_stack(); |
| @@ -1997,7 +2038,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this, | |||
| 1997 | ret = find_usage_forwards(&root, bit, &target_entry); | 2038 | ret = find_usage_forwards(&root, bit, &target_entry); |
| 1998 | BFS_PROCESS_RET(ret); | 2039 | BFS_PROCESS_RET(ret); |
| 1999 | 2040 | ||
| 2000 | return print_irq_inversion_bug(curr, target_entry->class, | 2041 | return print_irq_inversion_bug(curr, &root, target_entry, |
| 2001 | this, 1, irqclass); | 2042 | this, 1, irqclass); |
| 2002 | } | 2043 | } |
| 2003 | 2044 | ||
| @@ -2018,7 +2059,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this, | |||
| 2018 | ret = find_usage_backwards(&root, bit, &target_entry); | 2059 | ret = find_usage_backwards(&root, bit, &target_entry); |
| 2019 | BFS_PROCESS_RET(ret); | 2060 | BFS_PROCESS_RET(ret); |
| 2020 | 2061 | ||
| 2021 | return print_irq_inversion_bug(curr, target_entry->class, | 2062 | return print_irq_inversion_bug(curr, &root, target_entry, |
| 2022 | this, 1, irqclass); | 2063 | this, 1, irqclass); |
| 2023 | } | 2064 | } |
| 2024 | 2065 | ||
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h index c2f6594966f3..b115aaa0bf35 100644 --- a/kernel/lockdep_internals.h +++ b/kernel/lockdep_internals.h | |||
| @@ -219,9 +219,9 @@ static inline struct lock_list *get_lock_parent(struct lock_list *child) | |||
| 219 | return child->parent; | 219 | return child->parent; |
| 220 | } | 220 | } |
| 221 | 221 | ||
| 222 | static inline unsigned long get_lock_depth(struct lock_list *child) | 222 | static inline int get_lock_depth(struct lock_list *child) |
| 223 | { | 223 | { |
| 224 | unsigned long depth = 0; | 224 | int depth = 0; |
| 225 | struct lock_list *parent; | 225 | struct lock_list *parent; |
| 226 | 226 | ||
| 227 | while ((parent = get_lock_parent(child))) { | 227 | while ((parent = get_lock_parent(child))) { |
