author     Ming Lei <tom.leiming@gmail.com>          2009-07-16 09:44:29 -0400
committer  Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-07-24 04:49:50 -0400
commit     db0002a32f31060ca900b533d93a074ddf7d5b61 (patch)
tree       fb33cb30c852e6dc22ed66fd82d2daa1d5b5206e /kernel/lockdep.c
parent     9e2d551ea0d767c0d624965f0c273e942f4be536 (diff)
lockdep: Implement check_noncircular() by BFS
This patch uses BFS to implement check_noncircular() and prints the
shortest circle found, if one exists.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-5-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c | 89
1 file changed, 37 insertions(+), 52 deletions(-)
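The idea behind the change, before the diff itself: a new dependency <prev> -> <next> is safe only if <prev> is not already reachable from <next> in the dependency graph. Doing that reachability check with BFS instead of the old depth-first recursion means the first path found is also the shortest, so the report can show a minimal circle. A stand-alone user-space sketch of the idea (every name here is invented for illustration, none is lockdep's):

#include <stdio.h>

#define MAX_NODES 8

/* Toy dependency graph: edge[a][b] set means "lock a was taken before
 * lock b". */
static int edge[MAX_NODES][MAX_NODES];
static int parent[MAX_NODES];

/*
 * Breadth-first search from src.  Returns 1 when dst is reachable, in
 * which case a new dst -> src dependency would close a circle.  BFS
 * visits nodes in order of distance from src, so the parent[] chain it
 * leaves behind describes the shortest such circle -- the property the
 * commit exploits to print a minimal report.
 */
static int reaches(int n, int src, int dst)
{
	int queue[MAX_NODES];		/* each node is enqueued at most once */
	int seen[MAX_NODES] = { 0 };
	int head = 0, tail = 0;

	parent[src] = -1;
	seen[src] = 1;
	queue[tail++] = src;

	while (head < tail) {
		int cur = queue[head++];

		if (cur == dst)
			return 1;
		for (int next = 0; next < n; next++) {
			if (edge[cur][next] && !seen[next]) {
				seen[next] = 1;
				parent[next] = cur;
				queue[tail++] = next;
			}
		}
	}
	return 0;
}

int main(void)
{
	/* 0 -> 1 -> 2, plus a longer detour 0 -> 3 -> 4 -> 2. */
	edge[0][1] = edge[1][2] = 1;
	edge[0][3] = edge[3][4] = edge[4][2] = 1;

	if (reaches(5, 0, 2)) {		/* would "2 before 0" deadlock? */
		printf("shortest circle (reverse):");
		for (int k = 2; k != -1; k = parent[k])
			printf(" %d", k);
		printf("\n");		/* prints "2 1 0", not the detour */
	}
	return 0;
}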
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index ce6d09e65ad1..5609d309d568 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -985,12 +985,7 @@ static inline int __bfs_backward(struct lock_list *src_entry,
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
  * checking.
- *
- * (to keep the stackframe of the recursive functions small we
- * use these global variables, and we also mark various helper
- * functions as noinline.)
  */
-static struct held_lock *check_source, *check_target;
 
 /*
  * Print a dependency chain entry (this is only done when a deadlock
@@ -1014,7 +1009,9 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
  * header first:
  */
 static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth)
+print_circular_bug_header(struct lock_list *entry, unsigned int depth,
+			struct held_lock *check_src,
+			struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 
@@ -1027,9 +1024,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	printk( "-------------------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
-	print_lock(check_source);
+	print_lock(check_src);
 	printk("\nbut task is already holding lock:\n");
-	print_lock(check_target);
+	print_lock(check_tgt);
 	printk("\nwhich lock already depends on the new lock.\n\n");
 	printk("\nthe existing dependency chain (in reverse order) is:\n");
 
@@ -1043,36 +1040,24 @@ static inline int class_equal(struct lock_list *entry, void *data)
 	return entry->class == data;
 }
 
-static noinline int print_circular_bug(void)
+static noinline int print_circular_bug(struct lock_list *this,
+				struct lock_list *target,
+				struct held_lock *check_src,
+				struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
-	struct lock_list this;
-	struct lock_list *target;
 	struct lock_list *parent;
-	int result;
 	unsigned long depth;
 
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	this.class = hlock_class(check_source);
-	this.parent = NULL;
-	if (!save_trace(&this.trace))
+	if (!save_trace(&this->trace))
 		return 0;
 
-	result = __bfs_forward(&this,
-			hlock_class(check_target),
-			class_equal,
-			&target);
-	if (result) {
-		printk("\n%s:search shortest path failed:%d\n", __func__,
-			result);
-		return 0;
-	}
-
 	depth = get_lock_depth(target);
 
-	print_circular_bug_header(target, depth);
+	print_circular_bug_header(target, depth, check_src, check_tgt);
 
 	parent = get_lock_parent(target);
 
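With the search moved out of it, print_circular_bug() only consumes the result: the BFS leaves a parent pointer in every lock_list it visits, and both get_lock_depth() and the printing loop presumably just walk that chain back to the root. A minimal user-space sketch of such a walk, with invented *_demo names standing in for the kernel's types:

#include <stdio.h>

/* Illustrative stand-in for lock_list: only the BFS parent link and a
 * label for printing. */
struct lock_list_demo {
	struct lock_list_demo *parent;
	const char *name;
};

/* Depth = number of parent links from the matched target back to the
 * BFS root (whose parent is NULL). */
static unsigned long get_lock_depth_demo(struct lock_list_demo *child)
{
	unsigned long depth = 0;

	while (child->parent) {
		child = child->parent;
		depth++;
	}
	return depth;
}

int main(void)
{
	struct lock_list_demo a = { NULL, "A" };	/* BFS root */
	struct lock_list_demo b = { &a, "B" };
	struct lock_list_demo c = { &b, "C" };		/* matched target */

	printf("depth=%lu, chain (reverse):", get_lock_depth_demo(&c));
	for (struct lock_list_demo *p = &c; p; p = p->parent)
		printf(" %s", p->name);
	printf("\n");
	return 0;
}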
@@ -1090,6 +1075,16 @@ static noinline int print_circular_bug(void)
 	return 0;
 }
 
+static noinline int print_bfs_bug(int ret)
+{
+	if (!debug_locks_off_graph_unlock())
+		return 0;
+
+	WARN(1, "lockdep bfs error:%d\n", ret);
+
+	return 0;
+}
+
 #define RECURSION_LIMIT 40
 
 static int noinline print_infinite_recursion_bug(void)
@@ -1168,31 +1163,17 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
  * lead to <target>. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_class *source, unsigned int depth)
+check_noncircular(struct lock_list *root, struct lock_class *target,
+		struct lock_list **target_entry)
 {
-	struct lock_list *entry;
+	int result;
 
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
+	debug_atomic_inc(&nr_cyclic_checks);
 
-	debug_atomic_inc(&nr_cyclic_check_recursions);
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_after, entry) {
-		if (entry->class == hlock_class(check_target))
-			return 2;
-		debug_atomic_inc(&nr_cyclic_checks);
-		if (check_noncircular(entry->class, depth+1) == 2)
-			return 2;
-	}
-	return 1;
-}
+	result = __bfs_forward(root, target, class_equal, target_entry);
 
+	return result;
+}
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
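check_noncircular() is now a thin wrapper: the graph walk, the match callback (class_equal() above), and the out-parameter for the matched entry all belong to __bfs_forward(). The callback-plus-out-parameter idiom, reduced to a user-space sketch in which a plain linear scan stands in for the graph walk (the *_demo names are invented):

#include <stdio.h>

struct entry_demo { void *class; };

static int class_equal_demo(struct entry_demo *entry, void *data)
{
	/* Lock classes are singletons, so identity comparison suffices. */
	return entry->class == data;
}

/* A match is handed back through the out parameter, keeping the
 * wrapper itself as thin as the new check_noncircular(). */
static int find_demo(struct entry_demo *entries, int n, void *data,
		     struct entry_demo **target_entry)
{
	for (int i = 0; i < n; i++) {
		if (class_equal_demo(&entries[i], data)) {
			*target_entry = &entries[i];
			return 0;		/* match found */
		}
	}
	return 1;				/* no match */
}

int main(void)
{
	int key;
	struct entry_demo entries[2] = { { NULL }, { &key } };
	struct entry_demo *target = NULL;

	if (find_demo(entries, 2, &key, &target) == 0)
		printf("matched entry at index %d\n", (int)(target - entries));
	return 0;
}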
@@ -1586,6 +1567,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 {
 	struct lock_list *entry;
 	int ret;
+	struct lock_list this;
+	struct lock_list *uninitialized_var(target_entry);
 
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
@@ -1596,11 +1579,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * We are using global variables to control the recursion, to
 	 * keep the stackframe size of the recursive functions low:
 	 */
-	check_source = next;
-	check_target = prev;
-
-	if (check_noncircular(hlock_class(next), 0) == 2)
-		return print_circular_bug();
+	this.class = hlock_class(next);
+	this.parent = NULL;
+	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+	if (unlikely(!ret))
+		return print_circular_bug(&this, target_entry, next, prev);
+	else if (unlikely(ret < 0))
+		return print_bfs_bug(ret);
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
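Finally, the caller's contract, as the last hunk shows it: zero means the target was reached (the new edge would close a circle), negative means the BFS itself failed, and a positive value means the dependency is safe to add. A sketch of that three-way dispatch, with invented *_demo stand-ins for the lockdep report functions:

#include <stdio.h>

static int print_circular_bug_demo(void)
{
	puts("deadlock path found");
	return 0;
}

static int print_bfs_bug_demo(int ret)
{
	printf("bfs error:%d\n", ret);
	return 0;
}

/* Mirrors the dispatch in the rewritten check_prev_add(). */
static int add_dependency_demo(int bfs_ret)
{
	if (bfs_ret == 0)	/* target reached: new edge closes a circle */
		return print_circular_bug_demo();
	if (bfs_ret < 0)	/* BFS internal failure, e.g. queue overflow */
		return print_bfs_bug_demo(bfs_ret);
	return 1;		/* positive: no cycle, dependency is safe */
}

int main(void)
{
	printf("%d %d %d\n",
	       add_dependency_demo(0),
	       add_dependency_demo(-1),
	       add_dependency_demo(1));
	return 0;
}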