author      Ming Lei <tom.leiming@gmail.com>          2009-07-16 09:44:29 -0400
committer   Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-07-24 04:49:52 -0400
commit      d7aaba140a09b7a2295aec061629c880f088de3d (patch)
tree        2ea4cac57c25b52049bd5da6b85ef2667d6563b9 /kernel/lockdep.c
parent      db0002a32f31060ca900b533d93a074ddf7d5b61 (diff)
lockdep: Implement find_usage_*wards by BFS
This patch uses BFS to implement find_usage_*wards(), which
were originally written using DFS.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-6-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
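
For context on the conversion: the recursive DFS walkers are replaced by the queue-based __bfs_forwards()/__bfs_backwards() helpers, which take an opaque data pointer plus a match callback and follow one convention throughout: return <0 on internal error, 0 when a matching entry was found (and stored through *target_entry), 1 when no match exists in the reachable subgraph. The sketch below is a minimal, self-contained user-space rendering of that shape, not kernel code; node, graph_bfs, MAX_NODES and usage_match_example are invented names for illustration only.

/*
 * Illustrative user-space sketch of the BFS-with-callback shape used by
 * __bfs_forwards()/__bfs_backwards(). Not kernel code: node, graph_bfs
 * and MAX_NODES are hypothetical names for this example.
 */
#include <stdio.h>

#define MAX_NODES 64

struct node {
        int usage_mask;                 /* bits we can match against */
        int nr_edges;
        struct node *edges[4];          /* forward dependencies */
        int visited;                    /* per-search mark, replaces DFS depth */
};

/*
 * Same return convention as the patch: <0 internal error, 0 match found
 * (stored in *target), 1 no match reachable from src.
 */
static int graph_bfs(struct node *src, void *data,
                     int (*match)(struct node *n, void *data),
                     struct node **target)
{
        struct node *queue[MAX_NODES];
        int head = 0, tail = 0;

        queue[tail++] = src;
        src->visited = 1;

        while (head < tail) {
                struct node *n = queue[head++];

                if (match(n, data)) {
                        *target = n;
                        return 0;               /* found */
                }
                for (int i = 0; i < n->nr_edges; i++) {
                        struct node *next = n->edges[i];

                        if (next->visited)
                                continue;
                        if (tail == MAX_NODES)
                                return -1;      /* queue overflow: error */
                        next->visited = 1;
                        queue[tail++] = next;
                }
        }
        return 1;                               /* no match */
}

/* Mirrors the patch's usage_match(): the bit rides in the void *. */
static int usage_match_example(struct node *n, void *bit)
{
        return n->usage_mask & (1 << (long)bit);
}

int main(void)
{
        struct node a = {0}, b = {0};
        struct node *target = NULL;

        a.edges[a.nr_edges++] = &b;
        b.usage_mask = 1 << 3;          /* pretend bit 3 is set on b */

        if (graph_bfs(&a, (void *)3L, usage_match_example, &target) == 0)
                printf("match: usage_mask=%#x\n", target->usage_mask);
        return 0;
}

Because the queue-based walk marks nodes visited instead of tracking recursion depth, the RECURSION_LIMIT/print_infinite_recursion_bug() machinery removed in the diff below becomes unnecessary.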
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--   kernel/lockdep.c | 180
1 file changed, 72 insertions, 108 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 5609d309d568..b3ade507c6ce 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -963,7 +963,7 @@ exit:
         return ret;
 }
 
-static inline int __bfs_forward(struct lock_list *src_entry,
+static inline int __bfs_forwards(struct lock_list *src_entry,
                         void *data,
                         int (*match)(struct lock_list *entry, void *data),
                         struct lock_list **target_entry)
@@ -972,7 +972,7 @@ static inline int __bfs_forward(struct lock_list *src_entry,
 
 }
 
-static inline int __bfs_backward(struct lock_list *src_entry,
+static inline int __bfs_backwards(struct lock_list *src_entry,
                         void *data,
                         int (*match)(struct lock_list *entry, void *data),
                         struct lock_list **target_entry)
@@ -1085,18 +1085,6 @@ static noinline int print_bfs_bug(int ret)
         return 0;
 }
 
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
-{
-        if (!debug_locks_off_graph_unlock())
-                return 0;
-
-        WARN_ON(1);
-
-        return 0;
-}
-
 unsigned long __lockdep_count_forward_deps(struct lock_class *class,
                                            unsigned int depth)
 {
@@ -1170,7 +1158,7 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
 
         debug_atomic_inc(&nr_cyclic_checks);
 
-        result = __bfs_forward(root, target, class_equal, target_entry);
+        result = __bfs_forwards(root, target, class_equal, target_entry);
 
         return result;
 }
@@ -1181,101 +1169,70 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static enum lock_usage_bit find_usage_bit;
 static struct lock_class *forwards_match, *backwards_match;
 
+
+#define BFS_PROCESS_RET(ret) do { \
+                if (ret < 0) \
+                        return print_bfs_bug(ret); \
+                if (ret == 1) \
+                        return 1; \
+        } while (0)
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+        return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
+
+
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+                        struct lock_list **target_entry)
 {
-        struct lock_list *entry;
-        int ret;
-
-        if (lockdep_dependency_visit(source, depth))
-                return 1;
-
-        if (depth > max_recursion_depth)
-                max_recursion_depth = depth;
-        if (depth >= RECURSION_LIMIT)
-                return print_infinite_recursion_bug();
+        int result;
 
         debug_atomic_inc(&nr_find_usage_forwards_checks);
-        if (source->usage_mask & (1 << find_usage_bit)) {
-                forwards_match = source;
-                return 2;
-        }
 
-        /*
-         * Check this lock's dependency list:
-         */
-        list_for_each_entry(entry, &source->locks_after, entry) {
-                debug_atomic_inc(&nr_find_usage_forwards_recursions);
-                ret = find_usage_forwards(entry->class, depth+1);
-                if (ret == 2 || ret == 0)
-                        return ret;
-        }
-        return 1;
+        result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+        return result;
 }
 
 /*
  * Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+                        struct lock_list **target_entry)
 {
-        struct lock_list *entry;
-        int ret;
-
-        if (lockdep_dependency_visit(source, depth))
-                return 1;
-
-        if (!__raw_spin_is_locked(&lockdep_lock))
-                return DEBUG_LOCKS_WARN_ON(1);
-
-        if (depth > max_recursion_depth)
-                max_recursion_depth = depth;
-        if (depth >= RECURSION_LIMIT)
-                return print_infinite_recursion_bug();
+        int result;
 
         debug_atomic_inc(&nr_find_usage_backwards_checks);
-        if (source->usage_mask & (1 << find_usage_bit)) {
-                backwards_match = source;
-                return 2;
-        }
 
-        if (!source && debug_locks_off_graph_unlock()) {
-                WARN_ON(1);
-                return 0;
-        }
+        result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
 
-        /*
-         * Check this lock's dependency list:
-         */
-        list_for_each_entry(entry, &source->locks_before, entry) {
-                debug_atomic_inc(&nr_find_usage_backwards_recursions);
-                ret = find_usage_backwards(entry->class, depth+1);
-                if (ret == 2 || ret == 0)
-                        return ret;
-        }
-        return 1;
+        return result;
 }
 
+
 static int
 print_bad_irq_dependency(struct task_struct *curr,
                          struct held_lock *prev,
@@ -1343,18 +1300,21 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
             enum lock_usage_bit bit_forwards, const char *irqclass)
 {
         int ret;
+        struct lock_list this;
+        struct lock_list *uninitialized_var(target_entry);
+
+        this.parent = NULL;
+
+        this.class = hlock_class(prev);
+        ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+        BFS_PROCESS_RET(ret);
+        backwards_match = target_entry->class;
+
+        this.class = hlock_class(next);
+        ret = find_usage_forwards(&this, bit_forwards, &target_entry);
+        BFS_PROCESS_RET(ret);
+        forwards_match = target_entry->class;
 
-        find_usage_bit = bit_backwards;
-        /* fills in <backwards_match> */
-        ret = find_usage_backwards(hlock_class(prev), 0);
-        if (!ret || ret == 1)
-                return ret;
-
-        find_usage_bit = bit_forwards;
-        ret = find_usage_forwards(hlock_class(next), 0);
-        if (!ret || ret == 1)
-                return ret;
-        /* ret == 2 */
         return print_bad_irq_dependency(curr, prev, next,
                 bit_backwards, bit_forwards, irqclass);
 }
@@ -2029,14 +1989,16 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
                      enum lock_usage_bit bit, const char *irqclass)
 {
         int ret;
+        struct lock_list root;
+        struct lock_list *uninitialized_var(target_entry);
 
-        find_usage_bit = bit;
-        /* fills in <forwards_match> */
-        ret = find_usage_forwards(hlock_class(this), 0);
-        if (!ret || ret == 1)
-                return ret;
+        root.parent = NULL;
+        root.class = hlock_class(this);
+        ret = find_usage_forwards(&root, bit, &target_entry);
+        BFS_PROCESS_RET(ret);
 
-        return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+        return print_irq_inversion_bug(curr, target_entry->class,
+                                        this, 1, irqclass);
 }
 
 /*
@@ -2048,14 +2010,16 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
                      enum lock_usage_bit bit, const char *irqclass)
 {
         int ret;
+        struct lock_list root;
+        struct lock_list *uninitialized_var(target_entry);
 
-        find_usage_bit = bit;
-        /* fills in <backwards_match> */
-        ret = find_usage_backwards(hlock_class(this), 0);
-        if (!ret || ret == 1)
-                return ret;
+        root.parent = NULL;
+        root.class = hlock_class(this);
+        ret = find_usage_backwards(&root, bit, &target_entry);
+        BFS_PROCESS_RET(ret);
 
-        return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+        return print_irq_inversion_bug(curr, target_entry->class,
+                                        this, 1, irqclass);
 }
 
 void print_irqtrace_events(struct task_struct *curr)
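
One design note on the new call sites: every caller collapses the three-way BFS result the same way, so the patch centralizes that in BFS_PROCESS_RET(). The sketch below shows the early-return behavior of that macro pattern in isolation; it is a hypothetical stand-alone example (report_error(), fake_search() and check_key() are stand-ins, not kernel functions).

#include <stdio.h>

/* Hypothetical stand-in for print_bfs_bug() in this sketch. */
static int report_error(int ret)
{
        fprintf(stderr, "search error: %d\n", ret);
        return 0;
}

/*
 * Same shape as the patch's BFS_PROCESS_RET(): bail out early on
 * error (<0) or "no match" (1); fall through only on 0 (found).
 */
#define PROCESS_RET(ret) do {                           \
                if ((ret) < 0)                          \
                        return report_error(ret);       \
                if ((ret) == 1)                         \
                        return 1;                       \
        } while (0)

/* Fake search: error on negative keys, match only on even keys. */
static int fake_search(int key)
{
        if (key < 0)
                return -1;
        return (key % 2 == 0) ? 0 : 1;
}

static int check_key(int key)
{
        int ret = fake_search(key);

        PROCESS_RET(ret);
        /* Reached only when ret == 0, i.e. a match was found. */
        printf("key %d matched\n", key);
        return 0;
}

int main(void)
{
        check_key(4);   /* prints "key 4 matched" */
        check_key(5);   /* returns 1 via PROCESS_RET */
        check_key(-2);  /* reports an error, returns 0 */
        return 0;
}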