 kernel/lockdep.c | 23 +++++++++++++++++++----
 1 file changed, 19 insertions(+), 4 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 509efd49540f..2d616f4d853c 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -70,6 +70,9 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
+	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+		return DEBUG_LOCKS_WARN_ON(1);
+
 	__raw_spin_unlock(&lockdep_lock);
 	return 0;
 }
@@ -712,6 +715,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (!__raw_spin_is_locked(&lockdep_lock))
+		return DEBUG_LOCKS_WARN_ON(1);
+
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
 	if (depth >= RECURSION_LIMIT)
@@ -1293,7 +1299,8 @@ out_unlock_set:
 	if (!subclass || force)
 		lock->class_cache = class;
 
-	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
+	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
+		return NULL;
 
 	return class;
 }
@@ -1308,7 +1315,8 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 
-	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return 0;
 	/*
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
@@ -1394,7 +1402,9 @@ static void check_chain_key(struct task_struct *curr)
 			return;
 		}
 		id = hlock->class - lock_classes;
-		DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
+		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+			return;
+
 		if (prev_hlock && (prev_hlock->irq_context !=
 				hlock->irq_context))
 			chain_key = 0;
@@ -2205,7 +2215,11 @@ out_calc_hash:
 			if (!check_prevs_add(curr, hlock))
 				return 0;
 		graph_unlock();
-	}
+	} else
+		/* after lookup_chain_cache(): */
+		if (unlikely(!debug_locks))
+			return 0;
+
 	curr->lockdep_depth++;
 	check_chain_key(curr);
 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
@@ -2214,6 +2228,7 @@ out_calc_hash:
 		printk("turning off the locking correctness validator.\n");
 		return 0;
 	}
+
 	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
 		max_lockdep_depth = curr->lockdep_depth;
 
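A note on the pattern this patch applies: DEBUG_LOCKS_WARN_ON() evaluates its condition and yields a nonzero result when the warning fires, so the changed call sites can bail out early instead of continuing with inconsistent lockdep state. Below is a minimal, stand-alone C sketch of that idiom; the stub macro and the validate_state()/state_ok names are illustrative stand-ins only, not the kernel's definitions (the real macro lives in include/linux/debug_locks.h).

#include <stdio.h>

static int debug_locks = 1;

/* Simplified stand-in for the kernel's DEBUG_LOCKS_WARN_ON(). */
#define DEBUG_LOCKS_WARN_ON(c)						\
({									\
	int __ret = !!(c);						\
									\
	if (__ret && debug_locks) {					\
		debug_locks = 0;	/* stop further checking */	\
		printf("BUG: warning at %s:%d\n", __FILE__, __LINE__);	\
	}								\
	__ret;								\
})

/* Hypothetical caller: warn once, then give up rather than proceed. */
static int validate_state(int state_ok)
{
	if (DEBUG_LOCKS_WARN_ON(!state_ok))
		return 0;

	/* ... normal work would continue here ... */
	return 1;
}

int main(void)
{
	printf("valid:   %d\n", validate_state(1));
	printf("invalid: %d\n", validate_state(0));
	return 0;
}

The hunks in graph_unlock() and find_usage_backwards() follow the same convention by returning DEBUG_LOCKS_WARN_ON(1) directly: once the expected lock state does not hold, the function warns and backs out rather than operating on lockdep data it no longer owns.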