Diffstat (limited to 'kernel/lockdep.c')

 kernel/lockdep.c | 52 ++++++++++++++++++++++++++----------------------------
 1 file changed, 28 insertions(+), 24 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c9fefdb1a7db..b02032476dc2 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -140,13 +140,6 @@ void lockdep_on(void)
 
 EXPORT_SYMBOL(lockdep_on);
 
-int lockdep_internal(void)
-{
-	return current->lockdep_recursion != 0;
-}
-
-EXPORT_SYMBOL(lockdep_internal);
-
 /*
  * Debugging switches:
  */
@@ -228,17 +221,15 @@ static int save_trace(struct stack_trace *trace)
 	trace->skip = 3;
 	trace->all_contexts = 0;
 
-	/* Make sure to not recurse in case the the unwinder needs to tak
-	e locks. */
-	lockdep_off();
 	save_stack_trace(trace, NULL);
-	lockdep_on();
 
 	trace->max_entries = trace->nr_entries;
 
 	nr_stack_trace_entries += trace->nr_entries;
-	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES))
+	if (DEBUG_LOCKS_WARN_ON(nr_stack_trace_entries > MAX_STACK_TRACE_ENTRIES)) {
+		__raw_spin_unlock(&hash_lock);
 		return 0;
+	}
 
 	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
 		__raw_spin_unlock(&hash_lock);
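This is the recurring theme of the patch: save_trace() is entered with hash_lock held, and on the overflow path it now drops the lock itself before returning 0, since the failure paths bail out without unlocking. A minimal userspace sketch of that contract, using a pthread mutex (save_entry(), MAX_ENTRIES and the rest are invented for illustration, not lockdep API):

#include <pthread.h>
#include <stdio.h>

#define MAX_ENTRIES 4

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;
static int nr_entries;

/*
 * Called with hash_lock held. On success the caller keeps the lock;
 * on failure we must drop it ourselves, because the caller gives up
 * immediately without unlocking (mirrors save_trace() after the fix).
 */
static int save_entry(void)
{
	if (nr_entries >= MAX_ENTRIES) {
		pthread_mutex_unlock(&hash_lock);
		return 0;
	}
	nr_entries++;
	return 1;
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		pthread_mutex_lock(&hash_lock);
		if (!save_entry()) {
			printf("entry %d: table full, lock already dropped\n", i);
			continue;
		}
		printf("entry %d: saved\n", i);
		pthread_mutex_unlock(&hash_lock);
	}
	return 0;
}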
@@ -357,7 +348,7 @@ get_usage_chars(struct lock_class *class, char *c1, char *c2, char *c3, char *c4
 
 static void print_lock_name(struct lock_class *class)
 {
-	char str[128], c1, c2, c3, c4;
+	char str[KSYM_NAME_LEN + 1], c1, c2, c3, c4;
 	const char *name;
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -379,7 +370,7 @@ static void print_lock_name(struct lock_class *class)
 static void print_lockdep_cache(struct lockdep_map *lock)
 {
 	const char *name;
-	char str[128];
+	char str[KSYM_NAME_LEN + 1];
 
 	name = lock->name;
 	if (!name)
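Both printers format a symbol name into an on-stack buffer; sizing it KSYM_NAME_LEN + 1 ties the buffer to the limit kallsyms actually guarantees instead of the magic 128. The general practice, as a small standalone sketch (NAME_LEN and print_name() are invented here):

#include <stdio.h>

#define NAME_LEN 31			/* stand-in for KSYM_NAME_LEN */

static void print_name(const char *sym)
{
	char str[NAME_LEN + 1];		/* +1 for the NUL terminator */

	/* snprintf() never writes past sizeof(str) and always terminates. */
	snprintf(str, sizeof(str), "%s", sym);
	printf("%s\n", str);
}

int main(void)
{
	print_name("short_symbol");
	print_name("a_symbol_name_much_longer_than_the_buffer_can_hold");
	return 0;
}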
@@ -449,7 +440,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 	print_lock_class_header(class, depth);
 
 	list_for_each_entry(entry, &class->locks_after, entry) {
-		DEBUG_LOCKS_WARN_ON(!entry->class);
+		if (DEBUG_LOCKS_WARN_ON(!entry->class))
+			return;
+
 		print_lock_dependencies(entry->class, depth + 1);
 
 		printk("%*s ... acquired at:\n",depth,"");
@@ -474,7 +467,8 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 		return 0;
 
 	entry->class = this;
-	save_trace(&entry->trace);
+	if (!save_trace(&entry->trace))
+		return 0;
 
 	/*
 	 * Since we never remove from the dependency list, the list can
@@ -562,8 +556,12 @@ static noinline int print_circular_bug_tail(void)
 	if (debug_locks_silent)
 		return 0;
 
+	/* hash_lock unlocked by the header */
+	__raw_spin_lock(&hash_lock);
 	this.class = check_source->class;
-	save_trace(&this.trace);
+	if (!save_trace(&this.trace))
+		return 0;
+	__raw_spin_unlock(&hash_lock);
 	print_circular_bug_entry(&this, 0);
 
 	printk("\nother info that might help us debug this:\n\n");
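The subtlety above: the report header has already dropped hash_lock, but save_trace() must only be called with it held, so the lock is re-taken just around that call and released again before the slow printing. A userspace sketch of asserting such a "called with lock held" precondition via an error-checking mutex (record_trace() is invented; the EDEADLK trick is illustrative, not lockdep's mechanism):

#include <assert.h>
#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hash_lock;

static void record_trace(void)
{
	/*
	 * Precondition: the caller holds hash_lock. With an ERRORCHECK
	 * mutex, relocking a mutex we already own fails with EDEADLK --
	 * which is exactly what we expect to see here.
	 */
	assert(pthread_mutex_lock(&hash_lock) == EDEADLK);
	printf("trace recorded under lock\n");
}

int main(void)
{
	pthread_mutexattr_t attr;

	pthread_mutexattr_init(&attr);
	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
	pthread_mutex_init(&hash_lock, &attr);

	pthread_mutex_lock(&hash_lock);		/* re-take: header dropped it */
	record_trace();
	pthread_mutex_unlock(&hash_lock);	/* drop before slow printing */
	printf("... long report printed without the lock held ...\n");
	return 0;
}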
@@ -966,14 +964,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 			       &prev->class->locks_after, next->acquire_ip);
 	if (!ret)
 		return 0;
-	/*
-	 * Return value of 2 signals 'dependency already added',
-	 * in that case we dont have to add the backlink either.
-	 */
-	if (ret == 2)
-		return 2;
+
 	ret = add_lock_to_list(next->class, prev->class,
 			       &next->class->locks_before, next->acquire_ip);
+	if (!ret)
+		return 0;
 
 	/*
 	 * Debugging printouts:
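check_prev_add() links the dependency both ways -- prev -> next into locks_after and the backlink into locks_before -- and with this change a failure of either add_lock_to_list() call (the static entry pool can run out) propagates up instead of being ignored. A self-contained sketch of that two-edge bookkeeping over a fixed pool (all types and names here are invented stand-ins):

#include <stdio.h>

#define MAX_ENTRIES 8

struct lock_entry {
	int class;
	struct lock_entry *next;
};

static struct lock_entry pool[MAX_ENTRIES];
static int nr_entries;

/* Allocate from a fixed pool, like lockdep's static list_entries[]. */
static struct lock_entry *alloc_list_entry(void)
{
	if (nr_entries >= MAX_ENTRIES)
		return NULL;
	return &pool[nr_entries++];
}

static int add_edge(struct lock_entry **head, int class)
{
	struct lock_entry *e = alloc_list_entry();

	if (!e)
		return 0;
	e->class = class;
	e->next = *head;
	*head = e;
	return 1;
}

static struct lock_entry *locks_after[4], *locks_before[4];

/* Record prev -> next, then the backlink; fail if either edge fails. */
static int add_dependency(int prev, int next)
{
	if (!add_edge(&locks_after[prev], next))
		return 0;
	if (!add_edge(&locks_before[next], prev))
		return 0;
	return 1;
}

int main(void)
{
	printf("%d\n", add_dependency(0, 1));	/* 1: both edges stored */
	for (int i = 0; i < 4; i++)
		add_dependency(1, 2);		/* exhaust the pool */
	printf("%d\n", add_dependency(2, 3));	/* 0: pool exhausted */
	return 0;
}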
@@ -1025,7 +1020,8 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		 * added:
 		 */
 		if (hlock->read != 2) {
-			check_prev_add(curr, hlock, next);
+			if (!check_prev_add(curr, hlock, next))
+				return 0;
 		/*
 		 * Stop after the first non-trylock entry,
 		 * as non-trylock entries have added their
@@ -1182,6 +1178,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
 	struct lock_class *class;
+	unsigned long flags;
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
@@ -1203,6 +1200,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	key = lock->key->subkeys + subclass;
 	hash_head = classhashentry(key);
 
+	raw_local_irq_save(flags);
 	__raw_spin_lock(&hash_lock);
 	/*
 	 * We have to do the hash-walk again, to avoid races
@@ -1217,6 +1215,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 	 */
 	if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
 		__raw_spin_unlock(&hash_lock);
+		raw_local_irq_restore(flags);
 		debug_locks_off();
 		printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
 		printk("turning off the locking correctness validator.\n");
@@ -1239,15 +1238,18 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 
 	if (verbose(class)) {
 		__raw_spin_unlock(&hash_lock);
+		raw_local_irq_restore(flags);
 		printk("\nnew class %p: %s", class->key, class->name);
 		if (class->name_version > 1)
 			printk("#%d", class->name_version);
 		printk("\n");
 		dump_stack();
+		raw_local_irq_save(flags);
 		__raw_spin_lock(&hash_lock);
 	}
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
+	raw_local_irq_restore(flags);
 
 	if (!subclass || force)
 		lock->class_cache = class;
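register_lock_class() may now be reached with interrupts in any state, so hash_lock is consistently bracketed by raw_local_irq_save()/raw_local_irq_restore(), and the verbose path drops and re-takes both around printk()/dump_stack(), always in reverse order of acquisition. Userspace has no way to disable interrupts, but blocking signals around a mutex gives a rough analogue of the save/restore discipline (a sketch only, not kernel API):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

int main(void)
{
	sigset_t all, saved;

	sigfillset(&all);

	/* raw_local_irq_save(flags): block signals, remember the old mask. */
	pthread_sigmask(SIG_BLOCK, &all, &saved);
	pthread_mutex_lock(&hash_lock);

	/* ... critical section: look up / register the class ... */

	/* Verbose path: drop both to do slow printing, then re-take them. */
	pthread_mutex_unlock(&hash_lock);
	pthread_sigmask(SIG_SETMASK, &saved, NULL);
	printf("new class registered\n");	/* the printk()/dump_stack() spot */
	pthread_sigmask(SIG_BLOCK, &all, &saved);
	pthread_mutex_lock(&hash_lock);

	/* out_unlock_set: the single exit undoes both, in reverse order. */
	pthread_mutex_unlock(&hash_lock);
	pthread_sigmask(SIG_SETMASK, &saved, NULL);
	return 0;
}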
@@ -1728,6 +1730,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 		debug_atomic_dec(&nr_unused_locks);
 		break;
 	default:
+		__raw_spin_unlock(&hash_lock);
 		debug_locks_off();
 		WARN_ON(1);
 		return 0;
@@ -2645,6 +2648,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
 	}
 	local_irq_restore(flags);
 }
+EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
 static void print_held_locks_bug(struct task_struct *curr)
 {