Diffstat (limited to 'kernel/lockdep.c')

 kernel/lockdep.c | 136 +++++++++++++++++++++++------------------------
 1 file changed, 69 insertions(+), 67 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index f32ca78c198d..9bad17884513 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -169,22 +169,17 @@ EXPORT_SYMBOL(lockdep_internal);
  */
 static int class_filter(struct lock_class *class)
 {
+#if 0
+        /* Example */
         if (class->name_version == 1 &&
-                        !strcmp(class->name, "&rl->lock"))
+                        !strcmp(class->name, "lockname"))
                 return 1;
         if (class->name_version == 1 &&
-                        !strcmp(class->name, "&ni->mrec_lock"))
+                        !strcmp(class->name, "&struct->lockfield"))
                 return 1;
-        if (class->name_version == 1 &&
-                        !strcmp(class->name, "mft_ni_runlist_lock"))
-                return 1;
-        if (class->name_version == 1 &&
-                        !strcmp(class->name, "mft_ni_mrec_lock"))
-                return 1;
-        if (class->name_version == 1 &&
-                        !strcmp(class->name, "&vol->lcnbmp_lock"))
-                return 1;
-        return 0;
+#endif
+        /* Allow everything else. 0 would be filter everything else */
+        return 1;
 }
 #endif
 
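
The rewritten filter body is compiled out behind #if 0 and documents the two usual match shapes (a plain lock name, and a "&struct->lockfield" style name) in place of the old NTFS-specific entries. Below is a minimal userspace sketch of the same matching logic; the pared-down struct and the name "&inode->i_mutex" are illustrative, not taken from the patch:

#include <stdio.h>
#include <string.h>

/* Pared-down stand-in for the two lock_class fields the filter reads. */
struct lock_class {
        const char *name;
        int name_version;
};

/* Same shape as the #if 0 template above: return 1 to trace a class,
 * 0 to filter it out.  The lock name here is just an example. */
static int class_filter(const struct lock_class *class)
{
        if (class->name_version == 1 &&
                        !strcmp(class->name, "&inode->i_mutex"))
                return 1;
        return 0;
}

int main(void)
{
        struct lock_class hit  = { "&inode->i_mutex", 1 };
        struct lock_class miss = { "&inode->i_mutex", 2 };

        printf("%d %d\n", class_filter(&hit), class_filter(&miss)); /* 1 0 */
        return 0;
}
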
@@ -408,23 +403,12 @@ static void lockdep_print_held_locks(struct task_struct *curr)
                 print_lock(curr->held_locks + i);
         }
 }
-/*
- * Helper to print a nice hierarchy of lock dependencies:
- */
-static void print_spaces(int nr)
-{
-        int i;
-
-        for (i = 0; i < nr; i++)
-                printk("  ");
-}
 
 static void print_lock_class_header(struct lock_class *class, int depth)
 {
         int bit;
 
-        print_spaces(depth);
-        printk("->");
+        printk("%*s->", depth, "");
         print_lock_name(class);
         printk(" ops: %lu", class->ops);
         printk(" {\n");
@@ -433,17 +417,14 @@ static void print_lock_class_header(struct lock_class *class, int depth)
                 if (class->usage_mask & (1 << bit)) {
                         int len = depth;
 
-                        print_spaces(depth);
-                        len += printk("  %s", usage_str[bit]);
+                        len += printk("%*s   %s", depth, "", usage_str[bit]);
                         len += printk(" at:\n");
                         print_stack_trace(class->usage_traces + bit, len);
                 }
         }
-        print_spaces(depth);
-        printk(" }\n");
+        printk("%*s }\n", depth, "");
 
-        print_spaces(depth);
-        printk(" ... key      at: ");
+        printk("%*s ... key      at: ",depth,"");
         print_ip_sym((unsigned long)class->key);
 }
 
@@ -463,8 +444,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
                 DEBUG_LOCKS_WARN_ON(!entry->class);
                 print_lock_dependencies(entry->class, depth + 1);
 
-                print_spaces(depth);
-                printk(" ... acquired at:\n");
+                printk("%*s ... acquired at:\n",depth,"");
                 print_stack_trace(&entry->trace, 2);
                 printk("\n");
         }
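
The three hunks above all replace the print_spaces() helper with printf-style field widths: "%*s" takes its width from an int argument and pads the (empty) string argument to that width, so a single printk emits the indentation and the text together, and its return value still feeds the len bookkeeping used to align stack traces. A standalone illustration with plain printf, using a hypothetical lock name:

#include <stdio.h>

/* "%*s" pads the empty string argument to `depth` characters, which
 * is exactly what the removed print_spaces() loop produced. */
static int print_header(const char *name, int depth)
{
        /* Like printk in the patched code, printf returns the number
         * of characters written, so the caller can keep alignment
         * bookkeeping without a separate spacing pass. */
        return printf("%*s-> %s {\n", depth, "", name);
}

int main(void)
{
        int depth;

        for (depth = 0; depth <= 4; depth += 2)
                print_header("example_lock", depth);
        return 0;
}
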
@@ -1124,7 +1104,7 @@ extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
  * itself, so actual lookup of the hash should be once per lock object.
  */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 {
         struct lockdep_subclass_key *key;
         struct list_head *hash_head;
@@ -1168,7 +1148,26 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
          */
         list_for_each_entry(class, hash_head, hash_entry)
                 if (class->key == key)
-                        goto out_set;
+                        return class;
+
+        return NULL;
+}
+
+/*
+ * Register a lock's class in the hash-table, if the class is not present
+ * yet. Otherwise we look it up. We cache the result in the lock object
+ * itself, so actual lookup of the hash should be once per lock object.
+ */
+static inline struct lock_class *
+register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+{
+        struct lockdep_subclass_key *key;
+        struct list_head *hash_head;
+        struct lock_class *class;
+
+        class = look_up_lock_class(lock, subclass);
+        if (likely(class))
+                return class;
 
         /*
          * Debug-check: all keys must be persistent!
@@ -1183,6 +1182,9 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
                 return NULL;
         }
 
+        key = lock->key->subkeys + subclass;
+        hash_head = classhashentry(key);
+
         __raw_spin_lock(&hash_lock);
         /*
          * We have to do the hash-walk again, to avoid races
@@ -1229,8 +1231,8 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
         __raw_spin_unlock(&hash_lock);
 
-out_set:
-        lock->class[subclass] = class;
+        if (!subclass)
+                lock->class_cache = class;
 
         DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
 
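
Splitting look_up_lock_class() out of register_lock_class() gives the common case a lockless fast path: the lookup walks the class hash without hash_lock, and only a miss takes the lock, where the walk is repeated to close the race with a concurrent registrant (hence the hash-walk "again" comment kept in the hunk above). A minimal pthread sketch of that look-up/lock/re-check shape; the single shared slot standing in for the hash chain and all names here are illustrative:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct lock_class {
        const char *key;
};

static struct lock_class *the_class;    /* stands in for a hash chain */
static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

/* Fast path: search without taking hash_lock.  (The kernel walks a
 * real hash chain here; one shared pointer keeps the sketch short.) */
static struct lock_class *look_up_lock_class(const char *key)
{
        struct lock_class *class = the_class;

        if (class && !strcmp(class->key, key))
                return class;
        return NULL;
}

static struct lock_class *register_lock_class(const char *key)
{
        struct lock_class *class = look_up_lock_class(key);

        if (class)      /* the likely() case: hit without any locking */
                return class;

        pthread_mutex_lock(&hash_lock);
        /* Re-do the walk under the lock: another thread may have
         * registered the class since our unlocked lookup. */
        class = look_up_lock_class(key);
        if (!class) {
                class = malloc(sizeof(*class));
                if (!class)
                        abort();
                class->key = key;
                the_class = class;
        }
        pthread_mutex_unlock(&hash_lock);
        return class;
}

int main(void)
{
        printf("%p\n", (void *)register_lock_class("example_key"));
        printf("%p\n", (void *)register_lock_class("example_key")); /* same */
        return 0;
}
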
@@ -1934,7 +1936,7 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
         }
         lock->name = name;
         lock->key = key;
-        memset(lock->class, 0, sizeof(lock->class[0])*MAX_LOCKDEP_SUBCLASSES);
+        lock->class_cache = NULL;
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -1948,8 +1950,8 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                           unsigned long ip)
 {
         struct task_struct *curr = current;
+        struct lock_class *class = NULL;
         struct held_lock *hlock;
-        struct lock_class *class;
         unsigned int depth, id;
         int chain_head = 0;
         u64 chain_key;
@@ -1967,8 +1969,11 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
                 return 0;
         }
 
-        class = lock->class[subclass];
-        /* not cached yet? */
+        if (!subclass)
+                class = lock->class_cache;
+        /*
+         * Not cached yet or subclass?
+         */
         if (unlikely(!class)) {
                 class = register_lock_class(lock, subclass);
                 if (!class)
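
The data-structure change behind these hunks: struct lockdep_map used to embed one cached class pointer per subclass and now keeps a single class_cache, which is why only subclass 0 can be served from the cache and any nonzero subclass falls through to register_lock_class(). A sketch of the before/after layout, assuming the era's MAX_LOCKDEP_SUBCLASSES of 8 and the key/name fields visible in lockdep_init_map() above:

#include <stdio.h>

#define MAX_LOCKDEP_SUBCLASSES 8        /* lockdep.h value of the era */

struct lock_class;              /* opaque here; only pointers are stored */
struct lock_class_key;

/* Before: one cached class pointer per subclass. */
struct lockdep_map_old {
        struct lock_class_key *key;
        struct lock_class *class[MAX_LOCKDEP_SUBCLASSES];
        const char *name;
};

/* After: a single slot, caching only the subclass-0 class. */
struct lockdep_map_new {
        struct lock_class_key *key;
        struct lock_class *class_cache;
        const char *name;
};

int main(void)
{
        /* A lockdep_map is embedded in every tracked lock in the
         * system, so shrinking it by seven pointers adds up. */
        printf("old: %zu bytes, new: %zu bytes\n",
               sizeof(struct lockdep_map_old),
               sizeof(struct lockdep_map_new));
        return 0;
}
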
@@ -2469,48 +2474,44 @@ void lockdep_free_key_range(void *start, unsigned long size)
 
 void lockdep_reset_lock(struct lockdep_map *lock)
 {
-        struct lock_class *class, *next, *entry;
+        struct lock_class *class, *next;
         struct list_head *head;
         unsigned long flags;
         int i, j;
 
         raw_local_irq_save(flags);
-        __raw_spin_lock(&hash_lock);
 
         /*
-         * Remove all classes this lock has:
+         * Remove all classes this lock might have:
+         */
+        for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
+                /*
+                 * If the class exists we look it up and zap it:
+                 */
+                class = look_up_lock_class(lock, j);
+                if (class)
+                        zap_class(class);
+        }
+        /*
+         * Debug check: in the end all mapped classes should
+         * be gone.
          */
+        __raw_spin_lock(&hash_lock);
         for (i = 0; i < CLASSHASH_SIZE; i++) {
                 head = classhash_table + i;
                 if (list_empty(head))
                         continue;
                 list_for_each_entry_safe(class, next, head, hash_entry) {
-                        for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-                                entry = lock->class[j];
-                                if (class == entry) {
-                                        zap_class(class);
-                                        lock->class[j] = NULL;
-                                        break;
-                                }
+                        if (unlikely(class == lock->class_cache)) {
+                                __raw_spin_unlock(&hash_lock);
+                                DEBUG_LOCKS_WARN_ON(1);
+                                goto out_restore;
                         }
                 }
         }
-
-        /*
-         * Debug check: in the end all mapped classes should
-         * be gone.
-         */
-        for (j = 0; j < MAX_LOCKDEP_SUBCLASSES; j++) {
-                entry = lock->class[j];
-                if (!entry)
-                        continue;
-                __raw_spin_unlock(&hash_lock);
-                DEBUG_LOCKS_WARN_ON(1);
-                raw_local_irq_restore(flags);
-                return;
-        }
 
         __raw_spin_unlock(&hash_lock);
+
+out_restore:
         raw_local_irq_restore(flags);
 }
 
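
Note the control-flow cleanup that comes with the rewrite: the old early exit had to repeat both the unlock and the irq-restore, while the new failure path drops hash_lock at the point of detection and jumps to a shared out_restore tail. A compact sketch of that single-exit idiom, with a pthread mutex standing in for hash_lock and the irq save/restore reduced to comments:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors the exit discipline of the rewritten lockdep_reset_lock():
 * the failure path drops hash_lock itself and jumps over the normal
 * unlock to the shared restore tail, instead of duplicating it. */
static void reset_lock(int stale_class_found)
{
        /* raw_local_irq_save(flags) happens here in the kernel */

        pthread_mutex_lock(&hash_lock);
        if (stale_class_found) {
                pthread_mutex_unlock(&hash_lock);
                fprintf(stderr, "warning: class survived the reset\n");
                goto out_restore;
        }
        pthread_mutex_unlock(&hash_lock);

out_restore:
        /* raw_local_irq_restore(flags) happens here in the kernel */
        return;
}

int main(void)
{
        reset_lock(0);
        reset_lock(1);
        return 0;
}
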
@@ -2571,7 +2572,7 @@ static inline int in_range(const void *start, const void *addr, const void *end)
 
 static void
 print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
                      const void *mem_to)
-                     const void *mem_to)
+                     const void *mem_to, struct held_lock *hlock)
 {
         if (!debug_locks_off())
                 return;
@@ -2583,6 +2584,7 @@ print_freed_lock_bug(struct task_struct *curr, const void *mem_from,
         printk(  "-------------------------\n");
         printk("%s/%d is freeing memory %p-%p, with a lock still held there!\n",
                 curr->comm, curr->pid, mem_from, mem_to-1);
+        print_lock(hlock);
         lockdep_print_held_locks(curr);
 
         printk("\nstack backtrace:\n");
@@ -2616,7 +2618,7 @@ void debug_check_no_locks_freed(const void *mem_from, unsigned long mem_len)
                     !in_range(mem_from, lock_to, mem_to))
                         continue;
 
-                print_freed_lock_bug(curr, mem_from, mem_to);
+                print_freed_lock_bug(curr, mem_from, mem_to, hlock);
                 break;
         }
         local_irq_restore(flags);
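
With hlock passed down, the freed-memory report can name the specific lock overlapping the freed range instead of only dumping the whole held-lock list. The overlap test itself is the in_range() helper visible in the hunk header; a standalone sketch of that containment check follows (the bounds handling is illustrative, and the kernel's in_range() may treat the end inclusively):

#include <stdio.h>

/* Containment check in the spirit of lockdep's in_range(): does addr
 * fall inside [start, end)? */
static int in_range(const void *start, const void *addr, const void *end)
{
        return addr >= start && addr < end;
}

/* A held lock overlaps the freed range if either of its endpoints
 * lands inside it -- the test debug_check_no_locks_freed() applies
 * to every lock on the current task's held-lock stack. */
static int lock_in_freed_range(const void *mem_from, const void *mem_to,
                               const void *lock_from, const void *lock_to)
{
        return in_range(mem_from, lock_from, mem_to) ||
               in_range(mem_from, lock_to, mem_to);
}

int main(void)
{
        char heap[64];
        char *lock = heap + 16;  /* pretend a lock object lives here */

        printf("%d\n", lock_in_freed_range(heap, heap + 64, lock, lock + 8));
        printf("%d\n", lock_in_freed_range(heap, heap + 8, lock, lock + 8));
        return 0;
}
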
