Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--   kernel/lockdep.c   309
1 files changed, 245 insertions, 64 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d38a64362973..dbda475b13bd 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -124,6 +124,15 @@ static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 unsigned long nr_lock_classes;
 static struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
+static inline struct lock_class *hlock_class(struct held_lock *hlock)
+{
+	if (!hlock->class_idx) {
+		DEBUG_LOCKS_WARN_ON(1);
+		return NULL;
+	}
+	return lock_classes + hlock->class_idx - 1;
+}
+
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
@@ -222,7 +231,7 @@ static void lock_release_holdtime(struct held_lock *hlock)
 
 	holdtime = sched_clock() - hlock->holdtime_stamp;
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
 		lock_time_inc(&stats->read_holdtime, holdtime);
 	else
@@ -372,6 +381,19 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+				     unsigned int depth)
+{
+	if (!depth)
+		lockdep_dependency_gen_id++;
+	if (source->dep_gen_id == lockdep_dependency_gen_id)
+		return true;
+	source->dep_gen_id = lockdep_dependency_gen_id;
+	return false;
+}
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -505,7 +527,7 @@ static void print_lockdep_cache(struct lockdep_map *lock)
 
 static void print_lock(struct held_lock *hlock)
 {
-	print_lock_name(hlock->class);
+	print_lock_name(hlock_class(hlock));
 	printk(", at: ");
 	print_ip_sym(hlock->acquire_ip);
 }
@@ -558,6 +580,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(class, depth))
+		return;
+
 	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
 		return;
 
@@ -850,11 +875,11 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	if (!entry)
 		return 0;
 
-	entry->class = this;
-	entry->distance = distance;
 	if (!save_trace(&entry->trace))
 		return 0;
 
+	entry->class = this;
+	entry->distance = distance;
 	/*
 	 * Since we never remove from the dependency list, the list can
 	 * be walked lockless by other CPUs, it's only allocation
@@ -932,7 +957,7 @@ static noinline int print_circular_bug_tail(void)
 	if (debug_locks_silent)
 		return 0;
 
-	this.class = check_source->class;
+	this.class = hlock_class(check_source);
 	if (!save_trace(&this.trace))
 		return 0;
 
@@ -959,6 +984,67 @@ static int noinline print_infinite_recursion_bug(void)
 	return 0;
 }
 
+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+		unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_after, entry)
+		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_forward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+		unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_before, entry)
+		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_backward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 /*
  * Prove that the dependency graph starting at <entry> can not
  * lead to <target>. Print an error and return 0 if it does.
@@ -968,6 +1054,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
@@ -977,7 +1066,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	 * Check this lock's dependency list:
 	 */
 	list_for_each_entry(entry, &source->locks_after, entry) {
-		if (entry->class == check_target->class)
+		if (entry->class == hlock_class(check_target))
 			return print_circular_bug_header(entry, depth+1);
 		debug_atomic_inc(&nr_cyclic_checks);
 		if (!check_noncircular(entry->class, depth+1))
@@ -1011,6 +1100,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
 	if (depth >= RECURSION_LIMIT)
@@ -1050,6 +1142,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (!__raw_spin_is_locked(&lockdep_lock))
 		return DEBUG_LOCKS_WARN_ON(1);
 
@@ -1064,6 +1159,11 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 		return 2;
 	}
 
+	if (!source && debug_locks_off_graph_unlock()) {
+		WARN_ON(1);
+		return 0;
+	}
+
 	/*
 	 * Check this lock's dependency list:
 	 */
@@ -1103,9 +1203,9 @@ print_bad_irq_dependency(struct task_struct *curr,
 	printk("\nand this task is already holding:\n");
 	print_lock(prev);
 	printk("which would create a new lock dependency:\n");
-	print_lock_name(prev->class);
+	print_lock_name(hlock_class(prev));
 	printk(" ->");
-	print_lock_name(next->class);
+	print_lock_name(hlock_class(next));
 	printk("\n");
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
@@ -1146,12 +1246,12 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 	find_usage_bit = bit_backwards;
 	/* fills in <backwards_match> */
-	ret = find_usage_backwards(prev->class, 0);
+	ret = find_usage_backwards(hlock_class(prev), 0);
 	if (!ret || ret == 1)
 		return ret;
 
 	find_usage_bit = bit_forwards;
-	ret = find_usage_forwards(next->class, 0);
+	ret = find_usage_forwards(hlock_class(next), 0);
 	if (!ret || ret == 1)
 		return ret;
 	/* ret == 2 */
@@ -1272,18 +1372,32 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
 	       struct lockdep_map *next_instance, int read)
 {
 	struct held_lock *prev;
+	struct held_lock *nest = NULL;
 	int i;
 
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		prev = curr->held_locks + i;
-		if (prev->class != next->class)
+
+		if (prev->instance == next->nest_lock)
+			nest = prev;
+
+		if (hlock_class(prev) != hlock_class(next))
 			continue;
+
 		/*
 		 * Allow read-after-read recursion of the same
 		 * lock class (i.e. read_lock(lock)+read_lock(lock)):
 		 */
 		if ((read == 2) && prev->read)
 			return 2;
+
+		/*
+		 * We're holding the nest_lock, which serializes this lock's
+		 * nesting behaviour.
+		 */
+		if (nest)
+			return 2;
+
 		return print_deadlock_bug(curr, prev, next);
 	}
 	return 1;
@@ -1329,7 +1443,7 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
-	if (!(check_noncircular(next->class, 0)))
+	if (!(check_noncircular(hlock_class(next), 0)))
 		return print_circular_bug_tail();
 
 	if (!check_prev_add_irq(curr, prev, next))
@@ -1353,8 +1467,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * chains - the second one will be new, but L1 already has
 	 * L2 added to its dependency list, due to the first chain.)
 	 */
-	list_for_each_entry(entry, &prev->class->locks_after, entry) {
-		if (entry->class == next->class) {
+	list_for_each_entry(entry, &hlock_class(prev)->locks_after, entry) {
+		if (entry->class == hlock_class(next)) {
 			if (distance == 1)
 				entry->distance = 1;
 			return 2;
@@ -1365,26 +1479,28 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * Ok, all validations passed, add the new lock
 	 * to the previous lock's dependency list:
 	 */
-	ret = add_lock_to_list(prev->class, next->class,
-			       &prev->class->locks_after, next->acquire_ip, distance);
+	ret = add_lock_to_list(hlock_class(prev), hlock_class(next),
+			       &hlock_class(prev)->locks_after,
+			       next->acquire_ip, distance);
 
 	if (!ret)
 		return 0;
 
-	ret = add_lock_to_list(next->class, prev->class,
-			       &next->class->locks_before, next->acquire_ip, distance);
+	ret = add_lock_to_list(hlock_class(next), hlock_class(prev),
+			       &hlock_class(next)->locks_before,
+			       next->acquire_ip, distance);
 	if (!ret)
 		return 0;
 
 	/*
 	 * Debugging printouts:
 	 */
-	if (verbose(prev->class) || verbose(next->class)) {
+	if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
 		graph_unlock();
 		printk("\n new dependency: ");
-		print_lock_name(prev->class);
+		print_lock_name(hlock_class(prev));
 		printk(" => ");
-		print_lock_name(next->class);
+		print_lock_name(hlock_class(next));
 		printk("\n");
 		dump_stack();
 		return graph_lock();
@@ -1481,7 +1597,7 @@ static inline int lookup_chain_cache(struct task_struct *curr,
 				     struct held_lock *hlock,
 				     u64 chain_key)
 {
-	struct lock_class *class = hlock->class;
+	struct lock_class *class = hlock_class(hlock);
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 	struct held_lock *hlock_curr, *hlock_next;
@@ -1554,7 +1670,7 @@ cache_hit:
 	if (likely(cn + chain->depth <= MAX_LOCKDEP_CHAIN_HLOCKS)) {
 		chain->base = cn;
 		for (j = 0; j < chain->depth - 1; j++, i++) {
-			int lock_id = curr->held_locks[i].class - lock_classes;
+			int lock_id = curr->held_locks[i].class_idx - 1;
 			chain_hlocks[chain->base + j] = lock_id;
 		}
 		chain_hlocks[chain->base + j] = class - lock_classes;
@@ -1643,14 +1759,13 @@ static void check_chain_key(struct task_struct *curr)
 		hlock = curr->held_locks + i;
 		if (chain_key != hlock->prev_chain_key) {
 			debug_locks_off();
-			printk("hm#1, depth: %u [%u], %016Lx != %016Lx\n",
+			WARN(1, "hm#1, depth: %u [%u], %016Lx != %016Lx\n",
 				curr->lockdep_depth, i,
 				(unsigned long long)chain_key,
 				(unsigned long long)hlock->prev_chain_key);
-			WARN_ON(1);
 			return;
 		}
-		id = hlock->class - lock_classes;
+		id = hlock->class_idx - 1;
 		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
 			return;
 
@@ -1662,11 +1777,10 @@ static void check_chain_key(struct task_struct *curr)
 	}
 	if (chain_key != curr->curr_chain_key) {
 		debug_locks_off();
-		printk("hm#2, depth: %u [%u], %016Lx != %016Lx\n",
+		WARN(1, "hm#2, depth: %u [%u], %016Lx != %016Lx\n",
 			curr->lockdep_depth, i,
 			(unsigned long long)chain_key,
 			(unsigned long long)curr->curr_chain_key);
-		WARN_ON(1);
 	}
 #endif
 }
@@ -1695,7 +1809,7 @@ print_usage_bug(struct task_struct *curr, struct held_lock *this,
 	print_lock(this);
 
 	printk("{%s} state was registered at:\n", usage_str[prev_bit]);
-	print_stack_trace(this->class->usage_traces + prev_bit, 1);
+	print_stack_trace(hlock_class(this)->usage_traces + prev_bit, 1);
 
 	print_irqtrace_events(curr);
 	printk("\nother info that might help us debug this:\n");
@@ -1714,7 +1828,7 @@ static inline int
 valid_state(struct task_struct *curr, struct held_lock *this,
 	    enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
 {
-	if (unlikely(this->class->usage_mask & (1 << bad_bit)))
+	if (unlikely(hlock_class(this)->usage_mask & (1 << bad_bit)))
 		return print_usage_bug(curr, this, bad_bit, new_bit);
 	return 1;
 }
@@ -1753,7 +1867,7 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 	lockdep_print_held_locks(curr);
 
 	printk("\nthe first lock's dependencies:\n");
-	print_lock_dependencies(this->class, 0);
+	print_lock_dependencies(hlock_class(this), 0);
 
 	printk("\nthe second lock's dependencies:\n");
 	print_lock_dependencies(other, 0);
@@ -1776,7 +1890,7 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 
 	find_usage_bit = bit;
 	/* fills in <forwards_match> */
-	ret = find_usage_forwards(this->class, 0);
+	ret = find_usage_forwards(hlock_class(this), 0);
 	if (!ret || ret == 1)
 		return ret;
 
@@ -1795,7 +1909,7 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 
 	find_usage_bit = bit;
 	/* fills in <backwards_match> */
-	ret = find_usage_backwards(this->class, 0);
+	ret = find_usage_backwards(hlock_class(this), 0);
 	if (!ret || ret == 1)
 		return ret;
 
@@ -1861,7 +1975,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_ENABLED_HARDIRQS_READ, "hard-read"))
 			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_USED_IN_SOFTIRQ:
@@ -1886,7 +2000,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_ENABLED_SOFTIRQS_READ, "soft-read"))
 			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_USED_IN_HARDIRQ_READ:
@@ -1899,7 +2013,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (!check_usage_forwards(curr, this,
 					LOCK_ENABLED_HARDIRQS, "hard"))
 			return 0;
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_USED_IN_SOFTIRQ_READ:
@@ -1912,7 +2026,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 		if (!check_usage_forwards(curr, this,
 					LOCK_ENABLED_SOFTIRQS, "soft"))
 			return 0;
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_HARDIRQS:
@@ -1938,7 +2052,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_USED_IN_HARDIRQ_READ, "hard-read"))
 			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_SOFTIRQS:
@@ -1964,7 +2078,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_USED_IN_SOFTIRQ_READ, "soft-read"))
 			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_HARDIRQS_READ:
@@ -1979,7 +2093,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_USED_IN_HARDIRQ, "hard"))
 			return 0;
 #endif
-		if (hardirq_verbose(this->class))
+		if (hardirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	case LOCK_ENABLED_SOFTIRQS_READ:
@@ -1994,7 +2108,7 @@ static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
 					LOCK_USED_IN_SOFTIRQ, "soft"))
 			return 0;
 #endif
-		if (softirq_verbose(this->class))
+		if (softirq_verbose(hlock_class(this)))
 			ret = 2;
 		break;
 	default:
@@ -2310,7 +2424,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	 * If already set then do not dirty the cacheline,
 	 * nor do any checks:
 	 */
-	if (likely(this->class->usage_mask & new_mask))
+	if (likely(hlock_class(this)->usage_mask & new_mask))
 		return 1;
 
 	if (!graph_lock())
@@ -2318,14 +2432,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	/*
 	 * Make sure we didnt race:
 	 */
-	if (unlikely(this->class->usage_mask & new_mask)) {
+	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
 		graph_unlock();
 		return 1;
 	}
 
-	this->class->usage_mask |= new_mask;
+	hlock_class(this)->usage_mask |= new_mask;
 
-	if (!save_trace(this->class->usage_traces + new_bit))
+	if (!save_trace(hlock_class(this)->usage_traces + new_bit))
 		return 0;
 
 	switch (new_bit) {
@@ -2405,7 +2519,7 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			  int trylock, int read, int check, int hardirqs_off,
-			  unsigned long ip)
+			  struct lockdep_map *nest_lock, unsigned long ip)
 {
 	struct task_struct *curr = current;
 	struct lock_class *class = NULL;
@@ -2459,14 +2573,16 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 		return 0;
 
 	hlock = curr->held_locks + depth;
-
-	hlock->class = class;
+	if (DEBUG_LOCKS_WARN_ON(!class))
+		return 0;
+	hlock->class_idx = class - lock_classes + 1;
 	hlock->acquire_ip = ip;
 	hlock->instance = lock;
+	hlock->nest_lock = nest_lock;
 	hlock->trylock = trylock;
 	hlock->read = read;
 	hlock->check = check;
-	hlock->hardirqs_off = hardirqs_off;
+	hlock->hardirqs_off = !!hardirqs_off;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
 	hlock->holdtime_stamp = sched_clock();
@@ -2574,6 +2690,55 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 	return 1;
 }
 
+static int
+__lock_set_subclass(struct lockdep_map *lock,
+		    unsigned int subclass, unsigned long ip)
+{
+	struct task_struct *curr = current;
+	struct held_lock *hlock, *prev_hlock;
+	struct lock_class *class;
+	unsigned int depth;
+	int i;
+
+	depth = curr->lockdep_depth;
+	if (DEBUG_LOCKS_WARN_ON(!depth))
+		return 0;
+
+	prev_hlock = NULL;
+	for (i = depth-1; i >= 0; i--) {
+		hlock = curr->held_locks + i;
+		/*
+		 * We must not cross into another context:
+		 */
+		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
+			break;
+		if (hlock->instance == lock)
+			goto found_it;
+		prev_hlock = hlock;
+	}
+	return print_unlock_inbalance_bug(curr, lock, ip);
+
+found_it:
+	class = register_lock_class(lock, subclass, 0);
+	hlock->class_idx = class - lock_classes + 1;
+
+	curr->lockdep_depth = i;
+	curr->curr_chain_key = hlock->prev_chain_key;
+
+	for (; i < depth; i++) {
+		hlock = curr->held_locks + i;
+		if (!__lock_acquire(hlock->instance,
+				hlock_class(hlock)->subclass, hlock->trylock,
+				hlock->read, hlock->check, hlock->hardirqs_off,
+				hlock->nest_lock, hlock->acquire_ip))
+			return 0;
+	}
+
+	if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth))
+		return 0;
+	return 1;
+}
+
 /*
  * Remove the lock to the list of currently held locks in a
  * potentially non-nested (out of order) manner. This is a
@@ -2624,9 +2789,9 @@ found_it:
 	for (i++; i < depth; i++) {
 		hlock = curr->held_locks + i;
 		if (!__lock_acquire(hlock->instance,
-			hlock->class->subclass, hlock->trylock,
+			hlock_class(hlock)->subclass, hlock->trylock,
 			hlock->read, hlock->check, hlock->hardirqs_off,
-			hlock->acquire_ip))
+			hlock->nest_lock, hlock->acquire_ip))
 			return 0;
 	}
 
@@ -2669,7 +2834,7 @@ static int lock_release_nested(struct task_struct *curr,
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 	hlock->prev_chain_key = 0;
-	hlock->class = NULL;
+	hlock->class_idx = 0;
 	hlock->acquire_ip = 0;
 	hlock->irq_context = 0;
 #endif
@@ -2738,18 +2903,36 @@ static void check_flags(unsigned long flags)
 #endif
 }
 
+void
+lock_set_subclass(struct lockdep_map *lock,
+		  unsigned int subclass, unsigned long ip)
+{
+	unsigned long flags;
+
+	if (unlikely(current->lockdep_recursion))
+		return;
+
+	raw_local_irq_save(flags);
+	current->lockdep_recursion = 1;
+	check_flags(flags);
+	if (__lock_set_subclass(lock, subclass, ip))
+		check_chain_key(current);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+}
+
+EXPORT_SYMBOL_GPL(lock_set_subclass);
+
 /*
  * We are not always called with irqs disabled - do that here,
  * and also avoid lockdep recursion:
  */
 void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
-		  int trylock, int read, int check, unsigned long ip)
+		  int trylock, int read, int check,
+		  struct lockdep_map *nest_lock, unsigned long ip)
 {
 	unsigned long flags;
 
-	if (unlikely(!lock_stat && !prove_locking))
-		return;
-
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2758,7 +2941,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 	current->lockdep_recursion = 1;
 	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), ip);
+		       irqs_disabled_flags(flags), nest_lock, ip);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
@@ -2770,9 +2953,6 @@ void lock_release(struct lockdep_map *lock, int nested,
 {
 	unsigned long flags;
 
-	if (unlikely(!lock_stat && !prove_locking))
-		return;
-
 	if (unlikely(current->lockdep_recursion))
 		return;
 
@@ -2845,11 +3025,11 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 found_it:
 	hlock->waittime_stamp = sched_clock();
 
-	point = lock_contention_point(hlock->class, ip);
+	point = lock_contention_point(hlock_class(hlock), ip);
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (point < ARRAY_SIZE(stats->contention_point))
-		stats->contention_point[i]++;
+		stats->contention_point[point]++;
 	if (lock->cpu != smp_processor_id())
 		stats->bounces[bounce_contended + !!hlock->read]++;
 	put_lock_stats(stats);
@@ -2893,7 +3073,7 @@ found_it:
 		hlock->holdtime_stamp = now;
 	}
 
-	stats = get_lock_stats(hlock->class);
+	stats = get_lock_stats(hlock_class(hlock));
 	if (waittime) {
 		if (hlock->read)
 			lock_time_inc(&stats->read_waittime, waittime);
@@ -2988,6 +3168,7 @@ static void zap_class(struct lock_class *class)
 	list_del_rcu(&class->hash_entry);
 	list_del_rcu(&class->lock_entry);
 
+	class->key = NULL;
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
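
Caller-side sketch (not part of the diff above): the new nest_lock argument to lock_acquire()/__lock_acquire() and the new lock_set_subclass() hook are consumed roughly as below. This is a minimal illustration assuming the spin_lock_nest_lock() wrapper added elsewhere in the same series; the parent/child structures and the scheduler-style double_unlock_balance()/this_rq/busiest names are only examples, not definitions made by this patch.

	/*
	 * 1) nest_lock: when a "protecting" lock is already held, many locks of
	 *    the same class below it may be taken in any order without lockdep
	 *    reporting a possible recursive deadlock (check_deadlock() returns 2
	 *    when the held nest_lock is found).
	 */
	spin_lock(&parent->list_lock);		/* the protecting lock */
	list_for_each_entry(child, &parent->children, node)
		spin_lock_nest_lock(&child->lock, &parent->list_lock);

	/*
	 * 2) lock_set_subclass(): after an out-of-order double lock, re-annotate
	 *    the still-held lock back to subclass 0 so later dependency chains
	 *    are recorded against the base class rather than the nested one.
	 */
	static void double_unlock_balance(struct rq *this_rq, struct rq *busiest)
		__releases(busiest->lock)
	{
		spin_unlock(&busiest->lock);
		lock_set_subclass(&this_rq->lock.dep_map, 0, _RET_IP_);
	}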
