path: root/kernel/lockdep.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2007-07-19 04:48:54 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-07-19 13:04:49 -0400
commit    8e18257d29238311e82085152741f0c3aa18b74d (patch)
tree      4c3e43b3b001763b4280a4b6d754387a52fb4d73    /kernel/lockdep.c
parent    ca58abcb4a6d52ee2db1b1130cea3ca2a76677b9 (diff)
lockdep: reduce the ifdeffery
Move code around to get fewer but larger #ifdef sections. Break some
in-function #ifdefs out into their own functions.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--    kernel/lockdep.c    1171
1 file changed, 628 insertions(+), 543 deletions(-)
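
The refactoring pattern the changelog describes -- replace in-function #ifdefs with helpers whose #else branch is a trivial stub, so the callers compile unconditionally -- can be sketched in plain userspace C as follows. This is an illustration of the pattern only, not lockdep code; FEATURE_CHECKS stands in for a config option such as CONFIG_TRACE_IRQFLAGS or CONFIG_PROVE_LOCKING.

/*
 * Minimal sketch of "break in-function #ifdefs out into their own functions".
 * Build with or without -DFEATURE_CHECKS; the caller never changes.
 */
#include <stdio.h>

#ifdef FEATURE_CHECKS
/* real implementation, compiled only when the feature is enabled */
static int check_extra(int value)
{
	if (value < 0) {
		printf("extra check failed for %d\n", value);
		return 0;
	}
	return 1;
}
#else
/* stub keeps the call site unconditional when the feature is off */
static inline int check_extra(int value)
{
	(void)value;
	return 1;
}
#endif

/* the caller is now free of in-function #ifdefs */
static int do_operation(int value)
{
	if (!check_extra(value))
		return 0;
	printf("operation ran for %d\n", value);
	return 1;
}

int main(void)
{
	do_operation(3);
	do_operation(-1);	/* rejected only when built with -DFEATURE_CHECKS */
	return 0;
}

Built with -DFEATURE_CHECKS the real check runs; built without it the stub is inlined away. The new check_prev_add_irq(), inc_chains(), validate_chain(), mark_irqflags() and separate_irq_context() helpers in the hunks below follow this shape.
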
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 05c1261791f..87ac3642507 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -95,25 +95,6 @@ static int lockdep_initialized;
95unsigned long nr_list_entries; 95
96static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES]; 96
97 97
98#ifdef CONFIG_PROVE_LOCKING
99/*
100 * Allocate a lockdep entry. (assumes the graph_lock held, returns
101 * with NULL on failure)
102 */
103static struct lock_list *alloc_list_entry(void)
104{
105 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
106 if (!debug_locks_off_graph_unlock())
107 return NULL;
108
109 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
110 printk("turning off the locking correctness validator.\n");
111 return NULL;
112 }
113 return list_entries + nr_list_entries++;
114}
115#endif
116
117/* 98
118 * All data structures here are protected by the global debug_lock. 99
119 * 100
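
The alloc_list_entry() block removed here is only moved, not deleted; it reappears under the consolidated CONFIG_PROVE_LOCKING section later in this diff. Its allocation strategy -- hand out slots from a fixed static array and disable the validator when the array is exhausted -- is easy to sketch in isolation. The following is a simplified userspace illustration, not the kernel code; names and sizes are invented.

/* Fixed-pool "allocate or give up" allocator in the style of alloc_list_entry(). */
#include <stdio.h>

#define MAX_ENTRIES 4

struct entry {
	int data;
};

static struct entry pool[MAX_ENTRIES];
static unsigned long nr_entries;
static int pool_enabled = 1;

static struct entry *alloc_entry(void)
{
	if (nr_entries >= MAX_ENTRIES) {
		/* mirror the kernel's "turn the validator off" response */
		pool_enabled = 0;
		printf("BUG: MAX_ENTRIES too low!\n");
		return NULL;
	}
	return pool + nr_entries++;
}

int main(void)
{
	for (int i = 0; i < 6; i++) {
		struct entry *e = alloc_entry();
		printf("alloc %d -> %p (enabled=%d)\n", i, (void *)e, pool_enabled);
	}
	return 0;
}
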
@@ -141,11 +122,6 @@ LIST_HEAD(all_lock_classes);
141 122
142static struct list_head classhash_table[CLASSHASH_SIZE]; 123
143 124
144unsigned long nr_lock_chains;
145#ifdef CONFIG_PROVE_LOCKING
146static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
147#endif
148
149/* 125
150 * We put the lock dependency chains into a hash-table as well, to cache 126
151 * their existence: 127
@@ -227,26 +203,6 @@ static int verbose(struct lock_class *class)
227 return 0; 203
228} 204
229 205
230#ifdef CONFIG_TRACE_IRQFLAGS
231
232static int hardirq_verbose(struct lock_class *class)
233{
234#if HARDIRQ_VERBOSE
235 return class_filter(class);
236#endif
237 return 0;
238}
239
240static int softirq_verbose(struct lock_class *class)
241{
242#if SOFTIRQ_VERBOSE
243 return class_filter(class);
244#endif
245 return 0;
246}
247
248#endif
249
250/* 206
251 * Stack-trace: tightly packed array of stack backtrace 207
252 * addresses. Protected by the graph_lock. 208
@@ -486,8 +442,258 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
486 } 442
487} 443
488 444
445static void print_kernel_version(void)
446{
447 printk("%s %.*s\n", init_utsname()->release,
448 (int)strcspn(init_utsname()->version, " "),
449 init_utsname()->version);
450}
451
452static int very_verbose(struct lock_class *class)
453{
454#if VERY_VERBOSE
455 return class_filter(class);
456#endif
457 return 0;
458}
459
460/*
461 * Is this the address of a static object:
462 */
463static int static_obj(void *obj)
464{
465 unsigned long start = (unsigned long) &_stext,
466 end = (unsigned long) &_end,
467 addr = (unsigned long) obj;
468#ifdef CONFIG_SMP
469 int i;
470#endif
471
472 /*
473 * static variable?
474 */
475 if ((addr >= start) && (addr < end))
476 return 1;
477
478#ifdef CONFIG_SMP
479 /*
480 * percpu var?
481 */
482 for_each_possible_cpu(i) {
483 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
484 end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
485 + per_cpu_offset(i);
486
487 if ((addr >= start) && (addr < end))
488 return 1;
489 }
490#endif
491
492 /*
493 * module var?
494 */
495 return is_module_address(addr);
496}
497
498/*
499 * To make lock name printouts unique, we calculate a unique
500 * class->name_version generation counter:
501 */
502static int count_matching_names(struct lock_class *new_class)
503{
504 struct lock_class *class;
505 int count = 0;
506
507 if (!new_class->name)
508 return 0;
509
510 list_for_each_entry(class, &all_lock_classes, lock_entry) {
511 if (new_class->key - new_class->subclass == class->key)
512 return class->name_version;
513 if (class->name && !strcmp(class->name, new_class->name))
514 count = max(count, class->name_version);
515 }
516
517 return count + 1;
518}
519
520/*
521 * Register a lock's class in the hash-table, if the class is not present
522 * yet. Otherwise we look it up. We cache the result in the lock object
523 * itself, so actual lookup of the hash should be once per lock object.
524 */
525static inline struct lock_class *
526look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
527{
528 struct lockdep_subclass_key *key;
529 struct list_head *hash_head;
530 struct lock_class *class;
531
532#ifdef CONFIG_DEBUG_LOCKDEP
533 /*
534 * If the architecture calls into lockdep before initializing
535 * the hashes then we'll warn about it later. (we cannot printk
536 * right now)
537 */
538 if (unlikely(!lockdep_initialized)) {
539 lockdep_init();
540 lockdep_init_error = 1;
541 }
542#endif
543
544 /*
545 * Static locks do not have their class-keys yet - for them the key
546 * is the lock object itself:
547 */
548 if (unlikely(!lock->key))
549 lock->key = (void *)lock;
550
551 /*
552 * NOTE: the class-key must be unique. For dynamic locks, a static
553 * lock_class_key variable is passed in through the mutex_init()
554 * (or spin_lock_init()) call - which acts as the key. For static
555 * locks we use the lock object itself as the key.
556 */
557 BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
558
559 key = lock->key->subkeys + subclass;
560
561 hash_head = classhashentry(key);
562
563 /*
564 * We can walk the hash lockfree, because the hash only
565 * grows, and we are careful when adding entries to the end:
566 */
567 list_for_each_entry(class, hash_head, hash_entry)
568 if (class->key == key)
569 return class;
570
571 return NULL;
572}
573
574/*
575 * Register a lock's class in the hash-table, if the class is not present
576 * yet. Otherwise we look it up. We cache the result in the lock object
577 * itself, so actual lookup of the hash should be once per lock object.
578 */
579static inline struct lock_class *
580register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
581{
582 struct lockdep_subclass_key *key;
583 struct list_head *hash_head;
584 struct lock_class *class;
585 unsigned long flags;
586
587 class = look_up_lock_class(lock, subclass);
588 if (likely(class))
589 return class;
590
591 /*
592 * Debug-check: all keys must be persistent!
593 */
594 if (!static_obj(lock->key)) {
595 debug_locks_off();
596 printk("INFO: trying to register non-static key.\n");
597 printk("the code is fine but needs lockdep annotation.\n");
598 printk("turning off the locking correctness validator.\n");
599 dump_stack();
600
601 return NULL;
602 }
603
604 key = lock->key->subkeys + subclass;
605 hash_head = classhashentry(key);
606
607 raw_local_irq_save(flags);
608 if (!graph_lock()) {
609 raw_local_irq_restore(flags);
610 return NULL;
611 }
612 /*
613 * We have to do the hash-walk again, to avoid races
614 * with another CPU:
615 */
616 list_for_each_entry(class, hash_head, hash_entry)
617 if (class->key == key)
618 goto out_unlock_set;
619 /*
620 * Allocate a new key from the static array, and add it to
621 * the hash:
622 */
623 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
624 if (!debug_locks_off_graph_unlock()) {
625 raw_local_irq_restore(flags);
626 return NULL;
627 }
628 raw_local_irq_restore(flags);
629
630 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
631 printk("turning off the locking correctness validator.\n");
632 return NULL;
633 }
634 class = lock_classes + nr_lock_classes++;
635 debug_atomic_inc(&nr_unused_locks);
636 class->key = key;
637 class->name = lock->name;
638 class->subclass = subclass;
639 INIT_LIST_HEAD(&class->lock_entry);
640 INIT_LIST_HEAD(&class->locks_before);
641 INIT_LIST_HEAD(&class->locks_after);
642 class->name_version = count_matching_names(class);
643 /*
644 * We use RCU's safe list-add method to make
645 * parallel walking of the hash-list safe:
646 */
647 list_add_tail_rcu(&class->hash_entry, hash_head);
648
649 if (verbose(class)) {
650 graph_unlock();
651 raw_local_irq_restore(flags);
652
653 printk("\nnew class %p: %s", class->key, class->name);
654 if (class->name_version > 1)
655 printk("#%d", class->name_version);
656 printk("\n");
657 dump_stack();
658
659 raw_local_irq_save(flags);
660 if (!graph_lock()) {
661 raw_local_irq_restore(flags);
662 return NULL;
663 }
664 }
665out_unlock_set:
666 graph_unlock();
667 raw_local_irq_restore(flags);
668
669 if (!subclass || force)
670 lock->class_cache = class;
671
672 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
673 return NULL;
674
675 return class;
676}
677
489#ifdef CONFIG_PROVE_LOCKING 678
490/* 679
680 * Allocate a lockdep entry. (assumes the graph_lock held, returns
681 * with NULL on failure)
682 */
683static struct lock_list *alloc_list_entry(void)
684{
685 if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
686 if (!debug_locks_off_graph_unlock())
687 return NULL;
688
689 printk("BUG: MAX_LOCKDEP_ENTRIES too low!\n");
690 printk("turning off the locking correctness validator.\n");
691 return NULL;
692 }
693 return list_entries + nr_list_entries++;
694}
695
696/*
491 * Add a new dependency to the head of the list: 697
492 */ 698
493static int add_lock_to_list(struct lock_class *class, struct lock_class *this, 699
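
look_up_lock_class()/register_lock_class(), moved here above the CONFIG_PROVE_LOCKING block, implement a common pattern: a lock-free lookup fast path, a re-check under the lock before inserting, and the result cached in the lock object itself. Below is a much simplified userspace sketch of that pattern (single bucket, a pthread mutex standing in for graph_lock, no RCU, so the lock-free walk is only illustrative); it is not the kernel implementation.

/* Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_CLASSES 8

struct class {
	const char *key;
	struct class *next;	/* hash-bucket chain */
};

static struct class classes[MAX_CLASSES];
static unsigned int nr_classes;
static struct class *bucket;	/* one bucket stands in for the hash table */
static pthread_mutex_t graph_lock = PTHREAD_MUTEX_INITIALIZER;

static struct class *look_up(const char *key)
{
	for (struct class *c = bucket; c; c = c->next)
		if (strcmp(c->key, key) == 0)
			return c;
	return NULL;
}

static struct class *register_class(const char *key)
{
	struct class *c = look_up(key);	/* fast path, no lock taken */
	if (c)
		return c;

	pthread_mutex_lock(&graph_lock);
	c = look_up(key);		/* re-check: another thread may have won */
	if (!c && nr_classes < MAX_CLASSES) {
		c = &classes[nr_classes++];	/* allocate from the static array */
		c->key = key;
		c->next = bucket;
		bucket = c;			/* publish at the head of the chain */
	}
	pthread_mutex_unlock(&graph_lock);
	return c;
}

int main(void)
{
	struct class *a = register_class("rq->lock");
	struct class *b = register_class("rq->lock");
	printf("same object: %s\n", a == b ? "yes" : "no");
	return 0;
}
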
@@ -546,16 +752,7 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
546 752
547 return 0; 753
548} 754
549#endif
550 755
551static void print_kernel_version(void)
552{
553 printk("%s %.*s\n", init_utsname()->release,
554 (int)strcspn(init_utsname()->version, " "),
555 init_utsname()->version);
556}
557
558#ifdef CONFIG_PROVE_LOCKING
559/* 756
560 * When a circular dependency is detected, print the 757 * When a circular dependency is detected, print the
561 * header first: 758 * header first:
@@ -646,17 +843,8 @@ check_noncircular(struct lock_class *source, unsigned int depth)
646 } 843
647 return 1; 844
648} 845
649#endif
650 846
651static int very_verbose(struct lock_class *class)
652{
653#if VERY_VERBOSE
654 return class_filter(class);
655#endif
656 return 0;
657}
658#ifdef CONFIG_TRACE_IRQFLAGS 847#ifdef CONFIG_TRACE_IRQFLAGS
659
660/* 848/*
661 * Forwards and backwards subgraph searching, for the purposes of 849 * Forwards and backwards subgraph searching, for the purposes of
662 * proving that two subgraphs can be connected by a new dependency 850 * proving that two subgraphs can be connected by a new dependency
@@ -829,9 +1017,80 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
829 bit_backwards, bit_forwards, irqclass); 1017
830} 1018
831 1019
1020static int
1021check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1022 struct held_lock *next)
1023{
1024 /*
1025 * Prove that the new dependency does not connect a hardirq-safe
1026 * lock with a hardirq-unsafe lock - to achieve this we search
1027 * the backwards-subgraph starting at <prev>, and the
1028 * forwards-subgraph starting at <next>:
1029 */
1030 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
1031 LOCK_ENABLED_HARDIRQS, "hard"))
1032 return 0;
1033
1034 /*
1035 * Prove that the new dependency does not connect a hardirq-safe-read
1036 * lock with a hardirq-unsafe lock - to achieve this we search
1037 * the backwards-subgraph starting at <prev>, and the
1038 * forwards-subgraph starting at <next>:
1039 */
1040 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
1041 LOCK_ENABLED_HARDIRQS, "hard-read"))
1042 return 0;
1043
1044 /*
1045 * Prove that the new dependency does not connect a softirq-safe
1046 * lock with a softirq-unsafe lock - to achieve this we search
1047 * the backwards-subgraph starting at <prev>, and the
1048 * forwards-subgraph starting at <next>:
1049 */
1050 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
1051 LOCK_ENABLED_SOFTIRQS, "soft"))
1052 return 0;
1053 /*
1054 * Prove that the new dependency does not connect a softirq-safe-read
1055 * lock with a softirq-unsafe lock - to achieve this we search
1056 * the backwards-subgraph starting at <prev>, and the
1057 * forwards-subgraph starting at <next>:
1058 */
1059 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
1060 LOCK_ENABLED_SOFTIRQS, "soft"))
1061 return 0;
1062
1063 return 1;
1064}
1065
1066static void inc_chains(void)
1067{
1068 if (current->hardirq_context)
1069 nr_hardirq_chains++;
1070 else {
1071 if (current->softirq_context)
1072 nr_softirq_chains++;
1073 else
1074 nr_process_chains++;
1075 }
1076}
1077
1078#else
1079
1080static inline int
1081check_prev_add_irq(struct task_struct *curr, struct held_lock *prev,
1082 struct held_lock *next)
1083{
1084 return 1;
1085}
1086
1087static inline void inc_chains(void)
1088{
1089 nr_process_chains++;
1090}
1091
832#endif 1092
833 1093
834#ifdef CONFIG_PROVE_LOCKING
835static int 1094
836print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, 1095
837 struct held_lock *next) 1096
@@ -931,47 +1190,10 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
931 if (!(check_noncircular(next->class, 0))) 1190
932 return print_circular_bug_tail(); 1191
933 1192
934#ifdef CONFIG_TRACE_IRQFLAGS 1193 if (!check_prev_add_irq(curr, prev, next))
935 /*
936 * Prove that the new dependency does not connect a hardirq-safe
937 * lock with a hardirq-unsafe lock - to achieve this we search
938 * the backwards-subgraph starting at <prev>, and the
939 * forwards-subgraph starting at <next>:
940 */
941 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ,
942 LOCK_ENABLED_HARDIRQS, "hard"))
943 return 0;
944
945 /*
946 * Prove that the new dependency does not connect a hardirq-safe-read
947 * lock with a hardirq-unsafe lock - to achieve this we search
948 * the backwards-subgraph starting at <prev>, and the
949 * forwards-subgraph starting at <next>:
950 */
951 if (!check_usage(curr, prev, next, LOCK_USED_IN_HARDIRQ_READ,
952 LOCK_ENABLED_HARDIRQS, "hard-read"))
953 return 0; 1194 return 0;
954 1195
955 /* 1196 /*
956 * Prove that the new dependency does not connect a softirq-safe
957 * lock with a softirq-unsafe lock - to achieve this we search
958 * the backwards-subgraph starting at <prev>, and the
959 * forwards-subgraph starting at <next>:
960 */
961 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ,
962 LOCK_ENABLED_SOFTIRQS, "soft"))
963 return 0;
964 /*
965 * Prove that the new dependency does not connect a softirq-safe-read
966 * lock with a softirq-unsafe lock - to achieve this we search
967 * the backwards-subgraph starting at <prev>, and the
968 * forwards-subgraph starting at <next>:
969 */
970 if (!check_usage(curr, prev, next, LOCK_USED_IN_SOFTIRQ_READ,
971 LOCK_ENABLED_SOFTIRQS, "soft"))
972 return 0;
973#endif
974 /*
975 * For recursive read-locks we do all the dependency checks, 1197 * For recursive read-locks we do all the dependency checks,
976 * but we dont store read-triggered dependencies (only 1198 * but we dont store read-triggered dependencies (only
977 * write-triggered dependencies). This ensures that only the 1199 * write-triggered dependencies). This ensures that only the
@@ -1096,227 +1318,10 @@ out_bug:
1096 1318
1097 return 0; 1319
1098} 1320
1099#endif
1100
1101/*
1102 * Is this the address of a static object:
1103 */
1104static int static_obj(void *obj)
1105{
1106 unsigned long start = (unsigned long) &_stext,
1107 end = (unsigned long) &_end,
1108 addr = (unsigned long) obj;
1109#ifdef CONFIG_SMP
1110 int i;
1111#endif
1112
1113 /*
1114 * static variable?
1115 */
1116 if ((addr >= start) && (addr < end))
1117 return 1;
1118
1119#ifdef CONFIG_SMP
1120 /*
1121 * percpu var?
1122 */
1123 for_each_possible_cpu(i) {
1124 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
1125 end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
1126 + per_cpu_offset(i);
1127
1128 if ((addr >= start) && (addr < end))
1129 return 1;
1130 }
1131#endif
1132
1133 /*
1134 * module var?
1135 */
1136 return is_module_address(addr);
1137}
1138
1139/*
1140 * To make lock name printouts unique, we calculate a unique
1141 * class->name_version generation counter:
1142 */
1143static int count_matching_names(struct lock_class *new_class)
1144{
1145 struct lock_class *class;
1146 int count = 0;
1147
1148 if (!new_class->name)
1149 return 0;
1150
1151 list_for_each_entry(class, &all_lock_classes, lock_entry) {
1152 if (new_class->key - new_class->subclass == class->key)
1153 return class->name_version;
1154 if (class->name && !strcmp(class->name, new_class->name))
1155 count = max(count, class->name_version);
1156 }
1157
1158 return count + 1;
1159}
1160
1161/*
1162 * Register a lock's class in the hash-table, if the class is not present
1163 * yet. Otherwise we look it up. We cache the result in the lock object
1164 * itself, so actual lookup of the hash should be once per lock object.
1165 */
1166static inline struct lock_class *
1167look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
1168{
1169 struct lockdep_subclass_key *key;
1170 struct list_head *hash_head;
1171 struct lock_class *class;
1172
1173#ifdef CONFIG_DEBUG_LOCKDEP
1174 /*
1175 * If the architecture calls into lockdep before initializing
1176 * the hashes then we'll warn about it later. (we cannot printk
1177 * right now)
1178 */
1179 if (unlikely(!lockdep_initialized)) {
1180 lockdep_init();
1181 lockdep_init_error = 1;
1182 }
1183#endif
1184
1185 /*
1186 * Static locks do not have their class-keys yet - for them the key
1187 * is the lock object itself:
1188 */
1189 if (unlikely(!lock->key))
1190 lock->key = (void *)lock;
1191
1192 /*
1193 * NOTE: the class-key must be unique. For dynamic locks, a static
1194 * lock_class_key variable is passed in through the mutex_init()
1195 * (or spin_lock_init()) call - which acts as the key. For static
1196 * locks we use the lock object itself as the key.
1197 */
1198 BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
1199
1200 key = lock->key->subkeys + subclass;
1201
1202 hash_head = classhashentry(key);
1203
1204 /*
1205 * We can walk the hash lockfree, because the hash only
1206 * grows, and we are careful when adding entries to the end:
1207 */
1208 list_for_each_entry(class, hash_head, hash_entry)
1209 if (class->key == key)
1210 return class;
1211
1212 return NULL;
1213}
1214
1215/*
1216 * Register a lock's class in the hash-table, if the class is not present
1217 * yet. Otherwise we look it up. We cache the result in the lock object
1218 * itself, so actual lookup of the hash should be once per lock object.
1219 */
1220static inline struct lock_class *
1221register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
1222{
1223 struct lockdep_subclass_key *key;
1224 struct list_head *hash_head;
1225 struct lock_class *class;
1226 unsigned long flags;
1227
1228 class = look_up_lock_class(lock, subclass);
1229 if (likely(class))
1230 return class;
1231
1232 /*
1233 * Debug-check: all keys must be persistent!
1234 */
1235 if (!static_obj(lock->key)) {
1236 debug_locks_off();
1237 printk("INFO: trying to register non-static key.\n");
1238 printk("the code is fine but needs lockdep annotation.\n");
1239 printk("turning off the locking correctness validator.\n");
1240 dump_stack();
1241 1321
1242 return NULL; 1322unsigned long nr_lock_chains;
1243 } 1323static struct lock_chain lock_chains[MAX_LOCKDEP_CHAINS];
1244
1245 key = lock->key->subkeys + subclass;
1246 hash_head = classhashentry(key);
1247
1248 raw_local_irq_save(flags);
1249 if (!graph_lock()) {
1250 raw_local_irq_restore(flags);
1251 return NULL;
1252 }
1253 /*
1254 * We have to do the hash-walk again, to avoid races
1255 * with another CPU:
1256 */
1257 list_for_each_entry(class, hash_head, hash_entry)
1258 if (class->key == key)
1259 goto out_unlock_set;
1260 /*
1261 * Allocate a new key from the static array, and add it to
1262 * the hash:
1263 */
1264 if (nr_lock_classes >= MAX_LOCKDEP_KEYS) {
1265 if (!debug_locks_off_graph_unlock()) {
1266 raw_local_irq_restore(flags);
1267 return NULL;
1268 }
1269 raw_local_irq_restore(flags);
1270
1271 printk("BUG: MAX_LOCKDEP_KEYS too low!\n");
1272 printk("turning off the locking correctness validator.\n");
1273 return NULL;
1274 }
1275 class = lock_classes + nr_lock_classes++;
1276 debug_atomic_inc(&nr_unused_locks);
1277 class->key = key;
1278 class->name = lock->name;
1279 class->subclass = subclass;
1280 INIT_LIST_HEAD(&class->lock_entry);
1281 INIT_LIST_HEAD(&class->locks_before);
1282 INIT_LIST_HEAD(&class->locks_after);
1283 class->name_version = count_matching_names(class);
1284 /*
1285 * We use RCU's safe list-add method to make
1286 * parallel walking of the hash-list safe:
1287 */
1288 list_add_tail_rcu(&class->hash_entry, hash_head);
1289
1290 if (verbose(class)) {
1291 graph_unlock();
1292 raw_local_irq_restore(flags);
1293
1294 printk("\nnew class %p: %s", class->key, class->name);
1295 if (class->name_version > 1)
1296 printk("#%d", class->name_version);
1297 printk("\n");
1298 dump_stack();
1299
1300 raw_local_irq_save(flags);
1301 if (!graph_lock()) {
1302 raw_local_irq_restore(flags);
1303 return NULL;
1304 }
1305 }
1306out_unlock_set:
1307 graph_unlock();
1308 raw_local_irq_restore(flags);
1309
1310 if (!subclass || force)
1311 lock->class_cache = class;
1312
1313 if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
1314 return NULL;
1315
1316 return class;
1317}
1318 1324
1319#ifdef CONFIG_PROVE_LOCKING
1320/* 1325
1321 * Look up a dependency chain. If the key is not present yet then 1326
1322 * add it and return 1 - in this case the new dependency chain is 1327
@@ -1376,21 +1381,71 @@ cache_hit:
1376 chain->chain_key = chain_key; 1381
1377 list_add_tail_rcu(&chain->entry, hash_head); 1382
1378 debug_atomic_inc(&chain_lookup_misses); 1383
1379#ifdef CONFIG_TRACE_IRQFLAGS 1384 inc_chains();
1380 if (current->hardirq_context) 1385
1381 nr_hardirq_chains++; 1386 return 1;
1382 else { 1387}
1383 if (current->softirq_context) 1388
1384 nr_softirq_chains++; 1389static int validate_chain(struct task_struct *curr, struct lockdep_map *lock,
1385 else 1390 struct held_lock *hlock, int chain_head)
1386 nr_process_chains++; 1391{
1387 } 1392 /*
1388#else 1393 * Trylock needs to maintain the stack of held locks, but it
1389 nr_process_chains++; 1394 * does not add new dependencies, because trylock can be done
1390#endif 1395 * in any order.
1396 *
1397 * We look up the chain_key and do the O(N^2) check and update of
1398 * the dependencies only if this is a new dependency chain.
1399 * (If lookup_chain_cache() returns with 1 it acquires
1400 * graph_lock for us)
1401 */
1402 if (!hlock->trylock && (hlock->check == 2) &&
1403 lookup_chain_cache(curr->curr_chain_key, hlock->class)) {
1404 /*
1405 * Check whether last held lock:
1406 *
1407 * - is irq-safe, if this lock is irq-unsafe
1408 * - is softirq-safe, if this lock is hardirq-unsafe
1409 *
1410 * And check whether the new lock's dependency graph
1411 * could lead back to the previous lock.
1412 *
1413 * any of these scenarios could lead to a deadlock. If
1414 * All validations
1415 */
1416 int ret = check_deadlock(curr, hlock, lock, hlock->read);
1417
1418 if (!ret)
1419 return 0;
1420 /*
1421 * Mark recursive read, as we jump over it when
1422 * building dependencies (just like we jump over
1423 * trylock entries):
1424 */
1425 if (ret == 2)
1426 hlock->read = 2;
1427 /*
1428 * Add dependency only if this lock is not the head
1429 * of the chain, and if it's not a secondary read-lock:
1430 */
1431 if (!chain_head && ret != 2)
1432 if (!check_prevs_add(curr, hlock))
1433 return 0;
1434 graph_unlock();
1435 } else
1436 /* after lookup_chain_cache(): */
1437 if (unlikely(!debug_locks))
1438 return 0;
1391 1439
1392 return 1; 1440
1393} 1441
1442#else
1443static inline int validate_chain(struct task_struct *curr,
1444 struct lockdep_map *lock, struct held_lock *hlock,
1445 int chain_head)
1446{
1447 return 1;
1448}
1394#endif 1449
1395 1450
1396/* 1451
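
validate_chain() above only pays for the full deadlock checks when lookup_chain_cache() sees a chain key for the first time; the key itself is a rolling hash over the held-lock ids, advanced by iterate_chain_key() in __lock_acquire() further down in this diff. A toy sketch of that "rolling key plus seen-cache" idea follows; the mixing function and linear-scan cache are invented for the example and differ from the kernel's in detail.

/* Toy illustration of caching expensive validation behind a rolling chain key. */
#include <stdint.h>
#include <stdio.h>

#define CACHE_SIZE 64

static uint64_t seen[CACHE_SIZE];
static unsigned int nr_seen;

static uint64_t mix_key(uint64_t key, uint64_t id)
{
	key ^= id + 0x9e3779b97f4a7c15ULL;
	key *= 0xff51afd7ed558ccdULL;
	return key ^ (key >> 33);
}

/* returns 1 if this key was validated before, otherwise records it and returns 0 */
static int chain_cached(uint64_t key)
{
	for (unsigned int i = 0; i < nr_seen; i++)
		if (seen[i] == key)
			return 1;
	if (nr_seen < CACHE_SIZE)
		seen[nr_seen++] = key;
	return 0;
}

static void take_locks(const uint64_t *ids, int n)
{
	uint64_t key = 0;

	for (int i = 0; i < n; i++) {
		key = mix_key(key, ids[i]);	/* extend the key per acquired lock */
		if (!chain_cached(key))
			printf("new chain up to lock %llu: run full checks\n",
			       (unsigned long long)ids[i]);
		else
			printf("chain up to lock %llu already validated\n",
			       (unsigned long long)ids[i]);
	}
}

int main(void)
{
	const uint64_t order1[] = { 1, 2, 3 };

	take_locks(order1, 3);
	take_locks(order1, 3);	/* second pass hits the cache throughout */
	return 0;
}
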
@@ -1436,6 +1491,57 @@ static void check_chain_key(struct task_struct *curr)
1436#endif 1491
1437} 1492
1438 1493
1494static int
1495print_usage_bug(struct task_struct *curr, struct held_lock *this,
1496 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1497{
1498 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
1499 return 0;
1500
1501 printk("\n=================================\n");
1502 printk( "[ INFO: inconsistent lock state ]\n");
1503 print_kernel_version();
1504 printk( "---------------------------------\n");
1505
1506 printk("inconsistent {%s} -> {%s} usage.\n",
1507 usage_str[prev_bit], usage_str[new_bit]);
1508
1509 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1510 curr->comm, curr->pid,
1511 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1512 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1513 trace_hardirqs_enabled(curr),
1514 trace_softirqs_enabled(curr));
1515 print_lock(this);
1516
1517 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1518 print_stack_trace(this->class->usage_traces + prev_bit, 1);
1519
1520 print_irqtrace_events(curr);
1521 printk("\nother info that might help us debug this:\n");
1522 lockdep_print_held_locks(curr);
1523
1524 printk("\nstack backtrace:\n");
1525 dump_stack();
1526
1527 return 0;
1528}
1529
1530/*
1531 * Print out an error if an invalid bit is set:
1532 */
1533static inline int
1534valid_state(struct task_struct *curr, struct held_lock *this,
1535 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1536{
1537 if (unlikely(this->class->usage_mask & (1 << bad_bit)))
1538 return print_usage_bug(curr, this, bad_bit, new_bit);
1539 return 1;
1540}
1541
1542static int mark_lock(struct task_struct *curr, struct held_lock *this,
1543 enum lock_usage_bit new_bit);
1544
1439#ifdef CONFIG_TRACE_IRQFLAGS 1545
1440 1546
1441/* 1547
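
valid_state() and print_usage_bug(), moved ahead of the CONFIG_TRACE_IRQFLAGS section in this hunk, rest on a single bitmask test: each class accumulates usage bits in usage_mask, and marking a new bit fails if an incompatible bit was recorded earlier. The following compact standalone sketch shows that style of check; the two bits and their pairing are illustrative, not the full LOCK_* set.

/* Bitmask-based usage-state validation in the style of valid_state(). */
#include <stdio.h>

enum usage_bit {
	USED_IN_HARDIRQ,
	ENABLED_HARDIRQS,
};

struct lock_class {
	const char *name;
	unsigned int usage_mask;
};

static int valid_state(struct lock_class *class,
		       enum usage_bit new_bit, enum usage_bit bad_bit)
{
	if (class->usage_mask & (1U << bad_bit)) {
		printf("inconsistent usage of %s\n", class->name);
		return 0;
	}
	return 1;
}

static int mark_bit(struct lock_class *class, enum usage_bit new_bit)
{
	/* a hardirq-safe use conflicts with hardirq-enabled use, and vice versa */
	enum usage_bit bad_bit =
		(new_bit == USED_IN_HARDIRQ) ? ENABLED_HARDIRQS : USED_IN_HARDIRQ;

	if (!valid_state(class, new_bit, bad_bit))
		return 0;
	class->usage_mask |= 1U << new_bit;	/* record the new usage */
	return 1;
}

int main(void)
{
	struct lock_class c = { "demo_lock", 0 };

	mark_bit(&c, USED_IN_HARDIRQ);		/* ok */
	mark_bit(&c, ENABLED_HARDIRQS);		/* flagged as inconsistent */
	return 0;
}
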
@@ -1529,90 +1635,30 @@ void print_irqtrace_events(struct task_struct *curr)
1529 print_ip_sym(curr->softirq_disable_ip); 1635
1530} 1636
1531 1637
1532#endif 1638static int hardirq_verbose(struct lock_class *class)
1533
1534static int
1535print_usage_bug(struct task_struct *curr, struct held_lock *this,
1536 enum lock_usage_bit prev_bit, enum lock_usage_bit new_bit)
1537{ 1639{
1538 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1640#if HARDIRQ_VERBOSE
1539 return 0; 1641 return class_filter(class);
1540 1642#endif
1541 printk("\n=================================\n");
1542 printk( "[ INFO: inconsistent lock state ]\n");
1543 print_kernel_version();
1544 printk( "---------------------------------\n");
1545
1546 printk("inconsistent {%s} -> {%s} usage.\n",
1547 usage_str[prev_bit], usage_str[new_bit]);
1548
1549 printk("%s/%d [HC%u[%lu]:SC%u[%lu]:HE%u:SE%u] takes:\n",
1550 curr->comm, curr->pid,
1551 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT,
1552 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT,
1553 trace_hardirqs_enabled(curr),
1554 trace_softirqs_enabled(curr));
1555 print_lock(this);
1556
1557 printk("{%s} state was registered at:\n", usage_str[prev_bit]);
1558 print_stack_trace(this->class->usage_traces + prev_bit, 1);
1559
1560 print_irqtrace_events(curr);
1561 printk("\nother info that might help us debug this:\n");
1562 lockdep_print_held_locks(curr);
1563
1564 printk("\nstack backtrace:\n");
1565 dump_stack();
1566
1567 return 0; 1643 return 0;
1568} 1644}
1569 1645
1570/* 1646static int softirq_verbose(struct lock_class *class)
1571 * Print out an error if an invalid bit is set:
1572 */
1573static inline int
1574valid_state(struct task_struct *curr, struct held_lock *this,
1575 enum lock_usage_bit new_bit, enum lock_usage_bit bad_bit)
1576{ 1647{
1577 if (unlikely(this->class->usage_mask & (1 << bad_bit))) 1648#if SOFTIRQ_VERBOSE
1578 return print_usage_bug(curr, this, bad_bit, new_bit); 1649 return class_filter(class);
1579 return 1; 1650#endif
1651 return 0;
1580} 1652}
1581 1653
1582#define STRICT_READ_CHECKS 1 1654
1583 1655
1584/* 1656static int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
1585 * Mark a lock with a usage bit, and validate the state transition: 1657 enum lock_usage_bit new_bit)
1586 */
1587static int mark_lock(struct task_struct *curr, struct held_lock *this,
1588 enum lock_usage_bit new_bit)
1589{ 1658{
1590 unsigned int new_mask = 1 << new_bit, ret = 1; 1659 int ret = 1;
1591
1592 /*
1593 * If already set then do not dirty the cacheline,
1594 * nor do any checks:
1595 */
1596 if (likely(this->class->usage_mask & new_mask))
1597 return 1;
1598 1660
1599 if (!graph_lock()) 1661 switch(new_bit) {
1600 return 0;
1601 /*
1602 * Make sure we didnt race:
1603 */
1604 if (unlikely(this->class->usage_mask & new_mask)) {
1605 graph_unlock();
1606 return 1;
1607 }
1608
1609 this->class->usage_mask |= new_mask;
1610
1611 if (!save_trace(this->class->usage_traces + new_bit))
1612 return 0;
1613
1614 switch (new_bit) {
1615#ifdef CONFIG_TRACE_IRQFLAGS
1616 case LOCK_USED_IN_HARDIRQ: 1662
1617 if (!valid_state(curr, this, new_bit, LOCK_ENABLED_HARDIRQS)) 1663
1618 return 0; 1664
@@ -1771,37 +1817,14 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
1771 if (softirq_verbose(this->class)) 1817
1772 ret = 2; 1818
1773 break; 1819
1774#endif
1775 case LOCK_USED:
1776 /*
1777 * Add it to the global list of classes:
1778 */
1779 list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
1780 debug_atomic_dec(&nr_unused_locks);
1781 break;
1782 default: 1820
1783 if (!debug_locks_off_graph_unlock())
1784 return 0;
1785 WARN_ON(1); 1821
1786 return 0; 1822 break;
1787 }
1788
1789 graph_unlock();
1790
1791 /*
1792 * We must printk outside of the graph_lock:
1793 */
1794 if (ret == 2) {
1795 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
1796 print_lock(this);
1797 print_irqtrace_events(curr);
1798 dump_stack();
1799 } 1823 }
1800 1824
1801 return ret; 1825
1802} 1826
1803 1827
1804#ifdef CONFIG_TRACE_IRQFLAGS
1805/* 1828
1806 * Mark all held locks with a usage bit: 1829
1807 */ 1830
@@ -1984,9 +2007,176 @@ void trace_softirqs_off(unsigned long ip)
1984 debug_atomic_inc(&redundant_softirqs_off); 2007
1985} 2008
1986 2009
2010static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock)
2011{
2012 /*
2013 * If non-trylock use in a hardirq or softirq context, then
2014 * mark the lock as used in these contexts:
2015 */
2016 if (!hlock->trylock) {
2017 if (hlock->read) {
2018 if (curr->hardirq_context)
2019 if (!mark_lock(curr, hlock,
2020 LOCK_USED_IN_HARDIRQ_READ))
2021 return 0;
2022 if (curr->softirq_context)
2023 if (!mark_lock(curr, hlock,
2024 LOCK_USED_IN_SOFTIRQ_READ))
2025 return 0;
2026 } else {
2027 if (curr->hardirq_context)
2028 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2029 return 0;
2030 if (curr->softirq_context)
2031 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2032 return 0;
2033 }
2034 }
2035 if (!hlock->hardirqs_off) {
2036 if (hlock->read) {
2037 if (!mark_lock(curr, hlock,
2038 LOCK_ENABLED_HARDIRQS_READ))
2039 return 0;
2040 if (curr->softirqs_enabled)
2041 if (!mark_lock(curr, hlock,
2042 LOCK_ENABLED_SOFTIRQS_READ))
2043 return 0;
2044 } else {
2045 if (!mark_lock(curr, hlock,
2046 LOCK_ENABLED_HARDIRQS))
2047 return 0;
2048 if (curr->softirqs_enabled)
2049 if (!mark_lock(curr, hlock,
2050 LOCK_ENABLED_SOFTIRQS))
2051 return 0;
2052 }
2053 }
2054
2055 return 1;
2056}
2057
2058static int separate_irq_context(struct task_struct *curr,
2059 struct held_lock *hlock)
2060{
2061 unsigned int depth = curr->lockdep_depth;
2062
2063 /*
2064 * Keep track of points where we cross into an interrupt context:
2065 */
2066 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2067 curr->softirq_context;
2068 if (depth) {
2069 struct held_lock *prev_hlock;
2070
2071 prev_hlock = curr->held_locks + depth-1;
2072 /*
2073 * If we cross into another context, reset the
2074 * hash key (this also prevents the checking and the
2075 * adding of the dependency to 'prev'):
2076 */
2077 if (prev_hlock->irq_context != hlock->irq_context)
2078 return 1;
2079 }
2080 return 0;
2081}
2082
2083#else
2084
2085static inline
2086int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
2087 enum lock_usage_bit new_bit)
2088{
2089 WARN_ON(1);
2090 return 1;
2091}
2092
2093static inline int mark_irqflags(struct task_struct *curr,
2094 struct held_lock *hlock)
2095{
2096 return 1;
2097}
2098
2099static inline int separate_irq_context(struct task_struct *curr,
2100 struct held_lock *hlock)
2101{
2102 return 0;
2103}
2104
1987#endif 2105
1988 2106
1989/* 2107
2108 * Mark a lock with a usage bit, and validate the state transition:
2109 */
2110static int mark_lock(struct task_struct *curr, struct held_lock *this,
2111 enum lock_usage_bit new_bit)
2112{
2113 unsigned int new_mask = 1 << new_bit, ret = 1;
2114
2115 /*
2116 * If already set then do not dirty the cacheline,
2117 * nor do any checks:
2118 */
2119 if (likely(this->class->usage_mask & new_mask))
2120 return 1;
2121
2122 if (!graph_lock())
2123 return 0;
2124 /*
2125 * Make sure we didnt race:
2126 */
2127 if (unlikely(this->class->usage_mask & new_mask)) {
2128 graph_unlock();
2129 return 1;
2130 }
2131
2132 this->class->usage_mask |= new_mask;
2133
2134 if (!save_trace(this->class->usage_traces + new_bit))
2135 return 0;
2136
2137 switch (new_bit) {
2138 case LOCK_USED_IN_HARDIRQ:
2139 case LOCK_USED_IN_SOFTIRQ:
2140 case LOCK_USED_IN_HARDIRQ_READ:
2141 case LOCK_USED_IN_SOFTIRQ_READ:
2142 case LOCK_ENABLED_HARDIRQS:
2143 case LOCK_ENABLED_SOFTIRQS:
2144 case LOCK_ENABLED_HARDIRQS_READ:
2145 case LOCK_ENABLED_SOFTIRQS_READ:
2146 ret = mark_lock_irq(curr, this, new_bit);
2147 if (!ret)
2148 return 0;
2149 break;
2150 case LOCK_USED:
2151 /*
2152 * Add it to the global list of classes:
2153 */
2154 list_add_tail_rcu(&this->class->lock_entry, &all_lock_classes);
2155 debug_atomic_dec(&nr_unused_locks);
2156 break;
2157 default:
2158 if (!debug_locks_off_graph_unlock())
2159 return 0;
2160 WARN_ON(1);
2161 return 0;
2162 }
2163
2164 graph_unlock();
2165
2166 /*
2167 * We must printk outside of the graph_lock:
2168 */
2169 if (ret == 2) {
2170 printk("\nmarked lock as {%s}:\n", usage_str[new_bit]);
2171 print_lock(this);
2172 print_irqtrace_events(curr);
2173 dump_stack();
2174 }
2175
2176 return ret;
2177}
2178
2179/*
1990 * Initialize a lock instance's lock-class mapping info: 2180
1991 */ 2181
1992void lockdep_init_map(struct lockdep_map *lock, const char *name, 2182
@@ -2082,56 +2272,13 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2082 hlock->check = check; 2272
2083 hlock->hardirqs_off = hardirqs_off; 2273
2084 2274
2085 if (check != 2) 2275 if (check == 2 && !mark_irqflags(curr, hlock))
2086 goto out_calc_hash; 2276 return 0;
2087#ifdef CONFIG_TRACE_IRQFLAGS 2277
2088 /*
2089 * If non-trylock use in a hardirq or softirq context, then
2090 * mark the lock as used in these contexts:
2091 */
2092 if (!trylock) {
2093 if (read) {
2094 if (curr->hardirq_context)
2095 if (!mark_lock(curr, hlock,
2096 LOCK_USED_IN_HARDIRQ_READ))
2097 return 0;
2098 if (curr->softirq_context)
2099 if (!mark_lock(curr, hlock,
2100 LOCK_USED_IN_SOFTIRQ_READ))
2101 return 0;
2102 } else {
2103 if (curr->hardirq_context)
2104 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
2105 return 0;
2106 if (curr->softirq_context)
2107 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
2108 return 0;
2109 }
2110 }
2111 if (!hardirqs_off) {
2112 if (read) {
2113 if (!mark_lock(curr, hlock,
2114 LOCK_ENABLED_HARDIRQS_READ))
2115 return 0;
2116 if (curr->softirqs_enabled)
2117 if (!mark_lock(curr, hlock,
2118 LOCK_ENABLED_SOFTIRQS_READ))
2119 return 0;
2120 } else {
2121 if (!mark_lock(curr, hlock,
2122 LOCK_ENABLED_HARDIRQS))
2123 return 0;
2124 if (curr->softirqs_enabled)
2125 if (!mark_lock(curr, hlock,
2126 LOCK_ENABLED_SOFTIRQS))
2127 return 0;
2128 }
2129 }
2130#endif
2131 /* mark it as used: */ 2278
2132 if (!mark_lock(curr, hlock, LOCK_USED)) 2279
2133 return 0; 2280
2134out_calc_hash: 2281
2135 /* 2282
2136 * Calculate the chain hash: it's the combined has of all the 2283 * Calculate the chain hash: it's the combined has of all the
2137 * lock keys along the dependency chain. We save the hash value 2284 * lock keys along the dependency chain. We save the hash value
@@ -2154,77 +2301,15 @@ out_calc_hash:
2154 } 2301
2155 2302
2156 hlock->prev_chain_key = chain_key; 2303
2157 2304 if (separate_irq_context(curr, hlock)) {
2158#ifdef CONFIG_TRACE_IRQFLAGS 2305 chain_key = 0;
2159 /* 2306 chain_head = 1;
2160 * Keep track of points where we cross into an interrupt context:
2161 */
2162 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) +
2163 curr->softirq_context;
2164 if (depth) {
2165 struct held_lock *prev_hlock;
2166
2167 prev_hlock = curr->held_locks + depth-1;
2168 /*
2169 * If we cross into another context, reset the
2170 * hash key (this also prevents the checking and the
2171 * adding of the dependency to 'prev'):
2172 */
2173 if (prev_hlock->irq_context != hlock->irq_context) {
2174 chain_key = 0;
2175 chain_head = 1;
2176 }
2177 } 2307 }
2178#endif
2179 chain_key = iterate_chain_key(chain_key, id); 2308
2180 curr->curr_chain_key = chain_key; 2309
2181 2310
2182 /* 2311 if (!validate_chain(curr, lock, hlock, chain_head))
2183 * Trylock needs to maintain the stack of held locks, but it 2312 return 0;
2184 * does not add new dependencies, because trylock can be done
2185 * in any order.
2186 *
2187 * We look up the chain_key and do the O(N^2) check and update of
2188 * the dependencies only if this is a new dependency chain.
2189 * (If lookup_chain_cache() returns with 1 it acquires
2190 * graph_lock for us)
2191 */
2192 if (!trylock && (check == 2) && lookup_chain_cache(chain_key, class)) {
2193 /*
2194 * Check whether last held lock:
2195 *
2196 * - is irq-safe, if this lock is irq-unsafe
2197 * - is softirq-safe, if this lock is hardirq-unsafe
2198 *
2199 * And check whether the new lock's dependency graph
2200 * could lead back to the previous lock.
2201 *
2202 * any of these scenarios could lead to a deadlock. If
2203 * All validations
2204 */
2205 int ret = check_deadlock(curr, hlock, lock, read);
2206
2207 if (!ret)
2208 return 0;
2209 /*
2210 * Mark recursive read, as we jump over it when
2211 * building dependencies (just like we jump over
2212 * trylock entries):
2213 */
2214 if (ret == 2)
2215 hlock->read = 2;
2216 /*
2217 * Add dependency only if this lock is not the head
2218 * of the chain, and if it's not a secondary read-lock:
2219 */
2220 if (!chain_head && ret != 2)
2221 if (!check_prevs_add(curr, hlock))
2222 return 0;
2223 graph_unlock();
2224 } else
2225 /* after lookup_chain_cache(): */
2226 if (unlikely(!debug_locks))
2227 return 0;
2228 2313
2229 curr->lockdep_depth++; 2314
2230 check_chain_key(curr); 2315