aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/lockdep.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--kernel/lockdep.c795
1 files changed, 538 insertions, 257 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef996c76..3815ac1d58b2 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
42#include <linux/hash.h> 42#include <linux/hash.h>
43#include <linux/ftrace.h> 43#include <linux/ftrace.h>
44#include <linux/stringify.h> 44#include <linux/stringify.h>
45#include <linux/bitops.h>
45 46
46#include <asm/sections.h> 47#include <asm/sections.h>
47 48
@@ -366,11 +367,21 @@ static int save_trace(struct stack_trace *trace)
366 367
367 save_stack_trace(trace); 368 save_stack_trace(trace);
368 369
370 /*
 371 * Some daft arches put -1 at the end to indicate it's a full trace.
372 *
373 * <rant> this is buggy anyway, since it takes a whole extra entry so a
374 * complete trace that maxes out the entries provided will be reported
375 * as incomplete, friggin useless </rant>
376 */
377 if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
378 trace->nr_entries--;
379
369 trace->max_entries = trace->nr_entries; 380 trace->max_entries = trace->nr_entries;
370 381
371 nr_stack_trace_entries += trace->nr_entries; 382 nr_stack_trace_entries += trace->nr_entries;
372 383
373 if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) { 384 if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
374 if (!debug_locks_off_graph_unlock()) 385 if (!debug_locks_off_graph_unlock())
375 return 0; 386 return 0;
376 387
@@ -388,20 +399,6 @@ unsigned int nr_hardirq_chains;
388unsigned int nr_softirq_chains; 399unsigned int nr_softirq_chains;
389unsigned int nr_process_chains; 400unsigned int nr_process_chains;
390unsigned int max_lockdep_depth; 401unsigned int max_lockdep_depth;
391unsigned int max_recursion_depth;
392
393static unsigned int lockdep_dependency_gen_id;
394
395static bool lockdep_dependency_visit(struct lock_class *source,
396 unsigned int depth)
397{
398 if (!depth)
399 lockdep_dependency_gen_id++;
400 if (source->dep_gen_id == lockdep_dependency_gen_id)
401 return true;
402 source->dep_gen_id = lockdep_dependency_gen_id;
403 return false;
404}
405 402
406#ifdef CONFIG_DEBUG_LOCKDEP 403#ifdef CONFIG_DEBUG_LOCKDEP
407/* 404/*
@@ -431,11 +428,8 @@ atomic_t redundant_softirqs_on;
431atomic_t redundant_softirqs_off; 428atomic_t redundant_softirqs_off;
432atomic_t nr_unused_locks; 429atomic_t nr_unused_locks;
433atomic_t nr_cyclic_checks; 430atomic_t nr_cyclic_checks;
434atomic_t nr_cyclic_check_recursions;
435atomic_t nr_find_usage_forwards_checks; 431atomic_t nr_find_usage_forwards_checks;
436atomic_t nr_find_usage_forwards_recursions;
437atomic_t nr_find_usage_backwards_checks; 432atomic_t nr_find_usage_backwards_checks;
438atomic_t nr_find_usage_backwards_recursions;
439#endif 433#endif
440 434
441/* 435/*
@@ -551,58 +545,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
551 } 545 }
552} 546}
553 547
554static void print_lock_class_header(struct lock_class *class, int depth)
555{
556 int bit;
557
558 printk("%*s->", depth, "");
559 print_lock_name(class);
560 printk(" ops: %lu", class->ops);
561 printk(" {\n");
562
563 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
564 if (class->usage_mask & (1 << bit)) {
565 int len = depth;
566
567 len += printk("%*s %s", depth, "", usage_str[bit]);
568 len += printk(" at:\n");
569 print_stack_trace(class->usage_traces + bit, len);
570 }
571 }
572 printk("%*s }\n", depth, "");
573
574 printk("%*s ... key at: ",depth,"");
575 print_ip_sym((unsigned long)class->key);
576}
577
578/*
579 * printk all lock dependencies starting at <entry>:
580 */
581static void __used
582print_lock_dependencies(struct lock_class *class, int depth)
583{
584 struct lock_list *entry;
585
586 if (lockdep_dependency_visit(class, depth))
587 return;
588
589 if (DEBUG_LOCKS_WARN_ON(depth >= 20))
590 return;
591
592 print_lock_class_header(class, depth);
593
594 list_for_each_entry(entry, &class->locks_after, entry) {
595 if (DEBUG_LOCKS_WARN_ON(!entry->class))
596 return;
597
598 print_lock_dependencies(entry->class, depth + 1);
599
600 printk("%*s ... acquired at:\n",depth,"");
601 print_stack_trace(&entry->trace, 2);
602 printk("\n");
603 }
604}
605
606static void print_kernel_version(void) 548static void print_kernel_version(void)
607{ 549{
608 printk("%s %.*s\n", init_utsname()->release, 550 printk("%s %.*s\n", init_utsname()->release,
@@ -636,6 +578,9 @@ static int static_obj(void *obj)
636 if ((addr >= start) && (addr < end)) 578 if ((addr >= start) && (addr < end))
637 return 1; 579 return 1;
638 580
581 if (arch_is_kernel_data(addr))
582 return 1;
583
639#ifdef CONFIG_SMP 584#ifdef CONFIG_SMP
640 /* 585 /*
641 * percpu var? 586 * percpu var?
@@ -898,22 +843,203 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
898} 843}
899 844
900/* 845/*
 846 * For good efficiency of the modulo operation, we use a power of 2
847 */
848#define MAX_CIRCULAR_QUEUE_SIZE 4096UL
849#define CQ_MASK (MAX_CIRCULAR_QUEUE_SIZE-1)
850
851/*
 852 * The circular_queue and helpers are used to implement the
 853 * breadth-first search (BFS) algorithm, by which we can build
 854 * the shortest path from the next lock to be acquired to a
 855 * previously held lock if there is a circular dependency between them.
856 */
857struct circular_queue {
858 unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
859 unsigned int front, rear;
860};
861
862static struct circular_queue lock_cq;
863
864unsigned int max_bfs_queue_depth;
865
866static unsigned int lockdep_dependency_gen_id;
867
868static inline void __cq_init(struct circular_queue *cq)
869{
870 cq->front = cq->rear = 0;
871 lockdep_dependency_gen_id++;
872}
873
874static inline int __cq_empty(struct circular_queue *cq)
875{
876 return (cq->front == cq->rear);
877}
878
879static inline int __cq_full(struct circular_queue *cq)
880{
881 return ((cq->rear + 1) & CQ_MASK) == cq->front;
882}
883
884static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
885{
886 if (__cq_full(cq))
887 return -1;
888
889 cq->element[cq->rear] = elem;
890 cq->rear = (cq->rear + 1) & CQ_MASK;
891 return 0;
892}
893
894static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
895{
896 if (__cq_empty(cq))
897 return -1;
898
899 *elem = cq->element[cq->front];
900 cq->front = (cq->front + 1) & CQ_MASK;
901 return 0;
902}
903
904static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
905{
906 return (cq->rear - cq->front) & CQ_MASK;
907}
908
909static inline void mark_lock_accessed(struct lock_list *lock,
910 struct lock_list *parent)
911{
912 unsigned long nr;
913
914 nr = lock - list_entries;
915 WARN_ON(nr >= nr_list_entries);
916 lock->parent = parent;
917 lock->class->dep_gen_id = lockdep_dependency_gen_id;
918}
919
920static inline unsigned long lock_accessed(struct lock_list *lock)
921{
922 unsigned long nr;
923
924 nr = lock - list_entries;
925 WARN_ON(nr >= nr_list_entries);
926 return lock->class->dep_gen_id == lockdep_dependency_gen_id;
927}
928
929static inline struct lock_list *get_lock_parent(struct lock_list *child)
930{
931 return child->parent;
932}
933
934static inline int get_lock_depth(struct lock_list *child)
935{
936 int depth = 0;
937 struct lock_list *parent;
938
939 while ((parent = get_lock_parent(child))) {
940 child = parent;
941 depth++;
942 }
943 return depth;
944}
945
946static int __bfs(struct lock_list *source_entry,
947 void *data,
948 int (*match)(struct lock_list *entry, void *data),
949 struct lock_list **target_entry,
950 int forward)
951{
952 struct lock_list *entry;
953 struct list_head *head;
954 struct circular_queue *cq = &lock_cq;
955 int ret = 1;
956
957 if (match(source_entry, data)) {
958 *target_entry = source_entry;
959 ret = 0;
960 goto exit;
961 }
962
963 if (forward)
964 head = &source_entry->class->locks_after;
965 else
966 head = &source_entry->class->locks_before;
967
968 if (list_empty(head))
969 goto exit;
970
971 __cq_init(cq);
972 __cq_enqueue(cq, (unsigned long)source_entry);
973
974 while (!__cq_empty(cq)) {
975 struct lock_list *lock;
976
977 __cq_dequeue(cq, (unsigned long *)&lock);
978
979 if (!lock->class) {
980 ret = -2;
981 goto exit;
982 }
983
984 if (forward)
985 head = &lock->class->locks_after;
986 else
987 head = &lock->class->locks_before;
988
989 list_for_each_entry(entry, head, entry) {
990 if (!lock_accessed(entry)) {
991 unsigned int cq_depth;
992 mark_lock_accessed(entry, lock);
993 if (match(entry, data)) {
994 *target_entry = entry;
995 ret = 0;
996 goto exit;
997 }
998
999 if (__cq_enqueue(cq, (unsigned long)entry)) {
1000 ret = -1;
1001 goto exit;
1002 }
1003 cq_depth = __cq_get_elem_count(cq);
1004 if (max_bfs_queue_depth < cq_depth)
1005 max_bfs_queue_depth = cq_depth;
1006 }
1007 }
1008 }
1009exit:
1010 return ret;
1011}
1012
1013static inline int __bfs_forwards(struct lock_list *src_entry,
1014 void *data,
1015 int (*match)(struct lock_list *entry, void *data),
1016 struct lock_list **target_entry)
1017{
1018 return __bfs(src_entry, data, match, target_entry, 1);
1019
1020}
1021
1022static inline int __bfs_backwards(struct lock_list *src_entry,
1023 void *data,
1024 int (*match)(struct lock_list *entry, void *data),
1025 struct lock_list **target_entry)
1026{
1027 return __bfs(src_entry, data, match, target_entry, 0);
1028
1029}
1030
1031/*
901 * Recursive, forwards-direction lock-dependency checking, used for 1032 * Recursive, forwards-direction lock-dependency checking, used for
902 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe 1033 * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
903 * checking. 1034 * checking.
904 *
905 * (to keep the stackframe of the recursive functions small we
906 * use these global variables, and we also mark various helper
907 * functions as noinline.)
908 */ 1035 */
909static struct held_lock *check_source, *check_target;
910 1036
911/* 1037/*
912 * Print a dependency chain entry (this is only done when a deadlock 1038 * Print a dependency chain entry (this is only done when a deadlock
913 * has been detected): 1039 * has been detected):
914 */ 1040 */
915static noinline int 1041static noinline int
916print_circular_bug_entry(struct lock_list *target, unsigned int depth) 1042print_circular_bug_entry(struct lock_list *target, int depth)
917{ 1043{
918 if (debug_locks_silent) 1044 if (debug_locks_silent)
919 return 0; 1045 return 0;
@@ -930,11 +1056,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
930 * header first: 1056 * header first:
931 */ 1057 */
932static noinline int 1058static noinline int
933print_circular_bug_header(struct lock_list *entry, unsigned int depth) 1059print_circular_bug_header(struct lock_list *entry, unsigned int depth,
1060 struct held_lock *check_src,
1061 struct held_lock *check_tgt)
934{ 1062{
935 struct task_struct *curr = current; 1063 struct task_struct *curr = current;
936 1064
937 if (!debug_locks_off_graph_unlock() || debug_locks_silent) 1065 if (debug_locks_silent)
938 return 0; 1066 return 0;
939 1067
940 printk("\n=======================================================\n"); 1068 printk("\n=======================================================\n");
@@ -943,9 +1071,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
943 printk( "-------------------------------------------------------\n"); 1071 printk( "-------------------------------------------------------\n");
944 printk("%s/%d is trying to acquire lock:\n", 1072 printk("%s/%d is trying to acquire lock:\n",
945 curr->comm, task_pid_nr(curr)); 1073 curr->comm, task_pid_nr(curr));
946 print_lock(check_source); 1074 print_lock(check_src);
947 printk("\nbut task is already holding lock:\n"); 1075 printk("\nbut task is already holding lock:\n");
948 print_lock(check_target); 1076 print_lock(check_tgt);
949 printk("\nwhich lock already depends on the new lock.\n\n"); 1077 printk("\nwhich lock already depends on the new lock.\n\n");
950 printk("\nthe existing dependency chain (in reverse order) is:\n"); 1078 printk("\nthe existing dependency chain (in reverse order) is:\n");
951 1079
@@ -954,19 +1082,36 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
954 return 0; 1082 return 0;
955} 1083}
956 1084
957static noinline int print_circular_bug_tail(void) 1085static inline int class_equal(struct lock_list *entry, void *data)
1086{
1087 return entry->class == data;
1088}
1089
1090static noinline int print_circular_bug(struct lock_list *this,
1091 struct lock_list *target,
1092 struct held_lock *check_src,
1093 struct held_lock *check_tgt)
958{ 1094{
959 struct task_struct *curr = current; 1095 struct task_struct *curr = current;
960 struct lock_list this; 1096 struct lock_list *parent;
1097 int depth;
961 1098
962 if (debug_locks_silent) 1099 if (!debug_locks_off_graph_unlock() || debug_locks_silent)
963 return 0; 1100 return 0;
964 1101
965 this.class = hlock_class(check_source); 1102 if (!save_trace(&this->trace))
966 if (!save_trace(&this.trace))
967 return 0; 1103 return 0;
968 1104
969 print_circular_bug_entry(&this, 0); 1105 depth = get_lock_depth(target);
1106
1107 print_circular_bug_header(target, depth, check_src, check_tgt);
1108
1109 parent = get_lock_parent(target);
1110
1111 while (parent) {
1112 print_circular_bug_entry(parent, --depth);
1113 parent = get_lock_parent(parent);
1114 }
970 1115
971 printk("\nother info that might help us debug this:\n\n"); 1116 printk("\nother info that might help us debug this:\n\n");
972 lockdep_print_held_locks(curr); 1117 lockdep_print_held_locks(curr);
@@ -977,73 +1122,69 @@ static noinline int print_circular_bug_tail(void)
977 return 0; 1122 return 0;
978} 1123}
979 1124
980#define RECURSION_LIMIT 40 1125static noinline int print_bfs_bug(int ret)
981
982static int noinline print_infinite_recursion_bug(void)
983{ 1126{
984 if (!debug_locks_off_graph_unlock()) 1127 if (!debug_locks_off_graph_unlock())
985 return 0; 1128 return 0;
986 1129
987 WARN_ON(1); 1130 WARN(1, "lockdep bfs error:%d\n", ret);
988 1131
989 return 0; 1132 return 0;
990} 1133}
991 1134
992unsigned long __lockdep_count_forward_deps(struct lock_class *class, 1135static int noop_count(struct lock_list *entry, void *data)
993 unsigned int depth)
994{ 1136{
995 struct lock_list *entry; 1137 (*(unsigned long *)data)++;
996 unsigned long ret = 1; 1138 return 0;
1139}
997 1140
998 if (lockdep_dependency_visit(class, depth)) 1141unsigned long __lockdep_count_forward_deps(struct lock_list *this)
999 return 0; 1142{
1143 unsigned long count = 0;
1144 struct lock_list *uninitialized_var(target_entry);
1000 1145
1001 /* 1146 __bfs_forwards(this, (void *)&count, noop_count, &target_entry);
1002 * Recurse this class's dependency list:
1003 */
1004 list_for_each_entry(entry, &class->locks_after, entry)
1005 ret += __lockdep_count_forward_deps(entry->class, depth + 1);
1006 1147
1007 return ret; 1148 return count;
1008} 1149}
1009
1010unsigned long lockdep_count_forward_deps(struct lock_class *class) 1150unsigned long lockdep_count_forward_deps(struct lock_class *class)
1011{ 1151{
1012 unsigned long ret, flags; 1152 unsigned long ret, flags;
1153 struct lock_list this;
1154
1155 this.parent = NULL;
1156 this.class = class;
1013 1157
1014 local_irq_save(flags); 1158 local_irq_save(flags);
1015 __raw_spin_lock(&lockdep_lock); 1159 __raw_spin_lock(&lockdep_lock);
1016 ret = __lockdep_count_forward_deps(class, 0); 1160 ret = __lockdep_count_forward_deps(&this);
1017 __raw_spin_unlock(&lockdep_lock); 1161 __raw_spin_unlock(&lockdep_lock);
1018 local_irq_restore(flags); 1162 local_irq_restore(flags);
1019 1163
1020 return ret; 1164 return ret;
1021} 1165}
1022 1166
1023unsigned long __lockdep_count_backward_deps(struct lock_class *class, 1167unsigned long __lockdep_count_backward_deps(struct lock_list *this)
1024 unsigned int depth)
1025{ 1168{
1026 struct lock_list *entry; 1169 unsigned long count = 0;
1027 unsigned long ret = 1; 1170 struct lock_list *uninitialized_var(target_entry);
1028 1171
1029 if (lockdep_dependency_visit(class, depth)) 1172 __bfs_backwards(this, (void *)&count, noop_count, &target_entry);
1030 return 0;
1031 /*
1032 * Recurse this class's dependency list:
1033 */
1034 list_for_each_entry(entry, &class->locks_before, entry)
1035 ret += __lockdep_count_backward_deps(entry->class, depth + 1);
1036 1173
1037 return ret; 1174 return count;
1038} 1175}
1039 1176
1040unsigned long lockdep_count_backward_deps(struct lock_class *class) 1177unsigned long lockdep_count_backward_deps(struct lock_class *class)
1041{ 1178{
1042 unsigned long ret, flags; 1179 unsigned long ret, flags;
1180 struct lock_list this;
1181
1182 this.parent = NULL;
1183 this.class = class;
1043 1184
1044 local_irq_save(flags); 1185 local_irq_save(flags);
1045 __raw_spin_lock(&lockdep_lock); 1186 __raw_spin_lock(&lockdep_lock);
1046 ret = __lockdep_count_backward_deps(class, 0); 1187 ret = __lockdep_count_backward_deps(&this);
1047 __raw_spin_unlock(&lockdep_lock); 1188 __raw_spin_unlock(&lockdep_lock);
1048 local_irq_restore(flags); 1189 local_irq_restore(flags);
1049 1190
@@ -1055,29 +1196,16 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
1055 * lead to <target>. Print an error and return 0 if it does. 1196 * lead to <target>. Print an error and return 0 if it does.
1056 */ 1197 */
1057static noinline int 1198static noinline int
1058check_noncircular(struct lock_class *source, unsigned int depth) 1199check_noncircular(struct lock_list *root, struct lock_class *target,
1200 struct lock_list **target_entry)
1059{ 1201{
1060 struct lock_list *entry; 1202 int result;
1061 1203
1062 if (lockdep_dependency_visit(source, depth)) 1204 debug_atomic_inc(&nr_cyclic_checks);
1063 return 1;
1064 1205
1065 debug_atomic_inc(&nr_cyclic_check_recursions); 1206 result = __bfs_forwards(root, target, class_equal, target_entry);
1066 if (depth > max_recursion_depth) 1207
1067 max_recursion_depth = depth; 1208 return result;
1068 if (depth >= RECURSION_LIMIT)
1069 return print_infinite_recursion_bug();
1070 /*
1071 * Check this lock's dependency list:
1072 */
1073 list_for_each_entry(entry, &source->locks_after, entry) {
1074 if (entry->class == hlock_class(check_target))
1075 return print_circular_bug_header(entry, depth+1);
1076 debug_atomic_inc(&nr_cyclic_checks);
1077 if (!check_noncircular(entry->class, depth+1))
1078 return print_circular_bug_entry(entry, depth+1);
1079 }
1080 return 1;
1081} 1209}
1082 1210
1083#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) 1211#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
@@ -1086,103 +1214,121 @@ check_noncircular(struct lock_class *source, unsigned int depth)
1086 * proving that two subgraphs can be connected by a new dependency 1214 * proving that two subgraphs can be connected by a new dependency
1087 * without creating any illegal irq-safe -> irq-unsafe lock dependency. 1215 * without creating any illegal irq-safe -> irq-unsafe lock dependency.
1088 */ 1216 */
1089static enum lock_usage_bit find_usage_bit; 1217
1090static struct lock_class *forwards_match, *backwards_match; 1218static inline int usage_match(struct lock_list *entry, void *bit)
1219{
1220 return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
1221}
1222
1223
1091 1224
1092/* 1225/*
1093 * Find a node in the forwards-direction dependency sub-graph starting 1226 * Find a node in the forwards-direction dependency sub-graph starting
1094 * at <source> that matches <find_usage_bit>. 1227 * at @root->class that matches @bit.
1095 * 1228 *
1096 * Return 2 if such a node exists in the subgraph, and put that node 1229 * Return 0 if such a node exists in the subgraph, and put that node
1097 * into <forwards_match>. 1230 * into *@target_entry.
1098 * 1231 *
1099 * Return 1 otherwise and keep <forwards_match> unchanged. 1232 * Return 1 otherwise and keep *@target_entry unchanged.
1100 * Return 0 on error. 1233 * Return <0 on error.
1101 */ 1234 */
1102static noinline int 1235static int
1103find_usage_forwards(struct lock_class *source, unsigned int depth) 1236find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
1237 struct lock_list **target_entry)
1104{ 1238{
1105 struct lock_list *entry; 1239 int result;
1106 int ret;
1107
1108 if (lockdep_dependency_visit(source, depth))
1109 return 1;
1110
1111 if (depth > max_recursion_depth)
1112 max_recursion_depth = depth;
1113 if (depth >= RECURSION_LIMIT)
1114 return print_infinite_recursion_bug();
1115 1240
1116 debug_atomic_inc(&nr_find_usage_forwards_checks); 1241 debug_atomic_inc(&nr_find_usage_forwards_checks);
1117 if (source->usage_mask & (1 << find_usage_bit)) {
1118 forwards_match = source;
1119 return 2;
1120 }
1121 1242
1122 /* 1243 result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
1123 * Check this lock's dependency list: 1244
1124 */ 1245 return result;
1125 list_for_each_entry(entry, &source->locks_after, entry) {
1126 debug_atomic_inc(&nr_find_usage_forwards_recursions);
1127 ret = find_usage_forwards(entry->class, depth+1);
1128 if (ret == 2 || ret == 0)
1129 return ret;
1130 }
1131 return 1;
1132} 1246}
1133 1247
1134/* 1248/*
1135 * Find a node in the backwards-direction dependency sub-graph starting 1249 * Find a node in the backwards-direction dependency sub-graph starting
1136 * at <source> that matches <find_usage_bit>. 1250 * at @root->class that matches @bit.
1137 * 1251 *
1138 * Return 2 if such a node exists in the subgraph, and put that node 1252 * Return 0 if such a node exists in the subgraph, and put that node
1139 * into <backwards_match>. 1253 * into *@target_entry.
1140 * 1254 *
1141 * Return 1 otherwise and keep <backwards_match> unchanged. 1255 * Return 1 otherwise and keep *@target_entry unchanged.
1142 * Return 0 on error. 1256 * Return <0 on error.
1143 */ 1257 */
1144static noinline int 1258static int
1145find_usage_backwards(struct lock_class *source, unsigned int depth) 1259find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
1260 struct lock_list **target_entry)
1146{ 1261{
1147 struct lock_list *entry; 1262 int result;
1148 int ret;
1149 1263
1150 if (lockdep_dependency_visit(source, depth)) 1264 debug_atomic_inc(&nr_find_usage_backwards_checks);
1151 return 1;
1152 1265
1153 if (!__raw_spin_is_locked(&lockdep_lock)) 1266 result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
1154 return DEBUG_LOCKS_WARN_ON(1);
1155 1267
1156 if (depth > max_recursion_depth) 1268 return result;
1157 max_recursion_depth = depth; 1269}
1158 if (depth >= RECURSION_LIMIT)
1159 return print_infinite_recursion_bug();
1160 1270
1161 debug_atomic_inc(&nr_find_usage_backwards_checks); 1271static void print_lock_class_header(struct lock_class *class, int depth)
1162 if (source->usage_mask & (1 << find_usage_bit)) { 1272{
1163 backwards_match = source; 1273 int bit;
1164 return 2;
1165 }
1166 1274
1167 if (!source && debug_locks_off_graph_unlock()) { 1275 printk("%*s->", depth, "");
1168 WARN_ON(1); 1276 print_lock_name(class);
1169 return 0; 1277 printk(" ops: %lu", class->ops);
1170 } 1278 printk(" {\n");
1171 1279
1172 /* 1280 for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
1173 * Check this lock's dependency list: 1281 if (class->usage_mask & (1 << bit)) {
1174 */ 1282 int len = depth;
1175 list_for_each_entry(entry, &source->locks_before, entry) { 1283
1176 debug_atomic_inc(&nr_find_usage_backwards_recursions); 1284 len += printk("%*s %s", depth, "", usage_str[bit]);
1177 ret = find_usage_backwards(entry->class, depth+1); 1285 len += printk(" at:\n");
1178 if (ret == 2 || ret == 0) 1286 print_stack_trace(class->usage_traces + bit, len);
1179 return ret; 1287 }
1180 } 1288 }
1181 return 1; 1289 printk("%*s }\n", depth, "");
1290
1291 printk("%*s ... key at: ",depth,"");
1292 print_ip_sym((unsigned long)class->key);
1293}
1294
1295/*
1296 * printk the shortest lock dependencies from @start to @end in reverse order:
1297 */
1298static void __used
1299print_shortest_lock_dependencies(struct lock_list *leaf,
1300 struct lock_list *root)
1301{
1302 struct lock_list *entry = leaf;
1303 int depth;
1304
 1305 /* compute the depth from the tree generated by BFS */
1306 depth = get_lock_depth(leaf);
1307
1308 do {
1309 print_lock_class_header(entry->class, depth);
1310 printk("%*s ... acquired at:\n", depth, "");
1311 print_stack_trace(&entry->trace, 2);
1312 printk("\n");
1313
1314 if (depth == 0 && (entry != root)) {
1315 printk("lockdep:%s bad BFS generated tree\n", __func__);
1316 break;
1317 }
1318
1319 entry = get_lock_parent(entry);
1320 depth--;
1321 } while (entry && (depth >= 0));
1322
1323 return;
1182} 1324}
1183 1325
1184static int 1326static int
1185print_bad_irq_dependency(struct task_struct *curr, 1327print_bad_irq_dependency(struct task_struct *curr,
1328 struct lock_list *prev_root,
1329 struct lock_list *next_root,
1330 struct lock_list *backwards_entry,
1331 struct lock_list *forwards_entry,
1186 struct held_lock *prev, 1332 struct held_lock *prev,
1187 struct held_lock *next, 1333 struct held_lock *next,
1188 enum lock_usage_bit bit1, 1334 enum lock_usage_bit bit1,
@@ -1215,26 +1361,32 @@ print_bad_irq_dependency(struct task_struct *curr,
1215 1361
1216 printk("\nbut this new dependency connects a %s-irq-safe lock:\n", 1362 printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
1217 irqclass); 1363 irqclass);
1218 print_lock_name(backwards_match); 1364 print_lock_name(backwards_entry->class);
1219 printk("\n... which became %s-irq-safe at:\n", irqclass); 1365 printk("\n... which became %s-irq-safe at:\n", irqclass);
1220 1366
1221 print_stack_trace(backwards_match->usage_traces + bit1, 1); 1367 print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
1222 1368
1223 printk("\nto a %s-irq-unsafe lock:\n", irqclass); 1369 printk("\nto a %s-irq-unsafe lock:\n", irqclass);
1224 print_lock_name(forwards_match); 1370 print_lock_name(forwards_entry->class);
1225 printk("\n... which became %s-irq-unsafe at:\n", irqclass); 1371 printk("\n... which became %s-irq-unsafe at:\n", irqclass);
1226 printk("..."); 1372 printk("...");
1227 1373
1228 print_stack_trace(forwards_match->usage_traces + bit2, 1); 1374 print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
1229 1375
1230 printk("\nother info that might help us debug this:\n\n"); 1376 printk("\nother info that might help us debug this:\n\n");
1231 lockdep_print_held_locks(curr); 1377 lockdep_print_held_locks(curr);
1232 1378
1233 printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass); 1379 printk("\nthe dependencies between %s-irq-safe lock", irqclass);
1234 print_lock_dependencies(backwards_match, 0); 1380 printk(" and the holding lock:\n");
1381 if (!save_trace(&prev_root->trace))
1382 return 0;
1383 print_shortest_lock_dependencies(backwards_entry, prev_root);
1235 1384
1236 printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass); 1385 printk("\nthe dependencies between the lock to be acquired");
1237 print_lock_dependencies(forwards_match, 0); 1386 printk(" and %s-irq-unsafe lock:\n", irqclass);
1387 if (!save_trace(&next_root->trace))
1388 return 0;
1389 print_shortest_lock_dependencies(forwards_entry, next_root);
1238 1390
1239 printk("\nstack backtrace:\n"); 1391 printk("\nstack backtrace:\n");
1240 dump_stack(); 1392 dump_stack();
@@ -1248,19 +1400,30 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
1248 enum lock_usage_bit bit_forwards, const char *irqclass) 1400 enum lock_usage_bit bit_forwards, const char *irqclass)
1249{ 1401{
1250 int ret; 1402 int ret;
1403 struct lock_list this, that;
1404 struct lock_list *uninitialized_var(target_entry);
1405 struct lock_list *uninitialized_var(target_entry1);
1406
1407 this.parent = NULL;
1251 1408
1252 find_usage_bit = bit_backwards; 1409 this.class = hlock_class(prev);
1253 /* fills in <backwards_match> */ 1410 ret = find_usage_backwards(&this, bit_backwards, &target_entry);
1254 ret = find_usage_backwards(hlock_class(prev), 0); 1411 if (ret < 0)
1255 if (!ret || ret == 1) 1412 return print_bfs_bug(ret);
1413 if (ret == 1)
1256 return ret; 1414 return ret;
1257 1415
1258 find_usage_bit = bit_forwards; 1416 that.parent = NULL;
1259 ret = find_usage_forwards(hlock_class(next), 0); 1417 that.class = hlock_class(next);
1260 if (!ret || ret == 1) 1418 ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
1419 if (ret < 0)
1420 return print_bfs_bug(ret);
1421 if (ret == 1)
1261 return ret; 1422 return ret;
1262 /* ret == 2 */ 1423
1263 return print_bad_irq_dependency(curr, prev, next, 1424 return print_bad_irq_dependency(curr, &this, &that,
1425 target_entry, target_entry1,
1426 prev, next,
1264 bit_backwards, bit_forwards, irqclass); 1427 bit_backwards, bit_forwards, irqclass);
1265} 1428}
1266 1429
@@ -1472,6 +1635,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1472{ 1635{
1473 struct lock_list *entry; 1636 struct lock_list *entry;
1474 int ret; 1637 int ret;
1638 struct lock_list this;
1639 struct lock_list *uninitialized_var(target_entry);
1475 1640
1476 /* 1641 /*
1477 * Prove that the new <prev> -> <next> dependency would not 1642 * Prove that the new <prev> -> <next> dependency would not
@@ -1482,10 +1647,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
1482 * We are using global variables to control the recursion, to 1647 * We are using global variables to control the recursion, to
1483 * keep the stackframe size of the recursive functions low: 1648 * keep the stackframe size of the recursive functions low:
1484 */ 1649 */
1485 check_source = next; 1650 this.class = hlock_class(next);
1486 check_target = prev; 1651 this.parent = NULL;
1487 if (!(check_noncircular(hlock_class(next), 0))) 1652 ret = check_noncircular(&this, hlock_class(prev), &target_entry);
1488 return print_circular_bug_tail(); 1653 if (unlikely(!ret))
1654 return print_circular_bug(&this, target_entry, next, prev);
1655 else if (unlikely(ret < 0))
1656 return print_bfs_bug(ret);
1489 1657
1490 if (!check_prev_add_irq(curr, prev, next)) 1658 if (!check_prev_add_irq(curr, prev, next))
1491 return 0; 1659 return 0;
@@ -1884,7 +2052,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
1884 * print irq inversion bug: 2052 * print irq inversion bug:
1885 */ 2053 */
1886static int 2054static int
1887print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other, 2055print_irq_inversion_bug(struct task_struct *curr,
2056 struct lock_list *root, struct lock_list *other,
1888 struct held_lock *this, int forwards, 2057 struct held_lock *this, int forwards,
1889 const char *irqclass) 2058 const char *irqclass)
1890{ 2059{
@@ -1902,17 +2071,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
1902 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass); 2071 printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
1903 else 2072 else
1904 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass); 2073 printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
1905 print_lock_name(other); 2074 print_lock_name(other->class);
1906 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n"); 2075 printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
1907 2076
1908 printk("\nother info that might help us debug this:\n"); 2077 printk("\nother info that might help us debug this:\n");
1909 lockdep_print_held_locks(curr); 2078 lockdep_print_held_locks(curr);
1910 2079
1911 printk("\nthe first lock's dependencies:\n"); 2080 printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
1912 print_lock_dependencies(hlock_class(this), 0); 2081 if (!save_trace(&root->trace))
1913 2082 return 0;
1914 printk("\nthe second lock's dependencies:\n"); 2083 print_shortest_lock_dependencies(other, root);
1915 print_lock_dependencies(other, 0);
1916 2084
1917 printk("\nstack backtrace:\n"); 2085 printk("\nstack backtrace:\n");
1918 dump_stack(); 2086 dump_stack();
@@ -1929,14 +2097,19 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
1929 enum lock_usage_bit bit, const char *irqclass) 2097 enum lock_usage_bit bit, const char *irqclass)
1930{ 2098{
1931 int ret; 2099 int ret;
1932 2100 struct lock_list root;
1933 find_usage_bit = bit; 2101 struct lock_list *uninitialized_var(target_entry);
1934 /* fills in <forwards_match> */ 2102
1935 ret = find_usage_forwards(hlock_class(this), 0); 2103 root.parent = NULL;
1936 if (!ret || ret == 1) 2104 root.class = hlock_class(this);
2105 ret = find_usage_forwards(&root, bit, &target_entry);
2106 if (ret < 0)
2107 return print_bfs_bug(ret);
2108 if (ret == 1)
1937 return ret; 2109 return ret;
1938 2110
1939 return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass); 2111 return print_irq_inversion_bug(curr, &root, target_entry,
2112 this, 1, irqclass);
1940} 2113}
1941 2114
1942/* 2115/*
@@ -1948,14 +2121,19 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
1948 enum lock_usage_bit bit, const char *irqclass) 2121 enum lock_usage_bit bit, const char *irqclass)
1949{ 2122{
1950 int ret; 2123 int ret;
1951 2124 struct lock_list root;
1952 find_usage_bit = bit; 2125 struct lock_list *uninitialized_var(target_entry);
1953 /* fills in <backwards_match> */ 2126
1954 ret = find_usage_backwards(hlock_class(this), 0); 2127 root.parent = NULL;
1955 if (!ret || ret == 1) 2128 root.class = hlock_class(this);
2129 ret = find_usage_backwards(&root, bit, &target_entry);
2130 if (ret < 0)
2131 return print_bfs_bug(ret);
2132 if (ret == 1)
1956 return ret; 2133 return ret;
1957 2134
1958 return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass); 2135 return print_irq_inversion_bug(curr, &root, target_entry,
 2136 this, 0, irqclass);
1959} 2137}
1960 2138
1961void print_irqtrace_events(struct task_struct *curr) 2139void print_irqtrace_events(struct task_struct *curr)
@@ -2530,13 +2708,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
2530 */ 2708 */
2531static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass, 2709static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2532 int trylock, int read, int check, int hardirqs_off, 2710 int trylock, int read, int check, int hardirqs_off,
2533 struct lockdep_map *nest_lock, unsigned long ip) 2711 struct lockdep_map *nest_lock, unsigned long ip,
2712 int references)
2534{ 2713{
2535 struct task_struct *curr = current; 2714 struct task_struct *curr = current;
2536 struct lock_class *class = NULL; 2715 struct lock_class *class = NULL;
2537 struct held_lock *hlock; 2716 struct held_lock *hlock;
2538 unsigned int depth, id; 2717 unsigned int depth, id;
2539 int chain_head = 0; 2718 int chain_head = 0;
2719 int class_idx;
2540 u64 chain_key; 2720 u64 chain_key;
2541 2721
2542 if (!prove_locking) 2722 if (!prove_locking)
@@ -2584,10 +2764,24 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2584 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH)) 2764 if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
2585 return 0; 2765 return 0;
2586 2766
2767 class_idx = class - lock_classes + 1;
2768
2769 if (depth) {
2770 hlock = curr->held_locks + depth - 1;
2771 if (hlock->class_idx == class_idx && nest_lock) {
2772 if (hlock->references)
2773 hlock->references++;
2774 else
2775 hlock->references = 2;
2776
2777 return 1;
2778 }
2779 }
2780
2587 hlock = curr->held_locks + depth; 2781 hlock = curr->held_locks + depth;
2588 if (DEBUG_LOCKS_WARN_ON(!class)) 2782 if (DEBUG_LOCKS_WARN_ON(!class))
2589 return 0; 2783 return 0;
2590 hlock->class_idx = class - lock_classes + 1; 2784 hlock->class_idx = class_idx;
2591 hlock->acquire_ip = ip; 2785 hlock->acquire_ip = ip;
2592 hlock->instance = lock; 2786 hlock->instance = lock;
2593 hlock->nest_lock = nest_lock; 2787 hlock->nest_lock = nest_lock;
@@ -2595,6 +2789,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2595 hlock->read = read; 2789 hlock->read = read;
2596 hlock->check = check; 2790 hlock->check = check;
2597 hlock->hardirqs_off = !!hardirqs_off; 2791 hlock->hardirqs_off = !!hardirqs_off;
2792 hlock->references = references;
2598#ifdef CONFIG_LOCK_STAT 2793#ifdef CONFIG_LOCK_STAT
2599 hlock->waittime_stamp = 0; 2794 hlock->waittime_stamp = 0;
2600 hlock->holdtime_stamp = sched_clock(); 2795 hlock->holdtime_stamp = sched_clock();
@@ -2703,6 +2898,30 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
2703 return 1; 2898 return 1;
2704} 2899}
2705 2900
2901static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
2902{
2903 if (hlock->instance == lock)
2904 return 1;
2905
2906 if (hlock->references) {
2907 struct lock_class *class = lock->class_cache;
2908
2909 if (!class)
2910 class = look_up_lock_class(lock, 0);
2911
2912 if (DEBUG_LOCKS_WARN_ON(!class))
2913 return 0;
2914
2915 if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
2916 return 0;
2917
2918 if (hlock->class_idx == class - lock_classes + 1)
2919 return 1;
2920 }
2921
2922 return 0;
2923}
2924
2706static int 2925static int
2707__lock_set_class(struct lockdep_map *lock, const char *name, 2926__lock_set_class(struct lockdep_map *lock, const char *name,
2708 struct lock_class_key *key, unsigned int subclass, 2927 struct lock_class_key *key, unsigned int subclass,
@@ -2726,7 +2945,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
2726 */ 2945 */
2727 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 2946 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2728 break; 2947 break;
2729 if (hlock->instance == lock) 2948 if (match_held_lock(hlock, lock))
2730 goto found_it; 2949 goto found_it;
2731 prev_hlock = hlock; 2950 prev_hlock = hlock;
2732 } 2951 }
@@ -2745,7 +2964,8 @@ found_it:
2745 if (!__lock_acquire(hlock->instance, 2964 if (!__lock_acquire(hlock->instance,
2746 hlock_class(hlock)->subclass, hlock->trylock, 2965 hlock_class(hlock)->subclass, hlock->trylock,
2747 hlock->read, hlock->check, hlock->hardirqs_off, 2966 hlock->read, hlock->check, hlock->hardirqs_off,
2748 hlock->nest_lock, hlock->acquire_ip)) 2967 hlock->nest_lock, hlock->acquire_ip,
2968 hlock->references))
2749 return 0; 2969 return 0;
2750 } 2970 }
2751 2971
@@ -2784,20 +3004,34 @@ lock_release_non_nested(struct task_struct *curr,
2784 */ 3004 */
2785 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3005 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
2786 break; 3006 break;
2787 if (hlock->instance == lock) 3007 if (match_held_lock(hlock, lock))
2788 goto found_it; 3008 goto found_it;
2789 prev_hlock = hlock; 3009 prev_hlock = hlock;
2790 } 3010 }
2791 return print_unlock_inbalance_bug(curr, lock, ip); 3011 return print_unlock_inbalance_bug(curr, lock, ip);
2792 3012
2793found_it: 3013found_it:
2794 lock_release_holdtime(hlock); 3014 if (hlock->instance == lock)
3015 lock_release_holdtime(hlock);
3016
3017 if (hlock->references) {
3018 hlock->references--;
3019 if (hlock->references) {
3020 /*
3021 * We had, and after removing one, still have
3022 * references, the current lock stack is still
3023 * valid. We're done!
3024 */
3025 return 1;
3026 }
3027 }
2795 3028
2796 /* 3029 /*
2797 * We have the right lock to unlock, 'hlock' points to it. 3030 * We have the right lock to unlock, 'hlock' points to it.
2798 * Now we remove it from the stack, and add back the other 3031 * Now we remove it from the stack, and add back the other
2799 * entries (if any), recalculating the hash along the way: 3032 * entries (if any), recalculating the hash along the way:
2800 */ 3033 */
3034
2801 curr->lockdep_depth = i; 3035 curr->lockdep_depth = i;
2802 curr->curr_chain_key = hlock->prev_chain_key; 3036 curr->curr_chain_key = hlock->prev_chain_key;
2803 3037
@@ -2806,7 +3040,8 @@ found_it:
2806 if (!__lock_acquire(hlock->instance, 3040 if (!__lock_acquire(hlock->instance,
2807 hlock_class(hlock)->subclass, hlock->trylock, 3041 hlock_class(hlock)->subclass, hlock->trylock,
2808 hlock->read, hlock->check, hlock->hardirqs_off, 3042 hlock->read, hlock->check, hlock->hardirqs_off,
2809 hlock->nest_lock, hlock->acquire_ip)) 3043 hlock->nest_lock, hlock->acquire_ip,
3044 hlock->references))
2810 return 0; 3045 return 0;
2811 } 3046 }
2812 3047
@@ -2836,7 +3071,7 @@ static int lock_release_nested(struct task_struct *curr,
2836 /* 3071 /*
2837 * Is the unlock non-nested: 3072 * Is the unlock non-nested:
2838 */ 3073 */
2839 if (hlock->instance != lock) 3074 if (hlock->instance != lock || hlock->references)
2840 return lock_release_non_nested(curr, lock, ip); 3075 return lock_release_non_nested(curr, lock, ip);
2841 curr->lockdep_depth--; 3076 curr->lockdep_depth--;
2842 3077
@@ -2881,6 +3116,21 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
2881 check_chain_key(curr); 3116 check_chain_key(curr);
2882} 3117}
2883 3118
3119static int __lock_is_held(struct lockdep_map *lock)
3120{
3121 struct task_struct *curr = current;
3122 int i;
3123
3124 for (i = 0; i < curr->lockdep_depth; i++) {
3125 struct held_lock *hlock = curr->held_locks + i;
3126
3127 if (match_held_lock(hlock, lock))
3128 return 1;
3129 }
3130
3131 return 0;
3132}
3133
2884/* 3134/*
2885 * Check whether we follow the irq-flags state precisely: 3135 * Check whether we follow the irq-flags state precisely:
2886 */ 3136 */
@@ -2957,7 +3207,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
2957 3207
2958 current->lockdep_recursion = 1; 3208 current->lockdep_recursion = 1;
2959 __lock_acquire(lock, subclass, trylock, read, check, 3209 __lock_acquire(lock, subclass, trylock, read, check,
2960 irqs_disabled_flags(flags), nest_lock, ip); 3210 irqs_disabled_flags(flags), nest_lock, ip, 0);
2961 current->lockdep_recursion = 0; 3211 current->lockdep_recursion = 0;
2962 raw_local_irq_restore(flags); 3212 raw_local_irq_restore(flags);
2963} 3213}
@@ -2982,6 +3232,26 @@ void lock_release(struct lockdep_map *lock, int nested,
2982} 3232}
2983EXPORT_SYMBOL_GPL(lock_release); 3233EXPORT_SYMBOL_GPL(lock_release);
2984 3234
3235int lock_is_held(struct lockdep_map *lock)
3236{
3237 unsigned long flags;
3238 int ret = 0;
3239
3240 if (unlikely(current->lockdep_recursion))
3241 return ret;
3242
3243 raw_local_irq_save(flags);
3244 check_flags(flags);
3245
3246 current->lockdep_recursion = 1;
3247 ret = __lock_is_held(lock);
3248 current->lockdep_recursion = 0;
3249 raw_local_irq_restore(flags);
3250
3251 return ret;
3252}
3253EXPORT_SYMBOL_GPL(lock_is_held);
3254
2985void lockdep_set_current_reclaim_state(gfp_t gfp_mask) 3255void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
2986{ 3256{
2987 current->lockdep_reclaim_gfp = gfp_mask; 3257 current->lockdep_reclaim_gfp = gfp_mask;
@@ -3041,7 +3311,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
3041 */ 3311 */
3042 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3312 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3043 break; 3313 break;
3044 if (hlock->instance == lock) 3314 if (match_held_lock(hlock, lock))
3045 goto found_it; 3315 goto found_it;
3046 prev_hlock = hlock; 3316 prev_hlock = hlock;
3047 } 3317 }
@@ -3049,6 +3319,9 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
3049 return; 3319 return;
3050 3320
3051found_it: 3321found_it:
3322 if (hlock->instance != lock)
3323 return;
3324
3052 hlock->waittime_stamp = sched_clock(); 3325 hlock->waittime_stamp = sched_clock();
3053 3326
3054 contention_point = lock_point(hlock_class(hlock)->contention_point, ip); 3327 contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
@@ -3088,7 +3361,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3088 */ 3361 */
3089 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context) 3362 if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
3090 break; 3363 break;
3091 if (hlock->instance == lock) 3364 if (match_held_lock(hlock, lock))
3092 goto found_it; 3365 goto found_it;
3093 prev_hlock = hlock; 3366 prev_hlock = hlock;
3094 } 3367 }
@@ -3096,6 +3369,9 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
3096 return; 3369 return;
3097 3370
3098found_it: 3371found_it:
3372 if (hlock->instance != lock)
3373 return;
3374
3099 cpu = smp_processor_id(); 3375 cpu = smp_processor_id();
3100 if (hlock->waittime_stamp) { 3376 if (hlock->waittime_stamp) {
3101 now = sched_clock(); 3377 now = sched_clock();
@@ -3326,7 +3602,12 @@ void __init lockdep_info(void)
3326 sizeof(struct list_head) * CLASSHASH_SIZE + 3602 sizeof(struct list_head) * CLASSHASH_SIZE +
3327 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES + 3603 sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
3328 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS + 3604 sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
3329 sizeof(struct list_head) * CHAINHASH_SIZE) / 1024); 3605 sizeof(struct list_head) * CHAINHASH_SIZE
3606#ifdef CONFIG_PROVE_LOCKING
3607 + sizeof(struct circular_queue)
3608#endif
3609 ) / 1024
3610 );
3330 3611
3331 printk(" per task-struct memory footprint: %lu bytes\n", 3612 printk(" per task-struct memory footprint: %lu bytes\n",
3332 sizeof(struct held_lock) * MAX_LOCK_DEPTH); 3613 sizeof(struct held_lock) * MAX_LOCK_DEPTH);