path: root/kernel/lockdep.c
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c | 815
1 file changed, 550 insertions(+), 265 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef996c76..9af56723c096 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -42,6 +42,7 @@
 #include <linux/hash.h>
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
+#include <linux/bitops.h>
 
 #include <asm/sections.h>
 
@@ -141,6 +142,11 @@ static inline struct lock_class *hlock_class(struct held_lock *hlock)
 #ifdef CONFIG_LOCK_STAT
 static DEFINE_PER_CPU(struct lock_class_stats[MAX_LOCKDEP_KEYS], lock_stats);
 
+static inline u64 lockstat_clock(void)
+{
+	return cpu_clock(smp_processor_id());
+}
+
 static int lock_point(unsigned long points[], unsigned long ip)
 {
 	int i;
@@ -157,7 +163,7 @@ static int lock_point(unsigned long points[], unsigned long ip)
 	return i;
 }
 
-static void lock_time_inc(struct lock_time *lt, s64 time)
+static void lock_time_inc(struct lock_time *lt, u64 time)
 {
 	if (time > lt->max)
 		lt->max = time;
@@ -233,12 +239,12 @@ static void put_lock_stats(struct lock_class_stats *stats)
 static void lock_release_holdtime(struct held_lock *hlock)
 {
 	struct lock_class_stats *stats;
-	s64 holdtime;
+	u64 holdtime;
 
 	if (!lock_stat)
 		return;
 
-	holdtime = sched_clock() - hlock->holdtime_stamp;
+	holdtime = lockstat_clock() - hlock->holdtime_stamp;
 
 	stats = get_lock_stats(hlock_class(hlock));
 	if (hlock->read)
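
The hunks above switch lock-stat timekeeping from sched_clock() to a per-CPU cpu_clock() wrapper and turn the timestamp fields into u64: a hold time is the difference of two clock readings and is never negative, so the signed type bought nothing. A minimal userspace sketch of the same accounting pattern (all names hypothetical, loosely modelled on lock_time_inc()):

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for lockstat_clock(): monotonic nanoseconds as u64. */
static uint64_t stat_clock(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + (uint64_t)ts.tv_nsec;
}

struct lock_time {
	uint64_t min, max, total;
	unsigned long nr;
};

/* Deltas are non-negative, so u64 is the natural type. */
static void lock_time_inc(struct lock_time *lt, uint64_t time)
{
	if (time > lt->max)
		lt->max = time;
	if (time < lt->min || !lt->nr)
		lt->min = time;
	lt->total += time;
	lt->nr++;
}

int main(void)
{
	struct lock_time lt = { 0 };
	uint64_t t0 = stat_clock();
	/* ... a critical section would run here ... */
	lock_time_inc(&lt, stat_clock() - t0);
	printf("samples=%lu total=%llu ns\n", lt.nr,
	       (unsigned long long)lt.total);
	return 0;
}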
@@ -366,11 +372,21 @@ static int save_trace(struct stack_trace *trace)
 
 	save_stack_trace(trace);
 
+	/*
+	 * Some daft arches put -1 at the end to indicate it's a full trace.
+	 *
+	 * <rant> this is buggy anyway, since it takes a whole extra entry so a
+	 * complete trace that maxes out the entries provided will be reported
+	 * as incomplete, friggin useless </rant>
+	 */
+	if (trace->entries[trace->nr_entries-1] == ULONG_MAX)
+		trace->nr_entries--;
+
 	trace->max_entries = trace->nr_entries;
 
 	nr_stack_trace_entries += trace->nr_entries;
 
-	if (nr_stack_trace_entries == MAX_STACK_TRACE_ENTRIES) {
+	if (nr_stack_trace_entries >= MAX_STACK_TRACE_ENTRIES-1) {
 		if (!debug_locks_off_graph_unlock())
 			return 0;
 
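
save_trace() now strips the ULONG_MAX terminator that some architectures append to a full trace, and the overflow check backs off by one entry to match. The trim in isolation (hypothetical helper, with an extra empty-trace guard for safety):

#include <limits.h>
#include <stdio.h>

/* Drop a trailing ULONG_MAX sentinel, as save_trace() now does. */
static unsigned int trim_sentinel(const unsigned long *entries, unsigned int nr)
{
	if (nr && entries[nr - 1] == ULONG_MAX)
		nr--;
	return nr;
}

int main(void)
{
	unsigned long trace[] = { 0x1000, 0x2000, 0x3000, ULONG_MAX };
	printf("%u\n", trim_sentinel(trace, 4));	/* prints 3 */
	return 0;
}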
@@ -388,20 +404,6 @@ unsigned int nr_hardirq_chains;
 unsigned int nr_softirq_chains;
 unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
-unsigned int max_recursion_depth;
-
-static unsigned int lockdep_dependency_gen_id;
-
-static bool lockdep_dependency_visit(struct lock_class *source,
-				     unsigned int depth)
-{
-	if (!depth)
-		lockdep_dependency_gen_id++;
-	if (source->dep_gen_id == lockdep_dependency_gen_id)
-		return true;
-	source->dep_gen_id = lockdep_dependency_gen_id;
-	return false;
-}
 
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
@@ -431,11 +433,8 @@ atomic_t redundant_softirqs_on;
 atomic_t redundant_softirqs_off;
 atomic_t nr_unused_locks;
 atomic_t nr_cyclic_checks;
-atomic_t nr_cyclic_check_recursions;
 atomic_t nr_find_usage_forwards_checks;
-atomic_t nr_find_usage_forwards_recursions;
 atomic_t nr_find_usage_backwards_checks;
-atomic_t nr_find_usage_backwards_recursions;
 #endif
 
 /*
@@ -551,58 +550,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 	}
 }
 
-static void print_lock_class_header(struct lock_class *class, int depth)
-{
-	int bit;
-
-	printk("%*s->", depth, "");
-	print_lock_name(class);
-	printk(" ops: %lu", class->ops);
-	printk(" {\n");
-
-	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
-		if (class->usage_mask & (1 << bit)) {
-			int len = depth;
-
-			len += printk("%*s %s", depth, "", usage_str[bit]);
-			len += printk(" at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
-		}
-	}
-	printk("%*s }\n", depth, "");
-
-	printk("%*s ... key at: ",depth,"");
-	print_ip_sym((unsigned long)class->key);
-}
-
-/*
- * printk all lock dependencies starting at <entry>:
- */
-static void __used
-print_lock_dependencies(struct lock_class *class, int depth)
-{
-	struct lock_list *entry;
-
-	if (lockdep_dependency_visit(class, depth))
-		return;
-
-	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
-		return;
-
-	print_lock_class_header(class, depth);
-
-	list_for_each_entry(entry, &class->locks_after, entry) {
-		if (DEBUG_LOCKS_WARN_ON(!entry->class))
-			return;
-
-		print_lock_dependencies(entry->class, depth + 1);
-
-		printk("%*s ... acquired at:\n",depth,"");
-		print_stack_trace(&entry->trace, 2);
-		printk("\n");
-	}
-}
-
 static void print_kernel_version(void)
 {
 	printk("%s %.*s\n", init_utsname()->release,
@@ -636,6 +583,9 @@ static int static_obj(void *obj)
 	if ((addr >= start) && (addr < end))
 		return 1;
 
+	if (arch_is_kernel_data(addr))
+		return 1;
+
 #ifdef CONFIG_SMP
 	/*
 	 * percpu var?
@@ -898,22 +848,203 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 }
 
 /*
+ * For efficient modular arithmetic, the queue size is a power of 2.
+ */
+#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
+#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
+
+/*
+ * The circular_queue and its helpers implement the breadth-first
+ * search (BFS) algorithm, by which we can build the shortest path
+ * from the next lock to be acquired back to a previously held lock
+ * if there is a circular dependency between them.
+ */
+struct circular_queue {
+	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+	unsigned int front, rear;
+};
+
+static struct circular_queue lock_cq;
+
+unsigned int max_bfs_queue_depth;
+
+static unsigned int lockdep_dependency_gen_id;
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+	lockdep_dependency_gen_id++;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1) & CQ_MASK) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front) & CQ_MASK;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+					struct lock_list *parent)
+{
+	unsigned long nr;
+
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	lock->parent = parent;
+	lock->class->dep_gen_id = lockdep_dependency_gen_id;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	unsigned long nr;
+
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	return lock->class->dep_gen_id == lockdep_dependency_gen_id;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return child->parent;
+}
+
+static inline int get_lock_depth(struct lock_list *child)
+{
+	int depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}
+
+static int __bfs(struct lock_list *source_entry,
+		 void *data,
+		 int (*match)(struct lock_list *entry, void *data),
+		 struct lock_list **target_entry,
+		 int forward)
+{
+	struct lock_list *entry;
+	struct list_head *head;
+	struct circular_queue *cq = &lock_cq;
+	int ret = 1;
+
+	if (match(source_entry, data)) {
+		*target_entry = source_entry;
+		ret = 0;
+		goto exit;
+	}
+
+	if (forward)
+		head = &source_entry->class->locks_after;
+	else
+		head = &source_entry->class->locks_before;
+
+	if (list_empty(head))
+		goto exit;
+
+	__cq_init(cq);
+	__cq_enqueue(cq, (unsigned long)source_entry);
+
+	while (!__cq_empty(cq)) {
+		struct lock_list *lock;
+
+		__cq_dequeue(cq, (unsigned long *)&lock);
+
+		if (!lock->class) {
+			ret = -2;
+			goto exit;
+		}
+
+		if (forward)
+			head = &lock->class->locks_after;
+		else
+			head = &lock->class->locks_before;
+
+		list_for_each_entry(entry, head, entry) {
+			if (!lock_accessed(entry)) {
+				unsigned int cq_depth;
+				mark_lock_accessed(entry, lock);
+				if (match(entry, data)) {
+					*target_entry = entry;
+					ret = 0;
+					goto exit;
+				}
+
+				if (__cq_enqueue(cq, (unsigned long)entry)) {
+					ret = -1;
+					goto exit;
+				}
+				cq_depth = __cq_get_elem_count(cq);
+				if (max_bfs_queue_depth < cq_depth)
+					max_bfs_queue_depth = cq_depth;
+			}
+		}
+	}
+exit:
+	return ret;
+}
+
+static inline int __bfs_forwards(struct lock_list *src_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry)
+{
+	return __bfs(src_entry, data, match, target_entry, 1);
+
+}
+
+static inline int __bfs_backwards(struct lock_list *src_entry,
+			void *data,
+			int (*match)(struct lock_list *entry, void *data),
+			struct lock_list **target_entry)
+{
+	return __bfs(src_entry, data, match, target_entry, 0);
+
+}
+
+/*
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
  * checking.
- *
- * (to keep the stackframe of the recursive functions small we
- * use these global variables, and we also mark various helper
- * functions as noinline.)
  */
-static struct held_lock *check_source, *check_target;
 
 /*
  * Print a dependency chain entry (this is only done when a deadlock
  * has been detected):
  */
 static noinline int
-print_circular_bug_entry(struct lock_list *target, unsigned int depth)
+print_circular_bug_entry(struct lock_list *target, int depth)
 {
 	if (debug_locks_silent)
 		return 0;
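
This block is the heart of the series: the recursive graph walk becomes an iterative breadth-first search over a fixed ring buffer. The power-of-2 queue size turns index wrap-around into a single AND with CQ_MASK, the lockdep_dependency_gen_id generation counter replaces per-walk "visited" bookkeeping, and the parent pointers recorded by mark_lock_accessed() let callers reconstruct the shortest offending path afterwards. A self-contained sketch of the same scheme on a toy dependency graph (all names hypothetical):

#include <stdio.h>

#define QSIZE 8				/* power of 2, so wrap is a cheap mask */
#define QMASK (QSIZE - 1)

struct queue {
	int elem[QSIZE];
	unsigned int front, rear;
};

static void q_init(struct queue *q)  { q->front = q->rear = 0; }
static int  q_empty(struct queue *q) { return q->front == q->rear; }
static int  q_full(struct queue *q)  { return ((q->rear + 1) & QMASK) == q->front; }

static int q_enqueue(struct queue *q, int v)
{
	if (q_full(q))
		return -1;
	q->elem[q->rear] = v;
	q->rear = (q->rear + 1) & QMASK;
	return 0;
}

static int q_dequeue(struct queue *q, int *v)
{
	if (q_empty(q))
		return -1;
	*v = q->elem[q->front];
	q->front = (q->front + 1) & QMASK;
	return 0;
}

/* Toy dependency graph: adj[a][b] != 0 means "a held while taking b". */
#define NODES 5
static const int adj[NODES][NODES] = {
	[0] = { [1] = 1 },
	[1] = { [2] = 1 },
	[2] = { [3] = 1, [4] = 1 },
};

/* parent[] plays the role of lock_list->parent; visited[] stands in for
 * the dep_gen_id generation stamp. Returns 0 when found, 1 when there is
 * no path, -1 on queue overflow, mirroring __bfs()'s return convention. */
static int bfs_path(int src, int dst, int *parent)
{
	int visited[NODES] = { 0 };
	struct queue q;
	int u;

	q_init(&q);
	visited[src] = 1;
	parent[src] = -1;
	q_enqueue(&q, src);

	while (!q_dequeue(&q, &u)) {
		if (u == dst)
			return 0;	/* found: walk parent[] for the path */
		for (int v = 0; v < NODES; v++) {
			if (adj[u][v] && !visited[v]) {
				visited[v] = 1;
				parent[v] = u;
				if (q_enqueue(&q, v))
					return -1;
			}
		}
	}
	return 1;
}

int main(void)
{
	int parent[NODES];

	if (bfs_path(0, 4, parent) == 0)
		for (int v = 4; v != -1; v = parent[v])
			printf("%d%s", v, parent[v] == -1 ? "\n" : " <- ");
	return 0;
}

Compiled as C99 this prints "4 <- 2 <- 1 <- 0": the shortest chain from the target back to the source, exactly the shape print_circular_bug() walks below.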
@@ -930,11 +1061,13 @@ print_circular_bug_entry(struct lock_list *target, unsigned int depth)
  * header first:
  */
 static noinline int
-print_circular_bug_header(struct lock_list *entry, unsigned int depth)
+print_circular_bug_header(struct lock_list *entry, unsigned int depth,
+			struct held_lock *check_src,
+			struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
 
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (debug_locks_silent)
 		return 0;
 
 	printk("\n=======================================================\n");
@@ -943,9 +1076,9 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	printk( "-------------------------------------------------------\n");
 	printk("%s/%d is trying to acquire lock:\n",
 		curr->comm, task_pid_nr(curr));
-	print_lock(check_source);
+	print_lock(check_src);
 	printk("\nbut task is already holding lock:\n");
-	print_lock(check_target);
+	print_lock(check_tgt);
 	printk("\nwhich lock already depends on the new lock.\n\n");
 	printk("\nthe existing dependency chain (in reverse order) is:\n");
 
@@ -954,19 +1087,36 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	return 0;
 }
 
-static noinline int print_circular_bug_tail(void)
+static inline int class_equal(struct lock_list *entry, void *data)
+{
+	return entry->class == data;
+}
+
+static noinline int print_circular_bug(struct lock_list *this,
+				struct lock_list *target,
+				struct held_lock *check_src,
+				struct held_lock *check_tgt)
 {
 	struct task_struct *curr = current;
-	struct lock_list this;
+	struct lock_list *parent;
+	int depth;
 
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
-	this.class = hlock_class(check_source);
-	if (!save_trace(&this.trace))
+	if (!save_trace(&this->trace))
 		return 0;
 
-	print_circular_bug_entry(&this, 0);
+	depth = get_lock_depth(target);
+
+	print_circular_bug_header(target, depth, check_src, check_tgt);
+
+	parent = get_lock_parent(target);
+
+	while (parent) {
+		print_circular_bug_entry(parent, --depth);
+		parent = get_lock_parent(parent);
+	}
 
 	printk("\nother info that might help us debug this:\n\n");
 	lockdep_print_held_locks(curr);
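
print_circular_bug() no longer prints while recursing: it measures the cycle length once with get_lock_depth() and then walks the parent links the BFS left behind, counting depth down as it goes. The parent-walk in isolation (hypothetical types):

#include <stdio.h>

struct node { struct node *parent; };

/* Hypothetical mirror of get_lock_depth(): hops back to the BFS root. */
static int depth_of(struct node *n)
{
	int depth = 0;
	while (n->parent) {
		n = n->parent;
		depth++;
	}
	return depth;
}

int main(void)
{
	struct node root = { 0 }, mid = { &root }, leaf = { &mid };
	printf("%d\n", depth_of(&leaf));	/* 2: leaf -> mid -> root */
	return 0;
}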
@@ -977,73 +1127,69 @@ static noinline int print_circular_bug_tail(void)
 	return 0;
 }
 
-#define RECURSION_LIMIT 40
-
-static int noinline print_infinite_recursion_bug(void)
+static noinline int print_bfs_bug(int ret)
 {
 	if (!debug_locks_off_graph_unlock())
 		return 0;
 
-	WARN_ON(1);
+	WARN(1, "lockdep bfs error:%d\n", ret);
 
 	return 0;
 }
 
-unsigned long __lockdep_count_forward_deps(struct lock_class *class,
-					unsigned int depth)
+static int noop_count(struct lock_list *entry, void *data)
 {
-	struct lock_list *entry;
-	unsigned long ret = 1;
+	(*(unsigned long *)data)++;
+	return 0;
+}
 
-	if (lockdep_dependency_visit(class, depth))
-		return 0;
+unsigned long __lockdep_count_forward_deps(struct lock_list *this)
+{
+	unsigned long count = 0;
+	struct lock_list *uninitialized_var(target_entry);
 
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_after, entry)
-		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+	__bfs_forwards(this, (void *)&count, noop_count, &target_entry);
 
-	return ret;
+	return count;
 }
-
 unsigned long lockdep_count_forward_deps(struct lock_class *class)
 {
 	unsigned long ret, flags;
+	struct lock_list this;
+
+	this.parent = NULL;
+	this.class = class;
 
 	local_irq_save(flags);
 	__raw_spin_lock(&lockdep_lock);
-	ret = __lockdep_count_forward_deps(class, 0);
+	ret = __lockdep_count_forward_deps(&this);
 	__raw_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
 	return ret;
 }
 
-unsigned long __lockdep_count_backward_deps(struct lock_class *class,
-					unsigned int depth)
+unsigned long __lockdep_count_backward_deps(struct lock_list *this)
 {
-	struct lock_list *entry;
-	unsigned long ret = 1;
+	unsigned long count = 0;
+	struct lock_list *uninitialized_var(target_entry);
 
-	if (lockdep_dependency_visit(class, depth))
-		return 0;
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_before, entry)
-		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+	__bfs_backwards(this, (void *)&count, noop_count, &target_entry);
 
-	return ret;
+	return count;
 }
 
 unsigned long lockdep_count_backward_deps(struct lock_class *class)
 {
 	unsigned long ret, flags;
+	struct lock_list this;
+
+	this.parent = NULL;
+	this.class = class;
 
 	local_irq_save(flags);
 	__raw_spin_lock(&lockdep_lock);
-	ret = __lockdep_count_backward_deps(class, 0);
+	ret = __lockdep_count_backward_deps(&this);
 	__raw_spin_unlock(&lockdep_lock);
 	local_irq_restore(flags);
 
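
With __bfs() in place, dependency counting reduces to a visitor: noop_count() bumps a counter through the void * data argument and returns 0, so it never "matches" and the walk sweeps the whole reachable subgraph. The callback convention in isolation (hypothetical traversal over a fixed node set):

#include <stdio.h>

/* Visitor convention as in __bfs(): non-zero stops at a match, zero keeps
 * walking. A pure counter never stops. */
typedef int (*match_fn)(int node, void *data);

static int noop_count(int node, void *data)
{
	(void)node;
	(*(unsigned long *)data)++;
	return 0;
}

static void walk_all(match_fn match, void *data)
{
	for (int node = 0; node < 6; node++)
		if (match(node, data))
			return;
}

int main(void)
{
	unsigned long count = 0;
	walk_all(noop_count, &count);
	printf("%lu nodes\n", count);	/* 6 */
	return 0;
}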
@@ -1055,29 +1201,16 @@ unsigned long lockdep_count_backward_deps(struct lock_class *class)
  * lead to <target>. Print an error and return 0 if it does.
  */
 static noinline int
-check_noncircular(struct lock_class *source, unsigned int depth)
+check_noncircular(struct lock_list *root, struct lock_class *target,
+		struct lock_list **target_entry)
 {
-	struct lock_list *entry;
+	int result;
 
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
+	debug_atomic_inc(&nr_cyclic_checks);
 
-	debug_atomic_inc(&nr_cyclic_check_recursions);
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_after, entry) {
-		if (entry->class == hlock_class(check_target))
-			return print_circular_bug_header(entry, depth+1);
-		debug_atomic_inc(&nr_cyclic_checks);
-		if (!check_noncircular(entry->class, depth+1))
-			return print_circular_bug_entry(entry, depth+1);
-	}
-	return 1;
+	result = __bfs_forwards(root, target, class_equal, target_entry);
+
+	return result;
 }
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
@@ -1086,103 +1219,121 @@ check_noncircular(struct lock_class *source, unsigned int depth)
  * proving that two subgraphs can be connected by a new dependency
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
-static enum lock_usage_bit find_usage_bit;
-static struct lock_class *forwards_match, *backwards_match;
+
+static inline int usage_match(struct lock_list *entry, void *bit)
+{
+	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
+}
+
+
 
 /*
  * Find a node in the forwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <forwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <forwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_forwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_forwards(struct lock_list *root, enum lock_usage_bit bit,
+			struct lock_list **target_entry)
 {
-	struct lock_list *entry;
-	int ret;
-
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
-
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
+	int result;
 
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
-	if (source->usage_mask & (1 << find_usage_bit)) {
-		forwards_match = source;
-		return 2;
-	}
 
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_after, entry) {
-		debug_atomic_inc(&nr_find_usage_forwards_recursions);
-		ret = find_usage_forwards(entry->class, depth+1);
-		if (ret == 2 || ret == 0)
-			return ret;
-	}
-	return 1;
+	result = __bfs_forwards(root, (void *)bit, usage_match, target_entry);
+
+	return result;
 }
 
 /*
  * Find a node in the backwards-direction dependency sub-graph starting
- * at <source> that matches <find_usage_bit>.
+ * at @root->class that matches @bit.
  *
- * Return 2 if such a node exists in the subgraph, and put that node
- * into <backwards_match>.
+ * Return 0 if such a node exists in the subgraph, and put that node
+ * into *@target_entry.
  *
- * Return 1 otherwise and keep <backwards_match> unchanged.
- * Return 0 on error.
+ * Return 1 otherwise and keep *@target_entry unchanged.
+ * Return <0 on error.
  */
-static noinline int
-find_usage_backwards(struct lock_class *source, unsigned int depth)
+static int
+find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
+			struct lock_list **target_entry)
 {
-	struct lock_list *entry;
-	int ret;
+	int result;
 
-	if (lockdep_dependency_visit(source, depth))
-		return 1;
+	debug_atomic_inc(&nr_find_usage_backwards_checks);
 
-	if (!__raw_spin_is_locked(&lockdep_lock))
-		return DEBUG_LOCKS_WARN_ON(1);
+	result = __bfs_backwards(root, (void *)bit, usage_match, target_entry);
 
-	if (depth > max_recursion_depth)
-		max_recursion_depth = depth;
-	if (depth >= RECURSION_LIMIT)
-		return print_infinite_recursion_bug();
-
-	debug_atomic_inc(&nr_find_usage_backwards_checks);
-	if (source->usage_mask & (1 << find_usage_bit)) {
-		backwards_match = source;
-		return 2;
-	}
-
-	if (!source && debug_locks_off_graph_unlock()) {
-		WARN_ON(1);
-		return 0;
-	}
-
-	/*
-	 * Check this lock's dependency list:
-	 */
-	list_for_each_entry(entry, &source->locks_before, entry) {
-		debug_atomic_inc(&nr_find_usage_backwards_recursions);
-		ret = find_usage_backwards(entry->class, depth+1);
-		if (ret == 2 || ret == 0)
-			return ret;
-	}
-	return 1;
+	return result;
+}
+
+static void print_lock_class_header(struct lock_class *class, int depth)
+{
+	int bit;
+
+	printk("%*s->", depth, "");
+	print_lock_name(class);
+	printk(" ops: %lu", class->ops);
+	printk(" {\n");
+
+	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+		if (class->usage_mask & (1 << bit)) {
+			int len = depth;
+
+			len += printk("%*s %s", depth, "", usage_str[bit]);
+			len += printk(" at:\n");
+			print_stack_trace(class->usage_traces + bit, len);
+		}
+	}
+	printk("%*s }\n", depth, "");
+
+	printk("%*s ... key at: ",depth,"");
+	print_ip_sym((unsigned long)class->key);
+}
+
+/*
+ * printk the shortest lock dependencies from @leaf to @root in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+				struct lock_list *root)
+{
+	struct lock_list *entry = leaf;
+	int depth;
+
+	/* compute the depth from the tree generated by the BFS */
+	depth = get_lock_depth(leaf);
+
+	do {
+		print_lock_class_header(entry->class, depth);
+		printk("%*s ... acquired at:\n", depth, "");
+		print_stack_trace(&entry->trace, 2);
+		printk("\n");
+
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad BFS generated tree\n", __func__);
+			break;
+		}
+
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && (depth >= 0));
+
+	return;
 }
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
+			struct lock_list *prev_root,
+			struct lock_list *next_root,
+			struct lock_list *backwards_entry,
+			struct lock_list *forwards_entry,
 			struct held_lock *prev,
 			struct held_lock *next,
 			enum lock_usage_bit bit1,
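
usage_match() is the other visitor: it tests a class's usage bitmask against the usage bit being searched for, with the bit itself smuggled through the void * data argument instead of the old find_usage_bit global. The mask test in isolation (hypothetical names and bits):

#include <stdio.h>

enum usage_bit { USED_IN_HARDIRQ, USED_IN_SOFTIRQ, ENABLED_HARDIRQ };

struct klass { unsigned long usage_mask; };

/* Mirrors usage_match(): the bit rides in the void * argument. */
static int usage_match(const struct klass *k, void *bit)
{
	return (k->usage_mask & (1UL << (enum usage_bit)(long)bit)) != 0;
}

int main(void)
{
	struct klass k = { .usage_mask = 1UL << USED_IN_SOFTIRQ };
	printf("%d\n", usage_match(&k, (void *)(long)USED_IN_SOFTIRQ)); /* 1 */
	printf("%d\n", usage_match(&k, (void *)(long)USED_IN_HARDIRQ)); /* 0 */
	return 0;
}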
@@ -1215,26 +1366,32 @@ print_bad_irq_dependency(struct task_struct *curr,
 
 	printk("\nbut this new dependency connects a %s-irq-safe lock:\n",
 		irqclass);
-	print_lock_name(backwards_match);
+	print_lock_name(backwards_entry->class);
 	printk("\n... which became %s-irq-safe at:\n", irqclass);
 
-	print_stack_trace(backwards_match->usage_traces + bit1, 1);
+	print_stack_trace(backwards_entry->class->usage_traces + bit1, 1);
 
 	printk("\nto a %s-irq-unsafe lock:\n", irqclass);
-	print_lock_name(forwards_match);
+	print_lock_name(forwards_entry->class);
 	printk("\n... which became %s-irq-unsafe at:\n", irqclass);
 	printk("...");
 
-	print_stack_trace(forwards_match->usage_traces + bit2, 1);
+	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	printk("\nother info that might help us debug this:\n\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe %s-irq-safe lock's dependencies:\n", irqclass);
-	print_lock_dependencies(backwards_match, 0);
+	printk("\nthe dependencies between %s-irq-safe lock", irqclass);
+	printk(" and the holding lock:\n");
+	if (!save_trace(&prev_root->trace))
+		return 0;
+	print_shortest_lock_dependencies(backwards_entry, prev_root);
 
-	printk("\nthe %s-irq-unsafe lock's dependencies:\n", irqclass);
-	print_lock_dependencies(forwards_match, 0);
+	printk("\nthe dependencies between the lock to be acquired");
+	printk(" and %s-irq-unsafe lock:\n", irqclass);
+	if (!save_trace(&next_root->trace))
+		return 0;
+	print_shortest_lock_dependencies(forwards_entry, next_root);
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
@@ -1248,19 +1405,30 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 		enum lock_usage_bit bit_forwards, const char *irqclass)
 {
 	int ret;
+	struct lock_list this, that;
+	struct lock_list *uninitialized_var(target_entry);
+	struct lock_list *uninitialized_var(target_entry1);
+
+	this.parent = NULL;
 
-	find_usage_bit = bit_backwards;
-	/* fills in <backwards_match> */
-	ret = find_usage_backwards(hlock_class(prev), 0);
-	if (!ret || ret == 1)
+	this.class = hlock_class(prev);
+	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
 		return ret;
 
-	find_usage_bit = bit_forwards;
-	ret = find_usage_forwards(hlock_class(next), 0);
-	if (!ret || ret == 1)
+	that.parent = NULL;
+	that.class = hlock_class(next);
+	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
 		return ret;
-	/* ret == 2 */
-	return print_bad_irq_dependency(curr, prev, next,
+
+	return print_bad_irq_dependency(curr, &this, &that,
+			target_entry, target_entry1,
+			prev, next,
 			bit_backwards, bit_forwards, irqclass);
 }
 
@@ -1472,6 +1640,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 {
 	struct lock_list *entry;
 	int ret;
+	struct lock_list this;
+	struct lock_list *uninitialized_var(target_entry);
 
 	/*
 	 * Prove that the new <prev> -> <next> dependency would not
@@ -1482,10 +1652,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * We are using global variables to control the recursion, to
 	 * keep the stackframe size of the recursive functions low:
 	 */
-	check_source = next;
-	check_target = prev;
-	if (!(check_noncircular(hlock_class(next), 0)))
-		return print_circular_bug_tail();
+	this.class = hlock_class(next);
+	this.parent = NULL;
+	ret = check_noncircular(&this, hlock_class(prev), &target_entry);
+	if (unlikely(!ret))
+		return print_circular_bug(&this, target_entry, next, prev);
+	else if (unlikely(ret < 0))
+		return print_bfs_bug(ret);
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
@@ -1884,7 +2057,8 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
  * print irq inversion bug:
  */
 static int
-print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
+print_irq_inversion_bug(struct task_struct *curr,
+			struct lock_list *root, struct lock_list *other,
 			struct held_lock *this, int forwards,
 			const char *irqclass)
 {
@@ -1902,17 +2076,16 @@ print_irq_inversion_bug(struct task_struct *curr, struct lock_class *other,
 		printk("but this lock took another, %s-unsafe lock in the past:\n", irqclass);
 	else
 		printk("but this lock was taken by another, %s-safe lock in the past:\n", irqclass);
-	print_lock_name(other);
+	print_lock_name(other->class);
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
 	printk("\nother info that might help us debug this:\n");
 	lockdep_print_held_locks(curr);
 
-	printk("\nthe first lock's dependencies:\n");
-	print_lock_dependencies(hlock_class(this), 0);
-
-	printk("\nthe second lock's dependencies:\n");
-	print_lock_dependencies(other, 0);
+	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
+	if (!save_trace(&root->trace))
+		return 0;
+	print_shortest_lock_dependencies(other, root);
 
 	printk("\nstack backtrace:\n");
 	dump_stack();
@@ -1929,14 +2102,19 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 			enum lock_usage_bit bit, const char *irqclass)
 {
 	int ret;
-
-	find_usage_bit = bit;
-	/* fills in <forwards_match> */
-	ret = find_usage_forwards(hlock_class(this), 0);
-	if (!ret || ret == 1)
+	struct lock_list root;
+	struct lock_list *uninitialized_var(target_entry);
+
+	root.parent = NULL;
+	root.class = hlock_class(this);
+	ret = find_usage_forwards(&root, bit, &target_entry);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
 		return ret;
 
-	return print_irq_inversion_bug(curr, forwards_match, this, 1, irqclass);
+	return print_irq_inversion_bug(curr, &root, target_entry,
+					this, 1, irqclass);
 }
 
 /*
@@ -1948,14 +2126,19 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 			enum lock_usage_bit bit, const char *irqclass)
 {
 	int ret;
-
-	find_usage_bit = bit;
-	/* fills in <backwards_match> */
-	ret = find_usage_backwards(hlock_class(this), 0);
-	if (!ret || ret == 1)
+	struct lock_list root;
+	struct lock_list *uninitialized_var(target_entry);
+
+	root.parent = NULL;
+	root.class = hlock_class(this);
+	ret = find_usage_backwards(&root, bit, &target_entry);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
 		return ret;
 
-	return print_irq_inversion_bug(curr, backwards_match, this, 0, irqclass);
+	return print_irq_inversion_bug(curr, &root, target_entry,
+					this, 0, irqclass);
 }
 
 void print_irqtrace_events(struct task_struct *curr)
@@ -2530,13 +2713,15 @@ EXPORT_SYMBOL_GPL(lockdep_init_map);
  */
 static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 			int trylock, int read, int check, int hardirqs_off,
-			struct lockdep_map *nest_lock, unsigned long ip)
+			struct lockdep_map *nest_lock, unsigned long ip,
+			int references)
 {
 	struct task_struct *curr = current;
 	struct lock_class *class = NULL;
 	struct held_lock *hlock;
 	unsigned int depth, id;
 	int chain_head = 0;
+	int class_idx;
 	u64 chain_key;
 
 	if (!prove_locking)
@@ -2584,10 +2769,24 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	if (DEBUG_LOCKS_WARN_ON(depth >= MAX_LOCK_DEPTH))
 		return 0;
 
+	class_idx = class - lock_classes + 1;
+
+	if (depth) {
+		hlock = curr->held_locks + depth - 1;
+		if (hlock->class_idx == class_idx && nest_lock) {
+			if (hlock->references)
+				hlock->references++;
+			else
+				hlock->references = 2;
+
+			return 1;
+		}
+	}
+
 	hlock = curr->held_locks + depth;
 	if (DEBUG_LOCKS_WARN_ON(!class))
 		return 0;
-	hlock->class_idx = class - lock_classes + 1;
+	hlock->class_idx = class_idx;
 	hlock->acquire_ip = ip;
 	hlock->instance = lock;
 	hlock->nest_lock = nest_lock;
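
The patch's second feature starts here: re-acquiring the lock class already on top of the held-lock stack under a nest_lock no longer consumes a new held_lock slot; the existing entry's reference count is bumped instead. Zero references means a single plain acquisition, so the first nested acquisition jumps straight to 2. Just that fast path, sketched (hypothetical types):

#include <stdio.h>

struct held { int class_idx; unsigned int references; };

/* Mirrors the nested fast path in __lock_acquire(): references == 0 means
 * "one plain acquisition", so the first nested grab becomes 2. */
static int reacquire(struct held *top, int class_idx)
{
	if (top->class_idx == class_idx) {
		top->references = top->references ? top->references + 1 : 2;
		return 1;	/* handled without a new stack slot */
	}
	return 0;
}

int main(void)
{
	struct held top = { .class_idx = 7, .references = 0 };
	reacquire(&top, 7);
	reacquire(&top, 7);
	printf("references=%u\n", top.references);	/* 3 */
	return 0;
}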
@@ -2595,9 +2794,10 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	hlock->read = read;
 	hlock->check = check;
 	hlock->hardirqs_off = !!hardirqs_off;
+	hlock->references = references;
 #ifdef CONFIG_LOCK_STAT
 	hlock->waittime_stamp = 0;
-	hlock->holdtime_stamp = sched_clock();
+	hlock->holdtime_stamp = lockstat_clock();
 #endif
 
 	if (check == 2 && !mark_irqflags(curr, hlock))
@@ -2703,6 +2903,30 @@ static int check_unlock(struct task_struct *curr, struct lockdep_map *lock,
 	return 1;
 }
 
+static int match_held_lock(struct held_lock *hlock, struct lockdep_map *lock)
+{
+	if (hlock->instance == lock)
+		return 1;
+
+	if (hlock->references) {
+		struct lock_class *class = lock->class_cache;
+
+		if (!class)
+			class = look_up_lock_class(lock, 0);
+
+		if (DEBUG_LOCKS_WARN_ON(!class))
+			return 0;
+
+		if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
+			return 0;
+
+		if (hlock->class_idx == class - lock_classes + 1)
+			return 1;
+	}
+
+	return 0;
+}
+
 static int
 __lock_set_class(struct lockdep_map *lock, const char *name,
 		struct lock_class_key *key, unsigned int subclass,
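
match_held_lock() is the lookup counterpart: an exact instance match still wins, but a reference-counted entry may also match purely by lock class, since several instances have been folded into one stack slot. Sketched standalone, with the class comparison simplified to an index field (hypothetical):

#include <stdio.h>

struct map  { int class_idx; };
struct held { const struct map *instance; int class_idx; unsigned int references; };

/* Simplified match_held_lock(): instance match, or class match when the
 * entry carries references. */
static int match_held(const struct held *h, const struct map *m)
{
	if (h->instance == m)
		return 1;
	return h->references && h->class_idx == m->class_idx;
}

int main(void)
{
	struct map a = { 3 }, b = { 3 };
	struct held h = { .instance = &a, .class_idx = 3, .references = 2 };
	printf("%d %d\n", match_held(&h, &a), match_held(&h, &b));	/* 1 1 */
	return 0;
}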
@@ -2726,7 +2950,7 @@ __lock_set_class(struct lockdep_map *lock, const char *name,
 	 */
 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
 			break;
-		if (hlock->instance == lock)
+		if (match_held_lock(hlock, lock))
 			goto found_it;
 		prev_hlock = hlock;
 	}
@@ -2745,7 +2969,8 @@ found_it:
 		if (!__lock_acquire(hlock->instance,
 				hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
-				hlock->nest_lock, hlock->acquire_ip))
+				hlock->nest_lock, hlock->acquire_ip,
+				hlock->references))
 			return 0;
 	}
 
@@ -2784,20 +3009,34 @@ lock_release_non_nested(struct task_struct *curr,
 	 */
 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
 			break;
-		if (hlock->instance == lock)
+		if (match_held_lock(hlock, lock))
 			goto found_it;
 		prev_hlock = hlock;
 	}
 	return print_unlock_inbalance_bug(curr, lock, ip);
 
 found_it:
-	lock_release_holdtime(hlock);
+	if (hlock->instance == lock)
+		lock_release_holdtime(hlock);
+
+	if (hlock->references) {
+		hlock->references--;
+		if (hlock->references) {
+			/*
+			 * We had, and after removing one, still have
+			 * references, the current lock stack is still
+			 * valid. We're done!
+			 */
+			return 1;
+		}
+	}
 
 	/*
 	 * We have the right lock to unlock, 'hlock' points to it.
 	 * Now we remove it from the stack, and add back the other
 	 * entries (if any), recalculating the hash along the way:
 	 */
+
 	curr->lockdep_depth = i;
 	curr->curr_chain_key = hlock->prev_chain_key;
 
@@ -2806,7 +3045,8 @@ found_it:
 		if (!__lock_acquire(hlock->instance,
 				hlock_class(hlock)->subclass, hlock->trylock,
 				hlock->read, hlock->check, hlock->hardirqs_off,
-				hlock->nest_lock, hlock->acquire_ip))
+				hlock->nest_lock, hlock->acquire_ip,
+				hlock->references))
 			return 0;
 	}
 
@@ -2836,7 +3076,7 @@ static int lock_release_nested(struct task_struct *curr,
 	/*
 	 * Is the unlock non-nested:
 	 */
-	if (hlock->instance != lock)
+	if (hlock->instance != lock || hlock->references)
 		return lock_release_non_nested(curr, lock, ip);
 	curr->lockdep_depth--;
 
@@ -2881,6 +3121,21 @@ __lock_release(struct lockdep_map *lock, int nested, unsigned long ip)
 	check_chain_key(curr);
 }
 
+static int __lock_is_held(struct lockdep_map *lock)
+{
+	struct task_struct *curr = current;
+	int i;
+
+	for (i = 0; i < curr->lockdep_depth; i++) {
+		struct held_lock *hlock = curr->held_locks + i;
+
+		if (match_held_lock(hlock, lock))
+			return 1;
+	}
+
+	return 0;
+}
+
 /*
  * Check whether we follow the irq-flags state precisely:
  */
@@ -2957,7 +3212,7 @@ void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 
 	current->lockdep_recursion = 1;
 	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), nest_lock, ip);
+		       irqs_disabled_flags(flags), nest_lock, ip, 0);
 	current->lockdep_recursion = 0;
 	raw_local_irq_restore(flags);
 }
@@ -2982,6 +3237,26 @@ void lock_release(struct lockdep_map *lock, int nested,
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
+int lock_is_held(struct lockdep_map *lock)
+{
+	unsigned long flags;
+	int ret = 0;
+
+	if (unlikely(current->lockdep_recursion))
+		return ret;
+
+	raw_local_irq_save(flags);
+	check_flags(flags);
+
+	current->lockdep_recursion = 1;
+	ret = __lock_is_held(lock);
+	current->lockdep_recursion = 0;
+	raw_local_irq_restore(flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(lock_is_held);
+
 void lockdep_set_current_reclaim_state(gfp_t gfp_mask)
 {
 	current->lockdep_reclaim_gfp = gfp_mask;
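
lock_is_held() gives other code a way to assert that a lock is held without owning the acquire path itself. A usage fragment rather than a runnable program (my_mutex is hypothetical; later kernels wrap exactly this pattern as lockdep_assert_held()):

/* Complain if my_mutex is not held at this point. */
static void my_func_needs_lock(void)
{
	WARN_ON(debug_locks && !lock_is_held(&my_mutex.dep_map));
	/* ... touch state protected by my_mutex ... */
}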
@@ -3041,7 +3316,7 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 	 */
 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
 			break;
-		if (hlock->instance == lock)
+		if (match_held_lock(hlock, lock))
 			goto found_it;
 		prev_hlock = hlock;
 	}
@@ -3049,7 +3324,10 @@ __lock_contended(struct lockdep_map *lock, unsigned long ip)
 		return;
 
 found_it:
-	hlock->waittime_stamp = sched_clock();
+	if (hlock->instance != lock)
+		return;
+
+	hlock->waittime_stamp = lockstat_clock();
 
 	contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
 	contending_point = lock_point(hlock_class(hlock)->contending_point,
@@ -3072,8 +3350,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	struct held_lock *hlock, *prev_hlock;
 	struct lock_class_stats *stats;
 	unsigned int depth;
-	u64 now;
-	s64 waittime = 0;
+	u64 now, waittime = 0;
 	int i, cpu;
 
 	depth = curr->lockdep_depth;
@@ -3088,7 +3365,7 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 	 */
 		if (prev_hlock && prev_hlock->irq_context != hlock->irq_context)
 			break;
-		if (hlock->instance == lock)
+		if (match_held_lock(hlock, lock))
 			goto found_it;
 		prev_hlock = hlock;
 	}
@@ -3096,9 +3373,12 @@ __lock_acquired(struct lockdep_map *lock, unsigned long ip)
 		return;
 
 found_it:
+	if (hlock->instance != lock)
+		return;
+
 	cpu = smp_processor_id();
 	if (hlock->waittime_stamp) {
-		now = sched_clock();
+		now = lockstat_clock();
 		waittime = now - hlock->waittime_stamp;
 		hlock->holdtime_stamp = now;
 	}
@@ -3326,7 +3606,12 @@ void __init lockdep_info(void)
 		sizeof(struct list_head) * CLASSHASH_SIZE +
 		sizeof(struct lock_list) * MAX_LOCKDEP_ENTRIES +
 		sizeof(struct lock_chain) * MAX_LOCKDEP_CHAINS +
-		sizeof(struct list_head) * CHAINHASH_SIZE) / 1024);
+		sizeof(struct list_head) * CHAINHASH_SIZE
+#ifdef CONFIG_PROVE_LOCKING
+		+ sizeof(struct circular_queue)
+#endif
+		) / 1024
+		);
 
 	printk(" per task-struct memory footprint: %lu bytes\n",
 		sizeof(struct held_lock) * MAX_LOCK_DEPTH);
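
For scale: with MAX_CIRCULAR_QUEUE_SIZE at 4096 unsigned longs, sizeof(struct circular_queue) works out to roughly 4096 * 8 + 8 bytes, i.e. about 32 KiB on a 64-bit build (about 16 KiB on 32-bit), which is why lockdep_info() now folds the queue into the reported memory footprint under CONFIG_PROVE_LOCKING.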