author	Ming Lei <tom.leiming@gmail.com>	2009-07-16 09:44:29 -0400
committer	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-07-24 04:49:44 -0400
commit	c94aa5ca3088018d2a7a9bd3258aefffe29df265 (patch)
tree	29c81673e37315087ee3087180fae043085e6343
parent	4be3bd7849165e7efa6b0b35a23d6a3598d97465 (diff)
lockdep: Print the shortest dependency chain if finding a circle
Currently lockdep prints the first circle detected, if one exists, when
acquiring a new (next) lock.

This patch prints the shortest path from the next lock to be acquired to
the previously held lock if a circle is found. It still uses the current
method to check for a circle; once one is found, a breadth-first search
(BFS) is used to compute the shortest path from the next lock to the
previous lock in the forward lock dependency graph.

Printing the shortest path shortens the reported dependency chain and
makes troubleshooting possible circular locking easier.

Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-2-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/lockdep.h	6
-rw-r--r--	kernel/lockdep.c	115
-rw-r--r--	kernel/lockdep_internals.h	83
3 files changed, 195 insertions, 9 deletions
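
The patch's search is ordinary breadth-first search over a directed graph, recording each node's predecessor so that, once the target is reached, the shortest path can be read back by walking the predecessor links. Below is a minimal user-space C sketch of that idea; the sample graph, the node numbering, and the helper name bfs_shortest_path are invented for illustration and are not part of the patch.

#include <stdio.h>

#define NR_NODES 5

/* Adjacency matrix of a small directed dependency graph:
 * 0 -> 1, 0 -> 2, 1 -> 3, 2 -> 3, 3 -> 4
 */
static const int graph[NR_NODES][NR_NODES] = {
        { 0, 1, 1, 0, 0 },
        { 0, 0, 0, 1, 0 },
        { 0, 0, 0, 1, 0 },
        { 0, 0, 0, 0, 1 },
        { 0, 0, 0, 0, 0 },
};

/* BFS from src toward dst; fills parent[] so the shortest path can be
 * walked back from dst. Returns 0 if dst was reached, -1 otherwise.
 */
static int bfs_shortest_path(int src, int dst, int parent[NR_NODES])
{
        int queue[NR_NODES], front = 0, rear = 0;
        int visited[NR_NODES] = { 0 };

        visited[src] = 1;
        parent[src] = -1;
        queue[rear++] = src;

        while (front < rear) {
                int cur = queue[front++];

                if (cur == dst)
                        return 0;

                for (int next = 0; next < NR_NODES; next++) {
                        if (graph[cur][next] && !visited[next]) {
                                visited[next] = 1;
                                parent[next] = cur;  /* remember predecessor */
                                queue[rear++] = next;
                        }
                }
        }
        return -1;
}

int main(void)
{
        int parent[NR_NODES];

        if (bfs_shortest_path(0, 4, parent) == 0) {
                /* Walk the predecessor links back from the target,
                 * as lockdep does with get_lock_parent().
                 */
                for (int n = 4; n != -1; n = parent[n])
                        printf("%d%s", n, parent[n] == -1 ? "\n" : " <- ");
        }
        return 0;
}

Walking parent[] back from the target prints the chain in reverse, which is exactly how print_circular_bug() below emits the circle by following get_lock_parent().
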
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b25d1b53df0d..9ec026f8d09e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -149,6 +149,12 @@ struct lock_list {
 	struct lock_class		*class;
 	struct stack_trace		trace;
 	int				distance;
+
+	/* The parent field is used to implement breadth-first search, and
+	 * bit 0 is reused to indicate whether the lock has been accessed
+	 * in BFS.
+	 */
+	struct lock_list		*parent;
 };
 
 /*
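
Reusing bit 0 of the parent pointer is safe because struct lock_list objects are word-aligned, so the low bit of any valid pointer is zero. Here is a stand-alone sketch of the same pointer-tagging trick; the node type and helper names are invented for illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define VISITED 1UL

struct node {
        struct node *parent;    /* bit 0 doubles as the visited flag */
};

/* Store the parent link and set the visited bit in a single word. */
static void mark_visited(struct node *n, struct node *parent)
{
        n->parent = (struct node *)((uintptr_t)parent | VISITED);
}

static int is_visited(const struct node *n)
{
        return (uintptr_t)n->parent & VISITED;
}

/* Mask the tag bit off to recover the real parent pointer. */
static struct node *get_parent(const struct node *n)
{
        return (struct node *)((uintptr_t)n->parent & ~VISITED);
}

int main(void)
{
        struct node a = { 0 }, b = { 0 };

        mark_visited(&b, &a);
        assert(is_visited(&b));
        assert(get_parent(&b) == &a);
        printf("parent recovered intact: %s\n",
               get_parent(&b) == &a ? "yes" : "no");
        return 0;
}

The patch itself sets the bit with pointer arithmetic on void * ((void *)parent + LOCK_ACCESSED, a GCC extension the kernel permits), which is equivalent to the bitwise OR above for aligned pointers.
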
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 8bbeef996c76..93dc70d18cdf 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -897,6 +897,77 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
+static struct circular_queue lock_cq;
+static int __search_shortest_path(struct lock_list *source_entry,
+				  struct lock_class *target,
+				  struct lock_list **target_entry,
+				  int forward)
+{
+	struct lock_list *entry;
+	struct circular_queue *cq = &lock_cq;
+	int ret = 1;
+
+	__cq_init(cq);
+
+	mark_lock_accessed(source_entry, NULL);
+	if (source_entry->class == target) {
+		*target_entry = source_entry;
+		ret = 0;
+		goto exit;
+	}
+
+	__cq_enqueue(cq, (unsigned long)source_entry);
+
+	while (!__cq_empty(cq)) {
+		struct lock_list *lock;
+		struct list_head *head;
+
+		__cq_dequeue(cq, (unsigned long *)&lock);
+
+		if (!lock->class) {
+			ret = -2;
+			goto exit;
+		}
+
+		if (forward)
+			head = &lock->class->locks_after;
+		else
+			head = &lock->class->locks_before;
+
+		list_for_each_entry(entry, head, entry) {
+			if (!lock_accessed(entry)) {
+				mark_lock_accessed(entry, lock);
+				if (entry->class == target) {
+					*target_entry = entry;
+					ret = 0;
+					goto exit;
+				}
+
+				if (__cq_enqueue(cq, (unsigned long)entry)) {
+					ret = -1;
+					goto exit;
+				}
+			}
+		}
+	}
+exit:
+	return ret;
+}
+
+static inline int __search_forward_shortest_path(struct lock_list *src_entry,
+						 struct lock_class *target,
+						 struct lock_list **target_entry)
+{
+	return __search_shortest_path(src_entry, target, target_entry, 1);
+}
+
+static inline int __search_backward_shortest_path(struct lock_list *src_entry,
+						  struct lock_class *target,
+						  struct lock_list **target_entry)
+{
+	return __search_shortest_path(src_entry, target, target_entry, 0);
+}
+
 /*
  * Recursive, forwards-direction lock-dependency checking, used for
  * both noncyclic checking and for hardirq-unsafe/softirq-unsafe
@@ -934,7 +1007,7 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 {
 	struct task_struct *curr = current;
 
-	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
+	if (debug_locks_silent)
 		return 0;
 
 	printk("\n=======================================================\n");
@@ -954,19 +1027,41 @@ print_circular_bug_header(struct lock_list *entry, unsigned int depth)
 	return 0;
 }
 
-static noinline int print_circular_bug_tail(void)
+static noinline int print_circular_bug(void)
 {
 	struct task_struct *curr = current;
 	struct lock_list this;
+	struct lock_list *target;
+	struct lock_list *parent;
+	int result;
+	unsigned long depth;
 
-	if (debug_locks_silent)
+	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
 	this.class = hlock_class(check_source);
 	if (!save_trace(&this.trace))
 		return 0;
 
-	print_circular_bug_entry(&this, 0);
+	result = __search_forward_shortest_path(&this,
+						hlock_class(check_target),
+						&target);
+	if (result) {
+		printk("\n%s: search for shortest path failed: %d\n",
+		       __func__, result);
+		return 0;
+	}
+
+	depth = get_lock_depth(target);
+
+	print_circular_bug_header(target, depth);
+
+	parent = get_lock_parent(target);
+
+	while (parent) {
+		print_circular_bug_entry(parent, --depth);
+		parent = get_lock_parent(parent);
+	}
 
 	printk("\nother info that might help us debug this:\n\n");
 	lockdep_print_held_locks(curr);
@@ -1072,14 +1167,14 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	 */
 	list_for_each_entry(entry, &source->locks_after, entry) {
 		if (entry->class == hlock_class(check_target))
-			return print_circular_bug_header(entry, depth+1);
+			return 2;
 		debug_atomic_inc(&nr_cyclic_checks);
-		if (!check_noncircular(entry->class, depth+1))
-			return print_circular_bug_entry(entry, depth+1);
+		if (check_noncircular(entry->class, depth+1) == 2)
+			return 2;
 	}
 	return 1;
 }
 
 #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
 /*
  * Forwards and backwards subgraph searching, for the purposes of
@@ -1484,8 +1580,8 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 */
 	check_source = next;
 	check_target = prev;
-	if (!(check_noncircular(hlock_class(next), 0)))
-		return print_circular_bug_tail();
+	if (check_noncircular(hlock_class(next), 0) == 2)
+		return print_circular_bug();
 
 	if (!check_prev_add_irq(curr, prev, next))
 		return 0;
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 699a2ac3a0d7..6f48d37d5be2 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -136,3 +136,86 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)	0
 #endif
+
+/* The circular_queue and helpers are used to implement the
+ * breadth-first search (BFS) algorithm, by which we can build
+ * the shortest path from the next lock to be acquired to the
+ * previously held lock if there is a circular dependency between them.
+ */
+#define MAX_CIRCULAR_QUE_SIZE		4096UL
+
+struct circular_queue {
+	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
+	unsigned int front, rear;
+};
+
+#define LOCK_ACCESSED		1UL
+#define LOCK_ACCESSED_MASK	(~LOCK_ACCESSED)
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1) % MAX_CIRCULAR_QUE_SIZE) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1) % MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1) % MAX_CIRCULAR_QUE_SIZE;
+	return 0;
+}
+
+static inline int __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front) % MAX_CIRCULAR_QUE_SIZE;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+				      struct lock_list *parent)
+{
+	lock->parent = (void *)parent + LOCK_ACCESSED;
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	return (unsigned long)lock->parent & LOCK_ACCESSED;
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return (struct lock_list *)
+		((unsigned long)child->parent & LOCK_ACCESSED_MASK);
+}
+
+static inline unsigned long get_lock_depth(struct lock_list *child)
+{
+	unsigned long depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}
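
The queue above is a fixed-size ring buffer: front and rear advance modulo the array size, and one slot is deliberately left unused so a full queue (rear + 1 == front) can be told apart from an empty one (front == rear). Below is a small user-space exercise of the same helpers, assuming a queue shrunk to 8 slots so the wraparound is easy to see; the main() driver is invented for illustration.

#include <stdio.h>

#define MAX_CIRCULAR_QUE_SIZE 8UL      /* small size to show wraparound */

struct circular_queue {
        unsigned long element[MAX_CIRCULAR_QUE_SIZE];
        unsigned int front, rear;
};

static void __cq_init(struct circular_queue *cq)
{
        cq->front = cq->rear = 0;
}

static int __cq_empty(struct circular_queue *cq)
{
        return cq->front == cq->rear;
}

static int __cq_full(struct circular_queue *cq)
{
        /* One slot is sacrificed to distinguish full from empty. */
        return ((cq->rear + 1) % MAX_CIRCULAR_QUE_SIZE) == cq->front;
}

static int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
        if (__cq_full(cq))
                return -1;
        cq->element[cq->rear] = elem;
        cq->rear = (cq->rear + 1) % MAX_CIRCULAR_QUE_SIZE;
        return 0;
}

static int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
        if (__cq_empty(cq))
                return -1;
        *elem = cq->element[cq->front];
        cq->front = (cq->front + 1) % MAX_CIRCULAR_QUE_SIZE;
        return 0;
}

int main(void)
{
        struct circular_queue cq;
        unsigned long v;

        __cq_init(&cq);
        /* Fill to capacity (7 usable slots), then drain in FIFO order. */
        for (unsigned long i = 0; __cq_enqueue(&cq, i) == 0; i++)
                ;
        while (__cq_dequeue(&cq, &v) == 0)
                printf("%lu ", v);      /* prints: 0 1 2 3 4 5 6 */
        printf("\n");
        return 0;
}

One design note: __cq_get_elem_count() in the patch is only correct because MAX_CIRCULAR_QUE_SIZE is a power of two; (rear - front) is computed with unsigned wraparound, and the modulo recovers the true count only when the queue size divides 2^32.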