author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-07-16 09:44:29 -0400
committer Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-07-24 04:53:29 -0400
commit    af012961450949ea297b209e091bd1a3805b8a0a (patch)
tree      d092a182ad014b4508079bacf416deb1e0650c01
parent    12f3dfd022d7e616757a94f0538d3d525d806a16 (diff)
lockdep: BFS cleanup
Some cleanups of the lockdep code after the BFS series:

 - Remove the last traces of the generation id
 - Fixup comment style
 - Move the bfs routines into lockdep.c
 - Cleanup the bfs routines

[ tom.leiming@gmail.com: Fix crash ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1246201486-7308-11-git-send-email-tom.leiming@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--	include/linux/lockdep.h	7
-rw-r--r--	kernel/lockdep.c	284
-rw-r--r--	kernel/lockdep_internals.h	97
3 files changed, 175 insertions(+), 213 deletions(-)
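The patch below consolidates lockdep's BFS machinery (the circular queue and its helpers) into kernel/lockdep.c. As a rough, self-contained illustration of the power-of-two ring buffer it moves there, here is a userspace sketch: the names mirror the patch, but the smaller queue size, the cq_* wrapper names and the demo main() are assumptions made for this example only, not kernel code.

/*
 * Minimal sketch of a power-of-two circular queue in the style of the
 * one lockdep's BFS uses (illustration only).
 */
#include <stdio.h>

#define MAX_CIRCULAR_QUEUE_SIZE	16UL	/* power of 2; the kernel uses 4096UL */
#define CQ_MASK			(MAX_CIRCULAR_QUEUE_SIZE - 1)

struct circular_queue {
	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
	unsigned int front, rear;
};

static void cq_init(struct circular_queue *cq)
{
	cq->front = cq->rear = 0;
}

static int cq_empty(struct circular_queue *cq)
{
	return cq->front == cq->rear;
}

static int cq_full(struct circular_queue *cq)
{
	/* one slot stays free so that full can be told apart from empty */
	return ((cq->rear + 1) & CQ_MASK) == cq->front;
}

static int cq_enqueue(struct circular_queue *cq, unsigned long elem)
{
	if (cq_full(cq))
		return -1;
	cq->element[cq->rear] = elem;
	cq->rear = (cq->rear + 1) & CQ_MASK;	/* cheap wrap-around via the mask */
	return 0;
}

static int cq_dequeue(struct circular_queue *cq, unsigned long *elem)
{
	if (cq_empty(cq))
		return -1;
	*elem = cq->element[cq->front];
	cq->front = (cq->front + 1) & CQ_MASK;
	return 0;
}

int main(void)
{
	struct circular_queue cq;
	unsigned long v;

	cq_init(&cq);
	for (v = 0; v < 20; v++)	/* deliberately more than the queue holds */
		if (cq_enqueue(&cq, v) < 0)
			printf("queue full at %lu\n", v);
	while (!cq_dequeue(&cq, &v))
		printf("dequeued %lu\n", v);
	return 0;
}

Because the size is a power of two, the wrap-around is a single AND with CQ_MASK instead of a division, which is the point of the "power of 2" comment in the patch.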
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 9ec026f8d09e..12aabfcb45f6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -58,7 +58,6 @@ struct lock_class {
 
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
-	unsigned int			dep_gen_id;
 
 	/*
 	 * IRQ/softirq usage tracking bits:
@@ -150,9 +149,9 @@ struct lock_list {
 	struct stack_trace		trace;
 	int				distance;
 
-	/*The parent field is used to implement breadth-first search,and
-	 *the bit 0 is reused to indicate if the lock has been accessed
-	 *in BFS.
+	/*
+	 * The parent field is used to implement breadth-first search, and the
+	 * bit 0 is reused to indicate if the lock has been accessed in BFS.
 	 */
 	struct lock_list		*parent;
 };
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 744da6265d99..1cedb00e3e7a 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -43,6 +43,7 @@
 #include <linux/ftrace.h>
 #include <linux/stringify.h>
 #include <linux/bitops.h>
+
 #include <asm/sections.h>
 
 #include "lockdep_internals.h"
@@ -118,7 +119,7 @@ static inline int debug_locks_off_graph_unlock(void)
 static int lockdep_initialized;
 
 unsigned long nr_list_entries;
-struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
 
 /*
  * All data structures here are protected by the global debug_lock.
@@ -390,19 +391,6 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
-static unsigned int lockdep_dependency_gen_id;
-
-static bool lockdep_dependency_visit(struct lock_class *source,
-				     unsigned int depth)
-{
-	if (!depth)
-		lockdep_dependency_gen_id++;
-	if (source->dep_gen_id == lockdep_dependency_gen_id)
-		return true;
-	source->dep_gen_id = lockdep_dependency_gen_id;
-	return false;
-}
-
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -551,88 +539,6 @@ static void lockdep_print_held_locks(struct task_struct *curr)
 	}
 }
 
-static void print_lock_class_header(struct lock_class *class, int depth)
-{
-	int bit;
-
-	printk("%*s->", depth, "");
-	print_lock_name(class);
-	printk(" ops: %lu", class->ops);
-	printk(" {\n");
-
-	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
-		if (class->usage_mask & (1 << bit)) {
-			int len = depth;
-
-			len += printk("%*s %s", depth, "", usage_str[bit]);
-			len += printk(" at:\n");
-			print_stack_trace(class->usage_traces + bit, len);
-		}
-	}
-	printk("%*s }\n", depth, "");
-
-	printk("%*s ... key at: ",depth,"");
-	print_ip_sym((unsigned long)class->key);
-}
-
-/*
- * printk the shortest lock dependencies from @start to @end in reverse order:
- */
-static void __used
-print_shortest_lock_dependencies(struct lock_list *leaf,
-				struct lock_list *root)
-{
-	struct lock_list *entry = leaf;
-	int depth;
-
-	/*compute depth from generated tree by BFS*/
-	depth = get_lock_depth(leaf);
-
-	do {
-		print_lock_class_header(entry->class, depth);
-		printk("%*s ... acquired at:\n", depth, "");
-		print_stack_trace(&entry->trace, 2);
-		printk("\n");
-
-		if (depth == 0 && (entry != root)) {
-			printk("lockdep:%s bad BFS generated tree\n", __func__);
-			break;
-		}
-
-		entry = get_lock_parent(entry);
-		depth--;
-	} while (entry && (depth >= 0));
-
-	return;
-}
-/*
- * printk all lock dependencies starting at <entry>:
- */
-static void __used
-print_lock_dependencies(struct lock_class *class, int depth)
-{
-	struct lock_list *entry;
-
-	if (lockdep_dependency_visit(class, depth))
-		return;
-
-	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
-		return;
-
-	print_lock_class_header(class, depth);
-
-	list_for_each_entry(entry, &class->locks_after, entry) {
-		if (DEBUG_LOCKS_WARN_ON(!entry->class))
-			return;
-
-		print_lock_dependencies(entry->class, depth + 1);
-
-		printk("%*s ... acquired at:\n",depth,"");
-		print_stack_trace(&entry->trace, 2);
-		printk("\n");
-	}
-}
-
 static void print_kernel_version(void)
 {
 	printk("%s %.*s\n", init_utsname()->release,
@@ -927,14 +833,106 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 	return 1;
 }
 
-unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
-static struct circular_queue lock_cq;
+/*For good efficiency of modular, we use power of 2*/
+#define MAX_CIRCULAR_QUEUE_SIZE		4096UL
+#define CQ_MASK				(MAX_CIRCULAR_QUEUE_SIZE-1)
+
+/* The circular_queue and helpers is used to implement the
+ * breadth-first search(BFS)algorithem, by which we can build
+ * the shortest path from the next lock to be acquired to the
+ * previous held lock if there is a circular between them.
+ * */
+struct circular_queue {
+	unsigned long element[MAX_CIRCULAR_QUEUE_SIZE];
+	unsigned int front, rear;
+};
+
+static struct circular_queue lock_cq;
+static unsigned long bfs_accessed[BITS_TO_LONGS(MAX_LOCKDEP_ENTRIES)];
+
 unsigned int max_bfs_queue_depth;
+
+static inline void __cq_init(struct circular_queue *cq)
+{
+	cq->front = cq->rear = 0;
+	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
+}
+
+static inline int __cq_empty(struct circular_queue *cq)
+{
+	return (cq->front == cq->rear);
+}
+
+static inline int __cq_full(struct circular_queue *cq)
+{
+	return ((cq->rear + 1) & CQ_MASK) == cq->front;
+}
+
+static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
+{
+	if (__cq_full(cq))
+		return -1;
+
+	cq->element[cq->rear] = elem;
+	cq->rear = (cq->rear + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
+{
+	if (__cq_empty(cq))
+		return -1;
+
+	*elem = cq->element[cq->front];
+	cq->front = (cq->front + 1) & CQ_MASK;
+	return 0;
+}
+
+static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
+{
+	return (cq->rear - cq->front) & CQ_MASK;
+}
+
+static inline void mark_lock_accessed(struct lock_list *lock,
+					struct lock_list *parent)
+{
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	lock->parent = parent;
+	set_bit(nr, bfs_accessed);
+}
+
+static inline unsigned long lock_accessed(struct lock_list *lock)
+{
+	unsigned long nr;
+	nr = lock - list_entries;
+	WARN_ON(nr >= nr_list_entries);
+	return test_bit(nr, bfs_accessed);
+}
+
+static inline struct lock_list *get_lock_parent(struct lock_list *child)
+{
+	return child->parent;
+}
+
+static inline int get_lock_depth(struct lock_list *child)
+{
+	int depth = 0;
+	struct lock_list *parent;
+
+	while ((parent = get_lock_parent(child))) {
+		child = parent;
+		depth++;
+	}
+	return depth;
+}
+
 static int __bfs(struct lock_list *source_entry,
 		 void *data,
 		 int (*match)(struct lock_list *entry, void *data),
 		 struct lock_list **target_entry,
 		 int forward)
 {
 	struct lock_list *entry;
 	struct list_head *head;
@@ -1202,14 +1200,6 @@ check_noncircular(struct lock_list *root, struct lock_class *target,
  * without creating any illegal irq-safe -> irq-unsafe lock dependency.
  */
 
-
-#define BFS_PROCESS_RET(ret)	do { \
-				if (ret < 0) \
-					return print_bfs_bug(ret); \
-				if (ret == 1) \
-					return 1; \
-				} while (0)
-
 static inline int usage_match(struct lock_list *entry, void *bit)
 {
 	return entry->class->usage_mask & (1 << (enum lock_usage_bit)bit);
@@ -1263,6 +1253,60 @@ find_usage_backwards(struct lock_list *root, enum lock_usage_bit bit,
 	return result;
 }
 
+static void print_lock_class_header(struct lock_class *class, int depth)
+{
+	int bit;
+
+	printk("%*s->", depth, "");
+	print_lock_name(class);
+	printk(" ops: %lu", class->ops);
+	printk(" {\n");
+
+	for (bit = 0; bit < LOCK_USAGE_STATES; bit++) {
+		if (class->usage_mask & (1 << bit)) {
+			int len = depth;
+
+			len += printk("%*s %s", depth, "", usage_str[bit]);
+			len += printk(" at:\n");
+			print_stack_trace(class->usage_traces + bit, len);
+		}
+	}
+	printk("%*s }\n", depth, "");
+
+	printk("%*s ... key at: ",depth,"");
+	print_ip_sym((unsigned long)class->key);
+}
+
+/*
+ * printk the shortest lock dependencies from @start to @end in reverse order:
+ */
+static void __used
+print_shortest_lock_dependencies(struct lock_list *leaf,
+				struct lock_list *root)
+{
+	struct lock_list *entry = leaf;
+	int depth;
+
+	/*compute depth from generated tree by BFS*/
+	depth = get_lock_depth(leaf);
+
+	do {
+		print_lock_class_header(entry->class, depth);
+		printk("%*s ... acquired at:\n", depth, "");
+		print_stack_trace(&entry->trace, 2);
+		printk("\n");
+
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad BFS generated tree\n", __func__);
+			break;
+		}
+
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && (depth >= 0));
+
+	return;
+}
 
 static int
 print_bad_irq_dependency(struct task_struct *curr,
@@ -1349,12 +1393,18 @@ check_usage(struct task_struct *curr, struct held_lock *prev,
 
 	this.class = hlock_class(prev);
 	ret = find_usage_backwards(&this, bit_backwards, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	that.parent = NULL;
 	that.class = hlock_class(next);
 	ret = find_usage_forwards(&that, bit_forwards, &target_entry1);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	return print_bad_irq_dependency(curr, &this, &that,
 			target_entry, target_entry1,
@@ -2038,7 +2088,10 @@ check_usage_forwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_forwards(&root, bit, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
@@ -2059,7 +2112,10 @@ check_usage_backwards(struct task_struct *curr, struct held_lock *this,
 	root.parent = NULL;
 	root.class = hlock_class(this);
 	ret = find_usage_backwards(&root, bit, &target_entry);
-	BFS_PROCESS_RET(ret);
+	if (ret < 0)
+		return print_bfs_bug(ret);
+	if (ret == 1)
+		return ret;
 
 	return print_irq_inversion_bug(curr, &root, target_entry,
 					this, 1, irqclass);
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index 6baa8807efdd..a2ee95ad1313 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -91,6 +91,8 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned int max_bfs_queue_depth;
+
 #ifdef CONFIG_PROVE_LOCKING
 extern unsigned long lockdep_count_forward_deps(struct lock_class *);
 extern unsigned long lockdep_count_backward_deps(struct lock_class *);
@@ -136,98 +138,3 @@ extern atomic_t nr_find_usage_backwards_recursions;
 # define debug_atomic_dec(ptr)		do { } while (0)
 # define debug_atomic_read(ptr)	0
 #endif
-
-
-extern unsigned int max_bfs_queue_depth;
-extern unsigned long nr_list_entries;
-extern struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
-extern unsigned long bfs_accessed[];
-
-/*For good efficiency of modular, we use power of 2*/
-#define MAX_CIRCULAR_QUE_SIZE		4096UL
-
-/* The circular_queue and helpers is used to implement the
- * breadth-first search(BFS)algorithem, by which we can build
- * the shortest path from the next lock to be acquired to the
- * previous held lock if there is a circular between them.
- * */
-struct circular_queue{
-	unsigned long element[MAX_CIRCULAR_QUE_SIZE];
-	unsigned int front, rear;
-};
-
-static inline void __cq_init(struct circular_queue *cq)
-{
-	cq->front = cq->rear = 0;
-	bitmap_zero(bfs_accessed, MAX_LOCKDEP_ENTRIES);
-}
-
-static inline int __cq_empty(struct circular_queue *cq)
-{
-	return (cq->front == cq->rear);
-}
-
-static inline int __cq_full(struct circular_queue *cq)
-{
-	return ((cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1)) == cq->front;
-}
-
-static inline int __cq_enqueue(struct circular_queue *cq, unsigned long elem)
-{
-	if (__cq_full(cq))
-		return -1;
-
-	cq->element[cq->rear] = elem;
-	cq->rear = (cq->rear + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline int __cq_dequeue(struct circular_queue *cq, unsigned long *elem)
-{
-	if (__cq_empty(cq))
-		return -1;
-
-	*elem = cq->element[cq->front];
-	cq->front = (cq->front + 1)&(MAX_CIRCULAR_QUE_SIZE-1);
-	return 0;
-}
-
-static inline unsigned int __cq_get_elem_count(struct circular_queue *cq)
-{
-	return (cq->rear - cq->front)&(MAX_CIRCULAR_QUE_SIZE-1);
-}
-
-static inline void mark_lock_accessed(struct lock_list *lock,
-					struct lock_list *parent)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	lock->parent = parent;
-	set_bit(nr, bfs_accessed);
-}
-
-static inline unsigned long lock_accessed(struct lock_list *lock)
-{
-	unsigned long nr;
-	nr = lock - list_entries;
-	WARN_ON(nr >= nr_list_entries);
-	return test_bit(nr, bfs_accessed);
-}
-
-static inline struct lock_list *get_lock_parent(struct lock_list *child)
-{
-	return child->parent;
-}
-
-static inline int get_lock_depth(struct lock_list *child)
-{
-	int depth = 0;
-	struct lock_list *parent;
-
-	while ((parent = get_lock_parent(child))) {
-		child = parent;
-		depth++;
-	}
-	return depth;
-}
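The comment the patch moves into kernel/lockdep.c explains that the queue and helpers drive a breadth-first search which records, via lock_list::parent, how each entry was reached, so the shortest dependency path can be walked backwards afterwards. As a rough standalone sketch of that idea (not the kernel implementation: the node count, adjacency-matrix representation, helper names and demo edges are all made up for illustration):

/*
 * Toy BFS with parent tracking, in the spirit of lockdep's __bfs():
 * a queue of nodes, a visited bitmap (like bfs_accessed) and a parent
 * pointer per node, which is later walked backwards the way
 * print_shortest_lock_dependencies() walks lock_list::parent.
 */
#include <stdio.h>
#include <string.h>

#define NR_NODES 8

struct node {
	int adj[NR_NODES];	/* adjacency row: 1 = edge to that node */
	int parent;		/* filled in by the BFS; -1 marks the root */
};

static struct node graph[NR_NODES];
static unsigned char accessed[NR_NODES];

static int bfs(int source, int target)
{
	int queue[NR_NODES], front = 0, rear = 0, i;

	memset(accessed, 0, sizeof(accessed));
	graph[source].parent = -1;
	accessed[source] = 1;
	queue[rear++] = source;

	while (front != rear) {
		int cur = queue[front++];

		if (cur == target)
			return 1;
		for (i = 0; i < NR_NODES; i++) {
			if (!graph[cur].adj[i] || accessed[i])
				continue;
			accessed[i] = 1;	/* mark_lock_accessed() analogue */
			graph[i].parent = cur;	/* remember how we got here */
			queue[rear++] = i;
		}
	}
	return 0;
}

static void print_path(int target)
{
	/* walk the parent chain backwards, like get_lock_parent() */
	for (int n = target; n != -1; n = graph[n].parent)
		printf("%d%s", n, graph[n].parent != -1 ? " <- " : "\n");
}

int main(void)
{
	/* hypothetical edges: 0->1->2->3 plus a shortcut 0->3 */
	graph[0].adj[1] = graph[1].adj[2] = graph[2].adj[3] = graph[0].adj[3] = 1;
	if (bfs(0, 3))
		print_path(3);	/* prints "3 <- 0": the shortest path wins */
	return 0;
}

Because nodes are marked when first enqueued, each parent pointer records the first (and therefore shortest) way the node was reached, which is what lets the real code print the minimal lock chain when it finds a cycle or an irq-usage conflict.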