author:    David Miller <davem@davemloft.net>  2008-07-30 00:45:03 -0400
committer: Ingo Molnar <mingo@elte.hu>         2008-07-31 12:38:28 -0400
commit:    419ca3f13532793b81aff09f80c60af3eacbb43d
tree:      eb2d82e52917ebccff269a51e90868ce229336b2
parent:    6e86841d05f371b5b9b86ce76c02aaee83352298
lockdep: fix combinatorial explosion in lock subgraph traversal
When we traverse the graph, either forwards or backwards, we
are interested in whether a certain property exists somewhere
in a node reachable in the graph.
Therefore it is never necessary to traverse through a node more
than once to get a correct answer to the given query.
Take advantage of this property using a global ID counter so that we
need not clear all the markers in all the lock_class entries before
doing a traversal. A new ID is chosen when we start to traverse, and
we continue through a lock_class only if its ID hasn't been marked
with the new value yet.
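The patch's lockdep_dependency_visit(), shown in the diff below, is
exactly this. As a standalone illustration of the pattern (a minimal
user-space sketch; struct node and visit() are hypothetical stand-ins
for lock_class and the kernel helper):

    #include <stdbool.h>

    /* Hypothetical stand-in for struct lock_class; only the marker matters. */
    struct node {
            unsigned int dep_gen_id;  /* generation this node was last visited in */
            /* ... adjacency lists would live here ... */
    };

    static unsigned int dependency_gen_id;

    /*
     * Returns true if @n was already visited during the current traversal.
     * Bumping the generation at depth 0 starts a fresh traversal without
     * having to clear the marker in every node first.
     */
    static bool visit(struct node *n, unsigned int depth)
    {
            if (!depth)
                    dependency_gen_id++;
            if (n->dep_gen_id == dependency_gen_id)
                    return true;
            n->dep_gen_id = dependency_gen_id;
            return false;
    }

Each traversal entry point calls visit() first and prunes the subgraph
on a true return, so every node is expanded at most once per query.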
This short-circuiting is essential, especially for high-CPU-count
systems. The scheduler has a runqueue per CPU, and needs to take
two runqueue locks at a time, which leads to long chains of
backwards and forwards subgraphs from these runqueue lock nodes.
Without the short-circuit implemented here, a graph traversal on
a runqueue lock can take up to (1 << (N - 1)) checks on a system
with N CPUs.
For anything more than 16 CPUs or so, lockdep will eventually bring
the machine to a complete standstill.
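For scale, 1 << (N - 1) grows as 2^(N-1): 16 CPUs permit up to
2^15 = 32,768 checks per traversal, 24 CPUs about 8.4 million, and
64 CPUs roughly 9.2 * 10^18 -- hence the standstill.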
Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
 include/linux/lockdep.h    |  1
 kernel/lockdep.c           | 86
 kernel/lockdep_internals.h |  3
 kernel/lockdep_proc.c      | 34
 4 files changed, 93 insertions(+), 31 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 2486eb4edbf1..1bfdc30bb0af 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -89,6 +89,7 @@ struct lock_class {
 
 	struct lockdep_subclass_key	*key;
 	unsigned int			subclass;
+	unsigned int			dep_gen_id;
 
 	/*
 	 * IRQ/softirq usage tracking bits:
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d38a64362973..6999e64fc248 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -372,6 +372,19 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+				     unsigned int depth)
+{
+	if (!depth)
+		lockdep_dependency_gen_id++;
+	if (source->dep_gen_id == lockdep_dependency_gen_id)
+		return true;
+	source->dep_gen_id = lockdep_dependency_gen_id;
+	return false;
+}
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -558,6 +571,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(class, depth))
+		return;
+
 	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
 		return;
 
@@ -959,6 +975,67 @@ static int noinline print_infinite_recursion_bug(void)
 	return 0;
 }
 
+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+					   unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_after, entry)
+		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_forward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+					    unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_before, entry)
+		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_backward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 /*
  * Prove that the dependency graph starting at <entry> can not
  * lead to <target>. Print an error and return 0 if it does.
@@ -968,6 +1045,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
@@ -1011,6 +1091,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
 	if (depth >= RECURSION_LIMIT)
@@ -1050,6 +1133,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (!__raw_spin_is_locked(&lockdep_lock))
 		return DEBUG_LOCKS_WARN_ON(1);
 
diff --git a/kernel/lockdep_internals.h b/kernel/lockdep_internals.h
index c3600a091a28..68d44ec77ab5 100644
--- a/kernel/lockdep_internals.h
+++ b/kernel/lockdep_internals.h
@@ -53,6 +53,9 @@ extern unsigned int nr_process_chains;
 extern unsigned int max_lockdep_depth;
 extern unsigned int max_recursion_depth;
 
+extern unsigned long lockdep_count_forward_deps(struct lock_class *);
+extern unsigned long lockdep_count_backward_deps(struct lock_class *);
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * Various lockdep statistics:
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index 9b0e940e2545..6252ff799d19 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -63,34 +63,6 @@ static void l_stop(struct seq_file *m, void *v)
 {
 }
 
-static unsigned long count_forward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_after, entry)
-		ret += count_forward_deps(entry->class);
-
-	return ret;
-}
-
-static unsigned long count_backward_deps(struct lock_class *class)
-{
-	struct lock_list *entry;
-	unsigned long ret = 1;
-
-	/*
-	 * Recurse this class's dependency list:
-	 */
-	list_for_each_entry(entry, &class->locks_before, entry)
-		ret += count_backward_deps(entry->class);
-
-	return ret;
-}
-
 static void print_name(struct seq_file *m, struct lock_class *class)
 {
 	char str[128];
@@ -124,10 +96,10 @@ static int l_show(struct seq_file *m, void *v)
 #ifdef CONFIG_DEBUG_LOCKDEP
 	seq_printf(m, " OPS:%8ld", class->ops);
 #endif
-	nr_forward_deps = count_forward_deps(class);
+	nr_forward_deps = lockdep_count_forward_deps(class);
 	seq_printf(m, " FD:%5ld", nr_forward_deps);
 
-	nr_backward_deps = count_backward_deps(class);
+	nr_backward_deps = lockdep_count_backward_deps(class);
 	seq_printf(m, " BD:%5ld", nr_backward_deps);
 
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
@@ -350,7 +322,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 		if (class->usage_mask & LOCKF_ENABLED_HARDIRQS_READ)
 			nr_hardirq_read_unsafe++;
 
-		sum_forward_deps += count_forward_deps(class);
+		sum_forward_deps += lockdep_count_forward_deps(class);
 	}
 #ifdef CONFIG_DEBUG_LOCKDEP
 	DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);