path: root/kernel/lockdep.c
author	David Miller <davem@davemloft.net>	2008-07-30 00:45:03 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-31 12:38:28 -0400
commit	419ca3f13532793b81aff09f80c60af3eacbb43d (patch)
tree	eb2d82e52917ebccff269a51e90868ce229336b2	/kernel/lockdep.c
parent	6e86841d05f371b5b9b86ce76c02aaee83352298 (diff)
lockdep: fix combinatorial explosion in lock subgraph traversal
When we traverse the graph, either forwards or backwards, we are interested in whether a certain property exists somewhere in a node reachable in the graph.

Therefore it is never necessary to traverse through a node more than once to get a correct answer to the given query.

Take advantage of this property using a global ID counter so that we need not clear all the markers in all the lock_class entries before doing a traversal. A new ID is chosen when we start to traverse, and we continue through a lock_class only if its ID hasn't been marked with the new value yet.

This short-circuiting is essential, especially on high CPU count systems. The scheduler has a runqueue per cpu, and needs to take two runqueue locks at a time, which leads to long chains of backwards and forwards subgraphs from these runqueue lock nodes. Without the short-circuit implemented here, a graph traversal on a runqueue lock can take up to (1 << (N - 1)) checks on a system with N cpus.

For anything more than 16 cpus or so, lockdep will eventually bring the machine to a complete standstill.

Signed-off-by: David S. Miller <davem@davemloft.net>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
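For illustration only, here is a minimal user-space sketch of the generation-counter idea described above. The struct node graph and the visit()/count_deps() helpers are hypothetical stand-ins invented for this example; they merely mirror the shape of lockdep_dependency_visit() and __lockdep_count_forward_deps() in the patch below, and are not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define MAX_DEPS 4

struct node {
	const char *name;
	unsigned int dep_gen_id;	/* ID of the last traversal that visited us */
	struct node *deps[MAX_DEPS];	/* forward dependencies, NULL-terminated */
};

static unsigned int dependency_gen_id;

/* Mirrors lockdep_dependency_visit(): true if this node was already seen. */
static bool visit(struct node *n, unsigned int depth)
{
	if (!depth)
		dependency_gen_id++;	/* new traversal, pick a fresh ID */
	if (n->dep_gen_id == dependency_gen_id)
		return true;		/* already visited this pass: short-circuit */
	n->dep_gen_id = dependency_gen_id;
	return false;
}

/* Count nodes reachable from n, visiting each node at most once. */
static unsigned long count_deps(struct node *n, unsigned int depth)
{
	unsigned long ret = 1;
	int i;

	if (visit(n, depth))
		return 0;
	for (i = 0; i < MAX_DEPS && n->deps[i]; i++)
		ret += count_deps(n->deps[i], depth + 1);
	return ret;
}

int main(void)
{
	/* Diamond graph: a -> b, a -> c, b -> d, c -> d.  Without the
	 * marker, d would be visited twice; with it, once per traversal. */
	struct node d = { "d", 0, { NULL } };
	struct node b = { "b", 0, { &d } };
	struct node c = { "c", 0, { &d } };
	struct node a = { "a", 0, { &b, &c } };

	printf("nodes reachable from %s: %lu\n", a.name, count_deps(&a, 0));	/* 4 */
	printf("nodes reachable from %s: %lu\n", b.name, count_deps(&b, 0));	/* 2 */
	return 0;
}

Bumping the global ID at the start of each traversal invalidates every per-node marker at once, so no clearing pass over all nodes is needed between queries.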
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	86
1 file changed, 86 insertions(+), 0 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index d38a64362973..6999e64fc248 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -372,6 +372,19 @@ unsigned int nr_process_chains;
 unsigned int max_lockdep_depth;
 unsigned int max_recursion_depth;
 
+static unsigned int lockdep_dependency_gen_id;
+
+static bool lockdep_dependency_visit(struct lock_class *source,
+				     unsigned int depth)
+{
+	if (!depth)
+		lockdep_dependency_gen_id++;
+	if (source->dep_gen_id == lockdep_dependency_gen_id)
+		return true;
+	source->dep_gen_id = lockdep_dependency_gen_id;
+	return false;
+}
+
 #ifdef CONFIG_DEBUG_LOCKDEP
 /*
  * We cannot printk in early bootup code. Not even early_printk()
@@ -558,6 +571,9 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(class, depth))
+		return;
+
 	if (DEBUG_LOCKS_WARN_ON(depth >= 20))
 		return;
 
@@ -959,6 +975,67 @@ static int noinline print_infinite_recursion_bug(void)
 	return 0;
 }
 
+unsigned long __lockdep_count_forward_deps(struct lock_class *class,
+					   unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_after, entry)
+		ret += __lockdep_count_forward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_forward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_forward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
+unsigned long __lockdep_count_backward_deps(struct lock_class *class,
+					    unsigned int depth)
+{
+	struct lock_list *entry;
+	unsigned long ret = 1;
+
+	if (lockdep_dependency_visit(class, depth))
+		return 0;
+	/*
+	 * Recurse this class's dependency list:
+	 */
+	list_for_each_entry(entry, &class->locks_before, entry)
+		ret += __lockdep_count_backward_deps(entry->class, depth + 1);
+
+	return ret;
+}
+
+unsigned long lockdep_count_backward_deps(struct lock_class *class)
+{
+	unsigned long ret, flags;
+
+	local_irq_save(flags);
+	__raw_spin_lock(&lockdep_lock);
+	ret = __lockdep_count_backward_deps(class, 0);
+	__raw_spin_unlock(&lockdep_lock);
+	local_irq_restore(flags);
+
+	return ret;
+}
+
 /*
  * Prove that the dependency graph starting at <entry> can not
  * lead to <target>. Print an error and return 0 if it does.
@@ -968,6 +1045,9 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 {
 	struct lock_list *entry;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
@@ -1011,6 +1091,9 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
 	if (depth >= RECURSION_LIMIT)
@@ -1050,6 +1133,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (lockdep_dependency_visit(source, depth))
+		return 1;
+
 	if (!__raw_spin_is_locked(&lockdep_lock))
 		return DEBUG_LOCKS_WARN_ON(1);
 