author		James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-11-22 13:06:44 -0500
committer	James Bottomley <jejb@mulgrave.il.steeleye.com>	2006-11-22 13:06:44 -0500
commit		0bd2af46839ad6262d25714a6ec0365db9d6b98f (patch)
tree		dcced72d230d69fd0c5816ac6dd03ab84799a93e /kernel/lockdep.c
parent		e138a5d2356729b8752e88520cc1525fae9794ac (diff)
parent		f26b90440cd74c78fe10c9bd5160809704a9627c (diff)

Merge ../scsi-rc-fixes-2.6
Diffstat (limited to 'kernel/lockdep.c')
 kernel/lockdep.c | 26 ++++++++++++++------------
 1 file changed, 14 insertions(+), 12 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 4c0553461000..c9fefdb1a7db 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -575,6 +575,8 @@ static noinline int print_circular_bug_tail(void)
 	return 0;
 }
 
+#define RECURSION_LIMIT 40
+
 static int noinline print_infinite_recursion_bug(void)
 {
 	__raw_spin_unlock(&hash_lock);
@@ -595,7 +597,7 @@ check_noncircular(struct lock_class *source, unsigned int depth)
 	debug_atomic_inc(&nr_cyclic_check_recursions);
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 	/*
 	 * Check this lock's dependency list:
@@ -645,7 +647,7 @@ find_usage_forwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_forwards_checks);
@@ -684,7 +686,7 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
-	if (depth >= 20)
+	if (depth >= RECURSION_LIMIT)
 		return print_infinite_recursion_bug();
 
 	debug_atomic_inc(&nr_find_usage_backwards_checks);
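
The three hunks above replace three copies of the magic depth bound 20 with a single named RECURSION_LIMIT of 40. A minimal self-contained sketch of the pattern (hypothetical node type and report function, not lockdep's real ones):

#define RECURSION_LIMIT 40

struct node {
	struct node **deps;		/* NULL-terminated dependency list */
};

static unsigned int max_recursion_depth;	/* statistics, as in lockdep */

static int report_recursion_bug(void)
{
	return 0;	/* lockdep convention: 0 tells callers to bail out */
}

/*
 * Depth-bounded DFS in the style of check_noncircular(): record the
 * deepest recursion seen, and abort before the kernel stack overflows.
 */
static int walk_deps(struct node *source, unsigned int depth)
{
	struct node **d;

	if (depth > max_recursion_depth)
		max_recursion_depth = depth;
	if (depth >= RECURSION_LIMIT)
		return report_recursion_bug();

	for (d = source->deps; *d; d++)
		if (!walk_deps(*d, depth + 1))
			return 0;
	return 1;
}
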
@@ -1079,7 +1081,8 @@ static int static_obj(void *obj)
 	 */
 	for_each_possible_cpu(i) {
 		start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
-		end = (unsigned long) &__per_cpu_end + per_cpu_offset(i);
+		end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
+					+ per_cpu_offset(i);
 
 		if ((addr >= start) && (addr < end))
 			return 1;
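
The static_obj() hunk fixes the upper bound of the per-CPU range check: each CPU's area spans PERCPU_ENOUGH_ROOM bytes from its base (static per-CPU data plus room reserved for modules), so testing against __per_cpu_end missed module per-CPU addresses. A user-space analogue of the corrected containment test, with mock areas standing in for the kernel's layout:

#include <stdbool.h>

#define NR_CPUS            4
#define PERCPU_ENOUGH_ROOM 0x8000	/* assumed size; plays the kernel constant's role */

/* Mock per-CPU areas; the kernel instead computes each base from
 * __per_cpu_start plus per_cpu_offset(cpu). */
static char pcpu_area[NR_CPUS][PERCPU_ENOUGH_ROOM];

/* Is addr inside some CPU's per-CPU area?  Note the end bound:
 * start + PERCPU_ENOUGH_ROOM, exactly as in the fixed hunk. */
static bool is_percpu_addr(unsigned long addr)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		unsigned long start = (unsigned long)pcpu_area[i];
		unsigned long end = start + PERCPU_ENOUGH_ROOM;

		if (addr >= start && addr < end)
			return true;
	}
	return false;
}
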
@@ -1114,8 +1117,6 @@ static int count_matching_names(struct lock_class *new_class)
 	return count + 1;
 }
 
-extern void __error_too_big_MAX_LOCKDEP_SUBCLASSES(void);
-
 /*
  * Register a lock's class in the hash-table, if the class is not present
  * yet. Otherwise we look it up. We cache the result in the lock object
@@ -1153,8 +1154,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
 	 * (or spin_lock_init()) call - which acts as the key. For static
 	 * locks we use the lock object itself as the key.
 	 */
-	if (sizeof(struct lock_class_key) > sizeof(struct lock_class))
-		__error_too_big_MAX_LOCKDEP_SUBCLASSES();
+	BUILD_BUG_ON(sizeof(struct lock_class_key) > sizeof(struct lock_class));
 
 	key = lock->key->subkeys + subclass;
 
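
This hunk swaps a link-time trick (a call to a deliberately undefined extern function, which failed only at link time with an opaque symbol name) for BUILD_BUG_ON, which fails at compile time. The kernel macro of this era boils down to the negative-array-size trick sketched here:

/* Essentially the kernel.h definition of the period: if condition is
 * true, the array size goes negative and compilation stops on the spot. */
#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2 * !!(condition)]))

/* Stand-in structs for illustration, not lockdep's real layouts. */
struct lock_class_key_model { unsigned long subkeys[8]; };
struct lock_class_model     { unsigned long fields[16]; };

static void layout_sanity_check(void)
{
	/* Compiles only while the key fits inside a class, mirroring the
	 * assertion the patch adds to look_up_lock_class(). */
	BUILD_BUG_ON(sizeof(struct lock_class_key_model) >
		     sizeof(struct lock_class_model));
}
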
@@ -1177,7 +1177,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
  * itself, so actual lookup of the hash should be once per lock object.
  */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
@@ -1249,7 +1249,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
 
-	if (!subclass)
+	if (!subclass || force)
 		lock->class_cache = class;
 
 	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
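
register_lock_class() gains a force argument that widens the caching rule: previously only the default subclass 0 was cached in the map. A simplified model of the new decision (hypothetical reduced types, not the full registration path):

struct lock_class;

struct lockdep_map_model {
	struct lock_class *class_cache;
};

/*
 * Cache the class either because it is the default (subclass 0) or
 * because the caller forces it, as lockdep_init_map() does below when
 * a map is permanently bound to one non-zero subclass.
 */
static void cache_class(struct lockdep_map_model *lock, unsigned int subclass,
			int force, struct lock_class *class)
{
	if (!subclass || force)
		lock->class_cache = class;
}
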
@@ -1937,7 +1937,7 @@ void trace_softirqs_off(unsigned long ip)
  * Initialize a lock instance's lock-class mapping info:
  */
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
-		      struct lock_class_key *key)
+		      struct lock_class_key *key, int subclass)
 {
 	if (unlikely(!debug_locks))
 		return;
@@ -1957,6 +1957,8 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lock->name = name;
 	lock->key = key;
 	lock->class_cache = NULL;
+	if (subclass)
+		register_lock_class(lock, subclass, 1);
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
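
lockdep_init_map() now takes the subclass and, when it is non-zero, registers the class eagerly with force=1 so class_cache is primed before the first acquire. A hypothetical caller of the new signature:

/* Hypothetical user: a map that always nests at subclass 1 gets its
 * class registered and cached at init time rather than on first use. */
static struct lockdep_map my_dep_map;
static struct lock_class_key my_key;

static void my_subsystem_init(void)
{
	lockdep_init_map(&my_dep_map, "my_lock", &my_key, 1 /* subclass */);
}
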
@@ -1995,7 +1997,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * Not cached yet or subclass?
 	 */
 	if (unlikely(!class)) {
-		class = register_lock_class(lock, subclass);
+		class = register_lock_class(lock, subclass, 0);
 		if (!class)
 			return 0;
 	}