author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2006-10-11 01:45:14 -0400
committer Dmitry Torokhov <dtor@insightbb.com>     2006-10-11 01:45:14 -0400
commit    4dfbb9d8c6cbfc32faa5c71145bd2a43e1f8237c (patch)
tree      a4fefea0d5f5930240f4ecd6f9716a029cc927a9 /kernel/lockdep.c
parent    86255d9d0bede79140f4912482447963f00818c0 (diff)
Lockdep: add lockdep_set_class_and_subclass() and lockdep_set_subclass()
These annotations make it possible to assign a subclass at lock init time, and are meant to reduce the number of _nested() annotations by assigning a default subclass. One could do without them and rely on lockdep_set_class() exclusively, but that would require manually maintaining a stack of struct lock_class_key objects.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Dmitry Torokhov <dtor@mail.ru>
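The header side of the patch is outside this diffstat (which is limited to kernel/lockdep.c). A minimal sketch of how the two annotations are expected to funnel into the extended lockdep_init_map(), assuming the usual dep_map embedding in the lock type:

	/*
	 * Sketch only -- the include/linux/lockdep.h side is not shown
	 * here.  Both annotations boil down to re-initializing the map
	 * with an explicit default subclass:
	 */
	#define lockdep_set_class_and_subclass(lock, key, sub)	\
		lockdep_init_map(&(lock)->dep_map, #key, key, sub)

	#define lockdep_set_subclass(lock, sub)			\
		lockdep_init_map(&(lock)->dep_map, #lock,	\
				 (lock)->dep_map.key, sub)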
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--  kernel/lockdep.c | 10 ++++++----
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 4c0553461000..ba7156ac70c1 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1177,7 +1177,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
  * itself, so actual lookup of the hash should be once per lock object.
  */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
@@ -1249,7 +1249,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
 
-	if (!subclass)
+	if (!subclass || force)
 		lock->class_cache = class;
 
 	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
@@ -1937,7 +1937,7 @@ void trace_softirqs_off(unsigned long ip)
  * Initialize a lock instance's lock-class mapping info:
  */
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
-		      struct lock_class_key *key)
+		      struct lock_class_key *key, int subclass)
 {
 	if (unlikely(!debug_locks))
 		return;
@@ -1957,6 +1957,8 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lock->name = name;
 	lock->key = key;
 	lock->class_cache = NULL;
+	if (subclass)
+		register_lock_class(lock, subclass, 1);
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -1995,7 +1997,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * Not cached yet or subclass?
 	 */
 	if (unlikely(!class)) {
-		class = register_lock_class(lock, subclass, 0);
+		class = register_lock_class(lock, subclass, 0);
 		if (!class)
 			return 0;
 	}
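With a default subclass attached at init time, lockdep treats every acquisition of that lock as belonging to the assigned subclass, so callers no longer need a _nested() call at each site. An illustrative usage sketch; the parent/child lock pair below is hypothetical, not part of this patch:

	spinlock_t parent_lock, child_lock;	/* hypothetical pair, same class */

	spin_lock_init(&parent_lock);
	spin_lock_init(&child_lock);

	/* Assign the default subclass once, at init time... */
	lockdep_set_subclass(&child_lock, SINGLE_DEPTH_NESTING);

	/* ...instead of annotating every nested acquisition: */
	spin_lock(&parent_lock);
	spin_lock(&child_lock);		/* lockdep sees subclass 1 here */
	spin_unlock(&child_lock);
	spin_unlock(&parent_lock);

The force parameter exists so that this init-time registration also populates lock->class_cache for a nonzero subclass, which the plain !subclass check would otherwise skip.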