 include/linux/lockdep.h |   15 +++++++++++----
 kernel/lockdep.c        |   10 ++++++----
 kernel/mutex-debug.c    |    2 +-
 lib/rwsem-spinlock.c    |    2 +-
 lib/rwsem.c             |    2 +-
 lib/spinlock_debug.c    |    4 ++--
 net/core/sock.c         |    2 +-
 7 files changed, 23 insertions(+), 14 deletions(-)
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1314ca0f29be..14fec2a23b2e 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -202,7 +202,7 @@ extern int lockdep_internal(void);
  */
 
 extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
-			     struct lock_class_key *key);
+			     struct lock_class_key *key, int subclass);
 
 /*
  * Reinitialize a lock key - for cases where there is special locking or
@@ -211,9 +211,14 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
  * or they are too narrow (they suffer from a false class-split):
  */
 #define lockdep_set_class(lock, key) \
-		lockdep_init_map(&(lock)->dep_map, #key, key)
+		lockdep_init_map(&(lock)->dep_map, #key, key, 0)
 #define lockdep_set_class_and_name(lock, key, name) \
-		lockdep_init_map(&(lock)->dep_map, name, key)
+		lockdep_init_map(&(lock)->dep_map, name, key, 0)
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+		lockdep_init_map(&(lock)->dep_map, #key, key, sub)
+#define lockdep_set_subclass(lock, sub) \
+		lockdep_init_map(&(lock)->dep_map, #lock, \
+				 (lock)->dep_map.key, sub)
 
 /*
  * Acquire a lock.
@@ -257,10 +262,12 @@ static inline int lockdep_internal(void)
 # define lock_release(l, n, i)			do { } while (0)
 # define lockdep_init()				do { } while (0)
 # define lockdep_info()				do { } while (0)
-# define lockdep_init_map(lock, name, key)	do { (void)(key); } while (0)
+# define lockdep_init_map(lock, name, key, sub)	do { (void)(key); } while (0)
 # define lockdep_set_class(lock, key)		do { (void)(key); } while (0)
 # define lockdep_set_class_and_name(lock, key, name) \
 		do { (void)(key); } while (0)
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+		do { (void)(key); } while (0)
 # define INIT_LOCKDEP
 # define lockdep_reset()		do { debug_locks = 1; } while (0)
 # define lockdep_free_key_range(start, size)	do { } while (0)
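Note: a minimal sketch (not part of this patch) of how the new per-instance annotation might be used. struct node, node_init() and move_to_parent() are hypothetical names; SINGLE_DEPTH_NESTING is the existing lockdep nesting constant. With lockdep_set_subclass() the nested instance is reclassified once at init time, so plain spin_lock() calls on it do not look like recursive locking to the validator:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical tree node: all node locks share one lock class because
 * they are initialized at the same spin_lock_init() call site. */
struct node {
	spinlock_t	 lock;
	struct node	*parent;
};

static void node_init(struct node *node, struct node *parent)
{
	spin_lock_init(&node->lock);
	node->parent = parent;
	/*
	 * A child's lock is only ever taken while its parent's lock
	 * (same class) is held.  Classify this instance as subclass 1
	 * once, instead of annotating every call site with
	 * spin_lock_nested():
	 */
	if (parent)
		lockdep_set_subclass(&node->lock, SINGLE_DEPTH_NESTING);
}

static void move_to_parent(struct node *node)
{
	spin_lock(&node->parent->lock);
	spin_lock(&node->lock);		/* subclass 1 under subclass 0: OK */
	/* ... */
	spin_unlock(&node->lock);
	spin_unlock(&node->parent->lock);
}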
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 4c0553461000..ba7156ac70c1 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1177,7 +1177,7 @@ look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
  * itself, so actual lookup of the hash should be once per lock object.
  */
 static inline struct lock_class *
-register_lock_class(struct lockdep_map *lock, unsigned int subclass)
+register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
 {
 	struct lockdep_subclass_key *key;
 	struct list_head *hash_head;
@@ -1249,7 +1249,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass)
 out_unlock_set:
 	__raw_spin_unlock(&hash_lock);
 
-	if (!subclass)
+	if (!subclass || force)
 		lock->class_cache = class;
 
 	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
@@ -1937,7 +1937,7 @@ void trace_softirqs_off(unsigned long ip)
  * Initialize a lock instance's lock-class mapping info:
  */
 void lockdep_init_map(struct lockdep_map *lock, const char *name,
-		      struct lock_class_key *key)
+		      struct lock_class_key *key, int subclass)
 {
 	if (unlikely(!debug_locks))
 		return;
@@ -1957,6 +1957,8 @@ void lockdep_init_map(struct lockdep_map *lock, const char *name,
 	lock->name = name;
 	lock->key = key;
 	lock->class_cache = NULL;
+	if (subclass)
+		register_lock_class(lock, subclass, 1);
 }
 
 EXPORT_SYMBOL_GPL(lockdep_init_map);
@@ -1995,7 +1997,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
 	 * Not cached yet or subclass?
 	 */
 	if (unlikely(!class)) {
-		class = register_lock_class(lock, subclass, 0);
+		class = register_lock_class(lock, subclass, 0);
 		if (!class)
 			return 0;
 	}
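Note: a rough sketch of what the new force parameter changes, as I read these hunks; the spinlock and key below are hypothetical. Passing a non-zero subclass to lockdep_init_map() registers that subclass's class immediately and, because force is 1, caches it in class_cache, so later un-annotated acquisitions of that instance are accounted to the chosen subclass rather than to subclass 0:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

static struct lock_class_key nested_key;	/* hypothetical key */
static spinlock_t nested_lock;			/* hypothetical lock */

static void nested_lock_setup(void)
{
	spin_lock_init(&nested_lock);
	/*
	 * Expands to lockdep_init_map(&nested_lock.dep_map, "&nested_key",
	 * &nested_key, 2); with subclass != 0, lockdep_init_map() calls
	 * register_lock_class(&nested_lock.dep_map, 2, 1), which caches the
	 * subclass-2 class because force == 1.
	 */
	lockdep_set_class_and_subclass(&nested_lock, &nested_key, 2);
}

static void nested_lock_use(void)
{
	/*
	 * class_cache is already set, so __lock_acquire() does not call
	 * register_lock_class() again and this plain acquisition is
	 * tracked as subclass 2 of nested_key's class.
	 */
	spin_lock(&nested_lock);
	/* ... */
	spin_unlock(&nested_lock);
}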
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index e3203c654dda..18651641a7b5 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -91,7 +91,7 @@ void debug_mutex_init(struct mutex *lock, const char *name,
 	 * Make sure we are not reinitializing a held lock:
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key);
+	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
 	lock->owner = NULL;
 	lock->magic = lock;
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index db4fed74b940..c4cfd6c0342f 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -28,7 +28,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	 * Make sure we are not reinitializing a held semaphore:
 	 */
 	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
-	lockdep_init_map(&sem->dep_map, name, key);
+	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
diff --git a/lib/rwsem.c b/lib/rwsem.c
index 901d0e7da892..cdb4e3d05607 100644
--- a/lib/rwsem.c
+++ b/lib/rwsem.c
@@ -19,7 +19,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 	 * Make sure we are not reinitializing a held semaphore:
 	 */
 	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
-	lockdep_init_map(&sem->dep_map, name, key);
+	lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
 	sem->count = RWSEM_UNLOCKED_VALUE;
 	spin_lock_init(&sem->wait_lock);
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index dafaf1de2491..b6c4f898197c 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -20,7 +20,7 @@ void __spin_lock_init(spinlock_t *lock, const char *name,
 	 * Make sure we are not reinitializing a held lock:
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key);
+	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
 	lock->raw_lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
 	lock->magic = SPINLOCK_MAGIC;
@@ -38,7 +38,7 @@ void __rwlock_init(rwlock_t *lock, const char *name,
 	 * Make sure we are not reinitializing a held lock:
 	 */
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key);
+	lockdep_init_map(&lock->dep_map, name, key, 0);
 #endif
 	lock->raw_lock = (raw_rwlock_t) __RAW_RW_LOCK_UNLOCKED;
 	lock->magic = RWLOCK_MAGIC;
diff --git a/net/core/sock.c b/net/core/sock.c
index b77e155cbe6c..d472db4776c3 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -823,7 +823,7 @@ static void inline sock_lock_init(struct sock *sk)
 			af_family_slock_key_strings[sk->sk_family]);
 	lockdep_init_map(&sk->sk_lock.dep_map,
 			af_family_key_strings[sk->sk_family],
-			af_family_keys + sk->sk_family);
+			af_family_keys + sk->sk_family, 0);
 }
 
 /**
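Note: sk_lock is a composite construct (slock spinlock plus software lock state), which is why sock.c initializes its lockdep_map directly instead of relying on spin_lock_init(); the new trailing 0 keeps the per-address-family class at the default subclass. For illustration only (not from this patch), the same pattern for some other hand-rolled lock might look like the sketch below; my_lock, my_lock_key and my_lock_init() are made-up names, and a non-zero last argument would pin the instance to a subclass at init time:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical composite lock in the style of socket_lock_t. */
struct my_lock {
	spinlock_t		slock;		/* protects "locked" */
	int			locked;		/* software lock state */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;	/* validator state for the software lock */
#endif
};

static struct lock_class_key my_lock_key;	/* hypothetical class key */

static void my_lock_init(struct my_lock *l)
{
	spin_lock_init(&l->slock);
	l->locked = 0;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* subclass 0: same default classification as before this patch */
	lockdep_init_map(&l->dep_map, "my_lock", &my_lock_key, 0);
#endif
}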