author     Ingo Molnar <mingo@elte.hu>            2006-07-03 03:24:53 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-07-03 18:27:04 -0400
commit     4ea2176dfa714882e88180b474e4cbcd888b70af
tree       7ff3810f6b8750c226234887bb3063d91e1d71c3  /lib/rwsem-spinlock.c
parent     a8f24a3978c5f82419e1c90dc90460731204f46f
[PATCH] lockdep: prove rwsem locking correctness
Use the lock validator framework to prove rwsem locking correctness.
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
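For context, callers are not expected to invoke __init_rwsem() directly: the lock validator convention is that an init_rwsem() wrapper macro supplies a per-call-site name and a static struct lock_class_key, which lockdep uses to group every rwsem initialised at that site into one lock class. A minimal sketch of such a wrapper (assumed here for illustration; the macro itself is not part of this diff):

/*
 * Sketch of the assumed init_rwsem() wrapper: each call site gets its
 * own static lock_class_key, so rwsems embedded in different structures
 * end up in different lockdep classes.
 */
#define init_rwsem(sem)						\
do {								\
	static struct lock_class_key __key;			\
								\
	__init_rwsem((sem), #sem, &__key);			\
} while (0)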
Diffstat (limited to 'lib/rwsem-spinlock.c')
-rw-r--r--  lib/rwsem-spinlock.c | 20
1 file changed, 17 insertions(+), 3 deletions(-)
diff --git a/lib/rwsem-spinlock.c b/lib/rwsem-spinlock.c
index 03b6097eb04e..db4fed74b940 100644
--- a/lib/rwsem-spinlock.c
+++ b/lib/rwsem-spinlock.c
@@ -20,8 +20,16 @@ struct rwsem_waiter {
 /*
  * initialise the semaphore
  */
-void fastcall init_rwsem(struct rw_semaphore *sem)
+void __init_rwsem(struct rw_semaphore *sem, const char *name,
+		  struct lock_class_key *key)
 {
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held semaphore:
+	 */
+	debug_check_no_locks_freed((void *)sem, sizeof(*sem));
+	lockdep_init_map(&sem->dep_map, name, key);
+#endif
 	sem->activity = 0;
 	spin_lock_init(&sem->wait_lock);
 	INIT_LIST_HEAD(&sem->wait_list);
@@ -183,7 +191,7 @@ int fastcall __down_read_trylock(struct rw_semaphore *sem)
  * get a write lock on the semaphore
  * - we increment the waiting count anyway to indicate an exclusive lock
  */
-void fastcall __sched __down_write(struct rw_semaphore *sem)
+void fastcall __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
 {
 	struct rwsem_waiter waiter;
 	struct task_struct *tsk;
@@ -223,6 +231,11 @@ void fastcall __sched __down_write(struct rw_semaphore *sem)
 	;
 }
 
+void fastcall __sched __down_write(struct rw_semaphore *sem)
+{
+	__down_write_nested(sem, 0);
+}
+
 /*
  * trylock for writing -- returns 1 if successful, 0 if contention
  */
@@ -292,9 +305,10 @@ void fastcall __downgrade_write(struct rw_semaphore *sem)
 	spin_unlock_irqrestore(&sem->wait_lock, flags);
 }
 
-EXPORT_SYMBOL(init_rwsem);
+EXPORT_SYMBOL(__init_rwsem);
 EXPORT_SYMBOL(__down_read);
 EXPORT_SYMBOL(__down_read_trylock);
+EXPORT_SYMBOL(__down_write_nested);
 EXPORT_SYMBOL(__down_write);
 EXPORT_SYMBOL(__down_write_trylock);
 EXPORT_SYMBOL(__up_read);
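The subclass argument introduced by __down_write_nested() lets two rwsems of the same lock class be held at once without lockdep reporting a false self-deadlock. A hedged usage sketch, assuming the higher-level down_write_nested() wrapper and an invented struct foo purely for illustration:

/*
 * Illustrative only: 'struct foo' and the parent/child relationship are
 * assumptions.  The point is the nesting annotation, not the data model.
 */
struct foo {
	struct rw_semaphore sem;
	struct foo *parent;
};

static void foo_move(struct foo *child)
{
	struct foo *parent = child->parent;

	down_write(&parent->sem);
	/*
	 * Both rwsems come from the same init_rwsem() call site, hence the
	 * same lock class; mark the inner acquisition with a subclass so
	 * lockdep treats the nesting as intentional.
	 */
	down_write_nested(&child->sem, SINGLE_DEPTH_NESTING);

	/* ... reparent the child here ... */

	up_write(&child->sem);
	up_write(&parent->sem);
}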