about summary refs log tree commit diff stats
path: root/kernel/mutex.c
diff options
context:
space:
mode:
author Ingo Molnar <mingo@elte.hu> 2006-07-03 03:24:55 -0400
committer Linus Torvalds <torvalds@g5.osdl.org> 2006-07-03 18:27:04 -0400
commit ef5d4707b9065c0cf8a69fa3716893f3b75201ba (patch)
tree 9ec92f31356bf404486c1b26df9fa40bd784f983 /kernel/mutex.c
parent 8a25d5debff2daee280e83e09d8c25d67c26a972 (diff)
[PATCH] lockdep: prove mutex locking correctness
Use the lock validator framework to prove mutex locking correctness. Signed-off-by: Ingo Molnar <mingo@elte.hu> Signed-off-by: Arjan van de Ven <arjan@linux.intel.com> Signed-off-by: Andrew Morton <akpm@osdl.org> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r-- kernel/mutex.c | 28
1 file changed, 22 insertions(+), 6 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 43a50c18701a..8c71cf72a497 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -39,13 +39,14 @@
39 * 39 *
40 * It is not allowed to initialize an already locked mutex. 40 * It is not allowed to initialize an already locked mutex.
41 */ 41 */
42__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name) 42void
43__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
43{ 44{
44 atomic_set(&lock->count, 1); 45 atomic_set(&lock->count, 1);
45 spin_lock_init(&lock->wait_lock); 46 spin_lock_init(&lock->wait_lock);
46 INIT_LIST_HEAD(&lock->wait_list); 47 INIT_LIST_HEAD(&lock->wait_list);
47 48
48 debug_mutex_init(lock, name); 49 debug_mutex_init(lock, name, key);
49} 50}
50 51
51EXPORT_SYMBOL(__mutex_init); 52EXPORT_SYMBOL(__mutex_init);
@@ -131,6 +132,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
131 spin_lock_mutex(&lock->wait_lock, flags); 132 spin_lock_mutex(&lock->wait_lock, flags);
132 133
133 debug_mutex_lock_common(lock, &waiter); 134 debug_mutex_lock_common(lock, &waiter);
135 mutex_acquire(&lock->dep_map, subclass, 0, _RET_IP_);
134 debug_mutex_add_waiter(lock, &waiter, task->thread_info); 136 debug_mutex_add_waiter(lock, &waiter, task->thread_info);
135 137
136 /* add waiting tasks to the end of the waitqueue (FIFO): */ 138 /* add waiting tasks to the end of the waitqueue (FIFO): */
@@ -158,6 +160,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
158 if (unlikely(state == TASK_INTERRUPTIBLE && 160 if (unlikely(state == TASK_INTERRUPTIBLE &&
159 signal_pending(task))) { 161 signal_pending(task))) {
160 mutex_remove_waiter(lock, &waiter, task->thread_info); 162 mutex_remove_waiter(lock, &waiter, task->thread_info);
163 mutex_release(&lock->dep_map, 1, _RET_IP_);
161 spin_unlock_mutex(&lock->wait_lock, flags); 164 spin_unlock_mutex(&lock->wait_lock, flags);
162 165
163 debug_mutex_free_waiter(&waiter); 166 debug_mutex_free_waiter(&waiter);
@@ -194,16 +197,28 @@ __mutex_lock_slowpath(atomic_t *lock_count)
194 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0); 197 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
195} 198}
196 199
200#ifdef CONFIG_DEBUG_LOCK_ALLOC
201void __sched
202mutex_lock_nested(struct mutex *lock, unsigned int subclass)
203{
204 might_sleep();
205 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass);
206}
207
208EXPORT_SYMBOL_GPL(mutex_lock_nested);
209#endif
210
197/* 211/*
198 * Release the lock, slowpath: 212 * Release the lock, slowpath:
199 */ 213 */
200static fastcall inline void 214static fastcall inline void
201__mutex_unlock_common_slowpath(atomic_t *lock_count) 215__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
202{ 216{
203 struct mutex *lock = container_of(lock_count, struct mutex, count); 217 struct mutex *lock = container_of(lock_count, struct mutex, count);
204 unsigned long flags; 218 unsigned long flags;
205 219
206 spin_lock_mutex(&lock->wait_lock, flags); 220 spin_lock_mutex(&lock->wait_lock, flags);
221 mutex_release(&lock->dep_map, nested, _RET_IP_);
207 debug_mutex_unlock(lock); 222 debug_mutex_unlock(lock);
208 223
209 /* 224 /*
@@ -236,7 +251,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count)
236static fastcall noinline void 251static fastcall noinline void
237__mutex_unlock_slowpath(atomic_t *lock_count) 252__mutex_unlock_slowpath(atomic_t *lock_count)
238{ 253{
239 __mutex_unlock_common_slowpath(lock_count); 254 __mutex_unlock_common_slowpath(lock_count, 1);
240} 255}
241 256
242/* 257/*
@@ -287,9 +302,10 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
287 spin_lock_mutex(&lock->wait_lock, flags); 302 spin_lock_mutex(&lock->wait_lock, flags);
288 303
289 prev = atomic_xchg(&lock->count, -1); 304 prev = atomic_xchg(&lock->count, -1);
290 if (likely(prev == 1)) 305 if (likely(prev == 1)) {
291 debug_mutex_set_owner(lock, current_thread_info()); 306 debug_mutex_set_owner(lock, current_thread_info());
292 307 mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
308 }
293 /* Set it back to 0 if there are no waiters: */ 309 /* Set it back to 0 if there are no waiters: */
294 if (likely(list_empty(&lock->wait_list))) 310 if (likely(list_empty(&lock->wait_list)))
295 atomic_set(&lock->count, 0); 311 atomic_set(&lock->count, 0);