path: root/kernel/mutex.c
author    Ingo Molnar <mingo@elte.hu>  2006-07-03 03:24:33 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-07-03 18:27:01 -0400
commit    9a11b49a805665e13a56aa067afaf81d43ec1514 (patch)
tree      bf499956e3f67d1211d68ab1e2eb76645f453dfb /kernel/mutex.c
parent    fb7e42413a098cc45b3adf858da290033af62bae (diff)
[PATCH] lockdep: better lock debugging
Generic lock debugging:

 - generalized lock debugging framework. For example, a bug in one lock
   subsystem turns off debugging in all lock subsystems.

 - got rid of the caller address passing (__IP__/__IP_DECL__/etc.) from
   the mutex/rtmutex debugging code: it caused way too much prototype
   hackery, and lockdep will give the same information anyway.

 - ability to do silent tests

 - check lock freeing in vfree too.

 - more finegrained debugging options, to allow distributions to
   turn off more expensive debugging features.

There's no separate 'held mutexes' list anymore - but there's a 'held
locks' stack within lockdep, which unifies deadlock detection across all
lock classes. (this is independent of the lockdep validation stuff -
lockdep first checks whether we are holding a lock already)

Here are the current debugging options:

CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_LOCK_ALLOC=y

which do:

 config DEBUG_MUTEXES
 	bool "Mutex debugging, basic checks"

 config DEBUG_LOCK_ALLOC
 	bool "Detect incorrect freeing of live mutexes"

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
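The "generalized lock debugging framework" mentioned above is what the new <linux/debug_locks.h> include in the diff below provides: a single debug_locks flag shared by all lock subsystems, and a DEBUG_LOCKS_WARN_ON() check that reports the first failure and then silences further lock debugging everywhere. As a rough illustration of that idea only, here is a minimal user-space sketch (GCC statement expressions, as in kernel headers); the names follow the kernel header, but the bodies are simplified approximations, not the kernel's actual code:

	#include <stdio.h>

	/*
	 * One global switch shared by every lock subsystem (mutexes, rwsems,
	 * spinlocks, ...). A bug reported by any one of them turns all lock
	 * debugging off, so one corrupted lock does not flood the log with
	 * follow-up warnings from the others.
	 */
	static int debug_locks = 1;

	static int debug_locks_off(void)
	{
		if (debug_locks) {
			debug_locks = 0;	/* silence all further lock debugging */
			return 1;		/* this caller tripped it first */
		}
		return 0;
	}

	/* Warn (once, globally) if the condition holds, then disable debugging. */
	#define DEBUG_LOCKS_WARN_ON(c)						\
	({									\
		int __ret = !!(c);						\
		if (__ret && debug_locks_off())					\
			fprintf(stderr, "lock debugging check failed: %s (%s:%d)\n", \
				#c, __FILE__, __LINE__);			\
		__ret;								\
	})

	int main(void)
	{
		int owner = 1, current_owner = 2;

		/* First failing check prints and flips debug_locks off ... */
		DEBUG_LOCKS_WARN_ON(owner != current_owner);
		/* ... later failures stay silent, whichever subsystem hits them. */
		DEBUG_LOCKS_WARN_ON(1);
		return 0;
	}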
Diffstat (limited to 'kernel/mutex.c')
-rw-r--r--  kernel/mutex.c | 52
1 file changed, 28 insertions(+), 24 deletions(-)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 101ceeb38925..3aad0b7992f4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -38,7 +39,7 @@
  *
  * It is not allowed to initialize an already locked mutex.
  */
-void fastcall __mutex_init(struct mutex *lock, const char *name)
+__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name)
 {
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
@@ -56,7 +57,7 @@ EXPORT_SYMBOL(__mutex_init);
  * branch is predicted by the CPU as default-untaken.
  */
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock - acquire the mutex
@@ -79,7 +80,7 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void fastcall __sched mutex_lock(struct mutex *lock)
+void inline fastcall __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -92,7 +93,7 @@ void fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 
 static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -120,18 +121,17 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned int old_val;
 	unsigned long flags;
 
-	debug_mutex_init_waiter(&waiter);
-
 	spin_lock_mutex(&lock->wait_lock, flags);
 
-	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task->thread_info);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -173,7 +173,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task->thread_info);
-	debug_mutex_set_owner(lock, task->thread_info __IP__);
+	debug_mutex_set_owner(lock, task->thread_info);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -183,32 +183,28 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 
 	debug_mutex_free_waiter(&waiter);
 
-	DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list));
-	DEBUG_LOCKS_WARN_ON(lock->owner != task->thread_info);
-
 	return 0;
 }
 
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
 }
 
 /*
  * Release the lock, slowpath:
  */
-static fastcall noinline void
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
+static fastcall inline void
+__mutex_unlock_common_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
-
 	spin_lock_mutex(&lock->wait_lock, flags);
+	debug_mutex_unlock(lock);
 
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
@@ -218,8 +214,6 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
-	debug_mutex_unlock(lock);
-
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =
@@ -237,11 +231,20 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 }
 
 /*
+ * Release the lock, slowpath:
+ */
+static fastcall noinline void
+__mutex_unlock_slowpath(atomic_t *lock_count)
+{
+	__mutex_unlock_common_slowpath(lock_count);
+}
+
+/*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock_interruptible - acquire the mutex, interruptable
@@ -264,11 +267,11 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
 }
 
 /*
@@ -285,7 +288,8 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1))
-		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
+		debug_mutex_set_owner(lock, current_thread_info());
+
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
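For context, the functions whose slow paths the hunks above rework are the ordinary in-kernel mutex API; callers are unaffected by this patch. A minimal kernel-side usage sketch (my_lock and my_update() are hypothetical names, not part of this patch):

	#include <linux/mutex.h>
	#include <linux/errno.h>

	static DEFINE_MUTEX(my_lock);	/* statically initialized mutex */

	int my_update(void)
	{
		/*
		 * Sleeps until the mutex is acquired; the fastpath is used unless
		 * there is contention, in which case __mutex_lock_slowpath() runs.
		 */
		mutex_lock(&my_lock);
		/* ... critical section ... */
		mutex_unlock(&my_lock);

		/* Interruptible variant: returns -EINTR if a signal arrives first. */
		if (mutex_lock_interruptible(&my_lock))
			return -EINTR;
		mutex_unlock(&my_lock);

		/* Non-blocking attempt: returns 1 on success, 0 if already held. */
		if (mutex_trylock(&my_lock)) {
			/* ... */
			mutex_unlock(&my_lock);
		}
		return 0;
	}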