author		Ingo Molnar <mingo@elte.hu>		2006-07-03 03:24:33 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2006-07-03 18:27:01 -0400
commit		9a11b49a805665e13a56aa067afaf81d43ec1514 (patch)
tree		bf499956e3f67d1211d68ab1e2eb76645f453dfb /kernel
parent		fb7e42413a098cc45b3adf858da290033af62bae (diff)
[PATCH] lockdep: better lock debugging
Generic lock debugging:

 - generalized lock debugging framework. For example, a bug in one lock
   subsystem turns off debugging in all lock subsystems.

 - got rid of the caller address passing (__IP__/__IP_DECL__/etc.) from
   the mutex/rtmutex debugging code: it caused way too much prototype
   hackery, and lockdep will give the same information anyway.

 - ability to do silent tests

 - check lock freeing in vfree too.

 - more finegrained debugging options, to allow distributions to
   turn off more expensive debugging features.

There's no separate 'held mutexes' list anymore - but there's a 'held locks'
stack within lockdep, which unifies deadlock detection across all lock
classes. (this is independent of the lockdep validation stuff - lockdep
first checks whether we are holding a lock already)

Here are the current debugging options:

CONFIG_DEBUG_MUTEXES=y
CONFIG_DEBUG_LOCK_ALLOC=y

which do:

 config DEBUG_MUTEXES
	 bool "Mutex debugging, basic checks"

 config DEBUG_LOCK_ALLOC
	 bool "Detect incorrect freeing of live mutexes"

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
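The first point above - a problem detected by any one lock subsystem turns off debugging in all of them - is carried by a single shared switch (debug_locks, cleared via debug_locks_off()) rather than per-subsystem flags such as the old debug_mutex_on. The following is a minimal, self-contained user-space sketch of that idea, not the kernel implementation: the names mirror the kernel's, but the reporting and locking details are simplified, and the statement-expression macro is a GCC extension just as in the kernel.

/* sketch.c - illustrative only; build with gcc */
#include <stdio.h>

static int debug_locks = 1;		/* shared "lock debugging enabled" flag */

static int debug_locks_off(void)
{
	if (debug_locks) {
		debug_locks = 0;	/* first problem turns off all lock debugging */
		return 1;		/* caller should emit its report */
	}
	return 0;			/* already off: stay silent, avoid recursing */
}

#define DEBUG_LOCKS_WARN_ON(c)						\
({									\
	int __failed = !!(c);						\
	if (__failed && debug_locks_off())				\
		fprintf(stderr, "lock debugging: %s failed at %s:%d\n",	\
			#c, __FILE__, __LINE__);			\
	__failed;							\
})

int main(void)
{
	DEBUG_LOCKS_WARN_ON(1 + 1 != 2);	/* passes: nothing printed */
	DEBUG_LOCKS_WARN_ON(1);			/* fails: reported, debugging disabled */
	DEBUG_LOCKS_WARN_ON(1);			/* fails again: silent, flag already cleared */
	return 0;
}

Because every checker funnels through the same flag, the first warning silences all later lock-debug checks, which is what keeps the error report itself from recursing back into the debugging code.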
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/exit.c		|   5
-rw-r--r--	kernel/fork.c		|   4
-rw-r--r--	kernel/mutex-debug.c	|  51
-rw-r--r--	kernel/mutex-debug.h	|  82
-rw-r--r--	kernel/mutex.c		|  52
-rw-r--r--	kernel/mutex.h		|  17
-rw-r--r--	kernel/rtmutex-debug.c	| 302
-rw-r--r--	kernel/rtmutex-debug.h	|   8
-rw-r--r--	kernel/rtmutex.c	|  46
-rw-r--r--	kernel/rtmutex.h	|   3
-rw-r--r--	kernel/sched.c		|  16
11 files changed, 104 insertions(+), 482 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 7f7ef2258553..c595db14cf25 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -933,10 +933,9 @@ fastcall NORET_TYPE void do_exit(long code)
 	if (unlikely(current->pi_state_cache))
 		kfree(current->pi_state_cache);
 	/*
-	 * If DEBUG_MUTEXES is on, make sure we are holding no locks:
+	 * Make sure we are holding no locks:
 	 */
-	mutex_debug_check_no_locks_held(tsk);
-	rt_mutex_debug_check_no_locks_held(tsk);
+	debug_check_no_locks_held(tsk);
 
 	if (tsk->io_context)
 		exit_io_context();
diff --git a/kernel/fork.c b/kernel/fork.c
index 9064bf9e131b..1cd46a4fb0d3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -919,10 +919,6 @@ static inline void rt_mutex_init_task(struct task_struct *p)
 	spin_lock_init(&p->pi_lock);
 	plist_head_init(&p->pi_waiters, &p->pi_lock);
 	p->pi_blocked_on = NULL;
-# ifdef CONFIG_DEBUG_RT_MUTEXES
-	spin_lock_init(&p->held_list_lock);
-	INIT_LIST_HEAD(&p->held_list_head);
-# endif
 #endif
 }
 
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index a92de145ed0d..5569766a1ea2 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -20,52 +20,19 @@
 #include <linux/spinlock.h>
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 
 #include "mutex-debug.h"
 
 /*
- * We need a global lock when we walk through the multi-process
- * lock tree. Only used in the deadlock-debugging case.
- */
-DEFINE_SPINLOCK(debug_mutex_lock);
-
-/*
- * All locks held by all tasks, in a single global list:
- */
-LIST_HEAD(debug_mutex_held_locks);
-
-/*
- * In the debug case we carry the caller's instruction pointer into
- * other functions, but we dont want the function argument overhead
- * in the nondebug case - hence these macros:
- */
-#define __IP_DECL__		, unsigned long ip
-#define __IP__			, ip
-#define __RET_IP__		, (unsigned long)__builtin_return_address(0)
-
-/*
- * "mutex debugging enabled" flag. We turn it off when we detect
- * the first problem because we dont want to recurse back
- * into the tracing code when doing error printk or
- * executing a BUG():
- */
-int debug_mutex_on = 1;
-
-/*
  * Must be called with lock->wait_lock held.
  */
-void debug_mutex_set_owner(struct mutex *lock,
-			   struct thread_info *new_owner __IP_DECL__)
+void debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner)
 {
 	lock->owner = new_owner;
-	DEBUG_LOCKS_WARN_ON(!list_empty(&lock->held_list));
-	if (debug_mutex_on) {
-		list_add_tail(&lock->held_list, &debug_mutex_held_locks);
-		lock->acquire_ip = ip;
-	}
 }
 
-void debug_mutex_init_waiter(struct mutex_waiter *waiter)
+void debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
 {
 	memset(waiter, MUTEX_DEBUG_INIT, sizeof(*waiter));
 	waiter->magic = waiter;
@@ -87,9 +54,10 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
 }
 
 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
-			    struct thread_info *ti __IP_DECL__)
+			    struct thread_info *ti)
 {
 	SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
+
 	/* Mark the current thread as blocked on the lock: */
 	ti->task->blocked_on = waiter;
 	waiter->lock = lock;
@@ -109,13 +77,10 @@ void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 
 void debug_mutex_unlock(struct mutex *lock)
 {
+	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
 	DEBUG_LOCKS_WARN_ON(lock->magic != lock);
 	DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
 	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
-	if (debug_mutex_on) {
-		DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list));
-		list_del_init(&lock->held_list);
-	}
 }
 
 void debug_mutex_init(struct mutex *lock, const char *name)
@@ -123,10 +88,8 @@ void debug_mutex_init(struct mutex *lock, const char *name)
 	/*
 	 * Make sure we are not reinitializing a held lock:
 	 */
-	mutex_debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
 	lock->owner = NULL;
-	INIT_LIST_HEAD(&lock->held_list);
-	lock->name = name;
 	lock->magic = lock;
 }
 
diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
index bdab13a9ee26..babfbdfc534b 100644
--- a/kernel/mutex-debug.h
+++ b/kernel/mutex-debug.h
@@ -10,102 +10,44 @@
  * More details are in kernel/mutex-debug.c.
  */
 
-extern spinlock_t debug_mutex_lock;
-extern struct list_head debug_mutex_held_locks;
-extern int debug_mutex_on;
-
-/*
- * In the debug case we carry the caller's instruction pointer into
- * other functions, but we dont want the function argument overhead
- * in the nondebug case - hence these macros:
- */
-#define __IP_DECL__		, unsigned long ip
-#define __IP__			, ip
-#define __RET_IP__		, (unsigned long)__builtin_return_address(0)
-
 /*
  * This must be called with lock->wait_lock held.
  */
-extern void debug_mutex_set_owner(struct mutex *lock,
-				  struct thread_info *new_owner __IP_DECL__);
+extern void
+debug_mutex_set_owner(struct mutex *lock, struct thread_info *new_owner);
 
 static inline void debug_mutex_clear_owner(struct mutex *lock)
 {
 	lock->owner = NULL;
 }
 
-extern void debug_mutex_init_waiter(struct mutex_waiter *waiter);
+extern void debug_mutex_lock_common(struct mutex *lock,
+				    struct mutex_waiter *waiter);
 extern void debug_mutex_wake_waiter(struct mutex *lock,
 				    struct mutex_waiter *waiter);
 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
 extern void debug_mutex_add_waiter(struct mutex *lock,
 				   struct mutex_waiter *waiter,
-				   struct thread_info *ti __IP_DECL__);
+				   struct thread_info *ti);
 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
 				struct thread_info *ti);
 extern void debug_mutex_unlock(struct mutex *lock);
-extern void debug_mutex_init(struct mutex *lock, const char *name);
-
-#define debug_spin_lock_save(lock, flags)		\
-	do {						\
-		local_irq_save(flags);			\
-		if (debug_mutex_on)			\
-			spin_lock(lock);		\
-	} while (0)
-
-#define debug_spin_unlock_restore(lock, flags)		\
-	do {						\
-		if (debug_mutex_on)			\
-			spin_unlock(lock);		\
-		local_irq_restore(flags);		\
-		preempt_check_resched();		\
-	} while (0)
+extern void debug_mutex_init(struct mutex *lock, const char *name,
+			     struct lock_class_key *key);
 
 #define spin_lock_mutex(lock, flags)			\
 	do {						\
 		struct mutex *l = container_of(lock, struct mutex, wait_lock); \
 							\
 		DEBUG_LOCKS_WARN_ON(in_interrupt());	\
-		debug_spin_lock_save(&debug_mutex_lock, flags); \
-		spin_lock(lock);			\
+		local_irq_save(flags);			\
+		__raw_spin_lock(&(lock)->raw_lock);	\
 		DEBUG_LOCKS_WARN_ON(l->magic != l);	\
 	} while (0)
 
 #define spin_unlock_mutex(lock, flags)			\
 	do {						\
-		spin_unlock(lock);			\
-		debug_spin_unlock_restore(&debug_mutex_lock, flags); \
+		__raw_spin_unlock(&(lock)->raw_lock);	\
+		local_irq_restore(flags);		\
+		preempt_check_resched();		\
 	} while (0)
-
-#define DEBUG_OFF()					\
-do {							\
-	if (debug_mutex_on) {				\
-		debug_mutex_on = 0;			\
-		console_verbose();			\
-		if (spin_is_locked(&debug_mutex_lock))	\
-			spin_unlock(&debug_mutex_lock);	\
-	}						\
-} while (0)
-
-#define DEBUG_BUG()					\
-do {							\
-	if (debug_mutex_on) {				\
-		DEBUG_OFF();				\
-		BUG();					\
-	}						\
-} while (0)
-
-#define DEBUG_LOCKS_WARN_ON(c)				\
-do {							\
-	if (unlikely(c && debug_mutex_on)) {		\
-		DEBUG_OFF();				\
-		WARN_ON(1);				\
-	}						\
-} while (0)
-
-#ifdef CONFIG_SMP
-# define SMP_DEBUG_LOCKS_WARN_ON(c)	DEBUG_LOCKS_WARN_ON(c)
-#else
-# define SMP_DEBUG_LOCKS_WARN_ON(c)	do { } while (0)
-#endif
-
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 101ceeb38925..3aad0b7992f4 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -17,6 +17,7 @@
 #include <linux/module.h>
 #include <linux/spinlock.h>
 #include <linux/interrupt.h>
+#include <linux/debug_locks.h>
 
 /*
  * In the DEBUG case we are using the "NULL fastpath" for mutexes,
@@ -38,7 +39,7 @@
  *
  * It is not allowed to initialize an already locked mutex.
  */
-void fastcall __mutex_init(struct mutex *lock, const char *name)
+__always_inline void fastcall __mutex_init(struct mutex *lock, const char *name)
 {
 	atomic_set(&lock->count, 1);
 	spin_lock_init(&lock->wait_lock);
@@ -56,7 +57,7 @@ EXPORT_SYMBOL(__mutex_init);
  * branch is predicted by the CPU as default-untaken.
  */
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock - acquire the mutex
@@ -79,7 +80,7 @@ __mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__);
  *
  * This function is similar to (but not equivalent to) down().
  */
-void fastcall __sched mutex_lock(struct mutex *lock)
+void inline fastcall __sched mutex_lock(struct mutex *lock)
 {
 	might_sleep();
 	/*
@@ -92,7 +93,7 @@ void fastcall __sched mutex_lock(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock);
 
 static void fastcall noinline __sched
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_unlock_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_unlock - release the mutex
@@ -120,18 +121,17 @@ EXPORT_SYMBOL(mutex_unlock);
  * Lock a mutex (possibly interruptible), slowpath:
  */
 static inline int __sched
-__mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
+__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass)
 {
 	struct task_struct *task = current;
 	struct mutex_waiter waiter;
 	unsigned int old_val;
 	unsigned long flags;
 
-	debug_mutex_init_waiter(&waiter);
-
 	spin_lock_mutex(&lock->wait_lock, flags);
 
-	debug_mutex_add_waiter(lock, &waiter, task->thread_info, ip);
+	debug_mutex_lock_common(lock, &waiter);
+	debug_mutex_add_waiter(lock, &waiter, task->thread_info);
 
 	/* add waiting tasks to the end of the waitqueue (FIFO): */
 	list_add_tail(&waiter.list, &lock->wait_list);
@@ -173,7 +173,7 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 
 	/* got the lock - rejoice! */
 	mutex_remove_waiter(lock, &waiter, task->thread_info);
-	debug_mutex_set_owner(lock, task->thread_info __IP__);
+	debug_mutex_set_owner(lock, task->thread_info);
 
 	/* set it to 0 if there are no waiters left: */
 	if (likely(list_empty(&lock->wait_list)))
@@ -183,32 +183,28 @@ __mutex_lock_common(struct mutex *lock, long state __IP_DECL__)
 
 	debug_mutex_free_waiter(&waiter);
 
-	DEBUG_LOCKS_WARN_ON(list_empty(&lock->held_list));
-	DEBUG_LOCKS_WARN_ON(lock->owner != task->thread_info);
-
 	return 0;
 }
 
 static void fastcall noinline __sched
-__mutex_lock_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE __IP__);
+	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0);
 }
 
 /*
  * Release the lock, slowpath:
  */
-static fastcall noinline void
-__mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
+static fastcall inline void
+__mutex_unlock_common_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 	unsigned long flags;
 
-	DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
-
 	spin_lock_mutex(&lock->wait_lock, flags);
+	debug_mutex_unlock(lock);
 
 	/*
 	 * some architectures leave the lock unlocked in the fastpath failure
@@ -218,8 +214,6 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 	if (__mutex_slowpath_needs_to_unlock())
 		atomic_set(&lock->count, 1);
 
-	debug_mutex_unlock(lock);
-
 	if (!list_empty(&lock->wait_list)) {
 		/* get the first entry from the wait-list: */
 		struct mutex_waiter *waiter =
@@ -237,11 +231,20 @@ __mutex_unlock_slowpath(atomic_t *lock_count __IP_DECL__)
 }
 
 /*
+ * Release the lock, slowpath:
+ */
+static fastcall noinline void
+__mutex_unlock_slowpath(atomic_t *lock_count)
+{
+	__mutex_unlock_common_slowpath(lock_count);
+}
+
+/*
  * Here come the less common (and hence less performance-critical) APIs:
  * mutex_lock_interruptible() and mutex_trylock().
  */
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__);
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
 /***
  * mutex_lock_interruptible - acquire the mutex, interruptable
@@ -264,11 +267,11 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
 EXPORT_SYMBOL(mutex_lock_interruptible);
 
 static int fastcall noinline __sched
-__mutex_lock_interruptible_slowpath(atomic_t *lock_count __IP_DECL__)
+__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
 {
 	struct mutex *lock = container_of(lock_count, struct mutex, count);
 
-	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE __IP__);
+	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0);
 }
 
 /*
@@ -285,7 +288,8 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
 
 	prev = atomic_xchg(&lock->count, -1);
 	if (likely(prev == 1))
-		debug_mutex_set_owner(lock, current_thread_info() __RET_IP__);
+		debug_mutex_set_owner(lock, current_thread_info());
+
 	/* Set it back to 0 if there are no waiters: */
 	if (likely(list_empty(&lock->wait_list)))
 		atomic_set(&lock->count, 0);
diff --git a/kernel/mutex.h b/kernel/mutex.h
index 7e1ed48d1a6c..aeb2d916aa0e 100644
--- a/kernel/mutex.h
+++ b/kernel/mutex.h
@@ -16,22 +16,15 @@
 #define mutex_remove_waiter(lock, waiter, ti) \
 		__list_del((waiter)->list.prev, (waiter)->list.next)
 
-#define DEBUG_LOCKS_WARN_ON(c)				do { } while (0)
 #define debug_mutex_set_owner(lock, new_owner)		do { } while (0)
 #define debug_mutex_clear_owner(lock)			do { } while (0)
-#define debug_mutex_init_waiter(waiter)			do { } while (0)
 #define debug_mutex_wake_waiter(lock, waiter)		do { } while (0)
 #define debug_mutex_free_waiter(waiter)			do { } while (0)
-#define debug_mutex_add_waiter(lock, waiter, ti, ip)	do { } while (0)
+#define debug_mutex_add_waiter(lock, waiter, ti)	do { } while (0)
 #define debug_mutex_unlock(lock)			do { } while (0)
 #define debug_mutex_init(lock, name)			do { } while (0)
 
-/*
- * Return-address parameters/declarations. They are very useful for
- * debugging, but add overhead in the !DEBUG case - so we go the
- * trouble of using this not too elegant but zero-cost solution:
- */
-#define __IP_DECL__
-#define __IP__
-#define __RET_IP__
-
+static inline void
+debug_mutex_lock_common(struct mutex *lock, struct mutex_waiter *waiter)
+{
+}
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 4aa8a2c9f453..353a853bc390 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -26,6 +26,7 @@
 #include <linux/interrupt.h>
 #include <linux/plist.h>
 #include <linux/fs.h>
+#include <linux/debug_locks.h>
 
 #include "rtmutex_common.h"
 
@@ -45,8 +46,6 @@ do { \
 		console_verbose();					\
 		if (spin_is_locked(&current->pi_lock))			\
 			spin_unlock(&current->pi_lock);			\
-		if (spin_is_locked(&current->held_list_lock))		\
-			spin_unlock(&current->held_list_lock);		\
 	}								\
 } while (0)
 
@@ -105,14 +104,6 @@ static void printk_task(task_t *p)
 		printk("<none>");
 }
 
-static void printk_task_short(task_t *p)
-{
-	if (p)
-		printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
-	else
-		printk("<none>");
-}
-
 static void printk_lock(struct rt_mutex *lock, int print_owner)
 {
 	if (lock->name)
@@ -128,222 +119,6 @@ static void printk_lock(struct rt_mutex *lock, int print_owner)
 		printk_task(rt_mutex_owner(lock));
 		printk("\n");
 	}
-	if (rt_mutex_owner(lock)) {
-		printk("... acquired at: ");
-		print_symbol("%s\n", lock->acquire_ip);
-	}
-}
-
-static void printk_waiter(struct rt_mutex_waiter *w)
-{
-	printk("-------------------------\n");
-	printk("| waiter struct %p:\n", w);
-	printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
-		w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
-		w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
-		w->list_entry.prio);
-	printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
-		w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
-		w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
-		w->pi_list_entry.prio);
-	printk("\n| lock:\n");
-	printk_lock(w->lock, 1);
-	printk("| w->ti->task:\n");
-	printk_task(w->task);
-	printk("| blocked at: ");
-	print_symbol("%s\n", w->ip);
-	printk("-------------------------\n");
-}
-
-static void show_task_locks(task_t *p)
-{
-	switch (p->state) {
-	case TASK_RUNNING:		printk("R"); break;
-	case TASK_INTERRUPTIBLE:	printk("S"); break;
-	case TASK_UNINTERRUPTIBLE:	printk("D"); break;
-	case TASK_STOPPED:		printk("T"); break;
-	case EXIT_ZOMBIE:		printk("Z"); break;
-	case EXIT_DEAD:			printk("X"); break;
-	default:			printk("?"); break;
-	}
-	printk_task(p);
-	if (p->pi_blocked_on) {
-		struct rt_mutex *lock = p->pi_blocked_on->lock;
-
-		printk(" blocked on:");
-		printk_lock(lock, 1);
-	} else
-		printk(" (not blocked)\n");
-}
-
-void rt_mutex_show_held_locks(task_t *task, int verbose)
-{
-	struct list_head *curr, *cursor = NULL;
-	struct rt_mutex *lock;
-	task_t *t;
-	unsigned long flags;
-	int count = 0;
-
-	if (!rt_trace_on)
-		return;
-
-	if (verbose) {
-		printk("------------------------------\n");
-		printk("| showing all locks held by: | (");
-		printk_task_short(task);
-		printk("):\n");
-		printk("------------------------------\n");
-	}
-
-next:
-	spin_lock_irqsave(&task->held_list_lock, flags);
-	list_for_each(curr, &task->held_list_head) {
-		if (cursor && curr != cursor)
-			continue;
-		lock = list_entry(curr, struct rt_mutex, held_list_entry);
-		t = rt_mutex_owner(lock);
-		WARN_ON(t != task);
-		count++;
-		cursor = curr->next;
-		spin_unlock_irqrestore(&task->held_list_lock, flags);
-
-		printk("\n#%03d: ", count);
-		printk_lock(lock, 0);
-		goto next;
-	}
-	spin_unlock_irqrestore(&task->held_list_lock, flags);
-
-	printk("\n");
-}
-
-void rt_mutex_show_all_locks(void)
-{
-	task_t *g, *p;
-	int count = 10;
-	int unlock = 1;
-
-	printk("\n");
-	printk("----------------------\n");
-	printk("| showing all tasks: |\n");
-	printk("----------------------\n");
-
-	/*
-	 * Here we try to get the tasklist_lock as hard as possible,
-	 * if not successful after 2 seconds we ignore it (but keep
-	 * trying). This is to enable a debug printout even if a
-	 * tasklist_lock-holding task deadlocks or crashes.
-	 */
-retry:
-	if (!read_trylock(&tasklist_lock)) {
-		if (count == 10)
-			printk("hm, tasklist_lock locked, retrying... ");
-		if (count) {
-			count--;
-			printk(" #%d", 10-count);
-			mdelay(200);
-			goto retry;
-		}
-		printk(" ignoring it.\n");
-		unlock = 0;
-	}
-	if (count != 10)
-		printk(" locked it.\n");
-
-	do_each_thread(g, p) {
-		show_task_locks(p);
-		if (!unlock)
-			if (read_trylock(&tasklist_lock))
-				unlock = 1;
-	} while_each_thread(g, p);
-
-	printk("\n");
-
-	printk("-----------------------------------------\n");
-	printk("| showing all locks held in the system: |\n");
-	printk("-----------------------------------------\n");
-
-	do_each_thread(g, p) {
-		rt_mutex_show_held_locks(p, 0);
-		if (!unlock)
-			if (read_trylock(&tasklist_lock))
-				unlock = 1;
-	} while_each_thread(g, p);
-
-
-	printk("=============================================\n\n");
-
-	if (unlock)
-		read_unlock(&tasklist_lock);
-}
-
-void rt_mutex_debug_check_no_locks_held(task_t *task)
-{
-	struct rt_mutex_waiter *w;
-	struct list_head *curr;
-	struct rt_mutex *lock;
-
-	if (!rt_trace_on)
-		return;
-	if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
-		printk("BUG: PI priority boost leaked!\n");
-		printk_task(task);
-		printk("\n");
-	}
-	if (list_empty(&task->held_list_head))
-		return;
-
-	spin_lock(&task->pi_lock);
-	plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
-		TRACE_OFF();
-
-		printk("hm, PI interest held at exit time? Task:\n");
-		printk_task(task);
-		printk_waiter(w);
-		return;
-	}
-	spin_unlock(&task->pi_lock);
-
-	list_for_each(curr, &task->held_list_head) {
-		lock = list_entry(curr, struct rt_mutex, held_list_entry);
-
-		printk("BUG: %s/%d, lock held at task exit time!\n",
-			task->comm, task->pid);
-		printk_lock(lock, 1);
-		if (rt_mutex_owner(lock) != task)
-			printk("exiting task is not even the owner??\n");
-	}
-}
-
-int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
-{
-	const void *to = from + len;
-	struct list_head *curr;
-	struct rt_mutex *lock;
-	unsigned long flags;
-	void *lock_addr;
-
-	if (!rt_trace_on)
-		return 0;
-
-	spin_lock_irqsave(&current->held_list_lock, flags);
-	list_for_each(curr, &current->held_list_head) {
-		lock = list_entry(curr, struct rt_mutex, held_list_entry);
-		lock_addr = lock;
-		if (lock_addr < from || lock_addr >= to)
-			continue;
-		TRACE_OFF();
-
-		printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
-			current->comm, current->pid, lock, from, to);
-		dump_stack();
-		printk_lock(lock, 1);
-		if (rt_mutex_owner(lock) != current)
-			printk("freeing task is not even the owner??\n");
-		return 1;
-	}
-	spin_unlock_irqrestore(&current->held_list_lock, flags);
-
-	return 0;
 }
 
 void rt_mutex_debug_task_free(struct task_struct *task)
@@ -395,85 +170,41 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
 		current->comm, current->pid);
 	printk_lock(waiter->lock, 1);
 
-	printk("... trying at: ");
-	print_symbol("%s\n", waiter->ip);
-
 	printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
 	printk_lock(waiter->deadlock_lock, 1);
 
-	rt_mutex_show_held_locks(current, 1);
-	rt_mutex_show_held_locks(task, 1);
+	debug_show_held_locks(current);
+	debug_show_held_locks(task);
 
 	printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
 	show_stack(task, NULL);
 	printk("\n%s/%d's [current] stackdump:\n\n",
 		current->comm, current->pid);
 	dump_stack();
-	rt_mutex_show_all_locks();
+	debug_show_all_locks();
+
 	printk("[ turning off deadlock detection."
 	       "Please report this trace. ]\n\n");
 	local_irq_disable();
 }
 
-void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__)
+void debug_rt_mutex_lock(struct rt_mutex *lock)
 {
-	unsigned long flags;
-
-	if (rt_trace_on) {
-		TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
-
-		spin_lock_irqsave(&current->held_list_lock, flags);
-		list_add_tail(&lock->held_list_entry, &current->held_list_head);
-		spin_unlock_irqrestore(&current->held_list_lock, flags);
-
-		lock->acquire_ip = ip;
-	}
 }
 
 void debug_rt_mutex_unlock(struct rt_mutex *lock)
 {
-	unsigned long flags;
-
-	if (rt_trace_on) {
-		TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
-		TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
-
-		spin_lock_irqsave(&current->held_list_lock, flags);
-		list_del_init(&lock->held_list_entry);
-		spin_unlock_irqrestore(&current->held_list_lock, flags);
-	}
+	TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
 }
 
-void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
-			       struct task_struct *powner __IP_DECL__)
+void
+debug_rt_mutex_proxy_lock(struct rt_mutex *lock, struct task_struct *powner)
 {
-	unsigned long flags;
-
-	if (rt_trace_on) {
-		TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
-
-		spin_lock_irqsave(&powner->held_list_lock, flags);
-		list_add_tail(&lock->held_list_entry, &powner->held_list_head);
-		spin_unlock_irqrestore(&powner->held_list_lock, flags);
-
-		lock->acquire_ip = ip;
-	}
 }
 
 void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
 {
-	unsigned long flags;
-
-	if (rt_trace_on) {
-		struct task_struct *owner = rt_mutex_owner(lock);
-
-		TRACE_WARN_ON_LOCKED(!owner);
-		TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
-
-		spin_lock_irqsave(&owner->held_list_lock, flags);
-		list_del_init(&lock->held_list_entry);
-		spin_unlock_irqrestore(&owner->held_list_lock, flags);
-	}
+	TRACE_WARN_ON_LOCKED(!rt_mutex_owner(lock));
 }
 
 void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
@@ -493,14 +224,11 @@ void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
 
 void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
 {
-	void *addr = lock;
-
-	if (rt_trace_on) {
-		rt_mutex_debug_check_no_locks_freed(addr,
-						    sizeof(struct rt_mutex));
-		INIT_LIST_HEAD(&lock->held_list_entry);
-		lock->name = name;
-	}
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lock->name = name;
 }
 
 void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
index 7612fbc62d70..14193d596d78 100644
--- a/kernel/rtmutex-debug.h
+++ b/kernel/rtmutex-debug.h
@@ -9,20 +9,16 @@
  * This file contains macros used solely by rtmutex.c. Debug version.
  */
 
-#define __IP_DECL__		, unsigned long ip
-#define __IP__			, ip
-#define __RET_IP__		, (unsigned long)__builtin_return_address(0)
-
 extern void
 rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
 extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
 extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
 extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
 extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
-extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__);
+extern void debug_rt_mutex_lock(struct rt_mutex *lock);
 extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
 extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
-				      struct task_struct *powner __IP_DECL__);
+				      struct task_struct *powner);
 extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
 extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
 				    struct rt_mutex *lock);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index 45d61016da57..91b699aa658b 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -161,8 +161,7 @@ static int rt_mutex_adjust_prio_chain(task_t *task,
 				      int deadlock_detect,
 				      struct rt_mutex *orig_lock,
 				      struct rt_mutex_waiter *orig_waiter,
-				      struct task_struct *top_task
-				      __IP_DECL__)
+				      struct task_struct *top_task)
 {
 	struct rt_mutex *lock;
 	struct rt_mutex_waiter *waiter, *top_waiter = orig_waiter;
@@ -357,7 +356,7 @@ static inline int try_to_steal_lock(struct rt_mutex *lock)
  *
  * Must be called with lock->wait_lock held.
  */
-static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
+static int try_to_take_rt_mutex(struct rt_mutex *lock)
 {
 	/*
 	 * We have to be careful here if the atomic speedups are
@@ -384,7 +383,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
 		return 0;
 
 	/* We got the lock. */
-	debug_rt_mutex_lock(lock __IP__);
+	debug_rt_mutex_lock(lock);
 
 	rt_mutex_set_owner(lock, current, 0);
 
@@ -402,8 +401,7 @@ static int try_to_take_rt_mutex(struct rt_mutex *lock __IP_DECL__)
  */
 static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 				   struct rt_mutex_waiter *waiter,
-				   int detect_deadlock
-				   __IP_DECL__)
+				   int detect_deadlock)
 {
 	struct rt_mutex_waiter *top_waiter = waiter;
 	task_t *owner = rt_mutex_owner(lock);
@@ -454,7 +452,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 	spin_unlock(&lock->wait_lock);
 
 	res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-					 current __IP__);
+					 current);
 
 	spin_lock(&lock->wait_lock);
 
@@ -526,7 +524,7 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
  * Must be called with lock->wait_lock held
  */
 static void remove_waiter(struct rt_mutex *lock,
-			  struct rt_mutex_waiter *waiter __IP_DECL__)
+			  struct rt_mutex_waiter *waiter)
 {
 	int first = (waiter == rt_mutex_top_waiter(lock));
 	int boost = 0;
@@ -568,7 +566,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
 	spin_unlock(&lock->wait_lock);
 
-	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current __IP__);
+	rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
 
 	spin_lock(&lock->wait_lock);
 }
@@ -595,7 +593,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 	get_task_struct(task);
 	spin_unlock_irqrestore(&task->pi_lock, flags);
 
-	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task __RET_IP__);
+	rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
 }
 
 /*
@@ -604,7 +602,7 @@ void rt_mutex_adjust_pi(struct task_struct *task)
 static int __sched
 rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		  struct hrtimer_sleeper *timeout,
-		  int detect_deadlock __IP_DECL__)
+		  int detect_deadlock)
 {
 	struct rt_mutex_waiter waiter;
 	int ret = 0;
@@ -615,7 +613,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	spin_lock(&lock->wait_lock);
 
 	/* Try to acquire the lock again: */
-	if (try_to_take_rt_mutex(lock __IP__)) {
+	if (try_to_take_rt_mutex(lock)) {
 		spin_unlock(&lock->wait_lock);
 		return 0;
 	}
@@ -629,7 +627,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
 	for (;;) {
 		/* Try to acquire the lock: */
-		if (try_to_take_rt_mutex(lock __IP__))
+		if (try_to_take_rt_mutex(lock))
 			break;
 
 		/*
@@ -653,7 +651,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 		 */
 		if (!waiter.task) {
 			ret = task_blocks_on_rt_mutex(lock, &waiter,
-						      detect_deadlock __IP__);
+						      detect_deadlock);
 			/*
 			 * If we got woken up by the owner then start loop
 			 * all over without going into schedule to try
@@ -680,7 +678,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 	set_current_state(TASK_RUNNING);
 
 	if (unlikely(waiter.task))
-		remove_waiter(lock, &waiter __IP__);
+		remove_waiter(lock, &waiter);
 
 	/*
 	 * try_to_take_rt_mutex() sets the waiter bit
@@ -711,7 +709,7 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
  * Slow path try-lock function:
  */
 static inline int
-rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
+rt_mutex_slowtrylock(struct rt_mutex *lock)
 {
 	int ret = 0;
 
@@ -719,7 +717,7 @@ rt_mutex_slowtrylock(struct rt_mutex *lock __IP_DECL__)
 
 	if (likely(rt_mutex_owner(lock) != current)) {
 
-		ret = try_to_take_rt_mutex(lock __IP__);
+		ret = try_to_take_rt_mutex(lock);
 		/*
 		 * try_to_take_rt_mutex() sets the lock waiters
 		 * bit unconditionally. Clean this up.
@@ -769,13 +767,13 @@ rt_mutex_fastlock(struct rt_mutex *lock, int state,
 		  int detect_deadlock,
 		  int (*slowfn)(struct rt_mutex *lock, int state,
 				struct hrtimer_sleeper *timeout,
-				int detect_deadlock __IP_DECL__))
+				int detect_deadlock))
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, NULL, detect_deadlock __RET_IP__);
+		return slowfn(lock, state, NULL, detect_deadlock);
 }
 
 static inline int
@@ -783,24 +781,24 @@ rt_mutex_timed_fastlock(struct rt_mutex *lock, int state,
 			struct hrtimer_sleeper *timeout, int detect_deadlock,
 			int (*slowfn)(struct rt_mutex *lock, int state,
 				      struct hrtimer_sleeper *timeout,
-				      int detect_deadlock __IP_DECL__))
+				      int detect_deadlock))
 {
 	if (!detect_deadlock && likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 0;
 	} else
-		return slowfn(lock, state, timeout, detect_deadlock __RET_IP__);
+		return slowfn(lock, state, timeout, detect_deadlock);
 }
 
 static inline int
 rt_mutex_fasttrylock(struct rt_mutex *lock,
-		     int (*slowfn)(struct rt_mutex *lock __IP_DECL__))
+		     int (*slowfn)(struct rt_mutex *lock))
 {
 	if (likely(rt_mutex_cmpxchg(lock, NULL, current))) {
 		rt_mutex_deadlock_account_lock(lock, current);
 		return 1;
 	}
-	return slowfn(lock __RET_IP__);
+	return slowfn(lock);
 }
 
 static inline void
@@ -948,7 +946,7 @@ void rt_mutex_init_proxy_locked(struct rt_mutex *lock,
 				struct task_struct *proxy_owner)
 {
 	__rt_mutex_init(lock, NULL);
-	debug_rt_mutex_proxy_lock(lock, proxy_owner __RET_IP__);
+	debug_rt_mutex_proxy_lock(lock, proxy_owner);
 	rt_mutex_set_owner(lock, proxy_owner, 0);
 	rt_mutex_deadlock_account_lock(lock, proxy_owner);
 }
diff --git a/kernel/rtmutex.h b/kernel/rtmutex.h
index 1e0fca13ff72..a1a1dd06421d 100644
--- a/kernel/rtmutex.h
+++ b/kernel/rtmutex.h
@@ -10,9 +10,6 @@
  * Non-debug version.
  */
 
-#define __IP_DECL__
-#define __IP__
-#define __RET_IP__
 #define rt_mutex_deadlock_check(l)			(0)
 #define rt_mutex_deadlock_account_lock(m, t)		do { } while (0)
 #define rt_mutex_deadlock_account_unlock(l)		do { } while (0)
diff --git a/kernel/sched.c b/kernel/sched.c
index d5e37072ea54..48c1faa60a67 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -30,6 +30,7 @@
 #include <linux/capability.h>
 #include <linux/completion.h>
 #include <linux/kernel_stat.h>
+#include <linux/debug_locks.h>
 #include <linux/security.h>
 #include <linux/notifier.h>
 #include <linux/profile.h>
@@ -3142,12 +3143,13 @@ void fastcall add_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	BUG_ON((preempt_count() < 0));
+	if (DEBUG_LOCKS_WARN_ON((preempt_count() < 0)))
+		return;
 	preempt_count() += val;
 	/*
 	 * Spinlock count overflowing soon?
 	 */
-	BUG_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
+	DEBUG_LOCKS_WARN_ON((preempt_count() & PREEMPT_MASK) >= PREEMPT_MASK-10);
 }
 EXPORT_SYMBOL(add_preempt_count);
 
@@ -3156,11 +3158,15 @@ void fastcall sub_preempt_count(int val)
 	/*
 	 * Underflow?
 	 */
-	BUG_ON(val > preempt_count());
+	if (DEBUG_LOCKS_WARN_ON(val > preempt_count()))
+		return;
 	/*
 	 * Is the spinlock portion underflowing?
 	 */
-	BUG_ON((val < PREEMPT_MASK) && !(preempt_count() & PREEMPT_MASK));
+	if (DEBUG_LOCKS_WARN_ON((val < PREEMPT_MASK) &&
+			!(preempt_count() & PREEMPT_MASK)))
+		return;
+
 	preempt_count() -= val;
 }
 EXPORT_SYMBOL(sub_preempt_count);
@@ -4690,7 +4696,7 @@ void show_state(void)
 	} while_each_thread(g, p);
 
 	read_unlock(&tasklist_lock);
-	mutex_debug_show_all_locks();
+	debug_show_all_locks();
 }
 
 /**