-rw-r--r--  include/linux/mm.h      |   1
-rw-r--r--  include/linux/rtmutex.h |  15
-rw-r--r--  kernel/Makefile         |   1
-rw-r--r--  kernel/exit.c           |   1
-rw-r--r--  kernel/rtmutex-debug.c  | 513
-rw-r--r--  kernel/rtmutex-debug.h  |  37
-rw-r--r--  lib/Kconfig.debug       |  13
-rw-r--r--  mm/slab.c               |   1
8 files changed, 581 insertions(+), 1 deletion(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0ac255720f34..c41a1299b8cf 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1034,6 +1034,7 @@ static inline void
 debug_check_no_locks_freed(const void *from, unsigned long len)
 {
         mutex_debug_check_no_locks_freed(from, len);
+        rt_mutex_debug_check_no_locks_freed(from, len);
 }
 
 #ifndef CONFIG_DEBUG_PAGEALLOC
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 12309c916c68..fa4a3b82ba70 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -41,6 +41,19 @@ struct rt_mutex_waiter;
 struct hrtimer_sleeper;
 
 #ifdef CONFIG_DEBUG_RT_MUTEXES
+extern int rt_mutex_debug_check_no_locks_freed(const void *from,
+                                               unsigned long len);
+extern void rt_mutex_debug_check_no_locks_held(struct task_struct *task);
+#else
+static inline int rt_mutex_debug_check_no_locks_freed(const void *from,
+                                                      unsigned long len)
+{
+        return 0;
+}
+# define rt_mutex_debug_check_no_locks_held(task) do { } while (0)
+#endif
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname) \
         , .name = #mutexname, .file = __FILE__, .line = __LINE__
 # define rt_mutex_init(mutex) __rt_mutex_init(mutex, __FUNCTION__)
@@ -48,7 +61,7 @@ struct hrtimer_sleeper;
 #else
 # define __DEBUG_RT_MUTEX_INITIALIZER(mutexname)
 # define rt_mutex_init(mutex) __rt_mutex_init(mutex, NULL)
-# define rt_mutex_debug_task_free(t)   do { } while (0)
+# define rt_mutex_debug_task_free(t)           do { } while (0)
 #endif
 
 #define __RT_MUTEX_INITIALIZER(mutexname) \
diff --git a/kernel/Makefile b/kernel/Makefile
index 21df9a338ff0..f9c92d34cde5 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -17,6 +17,7 @@ ifeq ($(CONFIG_COMPAT),y)
 obj-$(CONFIG_FUTEX) += futex_compat.o
 endif
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
+obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_GENERIC_ISA_DMA) += dma.o
 obj-$(CONFIG_SMP) += cpu.o spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
diff --git a/kernel/exit.c b/kernel/exit.c
index 304ef637be6c..3e8a0282e9a5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -929,6 +929,7 @@ fastcall NORET_TYPE void do_exit(long code)
          * If DEBUG_MUTEXES is on, make sure we are holding no locks:
          */
         mutex_debug_check_no_locks_held(tsk);
+        rt_mutex_debug_check_no_locks_held(tsk);
 
         if (tsk->io_context)
                 exit_io_context();
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
new file mode 100644
index 000000000000..4aa8a2c9f453
--- /dev/null
+++ b/kernel/rtmutex-debug.c
@@ -0,0 +1,513 @@
+/*
+ * RT-Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This code is based on the rt.c implementation in the preempt-rt tree.
+ * Portions of said code are
+ *
+ *  Copyright (C) 2004 LynuxWorks, Inc., Igor Manyilov, Bill Huey
+ *  Copyright (C) 2006 Esben Nielsen
+ *  Copyright (C) 2006 Kihon Technologies Inc.,
+ *      Steven Rostedt <rostedt@goodmis.org>
+ *
+ * See rt.c in preempt-rt for proper credits and further information
+ */
+#include <linux/config.h>
+#include <linux/sched.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/syscalls.h>
+#include <linux/interrupt.h>
+#include <linux/plist.h>
+#include <linux/fs.h>
+
+#include "rtmutex_common.h"
+
+#ifdef CONFIG_DEBUG_RT_MUTEXES
+# include "rtmutex-debug.h"
+#else
+# include "rtmutex.h"
+#endif
+
+# define TRACE_WARN_ON(x) WARN_ON(x)
+# define TRACE_BUG_ON(x) BUG_ON(x)
+
+# define TRACE_OFF() \
+do { \
+        if (rt_trace_on) { \
+                rt_trace_on = 0; \
+                console_verbose(); \
+                if (spin_is_locked(&current->pi_lock)) \
+                        spin_unlock(&current->pi_lock); \
+                if (spin_is_locked(&current->held_list_lock)) \
+                        spin_unlock(&current->held_list_lock); \
+        } \
+} while (0)
+
+# define TRACE_OFF_NOLOCK() \
+do { \
+        if (rt_trace_on) { \
+                rt_trace_on = 0; \
+                console_verbose(); \
+        } \
+} while (0)
+
+# define TRACE_BUG_LOCKED() \
+do { \
+        TRACE_OFF(); \
+        BUG(); \
+} while (0)
+
+# define TRACE_WARN_ON_LOCKED(c) \
+do { \
+        if (unlikely(c)) { \
+                TRACE_OFF(); \
+                WARN_ON(1); \
+        } \
+} while (0)
+
+# define TRACE_BUG_ON_LOCKED(c) \
+do { \
+        if (unlikely(c)) \
+                TRACE_BUG_LOCKED(); \
+} while (0)
+
+#ifdef CONFIG_SMP
+# define SMP_TRACE_BUG_ON_LOCKED(c) TRACE_BUG_ON_LOCKED(c)
+#else
+# define SMP_TRACE_BUG_ON_LOCKED(c) do { } while (0)
+#endif
+
+/*
+ * deadlock detection flag. We turn it off when we detect
+ * the first problem because we dont want to recurse back
+ * into the tracing code when doing error printk or
+ * executing a BUG():
+ */
+int rt_trace_on = 1;
+
+void deadlock_trace_off(void)
+{
+        rt_trace_on = 0;
+}
+
+static void printk_task(task_t *p)
+{
+        if (p)
+                printk("%16s:%5d [%p, %3d]", p->comm, p->pid, p, p->prio);
+        else
+                printk("<none>");
+}
+
+static void printk_task_short(task_t *p)
+{
+        if (p)
+                printk("%s/%d [%p, %3d]", p->comm, p->pid, p, p->prio);
+        else
+                printk("<none>");
+}
+
+static void printk_lock(struct rt_mutex *lock, int print_owner)
+{
+        if (lock->name)
+                printk(" [%p] {%s}\n",
+                        lock, lock->name);
+        else
+                printk(" [%p] {%s:%d}\n",
+                        lock, lock->file, lock->line);
+
+        if (print_owner && rt_mutex_owner(lock)) {
+                printk(".. ->owner: %p\n", lock->owner);
+                printk(".. held by: ");
+                printk_task(rt_mutex_owner(lock));
+                printk("\n");
+        }
+        if (rt_mutex_owner(lock)) {
+                printk("... acquired at: ");
+                print_symbol("%s\n", lock->acquire_ip);
+        }
+}
+
+static void printk_waiter(struct rt_mutex_waiter *w)
+{
+        printk("-------------------------\n");
+        printk("| waiter struct %p:\n", w);
+        printk("| w->list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
+                w->list_entry.plist.prio_list.prev, w->list_entry.plist.prio_list.next,
+                w->list_entry.plist.node_list.prev, w->list_entry.plist.node_list.next,
+                w->list_entry.prio);
+        printk("| w->pi_list_entry: [DP:%p/%p|SP:%p/%p|PRI:%d]\n",
+                w->pi_list_entry.plist.prio_list.prev, w->pi_list_entry.plist.prio_list.next,
+                w->pi_list_entry.plist.node_list.prev, w->pi_list_entry.plist.node_list.next,
+                w->pi_list_entry.prio);
+        printk("\n| lock:\n");
+        printk_lock(w->lock, 1);
+        printk("| w->ti->task:\n");
+        printk_task(w->task);
+        printk("| blocked at: ");
+        print_symbol("%s\n", w->ip);
+        printk("-------------------------\n");
+}
+
+static void show_task_locks(task_t *p)
+{
+        switch (p->state) {
+        case TASK_RUNNING: printk("R"); break;
+        case TASK_INTERRUPTIBLE: printk("S"); break;
+        case TASK_UNINTERRUPTIBLE: printk("D"); break;
+        case TASK_STOPPED: printk("T"); break;
+        case EXIT_ZOMBIE: printk("Z"); break;
+        case EXIT_DEAD: printk("X"); break;
+        default: printk("?"); break;
+        }
+        printk_task(p);
+        if (p->pi_blocked_on) {
+                struct rt_mutex *lock = p->pi_blocked_on->lock;
+
+                printk(" blocked on:");
+                printk_lock(lock, 1);
+        } else
+                printk(" (not blocked)\n");
+}
+
+void rt_mutex_show_held_locks(task_t *task, int verbose)
+{
+        struct list_head *curr, *cursor = NULL;
+        struct rt_mutex *lock;
+        task_t *t;
+        unsigned long flags;
+        int count = 0;
+
+        if (!rt_trace_on)
+                return;
+
+        if (verbose) {
+                printk("------------------------------\n");
+                printk("| showing all locks held by: | (");
+                printk_task_short(task);
+                printk("):\n");
+                printk("------------------------------\n");
+        }
+
+next:
+        spin_lock_irqsave(&task->held_list_lock, flags);
+        list_for_each(curr, &task->held_list_head) {
+                if (cursor && curr != cursor)
+                        continue;
+                lock = list_entry(curr, struct rt_mutex, held_list_entry);
+                t = rt_mutex_owner(lock);
+                WARN_ON(t != task);
+                count++;
+                cursor = curr->next;
+                spin_unlock_irqrestore(&task->held_list_lock, flags);
+
+                printk("\n#%03d: ", count);
+                printk_lock(lock, 0);
+                goto next;
+        }
+        spin_unlock_irqrestore(&task->held_list_lock, flags);
+
+        printk("\n");
+}
+
+void rt_mutex_show_all_locks(void)
+{
+        task_t *g, *p;
+        int count = 10;
+        int unlock = 1;
+
+        printk("\n");
+        printk("----------------------\n");
+        printk("| showing all tasks: |\n");
+        printk("----------------------\n");
+
+        /*
+         * Here we try to get the tasklist_lock as hard as possible,
+         * if not successful after 2 seconds we ignore it (but keep
+         * trying). This is to enable a debug printout even if a
+         * tasklist_lock-holding task deadlocks or crashes.
+         */
+retry:
+        if (!read_trylock(&tasklist_lock)) {
+                if (count == 10)
+                        printk("hm, tasklist_lock locked, retrying... ");
+                if (count) {
+                        count--;
+                        printk(" #%d", 10-count);
+                        mdelay(200);
+                        goto retry;
+                }
+                printk(" ignoring it.\n");
+                unlock = 0;
+        }
+        if (count != 10)
+                printk(" locked it.\n");
+
+        do_each_thread(g, p) {
+                show_task_locks(p);
+                if (!unlock)
+                        if (read_trylock(&tasklist_lock))
+                                unlock = 1;
+        } while_each_thread(g, p);
+
+        printk("\n");
+
+        printk("-----------------------------------------\n");
+        printk("| showing all locks held in the system: |\n");
+        printk("-----------------------------------------\n");
+
+        do_each_thread(g, p) {
+                rt_mutex_show_held_locks(p, 0);
+                if (!unlock)
+                        if (read_trylock(&tasklist_lock))
+                                unlock = 1;
+        } while_each_thread(g, p);
+
+
+        printk("=============================================\n\n");
+
+        if (unlock)
+                read_unlock(&tasklist_lock);
+}
+
+void rt_mutex_debug_check_no_locks_held(task_t *task)
+{
+        struct rt_mutex_waiter *w;
+        struct list_head *curr;
+        struct rt_mutex *lock;
+
+        if (!rt_trace_on)
+                return;
+        if (!rt_prio(task->normal_prio) && rt_prio(task->prio)) {
+                printk("BUG: PI priority boost leaked!\n");
+                printk_task(task);
+                printk("\n");
+        }
+        if (list_empty(&task->held_list_head))
+                return;
+
+        spin_lock(&task->pi_lock);
+        plist_for_each_entry(w, &task->pi_waiters, pi_list_entry) {
+                TRACE_OFF();
+
+                printk("hm, PI interest held at exit time? Task:\n");
+                printk_task(task);
+                printk_waiter(w);
+                return;
+        }
+        spin_unlock(&task->pi_lock);
+
+        list_for_each(curr, &task->held_list_head) {
+                lock = list_entry(curr, struct rt_mutex, held_list_entry);
+
+                printk("BUG: %s/%d, lock held at task exit time!\n",
+                        task->comm, task->pid);
+                printk_lock(lock, 1);
+                if (rt_mutex_owner(lock) != task)
+                        printk("exiting task is not even the owner??\n");
+        }
+}
+
+int rt_mutex_debug_check_no_locks_freed(const void *from, unsigned long len)
+{
+        const void *to = from + len;
+        struct list_head *curr;
+        struct rt_mutex *lock;
+        unsigned long flags;
+        void *lock_addr;
+
+        if (!rt_trace_on)
+                return 0;
+
+        spin_lock_irqsave(&current->held_list_lock, flags);
+        list_for_each(curr, &current->held_list_head) {
+                lock = list_entry(curr, struct rt_mutex, held_list_entry);
+                lock_addr = lock;
+                if (lock_addr < from || lock_addr >= to)
+                        continue;
+                TRACE_OFF();
+
+                printk("BUG: %s/%d, active lock [%p(%p-%p)] freed!\n",
+                        current->comm, current->pid, lock, from, to);
+                dump_stack();
+                printk_lock(lock, 1);
+                if (rt_mutex_owner(lock) != current)
+                        printk("freeing task is not even the owner??\n");
+                return 1;
+        }
+        spin_unlock_irqrestore(&current->held_list_lock, flags);
+
+        return 0;
+}
+
+void rt_mutex_debug_task_free(struct task_struct *task)
+{
+        WARN_ON(!plist_head_empty(&task->pi_waiters));
+        WARN_ON(task->pi_blocked_on);
+}
+
+/*
+ * We fill out the fields in the waiter to store the information about
+ * the deadlock. We print when we return. act_waiter can be NULL in
+ * case of a remove waiter operation.
+ */
+void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
+                             struct rt_mutex *lock)
+{
+        struct task_struct *task;
+
+        if (!rt_trace_on || detect || !act_waiter)
+                return;
+
+        task = rt_mutex_owner(act_waiter->lock);
+        if (task && task != current) {
+                act_waiter->deadlock_task_pid = task->pid;
+                act_waiter->deadlock_lock = lock;
+        }
+}
+
+void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
+{
+        struct task_struct *task;
+
+        if (!waiter->deadlock_lock || !rt_trace_on)
+                return;
+
+        task = find_task_by_pid(waiter->deadlock_task_pid);
+        if (!task)
+                return;
+
+        TRACE_OFF_NOLOCK();
+
+        printk("\n============================================\n");
+        printk( "[ BUG: circular locking deadlock detected! ]\n");
+        printk( "--------------------------------------------\n");
+        printk("%s/%d is deadlocking current task %s/%d\n\n",
+                task->comm, task->pid, current->comm, current->pid);
+
+        printk("\n1) %s/%d is trying to acquire this lock:\n",
+                current->comm, current->pid);
+        printk_lock(waiter->lock, 1);
+
+        printk("... trying at: ");
+        print_symbol("%s\n", waiter->ip);
+
+        printk("\n2) %s/%d is blocked on this lock:\n", task->comm, task->pid);
+        printk_lock(waiter->deadlock_lock, 1);
+
+        rt_mutex_show_held_locks(current, 1);
+        rt_mutex_show_held_locks(task, 1);
+
+        printk("\n%s/%d's [blocked] stackdump:\n\n", task->comm, task->pid);
+        show_stack(task, NULL);
+        printk("\n%s/%d's [current] stackdump:\n\n",
+                current->comm, current->pid);
+        dump_stack();
+        rt_mutex_show_all_locks();
+        printk("[ turning off deadlock detection."
+                "Please report this trace. ]\n\n");
+        local_irq_disable();
+}
+
+void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__)
+{
+        unsigned long flags;
+
+        if (rt_trace_on) {
+                TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
+
+                spin_lock_irqsave(&current->held_list_lock, flags);
+                list_add_tail(&lock->held_list_entry, &current->held_list_head);
+                spin_unlock_irqrestore(&current->held_list_lock, flags);
+
+                lock->acquire_ip = ip;
+        }
+}
+
+void debug_rt_mutex_unlock(struct rt_mutex *lock)
+{
+        unsigned long flags;
+
+        if (rt_trace_on) {
+                TRACE_WARN_ON_LOCKED(rt_mutex_owner(lock) != current);
+                TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
+
+                spin_lock_irqsave(&current->held_list_lock, flags);
+                list_del_init(&lock->held_list_entry);
+                spin_unlock_irqrestore(&current->held_list_lock, flags);
+        }
+}
+
+void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
+                               struct task_struct *powner __IP_DECL__)
+{
+        unsigned long flags;
+
+        if (rt_trace_on) {
+                TRACE_WARN_ON_LOCKED(!list_empty(&lock->held_list_entry));
+
+                spin_lock_irqsave(&powner->held_list_lock, flags);
+                list_add_tail(&lock->held_list_entry, &powner->held_list_head);
+                spin_unlock_irqrestore(&powner->held_list_lock, flags);
+
+                lock->acquire_ip = ip;
+        }
+}
+
+void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock)
+{
+        unsigned long flags;
+
+        if (rt_trace_on) {
+                struct task_struct *owner = rt_mutex_owner(lock);
+
+                TRACE_WARN_ON_LOCKED(!owner);
+                TRACE_WARN_ON_LOCKED(list_empty(&lock->held_list_entry));
+
+                spin_lock_irqsave(&owner->held_list_lock, flags);
+                list_del_init(&lock->held_list_entry);
+                spin_unlock_irqrestore(&owner->held_list_lock, flags);
+        }
+}
+
+void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
+{
+        memset(waiter, 0x11, sizeof(*waiter));
+        plist_node_init(&waiter->list_entry, MAX_PRIO);
+        plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
+}
+
+void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
+{
+        TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
+        TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
+        TRACE_WARN_ON(waiter->task);
+        memset(waiter, 0x22, sizeof(*waiter));
+}
+
+void debug_rt_mutex_init(struct rt_mutex *lock, const char *name)
+{
+        void *addr = lock;
+
+        if (rt_trace_on) {
+                rt_mutex_debug_check_no_locks_freed(addr,
+                                                    sizeof(struct rt_mutex));
+                INIT_LIST_HEAD(&lock->held_list_entry);
+                lock->name = name;
+        }
+}
+
+void rt_mutex_deadlock_account_lock(struct rt_mutex *lock, task_t *task)
+{
+}
+
+void rt_mutex_deadlock_account_unlock(struct task_struct *task)
+{
+}
+
diff --git a/kernel/rtmutex-debug.h b/kernel/rtmutex-debug.h
new file mode 100644
index 000000000000..7612fbc62d70
--- /dev/null
+++ b/kernel/rtmutex-debug.h
@@ -0,0 +1,37 @@
+/*
+ * RT-Mutexes: blocking mutual exclusion locks with PI support
+ *
+ * started by Ingo Molnar and Thomas Gleixner:
+ *
+ *  Copyright (C) 2004-2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
+ *  Copyright (C) 2006, Timesys Corp., Thomas Gleixner <tglx@timesys.com>
+ *
+ * This file contains macros used solely by rtmutex.c. Debug version.
+ */
+
+#define __IP_DECL__ , unsigned long ip
+#define __IP__ , ip
+#define __RET_IP__ , (unsigned long)__builtin_return_address(0)
+
+extern void
+rt_mutex_deadlock_account_lock(struct rt_mutex *lock, struct task_struct *task);
+extern void rt_mutex_deadlock_account_unlock(struct task_struct *task);
+extern void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter);
+extern void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter);
+extern void debug_rt_mutex_init(struct rt_mutex *lock, const char *name);
+extern void debug_rt_mutex_lock(struct rt_mutex *lock __IP_DECL__);
+extern void debug_rt_mutex_unlock(struct rt_mutex *lock);
+extern void debug_rt_mutex_proxy_lock(struct rt_mutex *lock,
+                                      struct task_struct *powner __IP_DECL__);
+extern void debug_rt_mutex_proxy_unlock(struct rt_mutex *lock);
+extern void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *waiter,
+                                    struct rt_mutex *lock);
+extern void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter);
+# define debug_rt_mutex_reset_waiter(w) \
+        do { (w)->deadlock_lock = NULL; } while (0)
+
+static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
+                                                 int detect)
+{
+        return (waiter != NULL);
+}
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 8bab0102ac73..06d3ea1133c9 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -107,6 +107,19 @@ config DEBUG_MUTEXES
          This allows mutex semantics violations and mutex related deadlocks
          (lockups) to be detected and reported automatically.
 
+config DEBUG_RT_MUTEXES
+        bool "RT Mutex debugging, deadlock detection"
+        default y
+        depends on DEBUG_KERNEL && RT_MUTEXES
+        help
+         This allows rt mutex semantics violations and rt mutex related
+         deadlocks (lockups) to be detected and reported automatically.
+
+config DEBUG_PI_LIST
+        bool
+        default y
+        depends on DEBUG_RT_MUTEXES
+
 config DEBUG_SPINLOCK
         bool "Spinlock debugging"
         depends on DEBUG_KERNEL
diff --git a/mm/slab.c b/mm/slab.c
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -107,6 +107,7 @@
 #include <linux/nodemask.h>
 #include <linux/mempolicy.h>
 #include <linux/mutex.h>
+#include <linux/rtmutex.h>
 
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>