author | Steven Rostedt <srostedt@redhat.com> | 2011-04-20 21:41:57 -0400
committer | Ingo Molnar <mingo@elte.hu> | 2011-04-22 05:06:58 -0400
commit | dad3d7435e1d8c254d6877dc06852dc00c5da812
tree | 85f4ab38ff63879b117d363ef1fdb3cf83bf870d /kernel/lockdep.c
parent | 48702ecf308e53f176c1f6fdc193d622ded54ac0
lockdep: Print a nicer description for irq inversion bugs
Irq inversion and irq dependency bugs are only subtly
different. The difference lies in where the interrupt occurred.
For irq dependency:
irq_disable
lock(A)
lock(B)
unlock(B)
unlock(A)
irq_enable
lock(B)
unlock(B)
 <interrupt>
   lock(A)
The interrupt comes in after it has been established that lock A
can be held while taking the irq-unsafe lock B. Lockdep detects the
problem when lock A is taken in interrupt context.
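As an illustration only (not part of this patch; the lock names and the
interrupt handler are hypothetical), a minimal kernel-style sketch of
code that sets up this kind of irq-dependency report could look like:

/*
 * Hypothetical example: lock_a is held while taking the irq-unsafe
 * lock_b, and lock_a is also taken from hardirq context.
 */
#include <linux/spinlock.h>
#include <linux/interrupt.h>

static DEFINE_SPINLOCK(lock_a);
static DEFINE_SPINLOCK(lock_b);

static void establish_dependency(void)
{
	unsigned long flags;

	/* Record the A -> B dependency with interrupts disabled. */
	spin_lock_irqsave(&lock_a, flags);
	spin_lock(&lock_b);
	spin_unlock(&lock_b);
	spin_unlock_irqrestore(&lock_a, flags);

	/* B is also taken with interrupts enabled, so B is irq-unsafe. */
	spin_lock(&lock_b);
	spin_unlock(&lock_b);
}

/* Taking A in hardirq context is where lockdep reports the problem. */
static irqreturn_t demo_handler(int irq, void *dev_id)
{
	spin_lock(&lock_a);
	spin_unlock(&lock_a);
	return IRQ_HANDLED;
}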
With irq inversion, the interrupt happens before that dependency is
established, and lockdep detects the problem when lock B is taken:
 <interrupt>
   lock(A)
irq_disable
lock(A)
lock(B)
unlock(B)
unlock(A)
irq_enable
lock(B)
unlock(B)
Since the underlying locking problem is the same in both cases,
both should report the same scenario.
This patch implements that and prints this:
other info that might help us debug this:
Chain exists of:
&rq->lock --> lockA --> lockC
Possible interrupt unsafe locking scenario:
       CPU0                    CPU1
       ----                    ----
  lock(lockC);
                               local_irq_disable();
                               lock(&rq->lock);
                               lock(lockA);
  <Interrupt>
    lock(&rq->lock);
*** DEADLOCK ***
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Link: http://lkml.kernel.org/r/20110421014259.910720381@goodmis.org
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r-- | kernel/lockdep.c | 34
1 files changed, 29 insertions, 5 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index c4cc5d1acf48..0b497dda6ae5 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -1395,15 +1395,15 @@ print_shortest_lock_dependencies(struct lock_list *leaf,
 static void
 print_irq_lock_scenario(struct lock_list *safe_entry,
 			struct lock_list *unsafe_entry,
-			struct held_lock *prev,
-			struct held_lock *next)
+			struct lock_class *prev_class,
+			struct lock_class *next_class)
 {
 	struct lock_class *safe_class = safe_entry->class;
 	struct lock_class *unsafe_class = unsafe_entry->class;
-	struct lock_class *middle_class = hlock_class(prev);
+	struct lock_class *middle_class = prev_class;
 
 	if (middle_class == safe_class)
-		middle_class = hlock_class(next);
+		middle_class = next_class;
 
 	/*
 	 * A direct locking problem where unsafe_class lock is taken
@@ -1499,7 +1499,8 @@ print_bad_irq_dependency(struct task_struct *curr,
 	print_stack_trace(forwards_entry->class->usage_traces + bit2, 1);
 
 	printk("\nother info that might help us debug this:\n\n");
-	print_irq_lock_scenario(backwards_entry, forwards_entry, prev, next);
+	print_irq_lock_scenario(backwards_entry, forwards_entry,
+				hlock_class(prev), hlock_class(next));
 
 	lockdep_print_held_locks(curr);
 
@@ -2219,6 +2220,10 @@ print_irq_inversion_bug(struct task_struct *curr,
 			struct held_lock *this, int forwards,
 			const char *irqclass)
 {
+	struct lock_list *entry = other;
+	struct lock_list *middle = NULL;
+	int depth;
+
 	if (!debug_locks_off_graph_unlock() || debug_locks_silent)
 		return 0;
 
@@ -2237,6 +2242,25 @@ print_irq_inversion_bug(struct task_struct *curr,
 	printk("\n\nand interrupts could create inverse lock ordering between them.\n\n");
 
 	printk("\nother info that might help us debug this:\n");
+
+	/* Find a middle lock (if one exists) */
+	depth = get_lock_depth(other);
+	do {
+		if (depth == 0 && (entry != root)) {
+			printk("lockdep:%s bad path found in chain graph\n", __func__);
+			break;
+		}
+		middle = entry;
+		entry = get_lock_parent(entry);
+		depth--;
+	} while (entry && entry != root && (depth >= 0));
+	if (forwards)
+		print_irq_lock_scenario(root, other,
+			middle ? middle->class : root->class, other->class);
+	else
+		print_irq_lock_scenario(other, root,
+			middle ? middle->class : other->class, root->class);
+
 	lockdep_print_held_locks(curr);
 
 	printk("\nthe shortest dependencies between 2nd lock and 1st lock:\n");
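The middle-lock search added above walks the parent chain from the leaf
entry toward the root and remembers the last entry visited before the
root; that entry's class becomes the "middle" lock in the printed
scenario. A simplified standalone sketch of that walk, using stand-in
types rather than the real lockdep structures, could look like:

#include <stddef.h>

/* Simplified stand-in for lockdep's lock_list; illustration only. */
struct lock_entry {
	struct lock_entry *parent;
	int depth;		/* number of parents above this entry */
};

/*
 * Walk from @leaf toward @root and return the entry directly below the
 * root. If the chain never reaches the root within the recorded depth,
 * stop early, mirroring the "bad path" case in the patch.
 */
static struct lock_entry *find_middle(struct lock_entry *root,
				      struct lock_entry *leaf)
{
	struct lock_entry *entry = leaf;
	struct lock_entry *middle = NULL;
	int depth = leaf->depth;

	do {
		if (depth == 0 && entry != root)
			break;		/* bad path in the chain graph */
		middle = entry;
		entry = entry->parent;
		depth--;
	} while (entry && entry != root && depth >= 0);

	return middle;
}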