diff options
| author | Peter Zijlstra <a.p.zijlstra@chello.nl> | 2011-07-26 07:13:44 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2011-08-04 04:17:36 -0400 |
| commit | 7d36b26be0f3c6b86e3ab7e1539e42f3a3bc79ca (patch) | |
| tree | f22ac58f11dc1d0738e66024052f2e1c709533f1 /kernel | |
| parent | d7619fe39d9769b4d4545cc511c891deea18ae08 (diff) | |
lockdep: Fix trace_hardirqs_on_caller()
Commit dd4e5d3ac4a ("lockdep: Fix trace_[soft,hard]irqs_[on,off]()
recursion") made a bit of a mess of the various checks and error
conditions.
In particular it moved the check for !irqs_disabled() before the
spurious enable test, resulting in some warnings.
Reported-by: Arnaud Lacombe <lacombar@gmail.com>
Reported-by: Dave Jones <davej@redhat.com>
Reported-and-tested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/1311679697.24752.28.camel@twins
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel')
| -rw-r--r-- | kernel/lockdep.c | 30 |
1 files changed, 16 insertions, 14 deletions
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 3956f5149e25..74ca247a4d4f 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2485,23 +2485,9 @@ static void __trace_hardirqs_on_caller(unsigned long ip)
 {
 	struct task_struct *curr = current;
 
-	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
-		return;
-
-	if (unlikely(curr->hardirqs_enabled)) {
-		/*
-		 * Neither irq nor preemption are disabled here
-		 * so this is racy by nature but losing one hit
-		 * in a stat is not a big deal.
-		 */
-		__debug_atomic_inc(redundant_hardirqs_on);
-		return;
-	}
 	/* we'll do an OFF -> ON transition: */
 	curr->hardirqs_enabled = 1;
 
-	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
-		return;
 	/*
 	 * We are going to turn hardirqs on, so set the
 	 * usage bit for all held locks:
@@ -2529,9 +2515,25 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	if (unlikely(!debug_locks || current->lockdep_recursion))
 		return;
 
+	if (unlikely(current->hardirqs_enabled)) {
+		/*
+		 * Neither irq nor preemption are disabled here
+		 * so this is racy by nature but losing one hit
+		 * in a stat is not a big deal.
+		 */
+		__debug_atomic_inc(redundant_hardirqs_on);
+		return;
+	}
+
 	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
 		return;
 
+	if (DEBUG_LOCKS_WARN_ON(unlikely(early_boot_irqs_disabled)))
+		return;
+
+	if (DEBUG_LOCKS_WARN_ON(current->hardirq_context))
+		return;
+
 	current->lockdep_recursion = 1;
 	__trace_hardirqs_on_caller(ip);
 	current->lockdep_recursion = 0;
