author		Frederic Weisbecker <fweisbec@gmail.com>	2010-04-15 17:10:42 -0400
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-04-30 13:15:49 -0400
commit		8795d7717c467bea7b0a0649d44a258e09f34db2 (patch)
tree		33d0dc56ef918bfc7dd615162be20e77899147c5	/kernel/lockdep.c
parent		868c522b1b75fd3fd3e6a636b4c344ac08edf13a (diff)
lockdep: Fix redundant_hardirqs_on incremented with irqs enabled
When a path restores the flags while irqs are already enabled, we update the per-cpu var redundant_hardirqs_on in a racy fashion and debug_atomic_inc() warns about this situation.

In this particular case, losing a few hits in a stat is not a big deal, so increment it without protection.

v2: Don't bother with disabling irqs, we can miss one count in rare situations.

Reported-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: David Miller <davem@davemloft.net>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'kernel/lockdep.c')
-rw-r--r--	kernel/lockdep.c	7
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 78325f8f1139..1b58a1bbcc87 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2298,7 +2298,12 @@ void trace_hardirqs_on_caller(unsigned long ip)
 		return;
 
 	if (unlikely(curr->hardirqs_enabled)) {
-		debug_atomic_inc(redundant_hardirqs_on);
+		/*
+		 * Neither irq nor preemption are disabled here
+		 * so this is racy by nature but loosing one hit
+		 * in a stat is not a big deal.
+		 */
+		this_cpu_inc(lockdep_stats.redundant_hardirqs_on);
 		return;
 	}
 	/* we'll do an OFF -> ON transition: */
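
For context, the warning mentioned in the changelog comes from the stats helper itself: debug_atomic_inc() insists on interrupts being disabled before performing the non-atomic per-CPU increment. The sketch below illustrates that contrast; it assumes a definition along the lines of kernel/lockdep_internals.h of that era and is not reproduced verbatim from the tree.

/* Sketch only, not the verbatim kernel/lockdep_internals.h definition. */
#include <linux/percpu.h>
#include <linux/irqflags.h>
#include <linux/bug.h>

/* lockdep keeps its statistics in a per-CPU structure. */
DECLARE_PER_CPU(struct lockdep_stats, lockdep_stats);

/*
 * Checked increment: warns unless irqs are disabled, because the plain
 * per-CPU add is not safe against an interrupt on the same CPU hitting
 * the same counter mid-update. This is the check that fired in the
 * report above when the path was entered with irqs already enabled.
 */
#define debug_atomic_inc(ptr)					\
do {								\
	WARN_ON_ONCE(!irqs_disabled());				\
	__this_cpu_inc(lockdep_stats.ptr);			\
} while (0)

The patch sidesteps that check for this one counter by calling this_cpu_inc() directly: the increment can still occasionally lose a hit to a racing interrupt, but for a debugging statistic the changelog judges that acceptable.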