author     Steven Rostedt <srostedt@redhat.com>   2009-04-20 16:16:11 -0400
committer  Steven Rostedt <rostedt@goodmis.org>   2009-04-20 16:16:11 -0400
commit     aa18efb2a2f07e1cf062039848e9d369bb358724
tree       e68169b1116a313a8db02e913443a2c2899f3bf2 /kernel/trace/ring_buffer.c
parent     e395898e98119085f666febbc7b631dd69bc637f
tracing: use recursive counter over irq level
Although using the irq level (hardirq_count, softirq_count and in_nmi)
was nice for detecting bad recursion right away, the counters are not
updated atomically with respect to the interrupts, so the function tracer
might trigger the test from an interrupt handler before hardirq_count
is updated. This triggers a false warning.
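
To make the race concrete, here is a minimal userspace sketch of the old
per-level bitmask scheme. The names (irq_nesting, recursion_bits,
tracer_hook) are illustrative stand-ins, not kernel symbols; in the kernel
the level comes from the preempt count via hardirq_count()/softirq_count()/
in_nmi(), and the bits live in current->trace_recursion.

    #include <stdio.h>

    /* Stand-ins for the kernel's irq counters and recursion bits. */
    static unsigned int irq_nesting;
    static unsigned int recursion_bits;

    static int tracer_hook(const char *where)
    {
        unsigned int level = irq_nesting;   /* old scheme: level from the irq counters */

        if (recursion_bits & (1u << level)) {
            printf("recursion warning at %s (level %u)\n", where, level);
            return -1;
        }
        recursion_bits |= 1u << level;      /* matching bit-clear on exit omitted for brevity */
        return 0;
    }

    int main(void)
    {
        /* Task context: the tracer owns the level-0 bit while it runs. */
        tracer_hook("task context");

        /*
         * An interrupt fires here.  A traced function early in the entry
         * path runs before the hardirq counter is bumped, so the hook
         * still computes level 0 and sees its bit set: a false positive.
         */
        tracer_hook("irq entry, counter not yet updated");

        /* Once the counter is updated the handler would use level 1. */
        irq_nesting = 1;
        tracer_hook("irq handler, counter updated");

        return 0;
    }

The second call warns even though nothing is actually recursing, which is
exactly the false positive this patch removes.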
This patch converts the recursion detection to a simple counter.
If the depth reaches 16, the recursion detection triggers.
16 is more than enough for any level of nested interrupts.
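
A minimal sketch of the counter approach, using a single-threaded userspace
stand-in for current->trace_recursion; guard_lock/guard_unlock,
recursion_depth and traced_event are illustrative names, not the kernel's
API:

    #include <stdio.h>

    #define RECURSIVE_DEPTH_LIMIT 16    /* mirrors TRACE_RECURSIVE_DEPTH */

    static int recursion_depth;         /* per task in the kernel (current->trace_recursion) */

    static int guard_lock(void)
    {
        recursion_depth++;
        if (recursion_depth < RECURSIVE_DEPTH_LIMIT)
            return 0;                   /* normal case: nesting is shallow, proceed */

        /* Too deep: something is recursing through the tracer itself. */
        fprintf(stderr, "tracing recursion: depth[%d]\n", recursion_depth);
        return -1;
    }

    static void guard_unlock(void)
    {
        recursion_depth--;
    }

    static void traced_event(int nesting)
    {
        if (guard_lock())
            goto out;                   /* drop the event rather than recurse forever */
        if (nesting)
            traced_event(nesting - 1);  /* simulate nested entry (e.g. an interrupt) */
    out:
        guard_unlock();
    }

    int main(void)
    {
        traced_event(20);               /* nests deeper than the limit: the guard trips once */
        return 0;
    }

Because the counter lives on the task, an interrupt or NMI that nests on the
same task simply pushes the depth higher instead of tripping a per-level bit
check, and unwinding decrements it back down.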
[ Impact: fix false positive trace recursion detection ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Diffstat (limited to 'kernel/trace/ring_buffer.c')
-rw-r--r--  kernel/trace/ring_buffer.c | 45
1 file changed, 16 insertions(+), 29 deletions(-)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index a6997670cc46..7bcfd3e60537 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1481,47 +1481,34 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 }
 
-static int trace_irq_level(void)
-{
-	return (hardirq_count() >> HARDIRQ_SHIFT) +
-		(softirq_count() >> SOFTIRQ_SHIFT) +
-		!!in_nmi();
-}
+#define TRACE_RECURSIVE_DEPTH 16
 
 static int trace_recursive_lock(void)
 {
-	int level;
-
-	level = trace_irq_level();
+	current->trace_recursion++;
 
-	if (unlikely(current->trace_recursion & (1 << level))) {
-		/* Disable all tracing before we do anything else */
-		tracing_off_permanent();
+	if (likely(current->trace_recursion < TRACE_RECURSIVE_DEPTH))
+		return 0;
 
-		printk_once(KERN_WARNING "Tracing recursion: "
-			    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
-			    hardirq_count() >> HARDIRQ_SHIFT,
-			    softirq_count() >> SOFTIRQ_SHIFT,
-			    in_nmi());
+	/* Disable all tracing before we do anything else */
+	tracing_off_permanent();
 
-		WARN_ON_ONCE(1);
-		return -1;
-	}
+	printk_once(KERN_WARNING "Tracing recursion: depth[%d]:"
+		    "HC[%lu]:SC[%lu]:NMI[%lu]\n",
+		    current->trace_recursion,
+		    hardirq_count() >> HARDIRQ_SHIFT,
+		    softirq_count() >> SOFTIRQ_SHIFT,
+		    in_nmi());
 
-	current->trace_recursion |= 1 << level;
-
-	return 0;
+	WARN_ON_ONCE(1);
+	return -1;
 }
 
 static void trace_recursive_unlock(void)
 {
-	int level;
-
-	level = trace_irq_level();
-
-	WARN_ON_ONCE(!current->trace_recursion & (1 << level));
+	WARN_ON_ONCE(!current->trace_recursion);
 
-	current->trace_recursion &= ~(1 << level);
+	current->trace_recursion--;
 }
 
 static DEFINE_PER_CPU(int, rb_need_resched);