author     Steven Rostedt <srostedt@redhat.com>   2009-04-16 21:41:52 -0400
committer  Steven Rostedt <rostedt@goodmis.org>   2009-04-17 16:21:32 -0400
commit     261842b7c9099f56de2eb969c8ad65402d68e00e (patch)
tree       6cdf0b4b5ad188294da67520dc9579a0c67940ac
parent     12acd473d45cf2e40de3782cb2de712e5cd4d715 (diff)
tracing: add same level recursion detection
The tracing infrastructure allows for recursion. That is, an interrupt
may interrupt the act of tracing an event, and that interrupt may very well
perform its own trace. This is a recursive trace, and is fine to do.
The problem arises when there is a bug, and the utility doing the trace
calls something that recurses back into the tracer. This recursion is not
caused by an external event like an interrupt, but by code that is not
expected to recurse. The result could be a lockup.
This patch adds a bitmask to the task structure that keeps track
of the trace recursion. To find the interrupt depth, the following
algorithm is used:
level = hardirq_count() + softirq_count() + in_nmi();
Here, level will be the depth of interrupts and softirqs, and also accounts
for being in an NMI. Then the corresponding bit is set in the recursion bitmask.
If the bit was already set, we know we had a recursion at the same level,
so we warn about it and fail the write to the buffer.
After the data has been committed to the buffer, we clear the bit.
No atomics are needed. The only races are with interrupts, and they reset
the bitmask before returning anyway. A stand-alone sketch of this scheme
follows the sign-off below.
[ Impact: detect same irq level trace recursion ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
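To make the scheme above concrete, here is a minimal user-space C sketch of the
same-level recursion check. It is only an illustration, not the kernel code: the
level argument stands in for the value the patch derives from hardirq_count(),
softirq_count() and in_nmi(); a file-scope variable plays the role of
current->trace_recursion; and trace_event(), its reenter flag and main() are
made-up scaffolding for the demo.

/*
 * Stand-alone sketch of the same-level recursion check described above.
 * Not kernel code: a plain "level" integer stands in for the interrupt
 * level, and a file-scope bitmask stands in for current->trace_recursion.
 */
#include <stdio.h>

static unsigned long trace_recursion;	/* stand-in for current->trace_recursion */

/* Returns 0 on success, -1 if tracing is already active at this level. */
static int trace_recursive_lock(int level)
{
	if (trace_recursion & (1UL << level)) {
		/* Same-level recursion: the real tracer shuts tracing off here. */
		fprintf(stderr, "recursion detected at level %d\n", level);
		return -1;
	}
	trace_recursion |= 1UL << level;
	return 0;
}

static void trace_recursive_unlock(int level)
{
	trace_recursion &= ~(1UL << level);
}

/* trace_event() and its "reenter" flag are made up for the demo. */
static void trace_event(int level, int reenter)
{
	if (trace_recursive_lock(level))
		return;		/* drop the event instead of locking up */

	printf("tracing an event at level %d\n", level);

	if (reenter)		/* simulate the buggy same-level re-entry */
		trace_event(level, 0);

	trace_recursive_unlock(level);
}

int main(void)
{
	trace_event(0, 0);	/* normal trace at level 0: succeeds */
	trace_event(1, 1);	/* re-entry at level 1: inner call is rejected */
	return 0;
}

An interrupt arriving mid-trace would run at a higher level and therefore test
a different bit, which is why legitimate nested traces still go through while
same-level re-entry is caught.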
-rw-r--r--  include/linux/ftrace.h     |  7
-rw-r--r--  include/linux/init_task.h  |  1
-rw-r--r--  include/linux/sched.h      |  4
-rw-r--r--  kernel/trace/ring_buffer.c | 42
4 files changed, 53 insertions(+), 1 deletion(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 97c83e1bc589..39b95c56587e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -488,8 +488,15 @@ static inline int test_tsk_trace_graph(struct task_struct *tsk)
 
 extern int ftrace_dump_on_oops;
 
+#ifdef CONFIG_PREEMPT
+#define INIT_TRACE_RECURSION .trace_recursion = 0,
+#endif
+
 #endif /* CONFIG_TRACING */
 
+#ifndef INIT_TRACE_RECURSION
+#define INIT_TRACE_RECURSION
+#endif
 
 #ifdef CONFIG_HW_BRANCH_TRACER
 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index dcfb93337e9a..6fc218529863 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -187,6 +187,7 @@ extern struct cred init_cred;
 	INIT_TRACE_IRQFLAGS \
 	INIT_LOCKDEP \
 	INIT_FTRACE_GRAPH \
+	INIT_TRACE_RECURSION \
 }
 
 
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b4c38bc8049c..7ede5e490913 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1428,7 +1428,9 @@ struct task_struct {
 #ifdef CONFIG_TRACING
 	/* state flags for use by tracers */
 	unsigned long trace;
-#endif
+	/* bitmask of trace recursion */
+	unsigned long trace_recursion;
+#endif /* CONFIG_TRACING */
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 84a6055f37c9..b421b0ea9112 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1481,6 +1481,40 @@ rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
 	return event;
 }
 
+static int trace_irq_level(void)
+{
+	return hardirq_count() + softirq_count() + in_nmi();
+}
+
+static int trace_recursive_lock(void)
+{
+	int level;
+
+	level = trace_irq_level();
+
+	if (unlikely(current->trace_recursion & (1 << level))) {
+		/* Disable all tracing before we do anything else */
+		tracing_off_permanent();
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	current->trace_recursion |= 1 << level;
+
+	return 0;
+}
+
+static void trace_recursive_unlock(void)
+{
+	int level;
+
+	level = trace_irq_level();
+
+	WARN_ON_ONCE(!current->trace_recursion & (1 << level));
+
+	current->trace_recursion &= ~(1 << level);
+}
+
 static DEFINE_PER_CPU(int, rb_need_resched);
 
 /**
@@ -1514,6 +1548,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	/* If we are tracing schedule, we don't want to recurse */
 	resched = ftrace_preempt_disable();
 
+	if (trace_recursive_lock())
+		goto out_nocheck;
+
 	cpu = raw_smp_processor_id();
 
 	if (!cpumask_test_cpu(cpu, buffer->cpumask))
@@ -1543,6 +1580,9 @@ ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
 	return event;
 
  out:
+	trace_recursive_unlock();
+
+ out_nocheck:
 	ftrace_preempt_enable(resched);
 	return NULL;
 }
@@ -1581,6 +1621,8 @@ int ring_buffer_unlock_commit(struct ring_buffer *buffer,
 
 	rb_commit(cpu_buffer, event);
 
+	trace_recursive_unlock();
+
 	/*
 	 * Only the last preempt count needs to restore preemption.
 	 */