author		Frederic Weisbecker <fweisbec@gmail.com>	2009-03-22 00:04:35 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-03-22 09:06:40 -0400
commit		cf586b61f80229491127d3c57c06ed93c9f530d3 (patch)
tree		3f5e0d9429f32b8ad4f53104a260c8dff349cd9b /kernel/trace
parent		ac199db0189c091f2863312061c0575937f68810 (diff)
tracing/function-graph-tracer: prevent hangs during self-tests
Impact: detect tracing-related hangs
Sometimes, with some configs, the function graph tracer can slow down the
timer interrupt so much that the kernel hangs in an endless loop of timer
interrupt servicing.

As suggested by Ingo, this patch adds a watchdog which stops the selftest
after a defined number of traced functions, permanently disabling this
tracer.

If you want to debug the cause of a function graph tracer hang, you can
pass the ftrace_dump_on_oops kernel parameter to dump the traces once the
hang is detected.
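For example (illustrative only; kernel image path and bootloader syntax
vary):

    # at boot, appended to the kernel command line
    linux /boot/vmlinuz-2.6.29 ... ftrace_dump_on_oops

    # or at runtime, on kernels that expose the matching sysctl
    echo 1 > /proc/sys/kernel/ftrace_dump_on_oops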
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1237694675-23509-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/trace.c		26
-rw-r--r--	kernel/trace/trace_selftest.c	38
2 files changed, 60 insertions(+), 4 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e3dfefe69348..e6fac0ffe6f0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4018,11 +4018,12 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
-void ftrace_dump(void)
+static void __ftrace_dump(bool disable_tracing)
 {
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
+	unsigned int old_userobj;
 	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
@@ -4034,14 +4035,17 @@ void ftrace_dump(void)
 
 	dump_ran = 1;
 
-	/* No turning back! */
 	tracing_off();
-	ftrace_kill();
+
+	if (disable_tracing)
+		ftrace_kill();
 
 	for_each_tracing_cpu(cpu) {
 		atomic_inc(&global_trace.data[cpu]->disabled);
 	}
 
+	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
@@ -4086,10 +4090,26 @@ void ftrace_dump(void)
 	else
 		printk(KERN_TRACE "---------------------------------\n");
 
+	/* Re-enable tracing if requested */
+	if (!disable_tracing) {
+		trace_flags |= old_userobj;
+
+		for_each_tracing_cpu(cpu) {
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		}
+		tracing_on();
+	}
+
  out:
 	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
 }
 
+/* By default: disable tracing after the dump */
+void ftrace_dump(void)
+{
+	__ftrace_dump(true);
+}
+
 __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;
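The change above makes the dump optionally non-destructive: when
disable_tracing is false, the per-cpu disabled counters are decremented
again, tracing is switched back on, and the saved TRACE_ITER_SYM_USEROBJ
bit is restored. A minimal user-space sketch of that save/mask/restore
pattern, with hypothetical names standing in for the kernel symbols (not
the kernel code itself):

#include <stdio.h>

/* Hypothetical stand-ins for trace_flags and TRACE_ITER_SYM_USEROBJ */
#define SYM_USEROBJ 0x1u

static unsigned int flags = SYM_USEROBJ;

static void dump(int disable_tracing)
{
	/* Save only the bit we are about to clear */
	unsigned int old_userobj = flags & SYM_USEROBJ;

	flags &= ~SYM_USEROBJ;		/* don't touch user memory while dumping */
	printf("dumping with flags=%#x\n", flags);

	if (!disable_tracing)
		flags |= old_userobj;	/* non-destructive path: restore the bit */
}

int main(void)
{
	dump(0);	/* selftest-style call: tracing survives the dump */
	printf("after: flags=%#x\n", flags);
	return 0;
}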
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 38856ba78a92..b56dcf7d3566 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -248,6 +248,28 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
 
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/* Maximum number of functions to trace before diagnosing a hang */
+#define GRAPH_MAX_FUNC_TEST	100000000
+
+static void __ftrace_dump(bool disable_tracing);
+static unsigned int graph_hang_thresh;
+
+/* Wrap the real function entry probe to avoid possible hanging */
+static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
+{
+	/* This is harmlessly racy, we want to approximately detect a hang */
+	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
+		ftrace_graph_stop();
+		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
+		if (ftrace_dump_on_oops)
+			__ftrace_dump(false);
+		return 0;
+	}
+
+	return trace_graph_entry(trace);
+}
+
 /*
  * Pretty much the same than for the function tracer from which the selftest
  * has been borrowed.
@@ -259,15 +281,29 @@ trace_selftest_startup_function_graph(struct tracer *trace,
 	int ret;
 	unsigned long count;
 
-	ret = tracer_init(trace, tr);
+	/*
+	 * Simulate the init() callback but we attach a watchdog callback
+	 * to detect and recover from possible hangs
+	 */
+	tracing_reset_online_cpus(tr);
+	ret = register_ftrace_graph(&trace_graph_return,
+				    &trace_graph_entry_watchdog);
 	if (ret) {
 		warn_failed_init_tracer(trace, ret);
 		goto out;
 	}
+	tracing_start_cmdline_record();
 
 	/* Sleep for a 1/10 of a second */
 	msleep(100);
 
+	/* Have we just recovered from a hang? */
+	if (graph_hang_thresh > GRAPH_MAX_FUNC_TEST) {
+		trace->reset(tr);
+		ret = -1;
+		goto out;
+	}
+
 	tracing_stop();
 
 	/* check the trace buffer */
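The watchdog probe is deliberately unsynchronized: graph_hang_thresh is
bumped from every traced function entry without locking, since an
approximate count is enough to spot a runaway selftest. A self-contained
user-space sketch of the same idea, with hypothetical names and a tiny
threshold in place of GRAPH_MAX_FUNC_TEST (not the kernel code itself):

#include <stdio.h>

#define MAX_FUNC_TEST 1000u	/* tiny threshold so the demo trips quickly */

static unsigned int hang_thresh;	/* racy by design: an estimate suffices */

/* Stand-in for trace_graph_entry_watchdog(): count entries, bail past the cap */
static int entry_watchdog(void)
{
	if (++hang_thresh > MAX_FUNC_TEST) {
		if (hang_thresh == MAX_FUNC_TEST + 1)	/* report only once */
			fprintf(stderr, "watchdog: possible hang detected\n");
		return 0;	/* refuse to trace; the real code calls ftrace_graph_stop() */
	}
	return 1;	/* the real probe would call trace_graph_entry() here */
}

int main(void)
{
	/* Simulate a runaway stream of function entries */
	for (int i = 0; i < 2000; i++)
		entry_watchdog();

	/* Mirrors the post-msleep() check in the selftest */
	if (hang_thresh > MAX_FUNC_TEST)
		fprintf(stderr, "selftest: recovered from a hang, test fails\n");
	return 0;
}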