author    Frederic Weisbecker <fweisbec@gmail.com>  2009-03-22 00:04:35 -0400
committer Ingo Molnar <mingo@elte.hu>  2009-03-22 09:06:40 -0400
commit    cf586b61f80229491127d3c57c06ed93c9f530d3 (patch)
tree      3f5e0d9429f32b8ad4f53104a260c8dff349cd9b /kernel/trace/trace.c
parent    ac199db0189c091f2863312061c0575937f68810 (diff)
tracing/function-graph-tracer: prevent hangs during self-tests
Impact: detect tracing-related hangs

Sometimes, with some configs, the function graph tracer can make the
timer interrupt much too slow, hanging the kernel in an endless loop
of timer interrupt servicing.

As suggested by Ingo, this patch brings in a watchdog which stops the
selftest after a defined number of traced functions, permanently
disabling this tracer.

For those who want to debug the cause of the function graph trace
hang, you can pass the ftrace_dump_on_oops kernel parameter to dump
the traces after this hang detection.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Steven Rostedt <rostedt@goodmis.org>
LKML-Reference: <1237694675-23509-1-git-send-email-fweisbec@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
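The watchdog itself lives in the selftest code, outside this
diffstat-limited view, so only the ftrace_dump() refactoring is shown
below. As a rough sketch of the watchdog idea — the threshold value
and the graph_hang_thresh / trace_graph_entry_watchdog names here are
illustrative, not taken from this page — the selftest wraps the
graph-entry callback in a counter that trips after a fixed number of
traced functions:

/*
 * Sketch of the selftest watchdog; names and threshold are
 * assumptions, since the actual hunk is not part of this diff.
 */
#define GRAPH_MAX_FUNC_TEST	100000000

static unsigned int graph_hang_thresh;

static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace)
{
	/* Harmlessly racy: only an approximate hang detection is needed */
	if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) {
		ftrace_graph_stop();
		printk(KERN_WARNING "BUG: Function graph tracer hang!\n");
		/* Dump the buffers without killing tracing for good */
		if (ftrace_dump_on_oops)
			__ftrace_dump(false);
		return 0;
	}

	return trace_graph_entry(trace);
}

This is also why the patch below splits ftrace_dump() into
__ftrace_dump(bool disable_tracing): the watchdog wants to dump the
trace buffers without permanently disabling ftrace, so it can pass
false, while the public ftrace_dump() keeps the old semantics by
passing true.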
Diffstat (limited to 'kernel/trace/trace.c')
-rw-r--r--  kernel/trace/trace.c  26
1 file changed, 23 insertions(+), 3 deletions(-)
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index e3dfefe69348..e6fac0ffe6f0 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4018,11 +4018,12 @@ trace_printk_seq(struct trace_seq *s)
 	trace_seq_init(s);
 }
 
-void ftrace_dump(void)
+static void __ftrace_dump(bool disable_tracing)
 {
 	static DEFINE_SPINLOCK(ftrace_dump_lock);
 	/* use static because iter can be a bit big for the stack */
 	static struct trace_iterator iter;
+	unsigned int old_userobj;
 	static int dump_ran;
 	unsigned long flags;
 	int cnt = 0, cpu;
@@ -4034,14 +4035,17 @@ void ftrace_dump(void)
 
 	dump_ran = 1;
 
-	/* No turning back! */
 	tracing_off();
-	ftrace_kill();
+
+	if (disable_tracing)
+		ftrace_kill();
 
 	for_each_tracing_cpu(cpu) {
 		atomic_inc(&global_trace.data[cpu]->disabled);
 	}
 
+	old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;
+
 	/* don't look at user memory in panic mode */
 	trace_flags &= ~TRACE_ITER_SYM_USEROBJ;
 
@@ -4086,10 +4090,26 @@ void ftrace_dump(void)
 	else
 		printk(KERN_TRACE "---------------------------------\n");
 
+	/* Re-enable tracing if requested */
+	if (!disable_tracing) {
+		trace_flags |= old_userobj;
+
+		for_each_tracing_cpu(cpu) {
+			atomic_dec(&global_trace.data[cpu]->disabled);
+		}
+		tracing_on();
+	}
+
  out:
 	spin_unlock_irqrestore(&ftrace_dump_lock, flags);
 }
 
+/* By default: disable tracing after the dump */
+void ftrace_dump(void)
+{
+	__ftrace_dump(true);
+}
+
 __init static int tracer_alloc_buffers(void)
 {
 	struct trace_array_cpu *data;