aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/trace/ftrace.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2008-12-05 21:43:41 -0500
committerIngo Molnar <mingo@elte.hu>2008-12-08 09:11:45 -0500
commit380c4b1411ccd6885f92b2c8ceb08433a720f44e (patch)
treef3b571e1caff2de3e6b4cace87c2b1ec332667d7 /kernel/trace/ftrace.c
parent8e1b82e0866befaa0b2920be296c6e4c3fc7f422 (diff)
tracing/function-graph-tracer: append the tracing_graph_flag
Impact: Provide a way to pause the function graph tracer As suggested by Steven Rostedt, the previous patch that prevented spinlock function tracing shouldn't use the raw_spinlock to fix it. It's much better to follow lockdep with a normal spinlock, so this patch adds a new flag for each task to make the function graph tracer able to be paused. We can also send an ftrace_printk without worrying about the irrelevant traced spinlock during insertion. Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--kernel/trace/ftrace.c2
1 files changed, 2 insertions, 0 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 2971fe48f55e..a12f80efceaa 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1998,6 +1998,7 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
1998 /* Make sure IRQs see the -1 first: */ 1998 /* Make sure IRQs see the -1 first: */
1999 barrier(); 1999 barrier();
2000 t->ret_stack = ret_stack_list[start++]; 2000 t->ret_stack = ret_stack_list[start++];
2001 atomic_set(&t->tracing_graph_pause, 0);
2001 atomic_set(&t->trace_overrun, 0); 2002 atomic_set(&t->trace_overrun, 0);
2002 } 2003 }
2003 } while_each_thread(g, t); 2004 } while_each_thread(g, t);
@@ -2077,6 +2078,7 @@ void ftrace_graph_init_task(struct task_struct *t)
2077 if (!t->ret_stack) 2078 if (!t->ret_stack)
2078 return; 2079 return;
2079 t->curr_ret_stack = -1; 2080 t->curr_ret_stack = -1;
2081 atomic_set(&t->tracing_graph_pause, 0);
2080 atomic_set(&t->trace_overrun, 0); 2082 atomic_set(&t->trace_overrun, 0);
2081 } else 2083 } else
2082 t->ret_stack = NULL; 2084 t->ret_stack = NULL;