author     Steven Rostedt <srostedt@redhat.com>    2009-06-02 12:26:07 -0400
committer  Steven Rostedt <rostedt@goodmis.org>    2009-06-02 14:41:50 -0400
commit     82310a3272d5a2a7652f5649ad8a55f58c8f74d9 (patch)
tree       c909c7a3f1fd3430f64be1d4aa8aff84fdf45b04
parent     179c498ae2998461fe436437a74dc29036fc7dcc (diff)
function-graph: enable the stack after initialization of other variables
The function graph tracer checks whether the task_struct has its ret_stack
defined to know if it is safe to use. The initialization is done for all
existing tasks by a single process, but idle tasks go through the same
initialization path as newly created tasks.
If an interrupt happens on an idle task that has just had its ret_stack
created, but before the rest of the initialization has taken place, then
we can corrupt the return addresses of the traced functions.
This patch moves the setting of the task_struct's ret_stack to after
the other variables have been initialized.
[ Impact: prevent kernel panic on idle task when starting function graph ]
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
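The fix follows the classic publish/consume ordering: initialize every field,
issue a write barrier, and only then publish the pointer that readers test.
Below is a minimal user-space sketch of that ordering, assuming GCC's
__atomic_thread_fence() builtin and pthreads as stand-ins for the kernel's
smp_wmb()/smp_rmb(); struct task_like, writer(), and reader() are
illustrative names, not kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

/* Hypothetical stand-in for the ret_stack fields of task_struct. */
struct task_like {
	int   curr_ret_stack;	/* initialized before publication */
	long *ret_stack;	/* published last; readers test this */
};

static struct task_like task;

/* Plays the role of ftrace_graph_init_task(): init first, publish last. */
static void *writer(void *arg)
{
	long *stack = calloc(16, sizeof(*stack));

	(void)arg;
	if (!stack)
		return NULL;
	task.curr_ret_stack = -1;			/* set up the other fields */
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* ~ smp_wmb() */
	task.ret_stack = stack;				/* ...then expose the stack */
	return NULL;
}

/* Plays the role of ftrace_push_return_trace(): test first, then read. */
static void *reader(void *arg)
{
	(void)arg;
	if (!task.ret_stack)				/* not published yet */
		return NULL;
	__atomic_thread_fence(__ATOMIC_ACQUIRE);	/* ~ smp_rmb() */
	/* Safe: curr_ret_stack was initialized before publication. */
	printf("curr_ret_stack = %d\n", task.curr_ret_stack);
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_create(&r, NULL, reader, NULL);
	pthread_join(r, NULL);
	free(task.ret_stack);
	return 0;
}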
-rw-r--r--  kernel/trace/ftrace.c                 | 9
-rw-r--r--  kernel/trace/trace_functions_graph.c  | 6
2 files changed, 13 insertions(+), 2 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ebff62ef40be..20e066065eb3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2739,15 +2739,20 @@ void unregister_ftrace_graph(void)
 void ftrace_graph_init_task(struct task_struct *t)
 {
 	if (atomic_read(&ftrace_graph_active)) {
-		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
-		if (!t->ret_stack)
+		if (!ret_stack)
 			return;
 		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->ftrace_timestamp = 0;
+		/* make curr_ret_stack visable before we add the ret_stack */
+		smp_wmb();
+		t->ret_stack = ret_stack;
 	} else
 		t->ret_stack = NULL;
 }
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d28687e7b3a7..baeb5fe36108 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -65,6 +65,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	if (!current->ret_stack)
 		return -EBUSY;
 
+	/*
+	 * We must make sure the ret_stack is tested before we read
+	 * anything else.
+	 */
+	smp_rmb();
+
 	/* The return trace stack is full */
 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 		atomic_inc(&current->trace_overrun);
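The two barriers pair with each other: smp_wmb() in ftrace_graph_init_task()
orders the initialization of curr_ret_stack and the other fields before the
store to ret_stack, while smp_rmb() in ftrace_push_return_trace() orders the
!current->ret_stack test before any subsequent reads of those fields. Neither
barrier is sufficient on its own, since each only constrains the ordering seen
from one side of the pairing.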