path: root/kernel/trace
author		Steven Rostedt <srostedt@redhat.com>	2009-06-02 14:01:19 -0400
committer	Steven Rostedt <rostedt@goodmis.org>	2009-06-02 14:42:17 -0400
commit		26c01624a2a40f8a4ddf6449b65c9b1c418d0e72
tree		eeff81aa0fa56ba1f2c180d4ec6e64cb31af898e /kernel/trace
parent		82310a3272d5a2a7652f5649ad8a55f58c8f74d9
function-graph: add memory barriers for accessing task's ret_stack
The code that handles the ret_stack allocation for every task assumes that only an interrupt can cause issues (even though interrupts are disabled).

In reality, the code is allocating the ret_stack for tasks that may be running on other CPUs, and there are no memory barriers sufficient to handle this case.

[ Impact: prevent crash due to use of uninitialized ret_stack variables ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
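The race being fixed is the usual initialize-then-publish ordering problem: a task already running on another CPU may test t->ret_stack, see the freshly assigned pointer, and then read t->curr_ret_stack before the store of -1 has become visible. The kernel-style sketch below illustrates the pattern; the names (graph_stack, graph_index, graph_publish, graph_consume) are hypothetical stand-ins, not the actual ftrace symbols.

	/* Hypothetical sketch of the publish/consume ordering, not ftrace code. */
	struct ret_entry {
		unsigned long func;
	};

	static struct ret_entry *graph_stack;	/* plays the role of t->ret_stack      */
	static int graph_index;			/* plays the role of t->curr_ret_stack */

	static void graph_publish(struct ret_entry *stack)
	{
		graph_index = -1;	/* initialize the index first               */
		smp_wmb();		/* order the init before the pointer store  */
		graph_stack = stack;	/* other CPUs may now see a non-NULL stack  */
	}

	static int graph_consume(void)
	{
		if (!graph_stack)	/* not published yet, nothing to trace      */
			return 0;
		smp_rmb();		/* pairs with the writer's smp_wmb()        */
		return graph_index;	/* reads -1, never an uninitialized value   */
	}

Note that the guarantee only holds if the consumer side also orders its reads (a paired read barrier or an equivalent dependency); the sketch's smp_rmb() stands in for that, while the patch itself adds the writer-side ordering.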
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/ftrace.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 20e066065eb3..1664d3f33d38 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2580,12 +2580,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 
 		if (t->ret_stack == NULL) {
-			t->curr_ret_stack = -1;
-			/* Make sure IRQs see the -1 first: */
-			barrier();
-			t->ret_stack = ret_stack_list[start++];
 			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
+			t->curr_ret_stack = -1;
+			/* Make sure the tasks see the -1 first: */
+			smp_wmb();
+			t->ret_stack = ret_stack_list[start++];
 		}
 	} while_each_thread(g, t);
 
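Note the switch from barrier() to smp_wmb(): barrier() only constrains the compiler on the local CPU, which was adequate for the interrupt-only scenario the old comment described, but it emits no hardware ordering, so a task running on another CPU could still observe the stores out of order. smp_wmb() orders the stores against each other on SMP (and degrades to a plain compiler barrier on uniprocessor builds), which is what the cross-CPU case requires.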