-rw-r--r--   kernel/trace/ftrace.c                   9
-rw-r--r--   kernel/trace/trace_functions_graph.c    6
2 files changed, 13 insertions, 2 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index ebff62ef40be..20e066065eb3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2739,15 +2739,20 @@ void unregister_ftrace_graph(void)
 void ftrace_graph_init_task(struct task_struct *t)
 {
         if (atomic_read(&ftrace_graph_active)) {
-                t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+                struct ftrace_ret_stack *ret_stack;
+
+                ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
                                 * sizeof(struct ftrace_ret_stack),
                                 GFP_KERNEL);
-                if (!t->ret_stack)
+                if (!ret_stack)
                         return;
                 t->curr_ret_stack = -1;
                 atomic_set(&t->tracing_graph_pause, 0);
                 atomic_set(&t->trace_overrun, 0);
                 t->ftrace_timestamp = 0;
+                /* make curr_ret_stack visable before we add the ret_stack */
+                smp_wmb();
+                t->ret_stack = ret_stack;
         } else
                 t->ret_stack = NULL;
 }
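The ftrace.c hunk is the write side of a barrier pairing: the task's graph-trace state is set up in a local ret_stack buffer, smp_wmb() orders those initializing stores, and only then is the buffer published through t->ret_stack. A minimal user-space sketch of that publication pattern, assuming made-up demo_* names and __atomic_thread_fence() as a stand-in for the kernel barriers:

/* Sketch only: publish fully initialized state behind a pointer store. */
#include <stdlib.h>

struct demo_task {
        int   curr_ret_stack;
        void *ret_stack;        /* stays NULL until everything else is ready */
};

/*
 * User-space stand-ins for smp_wmb()/smp_rmb(); plain loads/stores plus
 * fences mirror the kernel idiom rather than strict C11 atomics.
 */
#define demo_wmb()      __atomic_thread_fence(__ATOMIC_RELEASE)
#define demo_rmb()      __atomic_thread_fence(__ATOMIC_ACQUIRE)

static void demo_init(struct demo_task *t, size_t depth, size_t entry_size)
{
        void *stack = malloc(depth * entry_size);

        if (!stack)
                return;
        t->curr_ret_stack = -1; /* initialize the state first ...            */
        demo_wmb();             /* ... and order it before the next store    */
        t->ret_stack = stack;   /* a reader that sees this sees the init too */
}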
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d28687e7b3a7..baeb5fe36108 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -65,6 +65,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
         if (!current->ret_stack)
                 return -EBUSY;
 
+        /*
+         * We must make sure the ret_stack is tested before we read
+         * anything else.
+         */
+        smp_rmb();
+
         /* The return trace stack is full */
         if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                 atomic_inc(&current->trace_overrun);
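The smp_rmb() added above is the matching read side: the NULL test of current->ret_stack must be ordered before the later reads of curr_ret_stack and the other fields, so a reader that observes the published pointer also observes the initialized state. Continuing the illustrative sketch from the first hunk (demo_rmb() is defined there):

/* Sketch only: consume side pairing with demo_init(). */
static int demo_push(struct demo_task *t, int depth_limit)
{
        if (!t->ret_stack)      /* test the published pointer first       */
                return -1;
        demo_rmb();             /* order that load before the loads below */
        if (t->curr_ret_stack == depth_limit - 1)
                return -1;      /* return stack is full                   */
        return ++t->curr_ret_stack;
}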