about summary refs log tree commit diff stats
path: root/kernel/trace/ftrace.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r-- kernel/trace/ftrace.c | 29
1 files changed, 19 insertions, 10 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d6973dfadb36..bb60732ade0c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3218,12 +3218,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3218 } 3218 }
3219 3219
3220 if (t->ret_stack == NULL) { 3220 if (t->ret_stack == NULL) {
3221 t->curr_ret_stack = -1;
3222 /* Make sure IRQs see the -1 first: */
3223 barrier();
3224 t->ret_stack = ret_stack_list[start++];
3225 atomic_set(&t->tracing_graph_pause, 0); 3221 atomic_set(&t->tracing_graph_pause, 0);
3226 atomic_set(&t->trace_overrun, 0); 3222 atomic_set(&t->trace_overrun, 0);
3223 t->curr_ret_stack = -1;
3224 /* Make sure the tasks see the -1 first: */
3225 smp_wmb();
3226 t->ret_stack = ret_stack_list[start++];
3227 } 3227 }
3228 } while_each_thread(g, t); 3228 } while_each_thread(g, t);
3229 3229
@@ -3281,8 +3281,10 @@ static int start_graph_tracing(void)
3281 return -ENOMEM; 3281 return -ENOMEM;
3282 3282
3283 /* The cpu_boot init_task->ret_stack will never be freed */ 3283 /* The cpu_boot init_task->ret_stack will never be freed */
3284 for_each_online_cpu(cpu) 3284 for_each_online_cpu(cpu) {
3285 ftrace_graph_init_task(idle_task(cpu)); 3285 if (!idle_task(cpu)->ret_stack)
3286 ftrace_graph_init_task(idle_task(cpu));
3287 }
3286 3288
3287 do { 3289 do {
3288 ret = alloc_retstack_tasklist(ret_stack_list); 3290 ret = alloc_retstack_tasklist(ret_stack_list);
@@ -3374,18 +3376,25 @@ void unregister_ftrace_graph(void)
3374/* Allocate a return stack for newly created task */ 3376/* Allocate a return stack for newly created task */
3375void ftrace_graph_init_task(struct task_struct *t) 3377void ftrace_graph_init_task(struct task_struct *t)
3376{ 3378{
3379 /* Make sure we do not use the parent ret_stack */
3380 t->ret_stack = NULL;
3381
3377 if (ftrace_graph_active) { 3382 if (ftrace_graph_active) {
3378 t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH 3383 struct ftrace_ret_stack *ret_stack;
3384
3385 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3379 * sizeof(struct ftrace_ret_stack), 3386 * sizeof(struct ftrace_ret_stack),
3380 GFP_KERNEL); 3387 GFP_KERNEL);
3381 if (!t->ret_stack) 3388 if (!ret_stack)
3382 return; 3389 return;
3383 t->curr_ret_stack = -1; 3390 t->curr_ret_stack = -1;
3384 atomic_set(&t->tracing_graph_pause, 0); 3391 atomic_set(&t->tracing_graph_pause, 0);
3385 atomic_set(&t->trace_overrun, 0); 3392 atomic_set(&t->trace_overrun, 0);
3386 t->ftrace_timestamp = 0; 3393 t->ftrace_timestamp = 0;
3387 } else 3394 /* make curr_ret_stack visable before we add the ret_stack */
3388 t->ret_stack = NULL; 3395 smp_wmb();
3396 t->ret_stack = ret_stack;
3397 }
3389} 3398}
3390 3399
3391void ftrace_graph_exit_task(struct task_struct *t) 3400void ftrace_graph_exit_task(struct task_struct *t)