Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c | 52
1 file changed, 45 insertions, 7 deletions
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f3dadae83883..888b611897d3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3328,7 +3328,7 @@ static int start_graph_tracing(void)
 	/* The cpu_boot init_task->ret_stack will never be freed */
 	for_each_online_cpu(cpu) {
 		if (!idle_task(cpu)->ret_stack)
-			ftrace_graph_init_task(idle_task(cpu));
+			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
 	}
 
 	do {
@@ -3418,6 +3418,49 @@ void unregister_ftrace_graph(void)
 	mutex_unlock(&ftrace_lock);
 }
 
+static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
+
+static void
+graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+{
+	atomic_set(&t->tracing_graph_pause, 0);
+	atomic_set(&t->trace_overrun, 0);
+	t->ftrace_timestamp = 0;
+	/* make curr_ret_stack visable before we add the ret_stack */
+	smp_wmb();
+	t->ret_stack = ret_stack;
+}
+
+/*
+ * Allocate a return stack for the idle task. May be the first
+ * time through, or it may be done by CPU hotplug online.
+ */
+void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
+{
+	t->curr_ret_stack = -1;
+	/*
+	 * The idle task has no parent, it either has its own
+	 * stack or no stack at all.
+	 */
+	if (t->ret_stack)
+		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
+
+	if (ftrace_graph_active) {
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = per_cpu(idle_ret_stack, cpu);
+		if (!ret_stack) {
+			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+					* sizeof(struct ftrace_ret_stack),
+					GFP_KERNEL);
+			if (!ret_stack)
+				return;
+			per_cpu(idle_ret_stack, cpu) = ret_stack;
+		}
+		graph_init_task(t, ret_stack);
+	}
+}
+
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
@@ -3433,12 +3476,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 				GFP_KERNEL);
 		if (!ret_stack)
 			return;
-		atomic_set(&t->tracing_graph_pause, 0);
-		atomic_set(&t->trace_overrun, 0);
-		t->ftrace_timestamp = 0;
-		/* make curr_ret_stack visable before we add the ret_stack */
-		smp_wmb();
-		t->ret_stack = ret_stack;
+		graph_init_task(t, ret_stack);
 	}
 }
 
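For context, the new entry point is meant to be called wherever an idle task is given its return stack, both from start_graph_tracing() (first hunk above) and from the CPU-online path. The sketch below is illustrative only and not part of this commit: the callback name and its wiring are assumed, while ftrace_graph_init_idle_task() and idle_task() are taken from the patch.

/* Hypothetical caller, not from this commit: CPU bring-up path. */
static void ftrace_graph_cpu_online(int cpu)
{
	/*
	 * Safe to call on every online: the helper reuses the per-CPU
	 * idle_ret_stack if one was already allocated, so repeated
	 * hotplug cycles no longer allocate (and leak) a new stack.
	 */
	ftrace_graph_init_idle_task(idle_task(cpu), cpu);
}

The per-CPU idle_ret_stack cache is the heart of the change: the idle task's return stack is allocated at most once per CPU and handed back on later calls, instead of being overwritten by a fresh kmalloc() each time the graph tracer reinitializes the idle task.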