-rw-r--r--	include/linux/ftrace.h	|  2
-rw-r--r--	kernel/sched.c		|  2
-rw-r--r--	kernel/trace/ftrace.c	| 52
3 files changed, 48 insertions(+), 8 deletions(-)
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dcd6a7c3a435..ca29e03c1fac 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void);
 
 extern void ftrace_graph_init_task(struct task_struct *t);
 extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
 
 static inline int task_curr_ret_stack(struct task_struct *t)
 {
@@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void)
 
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
 
 static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
			trace_func_graph_ent_t entryfunc)
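
Note: the first header hunk declares the new helper for CONFIG_FUNCTION_GRAPH_TRACER builds, and the second adds a matching empty static inline stub next to the existing ftrace_graph_init_task()/ftrace_graph_exit_task() stubs, so the new call site below still compiles when the graph tracer is configured out.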
diff --git a/kernel/sched.c b/kernel/sched.c
index 18d38e4ec7ba..fbe86cb04b61 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5571,7 +5571,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
	 * The idle tasks have their own, simple scheduling class:
	 */
	idle->sched_class = &idle_sched_class;
-	ftrace_graph_init_task(idle);
+	ftrace_graph_init_idle_task(idle, cpu);
 }
 
 /*
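
Note: init_idle() runs not only at boot but each time a CPU is brought online, so the old ftrace_graph_init_task() call would set up a brand-new return stack for the idle task on every hotplug cycle. Passing the cpu to the new ftrace_graph_init_idle_task() lets the tracer reuse a per-CPU cached stack instead (see the new helper in kernel/trace/ftrace.c below).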
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f3dadae83883..888b611897d3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3328,7 +3328,7 @@ static int start_graph_tracing(void)
	/* The cpu_boot init_task->ret_stack will never be freed */
	for_each_online_cpu(cpu) {
		if (!idle_task(cpu)->ret_stack)
-			ftrace_graph_init_task(idle_task(cpu));
+			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
	}
 
	do {
@@ -3418,6 +3418,49 @@ void unregister_ftrace_graph(void)
	mutex_unlock(&ftrace_lock);
 }
 
+static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
+
+static void
+graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+{
+	atomic_set(&t->tracing_graph_pause, 0);
+	atomic_set(&t->trace_overrun, 0);
+	t->ftrace_timestamp = 0;
+	/* make curr_ret_stack visable before we add the ret_stack */
+	smp_wmb();
+	t->ret_stack = ret_stack;
+}
+
+/*
+ * Allocate a return stack for the idle task. May be the first
+ * time through, or it may be done by CPU hotplug online.
+ */
+void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
+{
+	t->curr_ret_stack = -1;
+	/*
+	 * The idle task has no parent, it either has its own
+	 * stack or no stack at all.
+	 */
+	if (t->ret_stack)
+		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
+
+	if (ftrace_graph_active) {
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = per_cpu(idle_ret_stack, cpu);
+		if (!ret_stack) {
+			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+					* sizeof(struct ftrace_ret_stack),
+					GFP_KERNEL);
+			if (!ret_stack)
+				return;
+			per_cpu(idle_ret_stack, cpu) = ret_stack;
+		}
+		graph_init_task(t, ret_stack);
+	}
+}
+
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
@@ -3433,12 +3476,7 @@ void ftrace_graph_init_task(struct task_struct *t)
				GFP_KERNEL);
		if (!ret_stack)
			return;
-		atomic_set(&t->tracing_graph_pause, 0);
-		atomic_set(&t->trace_overrun, 0);
-		t->ftrace_timestamp = 0;
-		/* make curr_ret_stack visable before we add the ret_stack */
-		smp_wmb();
-		t->ret_stack = ret_stack;
+		graph_init_task(t, ret_stack);
 	}
 }
 
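
Taken together, the change stops the graph tracer from handing a CPU's idle task a freshly kmalloc'd ret_stack on every hotplug online (the previous stack was never freed); instead one stack per CPU is allocated on first use, cached in the new per-CPU idle_ret_stack pointer, and reused on later onlines. For readers who want the shape of that fix outside the kernel tree, here is a minimal userspace sketch of the same allocate-once, per-CPU caching idea. All identifiers in it (MAX_CPUS, RETFUNC_DEPTH, idle_stack_for_cpu, struct ret_stack_entry) are invented for illustration and are not the kernel's names or API.

/*
 * Userspace analogue of the per-CPU caching pattern in the patch:
 * allocate the idle task's buffer the first time a CPU comes online,
 * cache the pointer per CPU, and hand the same buffer back on every
 * later online event instead of leaking the old one.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAX_CPUS       8
#define RETFUNC_DEPTH 50	/* stands in for FTRACE_RETFUNC_DEPTH */

struct ret_stack_entry {
	unsigned long ret;
	unsigned long func;
};

/* one cached allocation per CPU, like DEFINE_PER_CPU(..., idle_ret_stack) */
static struct ret_stack_entry *idle_ret_stack_cache[MAX_CPUS];

/* called every time "cpu" is brought online */
static struct ret_stack_entry *idle_stack_for_cpu(int cpu)
{
	struct ret_stack_entry *stack = idle_ret_stack_cache[cpu];

	if (!stack) {
		/* first online of this CPU: allocate and remember it */
		stack = calloc(RETFUNC_DEPTH, sizeof(*stack));
		if (!stack)
			return NULL;
		idle_ret_stack_cache[cpu] = stack;
	}
	/* later onlines reuse the cached buffer: no leak, no realloc */
	return stack;
}

int main(void)
{
	/* simulate CPU 1 going offline/online three times */
	for (int i = 0; i < 3; i++) {
		struct ret_stack_entry *s = idle_stack_for_cpu(1);
		printf("online #%d: stack = %p\n", i + 1, (void *)s);
	}
	return 0;
}

Running the sketch prints the same pointer for all three "online" events, which is exactly the behaviour the per-CPU idle_ret_stack cache gives the real idle task.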