path: root/kernel/trace/ftrace.c
Diffstat (limited to 'kernel/trace/ftrace.c')
-rw-r--r--	kernel/trace/ftrace.c	57
1 file changed, 47 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f3dadae83883..ee24fa1935ac 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1268,7 +1268,7 @@ static int ftrace_update_code(struct module *mod)
 		p->flags = 0L;
 
 		/*
-		 * Do the initial record convertion from mcount jump
+		 * Do the initial record conversion from mcount jump
 		 * to the NOP instructions.
 		 */
 		if (!ftrace_code_disable(mod, p)) {
@@ -1467,7 +1467,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 		return t_hash_next(m, pos);
 
 	(*pos)++;
-	iter->pos = *pos;
+	iter->pos = iter->func_pos = *pos;
 
 	if (iter->flags & FTRACE_ITER_PRINTALL)
 		return t_hash_start(m, pos);
@@ -1502,7 +1502,6 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
 	if (!rec)
 		return t_hash_start(m, pos);
 
-	iter->func_pos = *pos;
 	iter->func = rec;
 
 	return iter;
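The two t_next() hunks above move the iter->func_pos update to the top of the function, so the cursor stays in sync with *pos on every call, including the early returns into the hash-entry section (previously func_pos was only set on the path that found a function record). Below is a minimal userspace sketch of that two-cursor iterator pattern; iter_state and the fake record table are hypothetical, and only the cursor handling mirrors the one-line fix.

#include <stdio.h>

struct iter_state {
	long pos;       /* overall sequence position */
	long func_pos;  /* position where function records end */
};

/* Advance the iterator: update both cursors up front, as the patch
 * does, so func_pos is valid even on paths that leave the
 * function-record section early. */
static const char *fake_next(struct iter_state *it, long *pos)
{
	static const char *funcs[] = { "do_fork", "schedule", NULL };

	(*pos)++;
	it->pos = it->func_pos = *pos;   /* the moved assignment */

	if (funcs[*pos - 1] == NULL)
		return NULL;             /* fall through to hash entries */
	return funcs[*pos - 1];
}

int main(void)
{
	struct iter_state it = { 0, 0 };
	long pos = 0;
	const char *f;

	while ((f = fake_next(&it, &pos)))
		printf("%ld: %s\n", it.pos, f);
	printf("function records end at func_pos=%ld\n", it.func_pos);
	return 0;
}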
@@ -3328,7 +3327,7 @@ static int start_graph_tracing(void)
 	/* The cpu_boot init_task->ret_stack will never be freed */
 	for_each_online_cpu(cpu) {
 		if (!idle_task(cpu)->ret_stack)
-			ftrace_graph_init_task(idle_task(cpu));
+			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
 	}
 
 	do {
@@ -3418,6 +3417,49 @@ void unregister_ftrace_graph(void)
 	mutex_unlock(&ftrace_lock);
 }
 
+static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
+
+static void
+graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+{
+	atomic_set(&t->tracing_graph_pause, 0);
+	atomic_set(&t->trace_overrun, 0);
+	t->ftrace_timestamp = 0;
+	/* make curr_ret_stack visible before we add the ret_stack */
+	smp_wmb();
+	t->ret_stack = ret_stack;
+}
+
+/*
+ * Allocate a return stack for the idle task. May be the first
+ * time through, or it may be done by CPU hotplug online.
+ */
+void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
+{
+	t->curr_ret_stack = -1;
+	/*
+	 * The idle task has no parent, it either has its own
+	 * stack or no stack at all.
+	 */
+	if (t->ret_stack)
+		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
+
+	if (ftrace_graph_active) {
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = per_cpu(idle_ret_stack, cpu);
+		if (!ret_stack) {
+			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+					* sizeof(struct ftrace_ret_stack),
+					GFP_KERNEL);
+			if (!ret_stack)
+				return;
+			per_cpu(idle_ret_stack, cpu) = ret_stack;
+		}
+		graph_init_task(t, ret_stack);
+	}
+}
+
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
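The new ftrace_graph_init_idle_task() caches the idle task's return stack in the per-CPU idle_ret_stack variable, so repeated CPU hotplug online events reuse a single allocation instead of leaking a fresh kmalloc() each time the CPU comes back up. Below is a minimal userspace sketch of that allocate-once, per-CPU cache pattern; the plain array stands in for DEFINE_PER_CPU()/per_cpu(), and NR_FAKE_CPUS, get_idle_stack() and the frame type are illustrative names, not kernel APIs.

#include <stdio.h>
#include <stdlib.h>

#define NR_FAKE_CPUS 4
#define STACK_DEPTH  50

struct ret_frame { unsigned long ret; };

/* Stand-in for the per-CPU idle_ret_stack pointer. */
static struct ret_frame *idle_stack[NR_FAKE_CPUS];

/* Called every time a CPU comes online: reuses any stack allocated
 * on a previous online, so repeated hotplug cycles cannot leak. */
static struct ret_frame *get_idle_stack(int cpu)
{
	if (!idle_stack[cpu]) {
		idle_stack[cpu] = calloc(STACK_DEPTH,
					 sizeof(struct ret_frame));
		if (!idle_stack[cpu])
			return NULL;
		printf("cpu%d: allocated new idle stack\n", cpu);
	} else {
		printf("cpu%d: reusing cached idle stack\n", cpu);
	}
	return idle_stack[cpu];
}

int main(void)
{
	/* Simulate two hotplug online events on the same CPU. */
	get_idle_stack(1);
	get_idle_stack(1);   /* second online: no new allocation */
	return 0;
}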
@@ -3433,12 +3475,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 				GFP_KERNEL);
 		if (!ret_stack)
 			return;
-		atomic_set(&t->tracing_graph_pause, 0);
-		atomic_set(&t->trace_overrun, 0);
-		t->ftrace_timestamp = 0;
-		/* make curr_ret_stack visable before we add the ret_stack */
-		smp_wmb();
-		t->ret_stack = ret_stack;
+		graph_init_task(t, ret_stack);
 	}
 }
 
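Both initialization paths now funnel through graph_init_task(), whose smp_wmb() orders the field writes before the t->ret_stack store, so any CPU that observes a non-NULL ret_stack also observes the initialized fields. The sketch below models that publish/observe ordering with C11 release/acquire atomics; it illustrates the role the barrier plays and is not the kernel's actual primitives, and the task/field names are simplified stand-ins.

#include <stdatomic.h>

struct task {
	int curr_ret_stack;         /* plain fields initialized first... */
	_Atomic(void *) ret_stack;  /* ...pointer published last */
};

/* Writer: initialize all fields, then publish the pointer. The
 * release store keeps the field writes from being reordered after
 * the pointer store (the role smp_wmb() plays in graph_init_task). */
static void publish(struct task *t, void *stack)
{
	t->curr_ret_stack = -1;
	atomic_store_explicit(&t->ret_stack, stack,
			      memory_order_release);
}

/* Reader: if the acquire load sees a non-NULL stack, the earlier
 * field initialization is guaranteed to be visible too. */
static int observe(struct task *t)
{
	void *s = atomic_load_explicit(&t->ret_stack,
				       memory_order_acquire);
	return s ? t->curr_ret_stack : 0;
}

int main(void)
{
	static char stack[64];
	struct task t = { 0 };

	publish(&t, stack);
	return observe(&t) == -1 ? 0 : 1;
}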