author      Ingo Molnar <mingo@elte.hu>    2011-02-14 09:15:16 -0500
committer   Ingo Molnar <mingo@elte.hu>    2011-02-14 09:15:16 -0500
commit      bf1af3a809506645b9130755b713b008da14737f (patch)
tree        6a13f9c622fa8322a6ca2795bd7bb4e9f8fd6160
parent      0de4b34d466bae571b50f41c7296b85248205e35 (diff)
parent      868baf07b1a259f5f3803c1dc2777b6c358f83cf (diff)
Merge branch 'tip/perf/urgent' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt/linux-2.6-trace into perf/core
-rw-r--r--  include/linux/ftrace.h  |  2
-rw-r--r--  kernel/sched.c          |  2
-rw-r--r--  kernel/trace/ftrace.c   | 52
3 files changed, 48 insertions, 8 deletions
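The pulled branch carries a single fix to the function-graph tracer's handling of idle tasks. Previously, ftrace_graph_init_task() allocated a fresh ret_stack for the idle task every time init_idle() ran, so each CPU-hotplug online could orphan the previous allocation. The new ftrace_graph_init_idle_task() instead keeps one cached stack per CPU and hands the same buffer back on every online. A minimal userspace sketch of that lazy per-slot caching, with illustrative names that are not from the kernel (NCPUS, idle_slot, get_idle_stack):

/* Lazy per-CPU slot cache: the shape of the patch's
 * DEFINE_PER_CPU(idle_ret_stack) logic, illustrative only. */
#include <stdlib.h>

#define NCPUS       8
#define STACK_DEPTH 50   /* stands in for FTRACE_RETFUNC_DEPTH */

struct ret_stack { unsigned long ret; };

static struct ret_stack *idle_slot[NCPUS];

static struct ret_stack *get_idle_stack(int cpu)
{
        if (!idle_slot[cpu]) {
                /* first online of this cpu: allocate exactly once */
                idle_slot[cpu] = calloc(STACK_DEPTH,
                                        sizeof(struct ret_stack));
        }
        return idle_slot[cpu];   /* NULL only if allocation failed */
}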
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index dcd6a7c3a435..ca29e03c1fac 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -428,6 +428,7 @@ extern void unregister_ftrace_graph(void);
 
 extern void ftrace_graph_init_task(struct task_struct *t);
 extern void ftrace_graph_exit_task(struct task_struct *t);
+extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
 
 static inline int task_curr_ret_stack(struct task_struct *t)
 {
@@ -451,6 +452,7 @@ static inline void unpause_graph_tracing(void)
 
 static inline void ftrace_graph_init_task(struct task_struct *t) { }
 static inline void ftrace_graph_exit_task(struct task_struct *t) { }
+static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }
 
 static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
 					trace_func_graph_ent_t entryfunc)
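The header change pairs the real declaration with an empty static-inline stub in the !CONFIG_FUNCTION_GRAPH_TRACER branch, so call sites such as init_idle() below build unchanged whether or not the tracer is configured in. A minimal sketch of that stub pattern, with hypothetical names (CONFIG_FOO and foo_hook are not from this patch):

#ifdef CONFIG_FOO
extern void foo_hook(int cpu);             /* real version elsewhere */
#else
static inline void foo_hook(int cpu) { }   /* compiles to nothing    */
#endif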
diff --git a/kernel/sched.c b/kernel/sched.c
index 31cb5d5e1aac..e142e92f38da 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5550,7 +5550,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	 * The idle tasks have their own, simple scheduling class:
 	 */
 	idle->sched_class = &idle_sched_class;
-	ftrace_graph_init_task(idle);
+	ftrace_graph_init_idle_task(idle, cpu);
 }
 
 /*
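init_idle() runs at boot for each CPU and can run again when a CPU is brought back online, so this hook may be invoked repeatedly for the same idle task; passing cpu down lets ftrace_graph_init_idle_task() find the per-CPU cached stack instead of allocating anew each time. A usage sketch of that repeat-call behaviour, reusing the illustrative get_idle_stack() helper from the earlier sketch (not a kernel function):

#include <stdio.h>

int main(void)
{
        struct ret_stack *boot   = get_idle_stack(3);  /* boot-time init    */
        struct ret_stack *online = get_idle_stack(3);  /* later hotplug on  */
        printf("%s\n", boot == online ? "reused" : "fresh allocation");
        return 0;
}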
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f3dadae83883..888b611897d3 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3328,7 +3328,7 @@ static int start_graph_tracing(void)
 	/* The cpu_boot init_task->ret_stack will never be freed */
 	for_each_online_cpu(cpu) {
 		if (!idle_task(cpu)->ret_stack)
-			ftrace_graph_init_task(idle_task(cpu));
+			ftrace_graph_init_idle_task(idle_task(cpu), cpu);
 	}
 
 	do {
@@ -3418,6 +3418,49 @@ void unregister_ftrace_graph(void)
 	mutex_unlock(&ftrace_lock);
 }
 
+static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
+
+static void
+graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
+{
+	atomic_set(&t->tracing_graph_pause, 0);
+	atomic_set(&t->trace_overrun, 0);
+	t->ftrace_timestamp = 0;
+	/* make curr_ret_stack visible before we add the ret_stack */
+	smp_wmb();
+	t->ret_stack = ret_stack;
+}
+
+/*
+ * Allocate a return stack for the idle task. May be the first
+ * time through, or it may be done by CPU hotplug online.
+ */
+void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
+{
+	t->curr_ret_stack = -1;
+	/*
+	 * The idle task has no parent, it either has its own
+	 * stack or no stack at all.
+	 */
+	if (t->ret_stack)
+		WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
+
+	if (ftrace_graph_active) {
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = per_cpu(idle_ret_stack, cpu);
+		if (!ret_stack) {
+			ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+					* sizeof(struct ftrace_ret_stack),
+					GFP_KERNEL);
+			if (!ret_stack)
+				return;
+			per_cpu(idle_ret_stack, cpu) = ret_stack;
+		}
+		graph_init_task(t, ret_stack);
+	}
+}
+
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
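The ordering in graph_init_task() is deliberate: every field, including the curr_ret_stack set by its callers, must be initialized before t->ret_stack becomes non-NULL, because code elsewhere treats a non-NULL ret_stack as the signal that the task is ready to trace; smp_wmb() keeps those stores from being reordered past the publish. A userspace analog of that publish pattern, using C11 atomics in place of the kernel primitives (names illustrative):

#include <stdatomic.h>

struct task_stub {
        int curr_ret_stack;
        _Atomic(void *) ret_stack;      /* non-NULL means "ready" */
};

static void publish_stack(struct task_stub *t, void *stack)
{
        t->curr_ret_stack = -1;                     /* initialize first */
        atomic_thread_fence(memory_order_release);  /* ~ smp_wmb()      */
        atomic_store_explicit(&t->ret_stack, stack,
                              memory_order_relaxed); /* then publish    */
}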
@@ -3433,12 +3476,7 @@ void ftrace_graph_init_task(struct task_struct *t)
 				GFP_KERNEL);
 		if (!ret_stack)
 			return;
-		atomic_set(&t->tracing_graph_pause, 0);
-		atomic_set(&t->trace_overrun, 0);
-		t->ftrace_timestamp = 0;
-		/* make curr_ret_stack visable before we add the ret_stack */
-		smp_wmb();
-		t->ret_stack = ret_stack;
+		graph_init_task(t, ret_stack);
 	}
 }
 
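With the common field setup hoisted into graph_init_task(), the fork path above keeps its per-task kmalloc() while the idle path reuses the per-CPU buffer, and both publish the stack through the same barrier-protected store. What remains specific to the idle path is the invariant it checks with WARN_ON(): an idle task that already has a ret_stack must be holding its own CPU's cached one. A sketch of that check, building on the illustrative idle_slot[] array from the first sketch:

#include <assert.h>

static void check_idle_stack(struct ret_stack *task_stack, int cpu)
{
        if (task_stack)                               /* already set up? */
                assert(task_stack == idle_slot[cpu]); /* ~ WARN_ON()     */
}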