author     Linus Torvalds <torvalds@linux-foundation.org>    2009-06-10 22:58:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2009-06-10 22:58:10 -0400
commit     991ec02cdca33b03a132a0cacfe6f0aa0be9aa8d (patch)
tree       50b5f4e2b9f138da57f76eca44fdcc80a2fcd428 /kernel/trace
parent     862366118026a358882eefc70238dbcc3db37aac (diff)
parent     84047e360af0394ac5861d433f26bbcf30f77dd1 (diff)
Merge branch 'tracing-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
function-graph: always initialize task ret_stack
function-graph: move initialization of new tasks up in fork
function-graph: add memory barriers for accessing task's ret_stack
function-graph: enable the stack after initialization of other variables
function-graph: only allocate init tasks if it was not already done
Manually fix trivial conflict in kernel/trace/ftrace.c
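
The series above amounts to one publish/subscribe discipline for t->ret_stack: fully initialize every field the tracer reads (curr_ret_stack, tracing_graph_pause, trace_overrun), issue a write barrier, and only then store the ret_stack pointer; the reader in ftrace_push_return_trace() tests the pointer, issues a read barrier, and only then trusts those fields. Below is a minimal, self-contained sketch of that pairing in plain C11 rather than kernel code: the field names mirror the kernel's, but struct task, writer(), reader() and stack_storage are illustrative, and atomic_thread_fence() stands in (approximately) for smp_wmb()/smp_rmb(). Treat it as an illustration of the ordering, not as the kernel implementation; the real thing is in the diff below.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    /* Toy analogue of the task's ret_stack publication. */
    struct task {
        int curr_ret_stack;
        long *_Atomic ret_stack;   /* published pointer; NULL until ready */
    };

    static long stack_storage[50];
    static struct task t = { .curr_ret_stack = 0, .ret_stack = NULL };

    static void *writer(void *arg)
    {
        (void)arg;
        t.curr_ret_stack = -1;                      /* initialize first...      */
        atomic_thread_fence(memory_order_release);  /* ...kernel: smp_wmb()     */
        atomic_store_explicit(&t.ret_stack, stack_storage,
                              memory_order_relaxed);/* ...then publish          */
        return NULL;
    }

    static void *reader(void *arg)
    {
        (void)arg;
        long *rs = atomic_load_explicit(&t.ret_stack, memory_order_relaxed);
        if (!rs)
            return NULL;                            /* kernel: return -EBUSY    */
        atomic_thread_fence(memory_order_acquire);  /* kernel: smp_rmb()        */
        /* Safe: curr_ret_stack was written before ret_stack became visible. */
        printf("curr_ret_stack = %d\n", t.curr_ret_stack);
        return NULL;
    }

    int main(void)
    {
        pthread_t w, r;
        pthread_create(&w, NULL, writer, NULL);
        pthread_create(&r, NULL, reader, NULL);
        pthread_join(w, NULL);
        pthread_join(r, NULL);
        return 0;
    }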
Diffstat (limited to 'kernel/trace')
 kernel/trace/ftrace.c                | 29 +++++++++++++++++++----------
 kernel/trace/trace_functions_graph.c |  6 ++++++
 2 files changed, 25 insertions(+), 10 deletions(-)
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d6973dfadb36..bb60732ade0c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3218,12 +3218,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 
 		if (t->ret_stack == NULL) {
-			t->curr_ret_stack = -1;
-			/* Make sure IRQs see the -1 first: */
-			barrier();
-			t->ret_stack = ret_stack_list[start++];
 			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
+			t->curr_ret_stack = -1;
+			/* Make sure the tasks see the -1 first: */
+			smp_wmb();
+			t->ret_stack = ret_stack_list[start++];
 		}
 	} while_each_thread(g, t);
 
@@ -3281,8 +3281,10 @@ static int start_graph_tracing(void)
 		return -ENOMEM;
 
 	/* The cpu_boot init_task->ret_stack will never be freed */
-	for_each_online_cpu(cpu)
-		ftrace_graph_init_task(idle_task(cpu));
+	for_each_online_cpu(cpu) {
+		if (!idle_task(cpu)->ret_stack)
+			ftrace_graph_init_task(idle_task(cpu));
+	}
 
 	do {
 		ret = alloc_retstack_tasklist(ret_stack_list);
@@ -3374,18 +3376,25 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
+	/* Make sure we do not use the parent ret_stack */
+	t->ret_stack = NULL;
+
 	if (ftrace_graph_active) {
-		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
-		if (!t->ret_stack)
+		if (!ret_stack)
 			return;
 		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->ftrace_timestamp = 0;
-	} else
-		t->ret_stack = NULL;
+		/* make curr_ret_stack visible before we add the ret_stack */
+		smp_wmb();
+		t->ret_stack = ret_stack;
+	}
 }
 
 void ftrace_graph_exit_task(struct task_struct *t)
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 10f6ad7d85f6..8b592418d8b2 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -65,6 +65,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	if (!current->ret_stack)
 		return -EBUSY;
 
+	/*
+	 * We must make sure the ret_stack is tested before we read
+	 * anything else.
+	 */
+	smp_rmb();
+
 	/* The return trace stack is full */
 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 		atomic_inc(&current->trace_overrun);
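
Taken together: the smp_rmb() added here pairs with the smp_wmb() calls introduced in kernel/trace/ftrace.c above. Once ftrace_push_return_trace() has observed a non-NULL current->ret_stack and passed the read barrier, the stores to curr_ret_stack and the atomic counters made before the write barrier are guaranteed visible. The barrier() the writer side replaces was only a compiler barrier, which constrains reordering on the local CPU but gives no visibility ordering to other CPUs; the sketch after the commit message shows the same pairing in standalone C11.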