author      Linus Torvalds <torvalds@linux-foundation.org>    2009-06-10 22:58:10 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org>    2009-06-10 22:58:10 -0400
commit      991ec02cdca33b03a132a0cacfe6f0aa0be9aa8d (patch)
tree        50b5f4e2b9f138da57f76eca44fdcc80a2fcd428 /kernel
parent      862366118026a358882eefc70238dbcc3db37aac (diff)
parent      84047e360af0394ac5861d433f26bbcf30f77dd1 (diff)
Merge branch 'tracing-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'tracing-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
function-graph: always initialize task ret_stack
function-graph: move initialization of new tasks up in fork
function-graph: add memory barriers for accessing task's ret_stack
function-graph: enable the stack after initialization of other variables
function-graph: only allocate init tasks if it was not already done
Manually fix trivial conflict in kernel/trace/ftrace.c
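Editor's note: the five patches above converge on one ordering contract for ret_stack. The writer fully initializes curr_ret_stack and the other bookkeeping, issues smp_wmb(), and only then stores the ret_stack pointer; the reader tests ret_stack, issues smp_rmb(), and only then trusts the other fields. Below is a minimal userspace sketch of that publish/consume pairing. All names are illustrative, and C11 release/acquire ordering stands in for the kernel's smp_wmb()/smp_rmb().

#include <stdatomic.h>
#include <stddef.h>

#define DEPTH 50	/* illustrative stack depth */

struct ret_entry { unsigned long ret, func; };

struct task {
	int curr_ret_stack;			/* stack index, -1 == empty */
	struct ret_entry *_Atomic ret_stack;	/* NULL == not traced yet */
};

/* Writer (tracer side): initialize every field, publish the pointer last.
 * The release store plays the role of smp_wmb() in the patches. */
static void publish_ret_stack(struct task *t, struct ret_entry *stack)
{
	t->curr_ret_stack = -1;		/* must be visible before the pointer */
	atomic_store_explicit(&t->ret_stack, stack, memory_order_release);
}

/* Reader (traced task side): test the pointer, only then read the rest.
 * The acquire load plays the role of the smp_rmb() added to
 * ftrace_push_return_trace(). */
static int can_push(struct task *t)
{
	struct ret_entry *stack =
		atomic_load_explicit(&t->ret_stack, memory_order_acquire);

	if (!stack)
		return 0;	/* the kernel code returns -EBUSY here */

	/* Ordered after the load: curr_ret_stack is guaranteed valid. */
	return t->curr_ret_stack < DEPTH - 1;
}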
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c                         | 10
-rw-r--r--  kernel/trace/ftrace.c                 | 29
-rw-r--r--  kernel/trace/trace_functions_graph.c  |  6
3 files changed, 29 insertions, 16 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 5449efbc6427..bb762b4dd217 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -981,6 +981,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (!p)
 		goto fork_out;
 
+	ftrace_graph_init_task(p);
+
 	rt_mutex_init_task(p);
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -1130,8 +1132,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		}
 	}
 
-	ftrace_graph_init_task(p);
-
 	p->pid = pid_nr(pid);
 	p->tgid = p->pid;
 	if (clone_flags & CLONE_THREAD)
@@ -1140,7 +1140,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (current->nsproxy != p->nsproxy) {
 		retval = ns_cgroup_clone(p, pid);
 		if (retval)
-			goto bad_fork_free_graph;
+			goto bad_fork_free_pid;
 	}
 
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1232,7 +1232,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_graph;
+		goto bad_fork_free_pid;
 	}
 
 	if (clone_flags & CLONE_THREAD) {
@@ -1267,8 +1267,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	cgroup_post_fork(p);
 	return p;
 
-bad_fork_free_graph:
-	ftrace_graph_exit_task(p);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
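Editor's note: moving ftrace_graph_init_task() ahead of the first failure point is what lets the two converted error paths above fall back to the plain bad_fork_free_pid label: state that is safely initialized before anything can fail needs no dedicated unwind label. A minimal sketch of that cleanup-ordering idea, with hypothetical userspace names:

#include <stdlib.h>

struct task {
	long *ret_stack;	/* tracing state; NULL needs no teardown */
	int  *pid;		/* acquired later, must be unwound on error */
};

/* Initialize the no-teardown state first; every later failure path can
 * then share one label instead of growing a bad_fork_free_graph twin. */
static int create_task(struct task *t, int fail_late)
{
	t->ret_stack = NULL;	/* safe default before anything can fail */

	t->pid = malloc(sizeof(*t->pid));
	if (!t->pid)
		return -1;	/* nothing to unwind for ret_stack */

	if (fail_late)
		goto bad_free_pid;	/* the shared label */

	return 0;

bad_free_pid:
	free(t->pid);
	return -1;
}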
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index d6973dfadb36..bb60732ade0c 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -3218,12 +3218,12 @@ static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
 		}
 
 		if (t->ret_stack == NULL) {
-			t->curr_ret_stack = -1;
-			/* Make sure IRQs see the -1 first: */
-			barrier();
-			t->ret_stack = ret_stack_list[start++];
 			atomic_set(&t->tracing_graph_pause, 0);
 			atomic_set(&t->trace_overrun, 0);
+			t->curr_ret_stack = -1;
+			/* Make sure the tasks see the -1 first: */
+			smp_wmb();
+			t->ret_stack = ret_stack_list[start++];
 		}
 	} while_each_thread(g, t);
 
@@ -3281,8 +3281,10 @@ static int start_graph_tracing(void)
 		return -ENOMEM;
 
 	/* The cpu_boot init_task->ret_stack will never be freed */
-	for_each_online_cpu(cpu)
-		ftrace_graph_init_task(idle_task(cpu));
+	for_each_online_cpu(cpu) {
+		if (!idle_task(cpu)->ret_stack)
+			ftrace_graph_init_task(idle_task(cpu));
+	}
 
 	do {
 		ret = alloc_retstack_tasklist(ret_stack_list);
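Editor's note: the hunk above is the "only allocate init tasks if it was not already done" patch. The idle tasks' boot-time ret_stack is never freed, so a second registration of the graph tracer must not allocate a replacement behind it. A hedged userspace sketch of that init-once guard (names are illustrative):

#include <stdlib.h>

#define DEPTH 50	/* illustrative stack depth */

struct idle_task { long *ret_stack; };

/* Allocate the idle task's stack only if it is absent, so repeated
 * tracer start/stop cycles neither leak nor clobber the boot stack. */
static int init_idle_ret_stack(struct idle_task *idle)
{
	if (idle->ret_stack)	/* already initialized: keep it */
		return 0;

	idle->ret_stack = calloc(DEPTH, sizeof(*idle->ret_stack));
	return idle->ret_stack ? 0 : -1;
}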
@@ -3374,18 +3376,25 @@ void unregister_ftrace_graph(void)
 /* Allocate a return stack for newly created task */
 void ftrace_graph_init_task(struct task_struct *t)
 {
+	/* Make sure we do not use the parent ret_stack */
+	t->ret_stack = NULL;
+
 	if (ftrace_graph_active) {
-		t->ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
+		struct ftrace_ret_stack *ret_stack;
+
+		ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
 				* sizeof(struct ftrace_ret_stack),
 				GFP_KERNEL);
-		if (!t->ret_stack)
+		if (!ret_stack)
 			return;
 		t->curr_ret_stack = -1;
 		atomic_set(&t->tracing_graph_pause, 0);
 		atomic_set(&t->trace_overrun, 0);
 		t->ftrace_timestamp = 0;
-	} else
-		t->ret_stack = NULL;
+		/* make curr_ret_stack visible before we add the ret_stack */
+		smp_wmb();
+		t->ret_stack = ret_stack;
+	}
 }
 
 void ftrace_graph_exit_task(struct task_struct *t)
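Editor's note: the rewritten ftrace_graph_init_task() shows two habits worth calling out. First, clear the pointer before anything else, so a forked child can never run on the ret_stack it memcpy'd from its parent. Second, build the new stack in a local variable, so no other observer can ever see a published pointer with half-initialized bookkeeping behind it. A hedged userspace analogue (illustrative names only, with a C11 release store standing in for smp_wmb()):

#include <stdatomic.h>
#include <stdlib.h>

#define RETFUNC_DEPTH 50	/* stands in for FTRACE_RETFUNC_DEPTH */

struct ret_entry { unsigned long ret, func; };

struct task {
	int curr_ret_stack;
	struct ret_entry *_Atomic ret_stack;
};

static void init_task_trace(struct task *t, int tracing_active)
{
	/* Never reuse what was copied from the parent. */
	atomic_store_explicit(&t->ret_stack, NULL, memory_order_relaxed);

	if (tracing_active) {
		/* Fill a local first: nobody can see it yet. */
		struct ret_entry *stack =
			malloc(RETFUNC_DEPTH * sizeof(*stack));
		if (!stack)
			return;
		t->curr_ret_stack = -1;
		/* Publish last; release ordering mirrors smp_wmb(). */
		atomic_store_explicit(&t->ret_stack, stack,
				      memory_order_release);
	}
}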
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 10f6ad7d85f6..8b592418d8b2 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -65,6 +65,12 @@ ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth)
 	if (!current->ret_stack)
 		return -EBUSY;
 
+	/*
+	 * We must make sure the ret_stack is tested before we read
+	 * anything else.
+	 */
+	smp_rmb();
+
 	/* The return trace stack is full */
 	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
 		atomic_inc(&current->trace_overrun);
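Editor's note: after the barrier, the push itself guards against overflow. An index already at FTRACE_RETFUNC_DEPTH - 1 means the fixed-size stack is full, and the event is counted in trace_overrun instead of being recorded. A self-contained sketch of that bounded-push pattern (names are illustrative):

#define DEPTH 50	/* stands in for FTRACE_RETFUNC_DEPTH */

struct entry { unsigned long ret, func; };

struct tstack {
	int curr;		/* top of stack, -1 == empty */
	unsigned long overrun;	/* dropped pushes, cf. trace_overrun */
	struct entry slot[DEPTH];
};

/* Returns 0 on success, -1 when the fixed-depth stack is full. */
static int push_return(struct tstack *s, unsigned long ret, unsigned long func)
{
	if (s->curr == DEPTH - 1) {
		s->overrun++;	/* counted like atomic_inc(&trace_overrun) */
		return -1;
	}
	s->curr++;
	s->slot[s->curr].ret = ret;
	s->slot[s->curr].func = func;
	return 0;
}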