author    Steven Rostedt <srostedt@redhat.com>    2009-06-02 16:39:48 -0400
committer Steven Rostedt <rostedt@goodmis.org>    2009-06-02 16:49:57 -0400
commit    f7e8b616ed1cc6f790b82324bce8a2a60295e5c2 (patch)
tree      e46e50692d90ddcccf159accdcdd655d3dd0ffa5  /kernel/fork.c
parent    26c01624a2a40f8a4ddf6449b65c9b1c418d0e72 (diff)
function-graph: move initialization of new tasks up in fork
When the function graph tracer is enabled, every new task must allocate a ret_stack to hold the return addresses of traced functions. This is needed because the function graph tracer replaces the real return address with a call to the tracing of the exit function.

This initialization happens in fork, but it happens too late. If fork fails, it calls free_task, which in turn frees that ret_stack. But until the initialization has run, the new (failed) task still points to its parent's ret_stack. If a fork failure happens while the function graph tracer is active, the result is catastrophic for the parent.

Also, there is no need to call ftrace_graph_exit_task from fork, since it is already called by free_task, which fork calls on failure.

[ Impact: prevent crash during failed fork running function graph tracer ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
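To make the ordering hazard concrete, here is a minimal userspace sketch, not kernel code: struct task, demo_graph_init_task, demo_graph_exit_task and demo_copy_process are hypothetical stand-ins for task_struct, ftrace_graph_init_task, ftrace_graph_exit_task and copy_process. It only illustrates why the child must get its own ret_stack before any failure path that frees one can run.

/*
 * Simplified model of the bug described above. All names here are
 * illustrative; this is not the kernel implementation.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct task {
	long *ret_stack;	/* per-task return-address stack */
};

/* Give a task its own ret_stack (the tracer's init step). */
static int demo_graph_init_task(struct task *t)
{
	t->ret_stack = calloc(64, sizeof(long));
	return t->ret_stack ? 0 : -1;
}

/* Free a task's ret_stack (what the free_task path ends up doing). */
static void demo_graph_exit_task(struct task *t)
{
	free(t->ret_stack);
	t->ret_stack = NULL;
}

/* Rough model of copy_process(): duplicate the parent, then maybe fail. */
static struct task *demo_copy_process(struct task *parent, int fail_later)
{
	struct task *child = malloc(sizeof(*child));

	if (!child)
		return NULL;
	/* The child starts as a byte copy of the parent, so
	 * child->ret_stack still points at the parent's buffer. */
	memcpy(child, parent, sizeof(*child));

	/* The fix: give the child its own ret_stack before any
	 * failure path can reach the freeing code below. */
	if (demo_graph_init_task(child))
		goto bad_fork;

	if (fail_later)
		goto bad_fork;	/* some later fork step failed */

	return child;

bad_fork:
	/* With init done first, this frees only the child's buffer.
	 * With init done late (as before the patch), a failure here
	 * would free the buffer the child still shares with its parent. */
	demo_graph_exit_task(child);
	free(child);
	return NULL;
}

int main(void)
{
	struct task parent;

	demo_graph_init_task(&parent);
	demo_copy_process(&parent, 1);	/* simulate a failed fork */
	/* The parent's ret_stack survives because the child got its own copy. */
	printf("parent ret_stack still valid: %p\n", (void *)parent.ret_stack);
	demo_graph_exit_task(&parent);
	return 0;
}

Before the patch, the equivalent of demo_graph_init_task() ran much later in copy_process(), so an early goto into the error path freed the parent's buffer instead of the child's.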
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--    kernel/fork.c    10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index b9e2edd00726..c4b1e35c430b 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -982,6 +982,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (!p)
 		goto fork_out;
 
+	ftrace_graph_init_task(p);
+
 	rt_mutex_init_task(p);
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -1131,8 +1133,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		}
 	}
 
-	ftrace_graph_init_task(p);
-
 	p->pid = pid_nr(pid);
 	p->tgid = p->pid;
 	if (clone_flags & CLONE_THREAD)
@@ -1141,7 +1141,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (current->nsproxy != p->nsproxy) {
 		retval = ns_cgroup_clone(p, pid);
 		if (retval)
-			goto bad_fork_free_graph;
+			goto bad_fork_free_pid;
 	}
 
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1233,7 +1233,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_graph;
+		goto bad_fork_free_pid;
 	}
 
 	if (clone_flags & CLONE_THREAD) {
@@ -1268,8 +1268,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	cgroup_post_fork(p);
 	return p;
 
-bad_fork_free_graph:
-	ftrace_graph_exit_task(p);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);