author    Steven Rostedt <srostedt@redhat.com>    2009-06-02 16:39:48 -0400
committer Steven Rostedt <rostedt@goodmis.org>    2009-06-02 16:49:57 -0400
commit    f7e8b616ed1cc6f790b82324bce8a2a60295e5c2 (patch)
tree      e46e50692d90ddcccf159accdcdd655d3dd0ffa5 /kernel/fork.c
parent    26c01624a2a40f8a4ddf6449b65c9b1c418d0e72 (diff)
function-graph: move initialization of new tasks up in fork
When the function graph tracer is enabled, all new tasks must allocate a ret_stack to hold the return addresses of traced functions. This is because the function graph tracer replaces the real return address with a call into the tracer's exit handler.

This initialization happens in fork, but it happens too late. If fork fails, it calls free_task, which in turn frees this ret_stack. But before the initialization runs, the new (failed) task still points to its parent's ret_stack. If a fork failure happens while the function graph tracer is active, the result is catastrophic for the parent.

Also, there is no need to call ftrace_graph_exit_task from fork, since it is already called by free_task, which fork calls on failure.

[ Impact: prevent crash during failed fork running function graph tracer ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
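To make the ordering problem concrete, here is a minimal user-space sketch of the failure the patch prevents. The struct and helpers below (dup_task, graph_init_task, free_task) are simplified stand-ins for the kernel's dup_task_struct(), ftrace_graph_init_task() and free_task(), not the real implementations:

/* Simplified sketch: a fork failure frees the child before the child
 * ever received its own ret_stack. */
#include <stdlib.h>
#include <string.h>

struct task {
	unsigned long *ret_stack;	/* return addresses saved by the graph tracer */
};

/* dup_task_struct(): the child starts as a byte copy of the parent,
 * so it inherits the parent's ret_stack pointer. */
static struct task *dup_task(const struct task *parent)
{
	struct task *child = malloc(sizeof(*child));

	if (child)
		memcpy(child, parent, sizeof(*child));
	return child;
}

/* ftrace_graph_init_task(): give the child its own state (here: none). */
static void graph_init_task(struct task *t)
{
	t->ret_stack = NULL;
}

/* free_task() ends up calling ftrace_graph_exit_task(), which frees
 * whatever ret_stack the task owns. */
static void free_task(struct task *t)
{
	free(t->ret_stack);
	free(t);
}

int main(void)
{
	struct task parent = { .ret_stack = malloc(64 * sizeof(unsigned long)) };
	struct task *child = dup_task(&parent);

	/* Before the patch: an error path reached here, ahead of
	 * graph_init_task(), would call free_task(child) and thereby
	 * free the parent's live ret_stack through the aliased pointer.
	 * Initializing immediately after the copy closes that window. */
	graph_init_task(child);
	free_task(child);	/* now touches only the child's own state */

	free(parent.ret_stack);
	return 0;
}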
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--    kernel/fork.c    10
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index b9e2edd..c4b1e35 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -982,6 +982,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (!p)
 		goto fork_out;
 
+	ftrace_graph_init_task(p);
+
 	rt_mutex_init_task(p);
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -1131,8 +1133,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		}
 	}
 
-	ftrace_graph_init_task(p);
-
 	p->pid = pid_nr(pid);
 	p->tgid = p->pid;
 	if (clone_flags & CLONE_THREAD)
@@ -1141,7 +1141,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (current->nsproxy != p->nsproxy) {
 		retval = ns_cgroup_clone(p, pid);
 		if (retval)
-			goto bad_fork_free_graph;
+			goto bad_fork_free_pid;
 	}
 
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1233,7 +1233,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_graph;
+		goto bad_fork_free_pid;
 	}
 
 	if (clone_flags & CLONE_THREAD) {
@@ -1268,8 +1268,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	cgroup_post_fork(p);
 	return p;
 
-bad_fork_free_graph:
-	ftrace_graph_exit_task(p);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
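For reference, a hedged user-space sketch of the two ftrace helpers this patch reorders. The function names come from the commit itself, but the bodies, the stub types, ftrace_graph_active, and the stack depth of 50 are simplified assumptions, not the kernel code (the real versions live in kernel/trace/ftrace.c and carry extra bookkeeping). The point it illustrates: ftrace_graph_exit_task() frees only what init set up, and free_task() already calls it, so the explicit bad_fork_free_graph label was redundant once init runs right after the task copy:

/* Stand-in types so the sketch is self-contained; simplified, assumed shapes. */
#include <stdlib.h>

struct ftrace_ret_stack {
	unsigned long ret;		/* saved real return address */
	unsigned long func;		/* traced function */
};

struct task_struct {
	struct ftrace_ret_stack *ret_stack;
	int curr_ret_stack;
};

static int ftrace_graph_active;		/* non-zero while the tracer runs (assumed flag) */

/* Sketch of ftrace_graph_init_task(): the new task gets either its own
 * ret_stack or NULL -- never the parent's pointer inherited by the copy. */
static void ftrace_graph_init_task(struct task_struct *t)
{
	t->ret_stack = NULL;
	t->curr_ret_stack = -1;
	if (ftrace_graph_active)
		t->ret_stack = calloc(50, sizeof(struct ftrace_ret_stack));
}

/* Sketch of ftrace_graph_exit_task(), reached from free_task(): it frees
 * only what init_task installed, so every bad_fork_* path that ends in
 * free_task() is safe without an explicit call from copy_process(). */
static void ftrace_graph_exit_task(struct task_struct *t)
{
	struct ftrace_ret_stack *ret_stack = t->ret_stack;

	t->ret_stack = NULL;
	free(ret_stack);
}

int main(void)
{
	struct task_struct t;

	ftrace_graph_init_task(&t);	/* tracer off: ret_stack stays NULL */
	ftrace_graph_exit_task(&t);	/* frees nothing, no double free */
	return 0;
}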