Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c | 98
1 file changed, 32 insertions(+), 66 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index b99d73e971a4..8214ba7c8bb1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -37,6 +37,7 @@
 #include <linux/swap.h>
 #include <linux/syscalls.h>
 #include <linux/jiffies.h>
+#include <linux/tracehook.h>
 #include <linux/futex.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/rcupdate.h>
@@ -656,13 +657,6 @@ static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
 		path_get(&old->root);
 		fs->pwd = old->pwd;
 		path_get(&old->pwd);
-		if (old->altroot.dentry) {
-			fs->altroot = old->altroot;
-			path_get(&old->altroot);
-		} else {
-			fs->altroot.mnt = NULL;
-			fs->altroot.dentry = NULL;
-		}
 		read_unlock(&old->lock);
 	}
 	return fs;
@@ -812,12 +806,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
 	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
 	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
-#ifdef CONFIG_TASK_XACCT
-	sig->rchar = sig->wchar = sig->syscr = sig->syscw = 0;
-#endif
-#ifdef CONFIG_TASK_IO_ACCOUNTING
-	memset(&sig->ioac, 0, sizeof(sig->ioac));
-#endif
+	task_io_accounting_init(&sig->ioac);
 	sig->sum_sched_runtime = 0;
 	INIT_LIST_HEAD(&sig->cpu_timers[0]);
 	INIT_LIST_HEAD(&sig->cpu_timers[1]);
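Note: the two #ifdef blocks above collapse into a single task_io_accounting_init() call, with the per-signal-struct I/O counters consolidated into the ioac member. A minimal sketch of what that helper amounts to, assuming the consolidated definition simply zeroes the structure (the authoritative version lives in <linux/task_io_accounting_ops.h>):

#include <linux/string.h>
#include <linux/task_io_accounting.h>

/* Sketch only: start a fresh task (or signal_struct) with all I/O
 * accounting counters at zero, regardless of CONFIG_TASK_XACCT or
 * CONFIG_TASK_IO_ACCOUNTING. */
static inline void task_io_accounting_init(struct task_io_accounting *ioac)
{
	memset(ioac, 0, sizeof(*ioac));
}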
@@ -865,8 +854,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 
 	new_flags &= ~PF_SUPERPRIV;
 	new_flags |= PF_FORKNOEXEC;
-	if (!(clone_flags & CLONE_PTRACE))
-		p->ptrace = 0;
+	new_flags |= PF_STARTING;
 	p->flags = new_flags;
 	clear_freeze_flag(p);
 }
@@ -907,7 +895,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 					struct pt_regs *regs,
 					unsigned long stack_size,
 					int __user *child_tidptr,
-					struct pid *pid)
+					struct pid *pid,
+					int trace)
 {
 	int retval;
 	struct task_struct *p;
@@ -1000,13 +989,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->last_switch_timestamp = 0;
 #endif
 
-#ifdef CONFIG_TASK_XACCT
-	p->rchar = 0;		/* I/O counter: bytes read */
-	p->wchar = 0;		/* I/O counter: bytes written */
-	p->syscr = 0;		/* I/O counter: read syscalls */
-	p->syscw = 0;		/* I/O counter: write syscalls */
-#endif
-	task_io_accounting_init(p);
+	task_io_accounting_init(&p->ioac);
 	acct_clear_integrals(p);
 
 	p->it_virt_expires = cputime_zero;
@@ -1163,8 +1146,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 */
 	p->group_leader = p;
 	INIT_LIST_HEAD(&p->thread_group);
-	INIT_LIST_HEAD(&p->ptrace_entry);
-	INIT_LIST_HEAD(&p->ptraced);
 
 	/* Now that the task is set up, run cgroup callbacks if
 	 * necessary. We need to run them before the task is visible
@@ -1195,7 +1176,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		p->real_parent = current->real_parent;
 	else
 		p->real_parent = current;
-	p->parent = p->real_parent;
 
 	spin_lock(&current->sighand->siglock);
 
@@ -1237,8 +1217,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	if (likely(p->pid)) {
 		list_add_tail(&p->sibling, &p->real_parent->children);
-		if (unlikely(p->ptrace & PT_PTRACED))
-			__ptrace_link(p, current->parent);
+		tracehook_finish_clone(p, clone_flags, trace);
 
 		if (thread_group_leader(p)) {
 			if (clone_flags & CLONE_NEWPID)
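Note: tracehook_finish_clone() takes over the open-coded ptrace attach removed here, along with the ptrace_entry/ptraced list initialisation and the p->parent assignment dropped in the earlier hunks. A hedged sketch of roughly what the hook has to do, reconstructed only from the removed lines (the real definition is in <linux/tracehook.h> and its ptrace helpers):

#include <linux/sched.h>
#include <linux/ptrace.h>

/* Sketch, not the actual tracehook implementation: re-create the setup that
 * used to be open-coded in copy_process()/copy_flags(), attaching the child
 * to the parent's tracer when this clone is traced. */
static inline void tracehook_finish_clone(struct task_struct *child,
					  unsigned long clone_flags, int trace)
{
	INIT_LIST_HEAD(&child->ptrace_entry);
	INIT_LIST_HEAD(&child->ptraced);
	child->parent = child->real_parent;
	child->ptrace = 0;
	if ((clone_flags & CLONE_PTRACE) || trace) {
		child->ptrace = current->ptrace;
		__ptrace_link(child, current->parent);
	}
}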
@@ -1323,29 +1302,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
 	struct pt_regs regs;
 
 	task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
-			    &init_struct_pid);
+			    &init_struct_pid, 0);
 	if (!IS_ERR(task))
 		init_idle(task, cpu);
 
 	return task;
 }
 
-static int fork_traceflag(unsigned clone_flags)
-{
-	if (clone_flags & CLONE_UNTRACED)
-		return 0;
-	else if (clone_flags & CLONE_VFORK) {
-		if (current->ptrace & PT_TRACE_VFORK)
-			return PTRACE_EVENT_VFORK;
-	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
-		if (current->ptrace & PT_TRACE_CLONE)
-			return PTRACE_EVENT_CLONE;
-	} else if (current->ptrace & PT_TRACE_FORK)
-		return PTRACE_EVENT_FORK;
-
-	return 0;
-}
-
 /*
  * Ok, this is the main fork-routine.
  *
@@ -1380,14 +1343,14 @@ long do_fork(unsigned long clone_flags,
 		}
 	}
 
-	if (unlikely(current->ptrace)) {
-		trace = fork_traceflag (clone_flags);
-		if (trace)
-			clone_flags |= CLONE_PTRACE;
-	}
+	/*
+	 * When called from kernel_thread, don't do user tracing stuff.
+	 */
+	if (likely(user_mode(regs)))
+		trace = tracehook_prepare_clone(clone_flags);
 
 	p = copy_process(clone_flags, stack_start, regs, stack_size,
-			child_tidptr, NULL);
+			child_tidptr, NULL, trace);
 	/*
 	 * Do this prior waking up the new thread - the thread pointer
 	 * might get invalid after that point, if the thread exits quickly.
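Note: tracehook_prepare_clone() replaces the fork_traceflag() helper deleted in the previous hunk; it decides which ptrace event, if any, should be reported for this clone. A sketch under the assumption that the hook keeps the same decision logic as the deleted function (the authoritative inline is in <linux/tracehook.h>):

#include <linux/sched.h>
#include <linux/ptrace.h>

/* Sketch mirroring the removed fork_traceflag(): map the clone flags to the
 * ptrace event the tracer asked for, or 0 when no report is wanted. */
static inline int tracehook_prepare_clone(unsigned clone_flags)
{
	if (clone_flags & CLONE_UNTRACED)
		return 0;

	if (clone_flags & CLONE_VFORK) {
		if (current->ptrace & PT_TRACE_VFORK)
			return PTRACE_EVENT_VFORK;
	} else if ((clone_flags & CSIGNAL) != SIGCHLD) {
		if (current->ptrace & PT_TRACE_CLONE)
			return PTRACE_EVENT_CLONE;
	} else {
		if (current->ptrace & PT_TRACE_FORK)
			return PTRACE_EVENT_FORK;
	}

	return 0;
}

One difference worth noting: the old code forced CLONE_PTRACE whenever a trace event was wanted, whereas the new code leaves clone_flags alone and instead passes the event code down to copy_process(), where tracehook_finish_clone() can act on it.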
@@ -1405,32 +1368,35 @@ long do_fork(unsigned long clone_flags,
 			init_completion(&vfork);
 		}
 
-		if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
+		tracehook_report_clone(trace, regs, clone_flags, nr, p);
+
+		/*
+		 * We set PF_STARTING at creation in case tracing wants to
+		 * use this to distinguish a fully live task from one that
+		 * hasn't gotten to tracehook_report_clone() yet.  Now we
+		 * clear it and set the child going.
+		 */
+		p->flags &= ~PF_STARTING;
+
+		if (unlikely(clone_flags & CLONE_STOPPED)) {
 			/*
 			 * We'll start up with an immediate SIGSTOP.
 			 */
 			sigaddset(&p->pending.signal, SIGSTOP);
 			set_tsk_thread_flag(p, TIF_SIGPENDING);
-		}
-
-		if (!(clone_flags & CLONE_STOPPED))
-			wake_up_new_task(p, clone_flags);
-		else
 			__set_task_state(p, TASK_STOPPED);
-
-		if (unlikely (trace)) {
-			current->ptrace_message = nr;
-			ptrace_notify ((trace << 8) | SIGTRAP);
+		} else {
+			wake_up_new_task(p, clone_flags);
 		}
 
+		tracehook_report_clone_complete(trace, regs,
+						clone_flags, nr, p);
+
 		if (clone_flags & CLONE_VFORK) {
 			freezer_do_not_count();
 			wait_for_completion(&vfork);
 			freezer_count();
-			if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
-				current->ptrace_message = nr;
-				ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
-			}
+			tracehook_report_vfork_done(p, nr);
 		}
 	} else {
 		nr = PTR_ERR(p);
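Note: the old single ptrace notification path is split into three hooks: tracehook_report_clone() runs before the child is woken (while PF_STARTING is still set), tracehook_report_clone_complete() runs after the child has been started, and tracehook_report_vfork_done() fires once a vfork child releases its parent. As a rough sketch, assuming the latter two simply repackage the ptrace_notify() sequences deleted above (the real inlines live in <linux/tracehook.h>):

#include <linux/sched.h>
#include <linux/ptrace.h>

/* Sketches only, derived from the ptrace_notify() calls removed in this
 * hunk; signatures match the call sites introduced by the patch. */
static inline void tracehook_report_clone_complete(int trace,
						   struct pt_regs *regs,
						   unsigned long clone_flags,
						   pid_t pid,
						   struct task_struct *child)
{
	if (unlikely(trace)) {
		current->ptrace_message = pid;
		ptrace_notify((trace << 8) | SIGTRAP);
	}
}

static inline void tracehook_report_vfork_done(struct task_struct *child,
					       pid_t pid)
{
	if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
		current->ptrace_message = pid;
		ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
	}
}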
@@ -1442,7 +1408,7 @@ long do_fork(unsigned long clone_flags,
 #define ARCH_MIN_MMSTRUCT_ALIGN	0
 #endif
 
-static void sighand_ctor(struct kmem_cache *cachep, void *data)
+static void sighand_ctor(void *data)
 {
 	struct sighand_struct *sighand = data;
 