Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c  101
1 file changed, 35 insertions(+), 66 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index b99d73e971a4..7ce2ebe84796 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -27,6 +27,7 @@
 #include <linux/key.h>
 #include <linux/binfmts.h>
 #include <linux/mman.h>
+#include <linux/mmu_notifier.h>
 #include <linux/fs.h>
 #include <linux/nsproxy.h>
 #include <linux/capability.h>
@@ -37,6 +38,7 @@
 #include <linux/swap.h>
 #include <linux/syscalls.h>
 #include <linux/jiffies.h>
+#include <linux/tracehook.h>
 #include <linux/futex.h>
 #include <linux/task_io_accounting_ops.h>
 #include <linux/rcupdate.h>
@@ -413,6 +415,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 
         if (likely(!mm_alloc_pgd(mm))) {
                 mm->def_flags = 0;
+                mmu_notifier_mm_init(mm);
                 return mm;
         }
 
@@ -445,6 +448,7 @@ void __mmdrop(struct mm_struct *mm)
         BUG_ON(mm == &init_mm);
         mm_free_pgd(mm);
         destroy_context(mm);
+        mmu_notifier_mm_destroy(mm);
         free_mm(mm);
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
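
The two mmu_notifier_mm_* calls added above bracket the lifetime of an mm's notifier state: initialisation right after the page tables are allocated in mm_init(), teardown in __mmdrop() once the last reference is gone. A rough sketch of their shape, assuming the CONFIG_MMU_NOTIFIER variants from <linux/mmu_notifier.h> of this era (paraphrased, not the verbatim tree):

/* Sketch only -- approximate shape of the hooks called in the hunks above. */
static inline void mmu_notifier_mm_init(struct mm_struct *mm)
{
        /* A freshly created mm starts with no registered notifiers. */
        mm->mmu_notifier_mm = NULL;
}

static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
{
        /* Release notifier bookkeeping once the mm is finally dropped. */
        if (mm_has_notifiers(mm))
                __mmu_notifier_mm_destroy(mm);
}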
@@ -656,13 +660,6 @@ static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
                 path_get(&old->root);
                 fs->pwd = old->pwd;
                 path_get(&old->pwd);
-                if (old->altroot.dentry) {
-                        fs->altroot = old->altroot;
-                        path_get(&old->altroot);
-                } else {
-                        fs->altroot.mnt = NULL;
-                        fs->altroot.dentry = NULL;
-                }
                 read_unlock(&old->lock);
         }
         return fs;
@@ -812,12 +809,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
         sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
         sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
         sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
-#ifdef CONFIG_TASK_XACCT
-        sig->rchar = sig->wchar = sig->syscr = sig->syscw = 0;
-#endif
-#ifdef CONFIG_TASK_IO_ACCOUNTING
-        memset(&sig->ioac, 0, sizeof(sig->ioac));
-#endif
+        task_io_accounting_init(&sig->ioac);
         sig->sum_sched_runtime = 0;
         INIT_LIST_HEAD(&sig->cpu_timers[0]);
         INIT_LIST_HEAD(&sig->cpu_timers[1]);
@@ -865,8 +857,7 @@ static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 
         new_flags &= ~PF_SUPERPRIV;
         new_flags |= PF_FORKNOEXEC;
-        if (!(clone_flags & CLONE_PTRACE))
-                p->ptrace = 0;
+        new_flags |= PF_STARTING;
         p->flags = new_flags;
         clear_freeze_flag(p);
 }
@@ -907,7 +898,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                                         struct pt_regs *regs,
                                         unsigned long stack_size,
                                         int __user *child_tidptr,
-                                        struct pid *pid)
+                                        struct pid *pid,
+                                        int trace)
 {
         int retval;
         struct task_struct *p;
@@ -1000,13 +992,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         p->last_switch_timestamp = 0;
 #endif
 
-#ifdef CONFIG_TASK_XACCT
-        p->rchar = 0;                /* I/O counter: bytes read */
-        p->wchar = 0;                /* I/O counter: bytes written */
-        p->syscr = 0;                /* I/O counter: read syscalls */
-        p->syscw = 0;                /* I/O counter: write syscalls */
-#endif
-        task_io_accounting_init(p);
+        task_io_accounting_init(&p->ioac);
         acct_clear_integrals(p);
 
         p->it_virt_expires = cputime_zero;
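
The task_io_accounting_init() calls in this hunk and in copy_signal() above replace the open-coded #ifdef blocks: the CONFIG_TASK_XACCT byte/syscall counters and the CONFIG_TASK_IO_ACCOUNTING block counters are now carried in one struct task_io_accounting, embedded as p->ioac and sig->ioac. A minimal sketch of what the helper amounts to, assuming the consolidated structure only needs zeroing:

/* Sketch only -- zero the consolidated counters that used to be reset
 * field by field under CONFIG_TASK_XACCT / CONFIG_TASK_IO_ACCOUNTING. */
static inline void task_io_accounting_init(struct task_io_accounting *ioac)
{
        memset(ioac, 0, sizeof(*ioac));
}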
@@ -1163,8 +1149,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
          */
         p->group_leader = p;
         INIT_LIST_HEAD(&p->thread_group);
-        INIT_LIST_HEAD(&p->ptrace_entry);
-        INIT_LIST_HEAD(&p->ptraced);
 
         /* Now that the task is set up, run cgroup callbacks if
          * necessary. We need to run them before the task is visible
@@ -1195,7 +1179,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
                 p->real_parent = current->real_parent;
         else
                 p->real_parent = current;
-        p->parent = p->real_parent;
 
         spin_lock(&current->sighand->siglock);
 
@@ -1237,8 +1220,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
         if (likely(p->pid)) {
                 list_add_tail(&p->sibling, &p->real_parent->children);
-                if (unlikely(p->ptrace & PT_PTRACED))
-                        __ptrace_link(p, current->parent);
+                tracehook_finish_clone(p, clone_flags, trace);
 
                 if (thread_group_leader(p)) {
                         if (clone_flags & CLONE_NEWPID)
@@ -1323,29 +1305,13 @@ struct task_struct * __cpuinit fork_idle(int cpu)
         struct pt_regs regs;
 
         task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL,
-                            &init_struct_pid);
+                            &init_struct_pid, 0);
         if (!IS_ERR(task))
                 init_idle(task, cpu);
 
         return task;
 }
 
-static int fork_traceflag(unsigned clone_flags)
-{
-        if (clone_flags & CLONE_UNTRACED)
-                return 0;
-        else if (clone_flags & CLONE_VFORK) {
-                if (current->ptrace & PT_TRACE_VFORK)
-                        return PTRACE_EVENT_VFORK;
-        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
-                if (current->ptrace & PT_TRACE_CLONE)
-                        return PTRACE_EVENT_CLONE;
-        } else if (current->ptrace & PT_TRACE_FORK)
-                return PTRACE_EVENT_FORK;
-
-        return 0;
-}
-
 /*
  * Ok, this is the main fork-routine.
  *
@@ -1380,14 +1346,14 @@ long do_fork(unsigned long clone_flags,
                 }
         }
 
-        if (unlikely(current->ptrace)) {
-                trace = fork_traceflag (clone_flags);
-                if (trace)
-                        clone_flags |= CLONE_PTRACE;
-        }
+        /*
+         * When called from kernel_thread, don't do user tracing stuff.
+         */
+        if (likely(user_mode(regs)))
+                trace = tracehook_prepare_clone(clone_flags);
 
         p = copy_process(clone_flags, stack_start, regs, stack_size,
-                        child_tidptr, NULL);
+                         child_tidptr, NULL, trace);
         /*
          * Do this prior waking up the new thread - the thread pointer
          * might get invalid after that point, if the thread exits quickly.
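
tracehook_prepare_clone(), called above, takes over the job of the deleted fork_traceflag(): map the clone flags to the ptrace event the tracer asked for, or return 0 when CLONE_UNTRACED is set or the parent is not being traced. A sketch of the equivalent logic, lifted from the removed helper rather than quoted from <linux/tracehook.h>:

/* Sketch only -- the same decision fork_traceflag() used to make,
 * now expected behind tracehook_prepare_clone(). */
static inline int tracehook_prepare_clone(unsigned clone_flags)
{
        if (clone_flags & CLONE_UNTRACED)
                return 0;

        if (clone_flags & CLONE_VFORK) {
                if (current->ptrace & PT_TRACE_VFORK)
                        return PTRACE_EVENT_VFORK;
        } else if ((clone_flags & CSIGNAL) != SIGCHLD) {
                if (current->ptrace & PT_TRACE_CLONE)
                        return PTRACE_EVENT_CLONE;
        } else if (current->ptrace & PT_TRACE_FORK) {
                return PTRACE_EVENT_FORK;
        }

        return 0;
}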
@@ -1405,32 +1371,35 @@ long do_fork(unsigned long clone_flags,
                         init_completion(&vfork);
                 }
 
-                if ((p->ptrace & PT_PTRACED) || (clone_flags & CLONE_STOPPED)) {
+                tracehook_report_clone(trace, regs, clone_flags, nr, p);
+
+                /*
+                 * We set PF_STARTING at creation in case tracing wants to
+                 * use this to distinguish a fully live task from one that
+                 * hasn't gotten to tracehook_report_clone() yet.  Now we
+                 * clear it and set the child going.
+                 */
+                p->flags &= ~PF_STARTING;
+
+                if (unlikely(clone_flags & CLONE_STOPPED)) {
                         /*
                          * We'll start up with an immediate SIGSTOP.
                          */
                         sigaddset(&p->pending.signal, SIGSTOP);
                         set_tsk_thread_flag(p, TIF_SIGPENDING);
-                }
-
-                if (!(clone_flags & CLONE_STOPPED))
-                        wake_up_new_task(p, clone_flags);
-                else
                         __set_task_state(p, TASK_STOPPED);
-
-                if (unlikely (trace)) {
-                        current->ptrace_message = nr;
-                        ptrace_notify ((trace << 8) | SIGTRAP);
+                } else {
+                        wake_up_new_task(p, clone_flags);
                 }
 
+                tracehook_report_clone_complete(trace, regs,
+                                                clone_flags, nr, p);
+
                 if (clone_flags & CLONE_VFORK) {
                         freezer_do_not_count();
                         wait_for_completion(&vfork);
                         freezer_count();
-                        if (unlikely (current->ptrace & PT_TRACE_VFORK_DONE)) {
-                                current->ptrace_message = nr;
-                                ptrace_notify ((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
-                        }
+                        tracehook_report_vfork_done(p, nr);
                 }
         } else {
                 nr = PTR_ERR(p);
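
The three tracehook_report_*() calls above absorb notifications that were previously open-coded with ptrace_notify(): telling the tracer the new child's pid right after copy_process(), again once the child has been set running, and once more when a vforked child releases its parent. A hedged sketch of roughly what they stand in for, reconstructed from the deleted lines (the helper names below are illustrative, not the kernel's):

/* Sketch only -- the old inline notifications removed above, which the
 * tracehook_report_clone{,_complete}() and tracehook_report_vfork_done()
 * wrappers are expected to encapsulate. */
static inline void report_clone_event(int trace, pid_t nr)
{
        if (unlikely(trace)) {
                current->ptrace_message = nr;
                ptrace_notify((trace << 8) | SIGTRAP);
        }
}

static inline void report_vfork_done_event(pid_t nr)
{
        if (unlikely(current->ptrace & PT_TRACE_VFORK_DONE)) {
                current->ptrace_message = nr;
                ptrace_notify((PTRACE_EVENT_VFORK_DONE << 8) | SIGTRAP);
        }
}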
@@ -1442,7 +1411,7 @@ long do_fork(unsigned long clone_flags,
 #define ARCH_MIN_MMSTRUCT_ALIGN 0
 #endif
 
-static void sighand_ctor(struct kmem_cache *cachep, void *data)
+static void sighand_ctor(void *data)
 {
         struct sighand_struct *sighand = data;
 