Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c  46  +++++++++++++++++++++++++++-------------------
1 file changed, 27 insertions(+), 19 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 875ffbdd96d0..467746b3f0aa 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -61,8 +61,8 @@
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
 #include <linux/fs_struct.h>
-#include <trace/sched.h>
 #include <linux/magic.h>
+#include <linux/perf_counter.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -71,6 +71,8 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
+#include <trace/events/sched.h>
+
 /*
  * Protected counters by write_lock_irq(&tasklist_lock)
  */
@@ -83,8 +85,6 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
-DEFINE_TRACE(sched_process_fork);
-
 int nr_processes(void)
 {
 	int cpu;
@@ -178,7 +178,7 @@ void __init fork_init(unsigned long mempages)
 	/* create a slab on which task_structs can be allocated */
 	task_struct_cachep =
 		kmem_cache_create("task_struct", sizeof(struct task_struct),
-			ARCH_MIN_TASKALIGN, SLAB_PANIC, NULL);
+			ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
 #endif
 
 	/* do the arch specific task caches init */
@@ -982,6 +982,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (!p)
 		goto fork_out;
 
+	ftrace_graph_init_task(p);
+
 	rt_mutex_init_task(p);
 
 #ifdef CONFIG_PROVE_LOCKING
@@ -1027,7 +1029,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->vfork_done = NULL;
 	spin_lock_init(&p->alloc_lock);
 
-	clear_tsk_thread_flag(p, TIF_SIGPENDING);
 	init_sigpending(&p->pending);
 
 	p->utime = cputime_zero;
@@ -1089,12 +1090,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
-	if (unlikely(current->ptrace))
-		ptrace_fork(p, clone_flags);
+
+	p->bts = NULL;
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
 
+	retval = perf_counter_init_task(p);
+	if (retval)
+		goto bad_fork_cleanup_policy;
+
 	if ((retval = audit_alloc(p)))
 		goto bad_fork_cleanup_policy;
 	/* copy all the process information */
@@ -1131,8 +1136,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		}
 	}
 
-	ftrace_graph_init_task(p);
-
 	p->pid = pid_nr(pid);
 	p->tgid = p->pid;
 	if (clone_flags & CLONE_THREAD)
@@ -1141,7 +1144,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (current->nsproxy != p->nsproxy) {
 		retval = ns_cgroup_clone(p, pid);
 		if (retval)
-			goto bad_fork_free_graph;
+			goto bad_fork_free_pid;
 	}
 
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1233,7 +1236,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_graph;
+		goto bad_fork_free_pid;
 	}
 
 	if (clone_flags & CLONE_THREAD) {
@@ -1268,8 +1271,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	cgroup_post_fork(p);
 	return p;
 
-bad_fork_free_graph:
-	ftrace_graph_exit_task(p);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
@@ -1293,6 +1294,7 @@ bad_fork_cleanup_semundo:
 bad_fork_cleanup_audit:
 	audit_free(p);
 bad_fork_cleanup_policy:
+	perf_counter_free_task(p);
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
@@ -1406,6 +1408,12 @@ long do_fork(unsigned long clone_flags,
 	if (clone_flags & CLONE_VFORK) {
 		p->vfork_done = &vfork;
 		init_completion(&vfork);
+	} else if (!(clone_flags & CLONE_VM)) {
+		/*
+		 * vfork will do an exec which will call
+		 * set_task_comm()
+		 */
+		perf_counter_fork(p);
 	}
 
 	audit_finish_fork(p);
@@ -1461,20 +1469,20 @@ void __init proc_caches_init(void)
 {
 	sighand_cachep = kmem_cache_create("sighand_cache",
 			sizeof(struct sighand_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU,
-			sighand_ctor);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_DESTROY_BY_RCU|
+			SLAB_NOTRACK, sighand_ctor);
 	signal_cachep = kmem_cache_create("signal_cache",
 			sizeof(struct signal_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	files_cachep = kmem_cache_create("files_cache",
 			sizeof(struct files_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	fs_cachep = kmem_cache_create("fs_cache",
 			sizeof(struct fs_struct), 0,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
-			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+			SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
 	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
 	mmap_init();
 }
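
Taken together, the copy_process() hunks above make three related changes: ftrace_graph_init_task() moves ahead of every failure path (so the dedicated bad_fork_free_graph label can be dropped in favour of bad_fork_free_pid), the ptrace_fork() hook is replaced by clearing p->bts, and per-task perf counter state is initialized right after sched_fork() and torn down at bad_fork_cleanup_policy. The condensed sketch below shows only the resulting control flow; it is an illustration, not the kernel source, and copy_process_sketch is a hypothetical name with bodies and most setup steps elided.

/*
 * Condensed sketch of the copy_process() ordering the diff produces.
 * Illustrative only: not the actual kernel function.
 */
static struct task_struct *copy_process_sketch(unsigned long clone_flags)
{
	struct task_struct *p;
	int retval = -ENOMEM;

	p = dup_task_struct(current);
	if (!p)
		goto fork_out;

	/*
	 * Moved up: graph-tracer state is valid before any error path
	 * can run, so no separate bad_fork_free_graph label (or
	 * ftrace_graph_exit_task() call) is needed any more.
	 */
	ftrace_graph_init_task(p);

	p->bts = NULL;		/* replaces the old ptrace_fork() hook */

	sched_fork(p, clone_flags);

	/* New: per-task perf counter state, right after scheduler setup. */
	retval = perf_counter_init_task(p);
	if (retval)
		goto bad_fork_cleanup_policy;

	/*
	 * ... audit_alloc(), the copy_*() calls, and pid setup follow;
	 * any failure from here on lands on a cleanup label below ...
	 */
	return p;

bad_fork_cleanup_policy:
	/* New: undoes perf_counter_init_task() on every error path. */
	perf_counter_free_task(p);
	/* ... remaining cleanup labels, ending in free_task(p) ... */
fork_out:
	return ERR_PTR(retval);
}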