author		Ingo Molnar <mingo@elte.hu>	2008-12-29 03:45:15 -0500
committer	Ingo Molnar <mingo@elte.hu>	2008-12-29 03:45:15 -0500
commit		e1df957670aef74ffd9a4ad93e6d2c90bf6b4845 (patch)
tree		bca1fcfef55b3e3e82c9a822b4ac6428fce2b419 /kernel/fork.c
parent		2b583d8bc8d7105b58d7481a4a0ceb718dac49c6 (diff)
parent		3c92ec8ae91ecf59d88c798301833d7cf83f2179 (diff)

Merge branch 'linus' into perfcounters/core

Conflicts:
	fs/exec.c
	include/linux/init_task.h

Simple context conflicts.

Diffstat (limited to 'kernel/fork.c')
-rw-r--r--	kernel/fork.c	76

1 file changed, 40 insertions(+), 36 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index e2078608ef59..cb706599057f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -47,6 +47,7 @@
 #include <linux/mount.h>
 #include <linux/audit.h>
 #include <linux/memcontrol.h>
+#include <linux/ftrace.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
 #include <linux/acct.h>
@@ -80,6 +81,8 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
+DEFINE_TRACE(sched_process_fork);
+
 int nr_processes(void)
 {
 	int cpu;
@@ -137,6 +140,7 @@ void free_task(struct task_struct *tsk)
 	prop_local_destroy_single(&tsk->dirties);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
+	ftrace_graph_exit_task(tsk);
 	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -147,9 +151,8 @@ void __put_task_struct(struct task_struct *tsk)
 	WARN_ON(atomic_read(&tsk->usage));
 	WARN_ON(tsk == current);
 
-	security_task_free(tsk);
-	free_uid(tsk->user);
-	put_group_info(tsk->group_info);
+	put_cred(tsk->real_cred);
+	put_cred(tsk->cred);
 	delayacct_tsk_free(tsk);
 
 	if (!profile_handoff_task(tsk))
@@ -818,12 +821,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	if (!sig)
 		return -ENOMEM;
 
-	ret = copy_thread_group_keys(tsk);
-	if (ret < 0) {
-		kmem_cache_free(signal_cachep, sig);
-		return ret;
-	}
-
 	atomic_set(&sig->count, 1);
 	atomic_set(&sig->live, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
@@ -868,7 +865,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 void __cleanup_signal(struct signal_struct *sig)
 {
 	thread_group_cputime_free(sig);
-	exit_thread_group_keys(sig);
 	tty_kref_put(sig->tty);
 	kmem_cache_free(signal_cachep, sig);
 }
@@ -985,16 +981,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
 #endif
 	retval = -EAGAIN;
-	if (atomic_read(&p->user->processes) >=
+	if (atomic_read(&p->real_cred->user->processes) >=
 			p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
 		if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
-		    p->user != current->nsproxy->user_ns->root_user)
+		    p->real_cred->user != INIT_USER)
 			goto bad_fork_free;
 	}
 
-	atomic_inc(&p->user->__count);
-	atomic_inc(&p->user->processes);
-	get_group_info(p->group_info);
+	retval = copy_creds(p, clone_flags);
+	if (retval < 0)
+		goto bad_fork_free;
 
 	/*
 	 * If multiple threads are within copy_process(), then this check
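
Illustrative aside (not part of the diff): the hunk above switches the RLIMIT_NPROC check to the new p->real_cred->user->processes counter and replaces the open-coded user/group refcounting with copy_creds(). A minimal user-space sketch, purely hypothetical, that merely reads the per-user process limit the kernel consults at this point:

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
    	struct rlimit rl;

    	/* RLIMIT_NPROC is the per-user process limit that copy_process()
    	 * compares against the owning user's process count before forking. */
    	if (getrlimit(RLIMIT_NPROC, &rl) != 0) {
    		perror("getrlimit");
    		return 1;
    	}
    	printf("RLIMIT_NPROC: soft=%llu hard=%llu\n",
    	       (unsigned long long)rl.rlim_cur,
    	       (unsigned long long)rl.rlim_max);
    	return 0;
    }
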
@@ -1049,10 +1045,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	do_posix_clock_monotonic_gettime(&p->start_time);
 	p->real_start_time = p->start_time;
 	monotonic_to_bootbased(&p->real_start_time);
-#ifdef CONFIG_SECURITY
-	p->security = NULL;
-#endif
-	p->cap_bset = current->cap_bset;
 	p->io_context = NULL;
 	p->audit_context = NULL;
 	cgroup_fork(p);
@@ -1093,14 +1085,14 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
+	if (unlikely(ptrace_reparented(current)))
+		ptrace_fork(p, clone_flags);
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
 
-	if ((retval = security_task_alloc(p)))
-		goto bad_fork_cleanup_policy;
 	if ((retval = audit_alloc(p)))
-		goto bad_fork_cleanup_security;
+		goto bad_fork_cleanup_policy;
 	/* copy all the process information */
 	if ((retval = copy_semundo(clone_flags, p)))
 		goto bad_fork_cleanup_audit;
@@ -1114,10 +1106,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_sighand;
 	if ((retval = copy_mm(clone_flags, p)))
 		goto bad_fork_cleanup_signal;
-	if ((retval = copy_keys(clone_flags, p)))
-		goto bad_fork_cleanup_mm;
 	if ((retval = copy_namespaces(clone_flags, p)))
-		goto bad_fork_cleanup_keys;
+		goto bad_fork_cleanup_mm;
 	if ((retval = copy_io(clone_flags, p)))
 		goto bad_fork_cleanup_namespaces;
 	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
@@ -1137,6 +1127,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		}
 	}
 
+	ftrace_graph_init_task(p);
+
 	p->pid = pid_nr(pid);
 	p->tgid = p->pid;
 	if (clone_flags & CLONE_THREAD)
@@ -1145,7 +1137,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	if (current->nsproxy != p->nsproxy) {
 		retval = ns_cgroup_clone(p, pid);
 		if (retval)
-			goto bad_fork_free_pid;
+			goto bad_fork_free_graph;
 	}
 
 	p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
@@ -1238,7 +1230,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
 		retval = -ERESTARTNOINTR;
-		goto bad_fork_free_pid;
+		goto bad_fork_free_graph;
 	}
 
 	if (clone_flags & CLONE_THREAD) {
@@ -1275,6 +1267,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	cgroup_post_fork(p);
 	return p;
 
+bad_fork_free_graph:
+	ftrace_graph_exit_task(p);
 bad_fork_free_pid:
 	if (pid != &init_struct_pid)
 		free_pid(pid);
@@ -1282,8 +1276,6 @@ bad_fork_cleanup_io:
 	put_io_context(p->io_context);
 bad_fork_cleanup_namespaces:
 	exit_task_namespaces(p);
-bad_fork_cleanup_keys:
-	exit_keys(p);
 bad_fork_cleanup_mm:
 	if (p->mm)
 		mmput(p->mm);
@@ -1299,8 +1291,6 @@ bad_fork_cleanup_semundo:
 	exit_sem(p);
 bad_fork_cleanup_audit:
 	audit_free(p);
-bad_fork_cleanup_security:
-	security_task_free(p);
 bad_fork_cleanup_policy:
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
@@ -1313,9 +1303,9 @@ bad_fork_cleanup_cgroup:
 bad_fork_cleanup_put_domain:
 	module_put(task_thread_info(p)->exec_domain->module);
 bad_fork_cleanup_count:
-	put_group_info(p->group_info);
-	atomic_dec(&p->user->processes);
-	free_uid(p->user);
+	atomic_dec(&p->cred->user->processes);
+	put_cred(p->real_cred);
+	put_cred(p->cred);
 bad_fork_free:
 	free_task(p);
 fork_out:
@@ -1359,6 +1349,21 @@ long do_fork(unsigned long clone_flags,
 	long nr;
 
 	/*
+	 * Do some preliminary argument and permissions checking before we
+	 * actually start allocating stuff
+	 */
+	if (clone_flags & CLONE_NEWUSER) {
+		if (clone_flags & CLONE_THREAD)
+			return -EINVAL;
+		/* hopefully this check will go away when userns support is
+		 * complete
+		 */
+		if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
+		    !capable(CAP_SETGID))
+			return -EPERM;
+	}
+
+	/*
 	 * We hope to recycle these flags after 2.6.26
 	 */
 	if (unlikely(clone_flags & CLONE_STOPPED)) {
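
Illustrative aside (not part of the diff): the block added above rejects CLONE_NEWUSER combined with CLONE_THREAD with -EINVAL and, until user-namespace support is complete, requires CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID, otherwise returning -EPERM. A minimal user-space sketch that exercises this path; the program is hypothetical and the errno you see depends on the caller's capabilities:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <errno.h>
    #include <sys/wait.h>

    static int child_fn(void *arg)
    {
    	(void)arg;
    	/* Runs inside the new user namespace if clone() succeeded. */
    	return 0;
    }

    int main(void)
    {
    	const size_t stack_size = 1024 * 1024;
    	char *stack = malloc(stack_size);
    	pid_t pid;

    	if (!stack) {
    		perror("malloc");
    		return 1;
    	}

    	/* Without CAP_SYS_ADMIN, CAP_SETUID and CAP_SETGID this kernel
    	 * returns EPERM; adding CLONE_THREAD here would return EINVAL. */
    	pid = clone(child_fn, stack + stack_size,
    		    CLONE_NEWUSER | SIGCHLD, NULL);
    	if (pid == -1)
    		printf("clone(CLONE_NEWUSER): %s\n", strerror(errno));
    	else
    		waitpid(pid, NULL, 0);

    	free(stack);
    	return 0;
    }
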
@@ -1606,8 +1611,7 @@ asmlinkage long sys_unshare(unsigned long unshare_flags)
 	err = -EINVAL;
 	if (unshare_flags & ~(CLONE_THREAD|CLONE_FS|CLONE_NEWNS|CLONE_SIGHAND|
 				CLONE_VM|CLONE_FILES|CLONE_SYSVSEM|
-				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWUSER|
-				CLONE_NEWNET))
+				CLONE_NEWUTS|CLONE_NEWIPC|CLONE_NEWNET))
 		goto bad_unshare_out;
 
 	/*
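
Illustrative aside (not part of the diff): with CLONE_NEWUSER dropped from the accepted flag mask, unshare(2) on this kernel refuses the flag with -EINVAL, leaving clone() as the only way to create a user namespace. A minimal user-space sketch, hypothetical and specific to kernels of this vintage:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    int main(void)
    {
    	/* After this change CLONE_NEWUSER is no longer in sys_unshare()'s
    	 * flag mask, so this call is expected to fail with EINVAL. */
    	if (unshare(CLONE_NEWUSER) == -1)
    		printf("unshare(CLONE_NEWUSER): %s\n", strerror(errno));
    	else
    		printf("unshare(CLONE_NEWUSER) succeeded\n");
    	return 0;
    }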