diff options
author | Robert Richter <robert.richter@amd.com> | 2010-06-04 05:33:10 -0400 |
---|---|---|
committer | Robert Richter <robert.richter@amd.com> | 2010-06-04 05:33:10 -0400 |
commit | d8a382d2662822248a97ce9d670b90e68aefbd3a (patch) | |
tree | 4f5bbd5d0a5881ed42de611402ea4ac2c6d6ff48 /kernel/fork.c | |
parent | 45c34e05c4e3d36e7c44e790241ea11a1d90d54e (diff) | |
parent | c6df8d5ab87a246942d138321e1721edbb69f6e1 (diff) |
Merge remote branch 'tip/perf/urgent' into oprofile/urgent
Diffstat (limited to 'kernel/fork.c')
-rw-r--r-- | kernel/fork.c | 55 |
1 files changed, 31 insertions, 24 deletions
diff --git a/kernel/fork.c b/kernel/fork.c index 44b0791b0a2e..b6cce14ba047 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -165,6 +165,18 @@ void free_task(struct task_struct *tsk) | |||
165 | } | 165 | } |
166 | EXPORT_SYMBOL(free_task); | 166 | EXPORT_SYMBOL(free_task); |
167 | 167 | ||
168 | static inline void free_signal_struct(struct signal_struct *sig) | ||
169 | { | ||
170 | taskstats_tgid_free(sig); | ||
171 | kmem_cache_free(signal_cachep, sig); | ||
172 | } | ||
173 | |||
174 | static inline void put_signal_struct(struct signal_struct *sig) | ||
175 | { | ||
176 | if (atomic_dec_and_test(&sig->sigcnt)) | ||
177 | free_signal_struct(sig); | ||
178 | } | ||
179 | |||
168 | void __put_task_struct(struct task_struct *tsk) | 180 | void __put_task_struct(struct task_struct *tsk) |
169 | { | 181 | { |
170 | WARN_ON(!tsk->exit_state); | 182 | WARN_ON(!tsk->exit_state); |
@@ -173,6 +185,7 @@ void __put_task_struct(struct task_struct *tsk) | |||
173 | 185 | ||
174 | exit_creds(tsk); | 186 | exit_creds(tsk); |
175 | delayacct_tsk_free(tsk); | 187 | delayacct_tsk_free(tsk); |
188 | put_signal_struct(tsk->signal); | ||
176 | 189 | ||
177 | if (!profile_handoff_task(tsk)) | 190 | if (!profile_handoff_task(tsk)) |
178 | free_task(tsk); | 191 | free_task(tsk); |
@@ -864,8 +877,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
864 | if (!sig) | 877 | if (!sig) |
865 | return -ENOMEM; | 878 | return -ENOMEM; |
866 | 879 | ||
867 | atomic_set(&sig->count, 1); | 880 | sig->nr_threads = 1; |
868 | atomic_set(&sig->live, 1); | 881 | atomic_set(&sig->live, 1); |
882 | atomic_set(&sig->sigcnt, 1); | ||
869 | init_waitqueue_head(&sig->wait_chldexit); | 883 | init_waitqueue_head(&sig->wait_chldexit); |
870 | if (clone_flags & CLONE_NEWPID) | 884 | if (clone_flags & CLONE_NEWPID) |
871 | sig->flags |= SIGNAL_UNKILLABLE; | 885 | sig->flags |= SIGNAL_UNKILLABLE; |
@@ -889,13 +903,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
889 | return 0; | 903 | return 0; |
890 | } | 904 | } |
891 | 905 | ||
892 | void __cleanup_signal(struct signal_struct *sig) | ||
893 | { | ||
894 | thread_group_cputime_free(sig); | ||
895 | tty_kref_put(sig->tty); | ||
896 | kmem_cache_free(signal_cachep, sig); | ||
897 | } | ||
898 | |||
899 | static void copy_flags(unsigned long clone_flags, struct task_struct *p) | 906 | static void copy_flags(unsigned long clone_flags, struct task_struct *p) |
900 | { | 907 | { |
901 | unsigned long new_flags = p->flags; | 908 | unsigned long new_flags = p->flags; |
@@ -1112,10 +1119,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1112 | p->memcg_batch.memcg = NULL; | 1119 | p->memcg_batch.memcg = NULL; |
1113 | #endif | 1120 | #endif |
1114 | 1121 | ||
1115 | p->bts = NULL; | ||
1116 | |||
1117 | p->stack_start = stack_start; | ||
1118 | |||
1119 | /* Perform scheduler related setup. Assign this task to a CPU. */ | 1122 | /* Perform scheduler related setup. Assign this task to a CPU. */ |
1120 | sched_fork(p, clone_flags); | 1123 | sched_fork(p, clone_flags); |
1121 | 1124 | ||
@@ -1249,8 +1252,9 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1249 | } | 1252 | } |
1250 | 1253 | ||
1251 | if (clone_flags & CLONE_THREAD) { | 1254 | if (clone_flags & CLONE_THREAD) { |
1252 | atomic_inc(&current->signal->count); | 1255 | current->signal->nr_threads++; |
1253 | atomic_inc(&current->signal->live); | 1256 | atomic_inc(&current->signal->live); |
1257 | atomic_inc(&current->signal->sigcnt); | ||
1254 | p->group_leader = current->group_leader; | 1258 | p->group_leader = current->group_leader; |
1255 | list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); | 1259 | list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); |
1256 | } | 1260 | } |
@@ -1263,7 +1267,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1263 | p->nsproxy->pid_ns->child_reaper = p; | 1267 | p->nsproxy->pid_ns->child_reaper = p; |
1264 | 1268 | ||
1265 | p->signal->leader_pid = pid; | 1269 | p->signal->leader_pid = pid; |
1266 | tty_kref_put(p->signal->tty); | ||
1267 | p->signal->tty = tty_kref_get(current->signal->tty); | 1270 | p->signal->tty = tty_kref_get(current->signal->tty); |
1268 | attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); | 1271 | attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); |
1269 | attach_pid(p, PIDTYPE_SID, task_session(current)); | 1272 | attach_pid(p, PIDTYPE_SID, task_session(current)); |
@@ -1296,7 +1299,7 @@ bad_fork_cleanup_mm: | |||
1296 | mmput(p->mm); | 1299 | mmput(p->mm); |
1297 | bad_fork_cleanup_signal: | 1300 | bad_fork_cleanup_signal: |
1298 | if (!(clone_flags & CLONE_THREAD)) | 1301 | if (!(clone_flags & CLONE_THREAD)) |
1299 | __cleanup_signal(p->signal); | 1302 | free_signal_struct(p->signal); |
1300 | bad_fork_cleanup_sighand: | 1303 | bad_fork_cleanup_sighand: |
1301 | __cleanup_sighand(p->sighand); | 1304 | __cleanup_sighand(p->sighand); |
1302 | bad_fork_cleanup_fs: | 1305 | bad_fork_cleanup_fs: |
@@ -1331,6 +1334,16 @@ noinline struct pt_regs * __cpuinit __attribute__((weak)) idle_regs(struct pt_re | |||
1331 | return regs; | 1334 | return regs; |
1332 | } | 1335 | } |
1333 | 1336 | ||
1337 | static inline void init_idle_pids(struct pid_link *links) | ||
1338 | { | ||
1339 | enum pid_type type; | ||
1340 | |||
1341 | for (type = PIDTYPE_PID; type < PIDTYPE_MAX; ++type) { | ||
1342 | INIT_HLIST_NODE(&links[type].node); /* not really needed */ | ||
1343 | links[type].pid = &init_struct_pid; | ||
1344 | } | ||
1345 | } | ||
1346 | |||
1334 | struct task_struct * __cpuinit fork_idle(int cpu) | 1347 | struct task_struct * __cpuinit fork_idle(int cpu) |
1335 | { | 1348 | { |
1336 | struct task_struct *task; | 1349 | struct task_struct *task; |
@@ -1338,8 +1351,10 @@ struct task_struct * __cpuinit fork_idle(int cpu) | |||
1338 | 1351 | ||
1339 | task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, | 1352 | task = copy_process(CLONE_VM, 0, idle_regs(&regs), 0, NULL, |
1340 | &init_struct_pid, 0); | 1353 | &init_struct_pid, 0); |
1341 | if (!IS_ERR(task)) | 1354 | if (!IS_ERR(task)) { |
1355 | init_idle_pids(task->pids); | ||
1342 | init_idle(task, cpu); | 1356 | init_idle(task, cpu); |
1357 | } | ||
1343 | 1358 | ||
1344 | return task; | 1359 | return task; |
1345 | } | 1360 | } |
@@ -1511,14 +1526,6 @@ static void check_unshare_flags(unsigned long *flags_ptr) | |||
1511 | *flags_ptr |= CLONE_SIGHAND; | 1526 | *flags_ptr |= CLONE_SIGHAND; |
1512 | 1527 | ||
1513 | /* | 1528 | /* |
1514 | * If unsharing signal handlers and the task was created | ||
1515 | * using CLONE_THREAD, then must unshare the thread | ||
1516 | */ | ||
1517 | if ((*flags_ptr & CLONE_SIGHAND) && | ||
1518 | (atomic_read(&current->signal->count) > 1) | ||
1519 | *flags_ptr |= CLONE_THREAD; | ||
1520 | |||
1521 | /* | ||
1522 | * If unsharing namespace, must also unshare filesystem information. | 1529 | * If unsharing namespace, must also unshare filesystem information. |
1523 | */ | 1530 | */ |
1524 | if (*flags_ptr & CLONE_NEWNS) | 1531 | if (*flags_ptr & CLONE_NEWNS) |