Diffstat (limited to 'kernel/fork.c')
-rw-r--r--	kernel/fork.c	61
1 file changed, 32 insertions(+), 29 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 021e1138556e..1020977b57ca 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -49,6 +49,7 @@
 #include <linux/ftrace.h>
 #include <linux/profile.h>
 #include <linux/rmap.h>
+#include <linux/ksm.h>
 #include <linux/acct.h>
 #include <linux/tsacct_kern.h>
 #include <linux/cn_proc.h>
@@ -61,7 +62,7 @@
 #include <linux/blkdev.h>
 #include <linux/fs_struct.h>
 #include <linux/magic.h>
-#include <linux/perf_counter.h>
+#include <linux/perf_event.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -136,9 +137,17 @@ struct kmem_cache *vm_area_cachep;
 /* SLAB cache for mm_struct structures (tsk->mm) */
 static struct kmem_cache *mm_cachep;
 
+static void account_kernel_stack(struct thread_info *ti, int account)
+{
+	struct zone *zone = page_zone(virt_to_page(ti));
+
+	mod_zone_page_state(zone, NR_KERNEL_STACK, account);
+}
+
 void free_task(struct task_struct *tsk)
 {
 	prop_local_destroy_single(&tsk->dirties);
+	account_kernel_stack(tsk->stack, -1);
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
@@ -152,8 +161,7 @@ void __put_task_struct(struct task_struct *tsk)
 	WARN_ON(atomic_read(&tsk->usage));
 	WARN_ON(tsk == current);
 
-	put_cred(tsk->real_cred);
-	put_cred(tsk->cred);
+	exit_creds(tsk);
 	delayacct_tsk_free(tsk);
 
 	if (!profile_handoff_task(tsk))
@@ -254,6 +262,9 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	tsk->btrace_seq = 0;
 #endif
 	tsk->splice_pipe = NULL;
+
+	account_kernel_stack(ti, 1);
+
 	return tsk;
 
 out:
@@ -289,6 +300,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	rb_link = &mm->mm_rb.rb_node;
 	rb_parent = NULL;
 	pprev = &mm->mmap;
+	retval = ksm_fork(mm, oldmm);
+	if (retval)
+		goto out;
 
 	for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
 		struct file *file;
@@ -425,8 +439,8 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
 	INIT_LIST_HEAD(&mm->mmlist);
-	mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
-	mm->oom_adj = (current->mm) ? current->mm->oom_adj : 0;
+	mm->flags = (current->mm) ?
+		(current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
 	mm->core_state = NULL;
 	mm->nr_ptes = 0;
 	set_mm_counter(mm, file_rss, 0);
@@ -487,6 +501,7 @@ void mmput(struct mm_struct *mm)
 
 	if (atomic_dec_and_test(&mm->mm_users)) {
 		exit_aio(mm);
+		ksm_exit(mm);
 		exit_mmap(mm);
 		set_mm_exe_file(mm, NULL);
 		if (!list_empty(&mm->mmlist)) {
@@ -816,11 +831,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 
-	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->count);
-		atomic_inc(&current->signal->live);
+	if (clone_flags & CLONE_THREAD)
 		return 0;
-	}
 
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 	tsk->signal = sig;
@@ -868,6 +880,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 
 	tty_audit_fork(sig);
 
+	sig->oom_adj = current->signal->oom_adj;
+
 	return 0;
 }
 
@@ -878,16 +892,6 @@ void __cleanup_signal(struct signal_struct *sig)
 	kmem_cache_free(signal_cachep, sig);
 }
 
-static void cleanup_signal(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-
-	atomic_dec(&sig->live);
-
-	if (atomic_dec_and_test(&sig->count))
-		__cleanup_signal(sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1022,10 +1026,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	copy_flags(clone_flags, p);
 	INIT_LIST_HEAD(&p->children);
 	INIT_LIST_HEAD(&p->sibling);
-#ifdef CONFIG_PREEMPT_RCU
-	p->rcu_read_lock_nesting = 0;
-	p->rcu_flipctr_idx = 0;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+	rcu_copy_process(p);
 	p->vfork_done = NULL;
 	spin_lock_init(&p->alloc_lock);
 
@@ -1096,7 +1097,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	/* Perform scheduler related setup. Assign this task to a CPU. */
 	sched_fork(p, clone_flags);
 
-	retval = perf_counter_init_task(p);
+	retval = perf_event_init_task(p);
 	if (retval)
 		goto bad_fork_cleanup_policy;
 
@@ -1240,6 +1241,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (clone_flags & CLONE_THREAD) {
+		atomic_inc(&current->signal->count);
+		atomic_inc(&current->signal->live);
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 	}
@@ -1269,7 +1272,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
-	perf_counter_fork(p);
+	perf_event_fork(p);
 	return p;
 
 bad_fork_free_pid:
@@ -1283,7 +1286,8 @@ bad_fork_cleanup_mm:
 	if (p->mm)
 		mmput(p->mm);
 bad_fork_cleanup_signal:
-	cleanup_signal(p);
+	if (!(clone_flags & CLONE_THREAD))
+		__cleanup_signal(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
@@ -1295,7 +1299,7 @@ bad_fork_cleanup_semundo:
 bad_fork_cleanup_audit:
 	audit_free(p);
 bad_fork_cleanup_policy:
-	perf_counter_free_task(p);
+	perf_event_free_task(p);
 #ifdef CONFIG_NUMA
 	mpol_put(p->mempolicy);
 bad_fork_cleanup_cgroup:
@@ -1308,8 +1312,7 @@ bad_fork_cleanup_put_domain:
 	module_put(task_thread_info(p)->exec_domain->module);
 bad_fork_cleanup_count:
 	atomic_dec(&p->cred->user->processes);
-	put_cred(p->real_cred);
-	put_cred(p->cred);
+	exit_creds(p);
 bad_fork_free:
 	free_task(p);
 fork_out: