path: root/kernel/fork.c
author	Len Brown <len.brown@intel.com>	2009-04-05 02:14:15 -0400
committer	Len Brown <len.brown@intel.com>	2009-04-05 02:14:15 -0400
commit	478c6a43fcbc6c11609f8cee7c7b57223907754f (patch)
tree	a7f7952099da60d33032aed6de9c0c56c9f8779e /kernel/fork.c
parent	8a3f257c704e02aee9869decd069a806b45be3f1 (diff)
parent	6bb597507f9839b13498781e481f5458aea33620 (diff)
Merge branch 'linus' into release

Conflicts:
	arch/x86/kernel/cpu/cpufreq/longhaul.c

Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--	kernel/fork.c	77
1 file changed, 38 insertions(+), 39 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 4854c2c4a82e..660c2b8765bc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -60,7 +60,9 @@
 #include <linux/tty.h>
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
+#include <linux/fs_struct.h>
 #include <trace/sched.h>
+#include <linux/magic.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -212,6 +214,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
+	unsigned long *stackend;
+
 	int err;
 
 	prepare_to_copy(orig);
@@ -237,6 +241,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 		goto out;
 
 	setup_thread_stack(tsk, orig);
+	stackend = end_of_stack(tsk);
+	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 	tsk->stack_canary = get_random_int();
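
Note: the two added lines in dup_task_struct() plant a sentinel word at the lowest address of the new task's stack (STACK_END_MAGIC comes from the newly included <linux/magic.h>). If the stack ever grows past its end, the sentinel is destroyed, and later checks can report the overrun instead of silently corrupting the thread_info below it. A minimal sketch of such a consumer, modeled on the checks the kernel grew around this time (the helper name and call site here are illustrative, not quoted from the tree):

	/* Sketch: detect that a task overran its stack at some point. */
	static void check_stack_sentinel(struct task_struct *tsk)
	{
		unsigned long *stackend = end_of_stack(tsk);

		if (unlikely(*stackend != STACK_END_MAGIC))
			printk(KERN_EMERG "%s: thread stack overrun\n",
			       tsk->comm);
	}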
@@ -279,7 +285,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	mm->free_area_cache = oldmm->mmap_base;
 	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
-	cpus_clear(mm->cpu_vm_mask);
+	cpumask_clear(mm_cpumask(mm));
 	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
 	rb_parent = NULL;
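
Note: cpus_clear() belongs to the old fixed-size cpumask API. The replacement goes through the mm_cpumask() accessor instead of touching mm->cpu_vm_mask directly, part of the tree-wide conversion preparing for variable-sized cpumasks (CONFIG_CPUMASK_OFFSTACK). In this era the accessor is just a thin wrapper, roughly (sketch of the <linux/sched.h> definition):

	/* Future-safe accessor for the mm's CPU mask; the mask's
	 * representation can change later without touching callers. */
	#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)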
@@ -676,38 +682,21 @@ fail_nomem:
 	return retval;
 }
 
-static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
-{
-	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
-	/* We don't need to lock fs - think why ;-) */
-	if (fs) {
-		atomic_set(&fs->count, 1);
-		rwlock_init(&fs->lock);
-		fs->umask = old->umask;
-		read_lock(&old->lock);
-		fs->root = old->root;
-		path_get(&old->root);
-		fs->pwd = old->pwd;
-		path_get(&old->pwd);
-		read_unlock(&old->lock);
-	}
-	return fs;
-}
-
-struct fs_struct *copy_fs_struct(struct fs_struct *old)
-{
-	return __copy_fs_struct(old);
-}
-
-EXPORT_SYMBOL_GPL(copy_fs_struct);
-
 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 {
+	struct fs_struct *fs = current->fs;
 	if (clone_flags & CLONE_FS) {
-		atomic_inc(&current->fs->count);
+		/* tsk->fs is already what we want */
+		write_lock(&fs->lock);
+		if (fs->in_exec) {
+			write_unlock(&fs->lock);
+			return -EAGAIN;
+		}
+		fs->users++;
+		write_unlock(&fs->lock);
 		return 0;
 	}
-	tsk->fs = __copy_fs_struct(current->fs);
+	tsk->fs = copy_fs_struct(fs);
 	if (!tsk->fs)
 		return -ENOMEM;
 	return 0;
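
Note: this hunk is the fork.c side of the fs_struct rework merged here: the atomic reference count becomes a plain 'users' counter guarded by fs->lock, and a new 'in_exec' flag makes clone(CLONE_FS) fail with -EAGAIN while a thread sharing this fs_struct is in the middle of execve() (closing a race with check_unsafe_exec()). The copy helper moved out of fork.c into fs/fs_struct.c, which is why __copy_fs_struct() disappears above and <linux/fs_struct.h> is now included. The structure being manipulated now looks roughly like this (sketch of the 2.6.30-era include/linux/fs_struct.h, not verbatim):

	struct fs_struct {
		int users;		/* plain count, guarded by ->lock */
		rwlock_t lock;
		int umask;
		int in_exec;		/* a sharer is currently in execve() */
		struct path root, pwd;
	};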
@@ -836,6 +825,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	atomic_set(&sig->live, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
 	sig->flags = 0;
+	if (clone_flags & CLONE_NEWPID)
+		sig->flags |= SIGNAL_UNKILLABLE;
 	sig->group_exit_code = 0;
 	sig->group_exit_task = NULL;
 	sig->group_stop_count = 0;
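
Note: when the child is the init process of a freshly created PID namespace (CLONE_NEWPID), marking its signal struct SIGNAL_UNKILLABLE gives it the same protection the global init enjoys: fatal signals sent from within its own namespace are ignored unless it installed a handler. A runnable userspace sketch of the path this exercises (needs privilege to create a PID namespace):

	#define _GNU_SOURCE
	#include <sched.h>
	#include <signal.h>
	#include <stdio.h>
	#include <sys/wait.h>
	#include <unistd.h>

	static char stack[64 * 1024];

	static int ns_init(void *arg)
	{
		printf("namespace init, pid=%d\n", getpid()); /* prints 1 */
		raise(SIGTERM);	/* ignored: SIGNAL_UNKILLABLE, no handler */
		printf("still alive after SIGTERM\n");
		return 0;
	}

	int main(void)
	{
		/* the clone child's stack grows down: pass the buffer top */
		pid_t pid = clone(ns_init, stack + sizeof(stack),
				  CLONE_NEWPID | SIGCHLD, NULL);
		if (pid < 0) {
			perror("clone");
			return 1;
		}
		waitpid(pid, NULL, 0);
		return 0;
	}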
@@ -1120,7 +1111,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_mm;
 	if ((retval = copy_io(clone_flags, p)))
 		goto bad_fork_cleanup_namespaces;
-	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
+	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
 	if (retval)
 		goto bad_fork_cleanup_io;
 
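
Note: the literal 0 disappears because copy_thread() lost its first parameter, an argument no architecture ever used (fork.c always passed 0). Every arch implementation was updated in the same series; the prototypes, roughly (sketch, see arch/*/kernel/process*.c):

	/* before: the leading argument was dead weight */
	int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
			unsigned long unused, struct task_struct *p,
			struct pt_regs *regs);

	/* after */
	int copy_thread(unsigned long clone_flags, unsigned long sp,
			unsigned long unused, struct task_struct *p,
			struct pt_regs *regs);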
@@ -1258,8 +1249,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			p->signal->leader_pid = pid;
 			tty_kref_put(p->signal->tty);
 			p->signal->tty = tty_kref_get(current->signal->tty);
-			set_task_pgrp(p, task_pgrp_nr(current));
-			set_task_session(p, task_session_nr(current));
 			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
 			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -1483,6 +1472,7 @@ void __init proc_caches_init(void)
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
 	mmap_init();
 }
 
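
Note: creating vm_area_cachep here groups it with the other fork-time caches. KMEM_CACHE() is the slab convenience macro that derives the cache name, size, and alignment from the type itself; it expands to roughly (sketch of the <linux/slab.h> definition):

	#define KMEM_CACHE(__struct, __flags)				\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
				  __alignof__(struct __struct),		\
				  (__flags), NULL)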
@@ -1538,12 +1528,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
 {
 	struct fs_struct *fs = current->fs;
 
-	if ((unshare_flags & CLONE_FS) &&
-	    (fs && atomic_read(&fs->count) > 1)) {
-		*new_fsp = __copy_fs_struct(current->fs);
-		if (!*new_fsp)
-			return -ENOMEM;
-	}
+	if (!(unshare_flags & CLONE_FS) || !fs)
+		return 0;
+
+	/* don't need lock here; in the worst case we'll do useless copy */
+	if (fs->users == 1)
+		return 0;
+
+	*new_fsp = copy_fs_struct(fs);
+	if (!*new_fsp)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -1659,8 +1653,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
 	if (new_fs) {
 		fs = current->fs;
+		write_lock(&fs->lock);
 		current->fs = new_fs;
-		new_fs = fs;
+		if (--fs->users)
+			new_fs = NULL;
+		else
+			new_fs = fs;
+		write_unlock(&fs->lock);
 	}
 
 	if (new_mm) {
@@ -1699,7 +1698,7 @@ bad_unshare_cleanup_sigh:
 
 bad_unshare_cleanup_fs:
 	if (new_fs)
-		put_fs_struct(new_fs);
+		free_fs_struct(new_fs);
 
 bad_unshare_cleanup_thread:
 bad_unshare_out:
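
Note: with the refcount now a locked plain counter, unshare() must decide under fs->lock whether the caller was the last user of its old fs_struct. If --fs->users is still nonzero, other tasks keep the old structure alive, so new_fs is set to NULL and the bad_unshare_cleanup_fs path (which the success path also falls through) frees nothing; only when the count hits zero does new_fs carry the old fs_struct to free_fs_struct(), which replaces the old put_fs_struct(). Its body in this era is roughly (sketch; it lives in fs/fs_struct.c):

	void free_fs_struct(struct fs_struct *fs)
	{
		path_put(&fs->root);	/* drop root dir references */
		path_put(&fs->pwd);	/* drop cwd references */
		kmem_cache_free(fs_cachep, fs);
	}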