Diffstat (limited to 'kernel/fork.c')
 kernel/fork.c | 72 +++++++++++++++++++++++++++++++++---------------------------------------
 1 file changed, 33 insertions(+), 39 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 6715ebc3761d..660c2b8765bc 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -60,6 +60,7 @@
 #include <linux/tty.h>
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
+#include <linux/fs_struct.h>
 #include <trace/sched.h>
 #include <linux/magic.h>
 
@@ -284,7 +285,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	mm->free_area_cache = oldmm->mmap_base;
 	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
-	cpus_clear(mm->cpu_vm_mask);
+	cpumask_clear(mm_cpumask(mm));
 	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
 	rb_parent = NULL;
@@ -681,38 +682,21 @@ fail_nomem:
 	return retval;
 }
 
-static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
-{
-	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
-	/* We don't need to lock fs - think why ;-) */
-	if (fs) {
-		atomic_set(&fs->count, 1);
-		rwlock_init(&fs->lock);
-		fs->umask = old->umask;
-		read_lock(&old->lock);
-		fs->root = old->root;
-		path_get(&old->root);
-		fs->pwd = old->pwd;
-		path_get(&old->pwd);
-		read_unlock(&old->lock);
-	}
-	return fs;
-}
-
-struct fs_struct *copy_fs_struct(struct fs_struct *old)
-{
-	return __copy_fs_struct(old);
-}
-
-EXPORT_SYMBOL_GPL(copy_fs_struct);
-
 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 {
+	struct fs_struct *fs = current->fs;
 	if (clone_flags & CLONE_FS) {
-		atomic_inc(&current->fs->count);
+		/* tsk->fs is already what we want */
+		write_lock(&fs->lock);
+		if (fs->in_exec) {
+			write_unlock(&fs->lock);
+			return -EAGAIN;
+		}
+		fs->users++;
+		write_unlock(&fs->lock);
 		return 0;
 	}
-	tsk->fs = __copy_fs_struct(current->fs);
+	tsk->fs = copy_fs_struct(fs);
 	if (!tsk->fs)
 		return -ENOMEM;
 	return 0;
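
The rewritten copy_fs() shows the new fs_struct lifetime rules: a plain users count and an in_exec flag, both manipulated under the writer side of fs->lock, replace the old atomic count. A CLONE_FS child may not be attached while another thread of the group is mid-exec, hence the -EAGAIN. As a rough userspace analogue of that pattern (the fs_demo names are invented for illustration; this is not kernel code):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct fs_demo {
            pthread_rwlock_t lock;
            int users;
            int in_exec;
    };

    /* Mirrors the CLONE_FS path in copy_fs(): refuse to share while
     * an exec is in flight, otherwise take another reference. */
    static int fs_demo_get(struct fs_demo *fs)
    {
            pthread_rwlock_wrlock(&fs->lock);
            if (fs->in_exec) {
                    pthread_rwlock_unlock(&fs->lock);
                    return -EAGAIN;
            }
            fs->users++;
            pthread_rwlock_unlock(&fs->lock);
            return 0;
    }

    /* Mirrors the unshare()/exit side: drop a reference and free the
     * structure only when the last user is gone. */
    static void fs_demo_put(struct fs_demo *fs)
    {
            int kill;

            pthread_rwlock_wrlock(&fs->lock);
            kill = (--fs->users == 0);
            pthread_rwlock_unlock(&fs->lock);
            if (kill) {
                    pthread_rwlock_destroy(&fs->lock);
                    free(fs);
            }
    }

    int main(void)
    {
            struct fs_demo *fs = calloc(1, sizeof(*fs));

            pthread_rwlock_init(&fs->lock, NULL);
            fs->users = 1;

            if (fs_demo_get(fs) == 0)
                    printf("shared, users now %d\n", fs->users);
            fs_demo_put(fs);        /* second user goes away */
            fs_demo_put(fs);        /* last user: frees fs */
            return 0;
    }

The writer lock is taken even for the simple increment because the count and the in_exec flag have to be observed atomically as a pair.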
@@ -841,6 +825,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	atomic_set(&sig->live, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
 	sig->flags = 0;
+	if (clone_flags & CLONE_NEWPID)
+		sig->flags |= SIGNAL_UNKILLABLE;
 	sig->group_exit_code = 0;
 	sig->group_exit_task = NULL;
 	sig->group_stop_count = 0;
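
The two added lines mark the init of a freshly created PID namespace as SIGNAL_UNKILLABLE, the same flag the global init carries, so ordinary signals sent from inside the namespace cannot kill it. A minimal demonstration from userspace (assumes root or CAP_SYS_ADMIN and a kernel with PID namespaces enabled):

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    static char child_stack[1024 * 1024];

    static int child(void *arg)
    {
            /* Inside the new namespace this prints 1. */
            printf("child sees itself as pid %d\n", (int)getpid());
            return 0;
    }

    int main(void)
    {
            pid_t pid = clone(child, child_stack + sizeof(child_stack),
                              CLONE_NEWPID | SIGCHLD, NULL);
            if (pid < 0) {
                    perror("clone (need CAP_SYS_ADMIN)");
                    return 1;
            }
            printf("parent sees child as pid %d\n", (int)pid);
            waitpid(pid, NULL, 0);
            return 0;
    }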
@@ -1125,7 +1111,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_mm;
 	if ((retval = copy_io(clone_flags, p)))
 		goto bad_fork_cleanup_namespaces;
-	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
+	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
 	if (retval)
 		goto bad_fork_cleanup_io;
 
@@ -1263,8 +1249,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			p->signal->leader_pid = pid;
 			tty_kref_put(p->signal->tty);
 			p->signal->tty = tty_kref_get(current->signal->tty);
-			set_task_pgrp(p, task_pgrp_nr(current));
-			set_task_session(p, task_session_nr(current));
 			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
 			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
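
The dropped set_task_pgrp()/set_task_session() calls go together with the removal of the cached numeric pgrp/session values from signal_struct: the process group and session are now always derived from the struct pid instances attached on the next two lines, so there is no separate cache left to keep in sync.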
@@ -1488,6 +1472,7 @@ void __init proc_caches_init(void)
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
 	mmap_init();
 }
 
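
The vm_area_struct cache is now created alongside the other process-related caches, with SLAB_PANIC so a failure stops boot immediately rather than surfacing later. KMEM_CACHE() is a convenience wrapper that derives the cache name, size and alignment from the struct type itself; in <linux/slab.h> of this era it was defined essentially as:

    #define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
                    sizeof(struct __struct), __alignof__(struct __struct),\
                    (__flags), NULL)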
@@ -1543,12 +1528,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
 {
 	struct fs_struct *fs = current->fs;
 
-	if ((unshare_flags & CLONE_FS) &&
-	    (fs && atomic_read(&fs->count) > 1)) {
-		*new_fsp = __copy_fs_struct(current->fs);
-		if (!*new_fsp)
-			return -ENOMEM;
-	}
+	if (!(unshare_flags & CLONE_FS) || !fs)
+		return 0;
+
+	/* don't need lock here; in the worst case we'll do useless copy */
+	if (fs->users == 1)
+		return 0;
+
+	*new_fsp = copy_fs_struct(fs);
+	if (!*new_fsp)
+		return -ENOMEM;
 
 	return 0;
 }
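
unshare_fs() now decides purely from the users count whether a copy is needed; as the new comment notes, reading it unlocked is safe because the worst case is an unnecessary copy. What this machinery gives userspace is a per-thread root/cwd: threads are normally created sharing one fs_struct (pthreads pass CLONE_FS), so a chdir() in one thread moves them all, while unshare(CLONE_FS) gives the caller a private copy. A small runnable sketch (link with -pthread):

    #define _GNU_SOURCE
    #include <limits.h>
    #include <pthread.h>
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    static void *worker(void *arg)
    {
            /* Give this thread its own fs_struct, then move it alone. */
            if (unshare(CLONE_FS))
                    perror("unshare");
            if (chdir("/"))
                    perror("chdir");
            return NULL;
    }

    int main(void)
    {
            char buf[PATH_MAX];
            pthread_t t;

            if (chdir("/tmp"))
                    perror("chdir");
            pthread_create(&t, NULL, worker, NULL);
            pthread_join(t, NULL);

            /* Still /tmp: the worker's chdir("/") hit its private copy. */
            if (getcwd(buf, sizeof(buf)))
                    printf("main thread cwd: %s\n", buf);
            return 0;
    }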
@@ -1664,8 +1653,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
 	if (new_fs) {
 		fs = current->fs;
+		write_lock(&fs->lock);
 		current->fs = new_fs;
-		new_fs = fs;
+		if (--fs->users)
+			new_fs = NULL;
+		else
+			new_fs = fs;
+		write_unlock(&fs->lock);
 	}
 
 	if (new_mm) {
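
The extra bookkeeping here follows from the same counting scheme: the displaced fs_struct may still be shared, so it is handed back through new_fs for freeing only when --fs->users reaches zero; otherwise new_fs is set to NULL and the structure lives on with its remaining users. The pointer swap and the decrement happen under the writer lock so no other thread can take a reference in between.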
@@ -1704,7 +1698,7 @@ bad_unshare_cleanup_sigh:
 
 bad_unshare_cleanup_fs:
 	if (new_fs)
-		put_fs_struct(new_fs);
+		free_fs_struct(new_fs);
 
 bad_unshare_cleanup_thread:
 bad_unshare_out:
