Diffstat (limited to 'kernel/fork.c')
-rw-r--r--  kernel/fork.c | 101
1 file changed, 49 insertions(+), 52 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 6d5dbb7a13e2..989c7c202b3d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -60,7 +60,9 @@
 #include <linux/tty.h>
 #include <linux/proc_fs.h>
 #include <linux/blkdev.h>
+#include <linux/fs_struct.h>
 #include <trace/sched.h>
+#include <linux/magic.h>
 
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
@@ -212,6 +214,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
 	struct thread_info *ti;
+	unsigned long *stackend;
+
 	int err;
 
 	prepare_to_copy(orig);
@@ -237,6 +241,8 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 		goto out;
 
 	setup_thread_stack(tsk, orig);
+	stackend = end_of_stack(tsk);
+	*stackend = STACK_END_MAGIC;	/* for overflow detection */
 
 #ifdef CONFIG_CC_STACKPROTECTOR
 	tsk->stack_canary = get_random_int();
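The sentinel written above only pays off when something tests it later. A minimal sketch of such a check, assuming the end_of_stack()/STACK_END_MAGIC pair used in this hunk (the helper name is hypothetical; the real consumers are fault and debug paths):

/* Hypothetical helper: if the magic word at the far end of the kernel
 * stack has been overwritten, the thread overflowed its stack. */
static inline int kstack_overflowed(struct task_struct *tsk)
{
	unsigned long *stackend = end_of_stack(tsk);

	return *stackend != STACK_END_MAGIC;	/* clobbered sentinel */
}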
@@ -279,7 +285,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	mm->free_area_cache = oldmm->mmap_base;
 	mm->cached_hole_size = ~0UL;
 	mm->map_count = 0;
-	cpus_clear(mm->cpu_vm_mask);
+	cpumask_clear(mm_cpumask(mm));
 	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
 	rb_parent = NULL;
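This is part of the cpumask API conversion: callers go through the mm_cpumask() accessor and operate on a struct cpumask pointer instead of touching mm->cpu_vm_mask by value. An illustrative sketch of the two idioms (the function is made up for illustration):

#include <linux/cpumask.h>
#include <linux/mm_types.h>

static void cpumask_idiom_sketch(struct mm_struct *mm)
{
	/* Old style: cpus_clear(mm->cpu_vm_mask) named the field directly
	 * and took the whole mask by value. */

	/* New style: the accessor hides where the mask lives, and the
	 * cpumask_* operations take a struct cpumask pointer. */
	cpumask_clear(mm_cpumask(mm));
	cpumask_set_cpu(raw_smp_processor_id(), mm_cpumask(mm));
}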
@@ -639,6 +645,9 @@ static int copy_mm(unsigned long clone_flags, struct task_struct * tsk)
 
 	tsk->min_flt = tsk->maj_flt = 0;
 	tsk->nvcsw = tsk->nivcsw = 0;
+#ifdef CONFIG_DETECT_HUNG_TASK
+	tsk->last_switch_count = tsk->nvcsw + tsk->nivcsw;
+#endif
 
 	tsk->mm = NULL;
 	tsk->active_mm = NULL;
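last_switch_count seeds the hung-task watchdog (CONFIG_DETECT_HUNG_TASK), which replaces the softlockup-based bookkeeping removed further down in this diff. A simplified sketch of the comparison the watchdog makes on each scan, assuming the kernel/hung_task.c logic of this era:

/* Simplified: a TASK_UNINTERRUPTIBLE task whose context-switch count has
 * not moved between two watchdog scans is reported as hung. */
static void check_hung_task_sketch(struct task_struct *t)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	if (switch_count != t->last_switch_count) {
		/* The task ran since the last scan; remember and move on. */
		t->last_switch_count = switch_count;
		return;
	}
	/* Still blocked with no switches: warn about a possible hang. */
}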
@@ -676,38 +685,21 @@ fail_nomem:
 	return retval;
 }
 
-static struct fs_struct *__copy_fs_struct(struct fs_struct *old)
-{
-	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
-	/* We don't need to lock fs - think why ;-) */
-	if (fs) {
-		atomic_set(&fs->count, 1);
-		rwlock_init(&fs->lock);
-		fs->umask = old->umask;
-		read_lock(&old->lock);
-		fs->root = old->root;
-		path_get(&old->root);
-		fs->pwd = old->pwd;
-		path_get(&old->pwd);
-		read_unlock(&old->lock);
-	}
-	return fs;
-}
-
-struct fs_struct *copy_fs_struct(struct fs_struct *old)
-{
-	return __copy_fs_struct(old);
-}
-
-EXPORT_SYMBOL_GPL(copy_fs_struct);
-
 static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
 {
+	struct fs_struct *fs = current->fs;
 	if (clone_flags & CLONE_FS) {
-		atomic_inc(&current->fs->count);
+		/* tsk->fs is already what we want */
+		write_lock(&fs->lock);
+		if (fs->in_exec) {
+			write_unlock(&fs->lock);
+			return -EAGAIN;
+		}
+		fs->users++;
+		write_unlock(&fs->lock);
 		return 0;
 	}
-	tsk->fs = __copy_fs_struct(current->fs);
+	tsk->fs = copy_fs_struct(fs);
 	if (!tsk->fs)
 		return -ENOMEM;
 	return 0;
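copy_fs_struct() moves out to fs/fs_struct.c (hence the new <linux/fs_struct.h> include above), and the atomic fs->count becomes a plain fs->users integer guarded by fs->lock; the in_exec test makes CLONE_FS fail with -EAGAIN while another thread sharing the fs_struct is mid-exec. A sketch of the relocated helper, reconstructed under those assumptions rather than taken from this diff:

/* Reconstruction of fs/fs_struct.c::copy_fs_struct() from this series;
 * field names follow the diff above, details may vary. */
struct fs_struct *copy_fs_struct(struct fs_struct *old)
{
	struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);

	if (fs) {
		fs->users = 1;		/* plain int, serialized by fs->lock */
		fs->in_exec = 0;
		rwlock_init(&fs->lock);
		fs->umask = old->umask;
		read_lock(&old->lock);
		fs->root = old->root;
		path_get(&old->root);
		fs->pwd = old->pwd;
		path_get(&old->pwd);
		read_unlock(&old->lock);
	}
	return fs;
}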
@@ -836,6 +828,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	atomic_set(&sig->live, 1);
 	init_waitqueue_head(&sig->wait_chldexit);
 	sig->flags = 0;
+	if (clone_flags & CLONE_NEWPID)
+		sig->flags |= SIGNAL_UNKILLABLE;
 	sig->group_exit_code = 0;
 	sig->group_exit_task = NULL;
 	sig->group_stop_count = 0;
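SIGNAL_UNKILLABLE marks the init of a new pid namespace so that, like the global init, it shrugs off fatal signals sent from within the namespace it leads unless it installs a handler. A simplified sketch of the delivery-side check, assuming the kernel/signal.c logic of this era (the real code also distinguishes signals from ancestor namespaces):

/* Simplified: for an unkillable (init-like) task, SIG_DFL behaves like
 * SIG_IGN for signals originating in its own namespace. */
static int sig_task_ignored_sketch(struct task_struct *t, int sig)
{
	void __user *handler = t->sighand->action[sig - 1].sa.sa_handler;

	if (unlikely(t->signal->flags & SIGNAL_UNKILLABLE) &&
	    handler == SIG_DFL)
		return 1;

	return handler == SIG_IGN;
}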
@@ -851,13 +845,14 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 	sig->tty_old_pgrp = NULL;
 	sig->tty = NULL;
 
-	sig->cutime = sig->cstime = cputime_zero;
+	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
 	sig->gtime = cputime_zero;
 	sig->cgtime = cputime_zero;
 	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
 	sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
 	sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
 	task_io_accounting_init(&sig->ioac);
+	sig->sum_sched_runtime = 0;
 	taskstats_tgid_init(sig);
 
 	task_lock(current->group_leader);
@@ -1040,11 +1035,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
 	p->default_timer_slack_ns = current->timer_slack_ns;
 
-#ifdef CONFIG_DETECT_SOFTLOCKUP
-	p->last_switch_count = 0;
-	p->last_switch_timestamp = 0;
-#endif
-
 	task_io_accounting_init(&p->ioac);
 	acct_clear_integrals(p);
 
@@ -1094,7 +1084,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #ifdef CONFIG_DEBUG_MUTEXES
 	p->blocked_on = NULL; /* not blocked yet */
 #endif
-	if (unlikely(ptrace_reparented(current)))
+	if (unlikely(current->ptrace))
 		ptrace_fork(p, clone_flags);
 
 	/* Perform scheduler related setup. Assign this task to a CPU. */
@@ -1119,7 +1109,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		goto bad_fork_cleanup_mm;
 	if ((retval = copy_io(clone_flags, p)))
 		goto bad_fork_cleanup_namespaces;
-	retval = copy_thread(0, clone_flags, stack_start, stack_size, p, regs);
+	retval = copy_thread(clone_flags, stack_start, stack_size, p, regs);
 	if (retval)
 		goto bad_fork_cleanup_io;
 
@@ -1178,10 +1168,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
 	clear_all_latency_tracing(p);
 
-	/* Our parent execution domain becomes current domain
-	   These must match for thread signalling to apply */
-	p->parent_exec_id = p->self_exec_id;
-
 	/* ok, now we should be set up.. */
 	p->exit_signal = (clone_flags & CLONE_THREAD) ? -1 : (clone_flags & CSIGNAL);
 	p->pdeath_signal = 0;
@@ -1219,10 +1205,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	set_task_cpu(p, smp_processor_id());
 
 	/* CLONE_PARENT re-uses the old parent */
-	if (clone_flags & (CLONE_PARENT|CLONE_THREAD))
+	if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
 		p->real_parent = current->real_parent;
-	else
+		p->parent_exec_id = current->parent_exec_id;
+	} else {
 		p->real_parent = current;
+		p->parent_exec_id = current->self_exec_id;
+	}
 
 	spin_lock(&current->sighand->siglock);
 
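Together with the removal of the unconditional parent_exec_id assignment earlier in the diff, this sets parent_exec_id against the task that will actually be the parent. The value is consumed at exit time; a simplified sketch of that consumer, assuming the do_notify_parent() logic of this era (the real test compares several exec ids):

#include <linux/sched.h>

/* Simplified from kernel/exit.c::do_notify_parent(): if the parent has
 * exec'd since this child was forked, the exec-id comparison fails and
 * the child's chosen death signal is downgraded to plain SIGCHLD. */
static int exit_signal_for_sketch(struct task_struct *tsk)
{
	if (tsk->parent_exec_id != tsk->parent->self_exec_id)
		return SIGCHLD;

	return tsk->exit_signal;
}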
@@ -1258,8 +1247,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 			p->signal->leader_pid = pid;
 			tty_kref_put(p->signal->tty);
 			p->signal->tty = tty_kref_get(current->signal->tty);
-			set_task_pgrp(p, task_pgrp_nr(current));
-			set_task_session(p, task_session_nr(current));
 			attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
 			attach_pid(p, PIDTYPE_SID, task_session(current));
 			list_add_tail_rcu(&p->tasks, &init_task.tasks);
@@ -1483,6 +1470,7 @@ void __init proc_caches_init(void)
 	mm_cachep = kmem_cache_create("mm_struct",
 			sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
 			SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
+	vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
 	mmap_init();
 }
 
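KMEM_CACHE() is the usual shorthand for creating a slab cache keyed to a struct type; per its definition in <linux/slab.h>, the added line expands to roughly:

/* What KMEM_CACHE(vm_area_struct, SLAB_PANIC) expands to, roughly: */
vm_area_cachep = kmem_cache_create("vm_area_struct",
			sizeof(struct vm_area_struct),
			__alignof__(struct vm_area_struct),
			SLAB_PANIC, NULL);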
@@ -1538,12 +1526,16 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
 {
 	struct fs_struct *fs = current->fs;
 
-	if ((unshare_flags & CLONE_FS) &&
-	    (fs && atomic_read(&fs->count) > 1)) {
-		*new_fsp = __copy_fs_struct(current->fs);
-		if (!*new_fsp)
-			return -ENOMEM;
-	}
+	if (!(unshare_flags & CLONE_FS) || !fs)
+		return 0;
+
+	/* don't need lock here; in the worst case we'll do useless copy */
+	if (fs->users == 1)
+		return 0;
+
+	*new_fsp = copy_fs_struct(fs);
+	if (!*new_fsp)
+		return -ENOMEM;
 
 	return 0;
 }
@@ -1659,8 +1651,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
 	if (new_fs) {
 		fs = current->fs;
+		write_lock(&fs->lock);
 		current->fs = new_fs;
-		new_fs = fs;
+		if (--fs->users)
+			new_fs = NULL;
+		else
+			new_fs = fs;
+		write_unlock(&fs->lock);
 	}
 
 	if (new_mm) {
@@ -1699,7 +1696,7 @@ bad_unshare_cleanup_sigh:
 
 bad_unshare_cleanup_fs:
 	if (new_fs)
-		put_fs_struct(new_fs);
+		free_fs_struct(new_fs);
 
 bad_unshare_cleanup_thread:
 bad_unshare_out:
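put_fs_struct() dropped a reference; with the plain users counter, the unshare path above only ever leaves new_fs non-NULL when nobody else references the old fs_struct, so the cleanup label can free unconditionally. A sketch of the replacement helper, reconstructed from the same series rather than taken from this diff:

/* Reconstruction of fs/fs_struct.c::free_fs_struct(): drop the root and
 * pwd path references, then return the object to its slab cache. */
void free_fs_struct(struct fs_struct *fs)
{
	path_put(&fs->root);
	path_put(&fs->pwd);
	kmem_cache_free(fs_cachep, fs);
}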