Diffstat (limited to 'kernel/fork.c')

 -rw-r--r--  kernel/fork.c  |  80
 1 file changed, 21 insertions(+), 59 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 5b2959b3ffc2..4799c5f0e6d0 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -87,6 +87,14 @@ DEFINE_PER_CPU(unsigned long, process_counts) = 0;
 
 __cacheline_aligned DEFINE_RWLOCK(tasklist_lock);  /* outer */
 
+#ifdef CONFIG_PROVE_RCU
+int lockdep_tasklist_lock_is_held(void)
+{
+        return lockdep_is_held(&tasklist_lock);
+}
+EXPORT_SYMBOL_GPL(lockdep_tasklist_lock_is_held);
+#endif /* #ifdef CONFIG_PROVE_RCU */
+
 int nr_processes(void)
 {
         int cpu;
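With CONFIG_PROVE_RCU enabled, this exported helper lets RCU-lockdep annotations treat holding tasklist_lock as a valid alternative to rcu_read_lock(). A minimal illustration of how such a helper is typically passed to rcu_dereference_check() (hypothetical call site, not part of this patch):

        /* Hypothetical caller, for illustration only: the dereference is legal
         * either under rcu_read_lock() or with tasklist_lock held, and lockdep
         * can now verify both cases. */
        struct task_struct *parent;

        parent = rcu_dereference_check(p->real_parent,
                                       lockdep_tasklist_lock_is_held());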
@@ -328,15 +336,17 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                 if (!tmp)
                         goto fail_nomem;
                 *tmp = *mpnt;
+                INIT_LIST_HEAD(&tmp->anon_vma_chain);
                 pol = mpol_dup(vma_policy(mpnt));
                 retval = PTR_ERR(pol);
                 if (IS_ERR(pol))
                         goto fail_nomem_policy;
                 vma_set_policy(tmp, pol);
+                if (anon_vma_fork(tmp, mpnt))
+                        goto fail_nomem_anon_vma_fork;
                 tmp->vm_flags &= ~VM_LOCKED;
                 tmp->vm_mm = mm;
                 tmp->vm_next = NULL;
-                anon_vma_link(tmp);
                 file = tmp->vm_file;
                 if (file) {
                         struct inode *inode = file->f_path.dentry->d_inode;
@@ -391,6 +401,8 @@ out:
         flush_tlb_mm(oldmm);
         up_write(&oldmm->mmap_sem);
         return retval;
+fail_nomem_anon_vma_fork:
+        mpol_put(pol);
 fail_nomem_policy:
         kmem_cache_free(vm_area_cachep, tmp);
 fail_nomem:
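Unlike the old anon_vma_link(), anon_vma_fork() allocates memory and can fail, so dup_mmap() gains an extra unwind label: a failure after the mempolicy has already been duplicated must drop that policy before freeing the vma copy. A minimal user-space sketch of the same stacked-goto unwind idiom (label names mirror the ones above; this is not kernel code):

        #include <stdlib.h>

        static int dup_one_vma_sketch(void)
        {
                void *tmp, *pol, *avc;

                tmp = malloc(32);                 /* stands in for the vm_area copy */
                if (!tmp)
                        goto fail_nomem;
                pol = malloc(16);                 /* stands in for mpol_dup()       */
                if (!pol)
                        goto fail_nomem_policy;
                avc = malloc(16);                 /* stands in for anon_vma_fork()  */
                if (!avc)
                        goto fail_nomem_anon_vma_fork;

                free(avc); free(pol); free(tmp);  /* success path of the sketch     */
                return 0;

        fail_nomem_anon_vma_fork:
                free(pol);                        /* mirrors mpol_put(pol)          */
        fail_nomem_policy:
                free(tmp);                        /* mirrors kmem_cache_free(...)   */
        fail_nomem:
                return -1;
        }

Each later failure label falls through to the earlier ones, so exactly the objects allocated before the failing step are released.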
@@ -454,8 +466,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
                 (current->mm->flags & MMF_INIT_MASK) : default_dump_filter;
         mm->core_state = NULL;
         mm->nr_ptes = 0;
-        set_mm_counter(mm, file_rss, 0);
-        set_mm_counter(mm, anon_rss, 0);
+        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
         spin_lock_init(&mm->page_table_lock);
         mm->free_area_cache = TASK_UNMAPPED_BASE;
         mm->cached_hole_size = ~0UL;
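The two per-counter set_mm_counter() calls give way to one memset() over an aggregate rss_stat structure. A rough sketch of the shape involved (illustrative only; the real definition lives in <linux/mm_types.h> and the counter names and count may differ):

        /* Assumed layout for the sketch: */
        enum { MM_FILEPAGES, MM_ANONPAGES, NR_MM_COUNTERS };

        struct mm_rss_stat {
                atomic_long_t count[NR_MM_COUNTERS];
        };

        /* Zeroing the whole aggregate initialises every RSS counter at once,
         * and stays correct if more counters are added later: */
        memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));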
@@ -824,23 +835,14 @@ void __cleanup_sighand(struct sighand_struct *sighand)
  */
 static void posix_cpu_timers_init_group(struct signal_struct *sig)
 {
+        unsigned long cpu_limit;
+
         /* Thread group counters. */
         thread_group_cputime_init(sig);
 
-        /* Expiration times and increments. */
-        sig->it[CPUCLOCK_PROF].expires = cputime_zero;
-        sig->it[CPUCLOCK_PROF].incr = cputime_zero;
-        sig->it[CPUCLOCK_VIRT].expires = cputime_zero;
-        sig->it[CPUCLOCK_VIRT].incr = cputime_zero;
-
-        /* Cached expiration times. */
-        sig->cputime_expires.prof_exp = cputime_zero;
-        sig->cputime_expires.virt_exp = cputime_zero;
-        sig->cputime_expires.sched_exp = 0;
-
-        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) {
-                sig->cputime_expires.prof_exp =
-                                secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
+        cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+        if (cpu_limit != RLIM_INFINITY) {
+                sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
                 sig->cputimer.running = 1;
         }
 
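The explicit cputime_zero assignments can go because the signal_struct now arrives pre-zeroed from kmem_cache_zalloc() (see the copy_signal() hunk below); what remains is a read-once of RLIMIT_CPU. Loading the limit once via ACCESS_ONCE guarantees the RLIM_INFINITY test and the secs_to_cputime() conversion see the same value even if another thread changes the rlimit concurrently. A small sketch of the difference, with "expires" standing in for sig->cputime_expires.prof_exp:

        /* Racy shape: the compiler may emit two loads of rlim_cur, so the test
         * and the use can observe different values across a concurrent
         * setrlimit(). */
        if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY)
                expires = secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);

        /* Read-once shape: a single load, then a stable local value. */
        cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (cpu_limit != RLIM_INFINITY)
                expires = secs_to_cputime(cpu_limit);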
@@ -857,7 +859,7 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
         if (clone_flags & CLONE_THREAD)
                 return 0;
 
-        sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
+        sig = kmem_cache_zalloc(signal_cachep, GFP_KERNEL);
         tsk->signal = sig;
         if (!sig)
                 return -ENOMEM;
@@ -865,46 +867,21 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
         atomic_set(&sig->count, 1);
         atomic_set(&sig->live, 1);
         init_waitqueue_head(&sig->wait_chldexit);
-        sig->flags = 0;
         if (clone_flags & CLONE_NEWPID)
                 sig->flags |= SIGNAL_UNKILLABLE;
-        sig->group_exit_code = 0;
-        sig->group_exit_task = NULL;
-        sig->group_stop_count = 0;
         sig->curr_target = tsk;
         init_sigpending(&sig->shared_pending);
         INIT_LIST_HEAD(&sig->posix_timers);
 
         hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-        sig->it_real_incr.tv64 = 0;
         sig->real_timer.function = it_real_fn;
 
-        sig->leader = 0;        /* session leadership doesn't inherit */
-        sig->tty_old_pgrp = NULL;
-        sig->tty = NULL;
-
-        sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
-        sig->gtime = cputime_zero;
-        sig->cgtime = cputime_zero;
-#ifndef CONFIG_VIRT_CPU_ACCOUNTING
-        sig->prev_utime = sig->prev_stime = cputime_zero;
-#endif
-        sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
-        sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
-        sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
-        sig->maxrss = sig->cmaxrss = 0;
-        task_io_accounting_init(&sig->ioac);
-        sig->sum_sched_runtime = 0;
-        taskstats_tgid_init(sig);
-
         task_lock(current->group_leader);
         memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
         task_unlock(current->group_leader);
 
         posix_cpu_timers_init_group(sig);
 
-        acct_init_pacct(&sig->pacct);
-
         tty_audit_fork(sig);
 
         sig->oom_adj = current->signal->oom_adj;
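Every assignment dropped here set a field to zero or NULL (counters, cputime accumulators, tty pointers, plus helpers that themselves only zero their target), so allocating the signal_struct with kmem_cache_zalloc() makes them redundant. kmem_cache_zalloc() is essentially the same slab allocation with __GFP_ZERO, roughly:

        /* Rough equivalent of kmem_cache_zalloc() (sketch, not the exact slab code): */
        static inline void *kmem_cache_zalloc_sketch(struct kmem_cache *cachep, gfp_t flags)
        {
                return kmem_cache_alloc(cachep, flags | __GFP_ZERO);
        }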
@@ -1033,7 +1010,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 #endif
         retval = -EAGAIN;
         if (atomic_read(&p->real_cred->user->processes) >=
-                        p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
+                        task_rlimit(p, RLIMIT_NPROC)) {
                 if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
                     p->real_cred->user != INIT_USER)
                         goto bad_fork_free;
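task_rlimit() is a small accessor for a task's current soft limit; it is defined along these lines in <linux/sched.h> (shown as a sketch for context, not quoted from this patch):

        static inline unsigned long task_rlimit(const struct task_struct *tsk,
                                                unsigned int limit)
        {
                return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
        }

So the open-coded rlim_cur access becomes a read-once through the helper, matching the RLIMIT_CPU change above.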
@@ -1241,21 +1218,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
         /* Need tasklist lock for parent etc handling! */
         write_lock_irq(&tasklist_lock);
 
-        /*
-         * The task hasn't been attached yet, so its cpus_allowed mask will
-         * not be changed, nor will its assigned CPU.
-         *
-         * The cpus_allowed mask of the parent may have changed after it was
-         * copied first time - so re-copy it here, then check the child's CPU
-         * to ensure it is on a valid CPU (and if not, just force it back to
-         * parent's CPU). This avoids alot of nasty races.
-         */
-        p->cpus_allowed = current->cpus_allowed;
-        p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed;
-        if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) ||
-                        !cpu_online(task_cpu(p))))
-                set_task_cpu(p, smp_processor_id());
-
         /* CLONE_PARENT re-uses the old parent */
         if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) {
                 p->real_parent = current->real_parent;
