diff options
Diffstat (limited to 'kernel/fork.c')
| -rw-r--r-- | kernel/fork.c | 35 |
1 file changed, 13 insertions(+), 22 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c index 5b2959b3ffc2..b0ec34abc0bb 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
| @@ -86,6 +86,7 @@ int max_threads; /* tunable limit on nr_threads */ | |||
| 86 | DEFINE_PER_CPU(unsigned long, process_counts) = 0; | 86 | DEFINE_PER_CPU(unsigned long, process_counts) = 0; |
| 87 | 87 | ||
| 88 | __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ | 88 | __cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */ |
| 89 | EXPORT_SYMBOL_GPL(tasklist_lock); | ||
| 89 | 90 | ||
| 90 | int nr_processes(void) | 91 | int nr_processes(void) |
| 91 | { | 92 | { |
| @@ -328,15 +329,17 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) | |||
| 328 | if (!tmp) | 329 | if (!tmp) |
| 329 | goto fail_nomem; | 330 | goto fail_nomem; |
| 330 | *tmp = *mpnt; | 331 | *tmp = *mpnt; |
| 332 | INIT_LIST_HEAD(&tmp->anon_vma_chain); | ||
| 331 | pol = mpol_dup(vma_policy(mpnt)); | 333 | pol = mpol_dup(vma_policy(mpnt)); |
| 332 | retval = PTR_ERR(pol); | 334 | retval = PTR_ERR(pol); |
| 333 | if (IS_ERR(pol)) | 335 | if (IS_ERR(pol)) |
| 334 | goto fail_nomem_policy; | 336 | goto fail_nomem_policy; |
| 335 | vma_set_policy(tmp, pol); | 337 | vma_set_policy(tmp, pol); |
| 338 | if (anon_vma_fork(tmp, mpnt)) | ||
| 339 | goto fail_nomem_anon_vma_fork; | ||
| 336 | tmp->vm_flags &= ~VM_LOCKED; | 340 | tmp->vm_flags &= ~VM_LOCKED; |
| 337 | tmp->vm_mm = mm; | 341 | tmp->vm_mm = mm; |
| 338 | tmp->vm_next = NULL; | 342 | tmp->vm_next = NULL; |
| 339 | anon_vma_link(tmp); | ||
| 340 | file = tmp->vm_file; | 343 | file = tmp->vm_file; |
| 341 | if (file) { | 344 | if (file) { |
| 342 | struct inode *inode = file->f_path.dentry->d_inode; | 345 | struct inode *inode = file->f_path.dentry->d_inode; |
| @@ -391,6 +394,8 @@ out: | |||
| 391 | flush_tlb_mm(oldmm); | 394 | flush_tlb_mm(oldmm); |
| 392 | up_write(&oldmm->mmap_sem); | 395 | up_write(&oldmm->mmap_sem); |
| 393 | return retval; | 396 | return retval; |
| 397 | fail_nomem_anon_vma_fork: | ||
| 398 | mpol_put(pol); | ||
| 394 | fail_nomem_policy: | 399 | fail_nomem_policy: |
| 395 | kmem_cache_free(vm_area_cachep, tmp); | 400 | kmem_cache_free(vm_area_cachep, tmp); |
| 396 | fail_nomem: | 401 | fail_nomem: |
| @@ -454,8 +459,7 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p) | |||
| 454 | (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; | 459 | (current->mm->flags & MMF_INIT_MASK) : default_dump_filter; |
| 455 | mm->core_state = NULL; | 460 | mm->core_state = NULL; |
| 456 | mm->nr_ptes = 0; | 461 | mm->nr_ptes = 0; |
| 457 | set_mm_counter(mm, file_rss, 0); | 462 | memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); |
| 458 | set_mm_counter(mm, anon_rss, 0); | ||
| 459 | spin_lock_init(&mm->page_table_lock); | 463 | spin_lock_init(&mm->page_table_lock); |
| 460 | mm->free_area_cache = TASK_UNMAPPED_BASE; | 464 | mm->free_area_cache = TASK_UNMAPPED_BASE; |
| 461 | mm->cached_hole_size = ~0UL; | 465 | mm->cached_hole_size = ~0UL; |
| @@ -824,6 +828,8 @@ void __cleanup_sighand(struct sighand_struct *sighand) | |||
| 824 | */ | 828 | */ |
| 825 | static void posix_cpu_timers_init_group(struct signal_struct *sig) | 829 | static void posix_cpu_timers_init_group(struct signal_struct *sig) |
| 826 | { | 830 | { |
| 831 | unsigned long cpu_limit; | ||
| 832 | |||
| 827 | /* Thread group counters. */ | 833 | /* Thread group counters. */ |
| 828 | thread_group_cputime_init(sig); | 834 | thread_group_cputime_init(sig); |
| 829 | 835 | ||
| @@ -838,9 +844,9 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig) | |||
| 838 | sig->cputime_expires.virt_exp = cputime_zero; | 844 | sig->cputime_expires.virt_exp = cputime_zero; |
| 839 | sig->cputime_expires.sched_exp = 0; | 845 | sig->cputime_expires.sched_exp = 0; |
| 840 | 846 | ||
| 841 | if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { | 847 | cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur); |
| 842 | sig->cputime_expires.prof_exp = | 848 | if (cpu_limit != RLIM_INFINITY) { |
| 843 | secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur); | 849 | sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit); |
| 844 | sig->cputimer.running = 1; | 850 | sig->cputimer.running = 1; |
| 845 | } | 851 | } |
| 846 | 852 | ||
| @@ -1033,7 +1039,7 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1033 | #endif | 1039 | #endif |
| 1034 | retval = -EAGAIN; | 1040 | retval = -EAGAIN; |
| 1035 | if (atomic_read(&p->real_cred->user->processes) >= | 1041 | if (atomic_read(&p->real_cred->user->processes) >= |
| 1036 | p->signal->rlim[RLIMIT_NPROC].rlim_cur) { | 1042 | task_rlimit(p, RLIMIT_NPROC)) { |
| 1037 | if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && | 1043 | if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && |
| 1038 | p->real_cred->user != INIT_USER) | 1044 | p->real_cred->user != INIT_USER) |
| 1039 | goto bad_fork_free; | 1045 | goto bad_fork_free; |
| @@ -1241,21 +1247,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
| 1241 | /* Need tasklist lock for parent etc handling! */ | 1247 | /* Need tasklist lock for parent etc handling! */ |
| 1242 | write_lock_irq(&tasklist_lock); | 1248 | write_lock_irq(&tasklist_lock); |
| 1243 | 1249 | ||
| 1244 | /* | ||
| 1245 | * The task hasn't been attached yet, so its cpus_allowed mask will | ||
| 1246 | * not be changed, nor will its assigned CPU. | ||
| 1247 | * | ||
| 1248 | * The cpus_allowed mask of the parent may have changed after it was | ||
| 1249 | * copied first time - so re-copy it here, then check the child's CPU | ||
| 1250 | * to ensure it is on a valid CPU (and if not, just force it back to | ||
| 1251 | * parent's CPU). This avoids alot of nasty races. | ||
| 1252 | */ | ||
| 1253 | p->cpus_allowed = current->cpus_allowed; | ||
| 1254 | p->rt.nr_cpus_allowed = current->rt.nr_cpus_allowed; | ||
| 1255 | if (unlikely(!cpu_isset(task_cpu(p), p->cpus_allowed) || | ||
| 1256 | !cpu_online(task_cpu(p)))) | ||
| 1257 | set_task_cpu(p, smp_processor_id()); | ||
| 1258 | |||
| 1259 | /* CLONE_PARENT re-uses the old parent */ | 1250 | /* CLONE_PARENT re-uses the old parent */ |
| 1260 | if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { | 1251 | if (clone_flags & (CLONE_PARENT|CLONE_THREAD)) { |
| 1261 | p->real_parent = current->real_parent; | 1252 | p->real_parent = current->real_parent; |
