author    Vladimir Davydov <vdavydov@parallels.com>    2014-08-08 17:21:56 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-08-08 18:57:23 -0400
commit    41f727fde1fe40efeb4fef6fdce74ff794be5aeb (patch)
tree      b4946b74270cfe286580f44659000aef0d204277    /kernel/fork.c
parent    8f053ac11f96cc6edcabcbb154c9cf06c5d63333 (diff)
fork/exec: cleanup mm initialization
mm initialization on fork/exec is spread all over the place, which makes the code look inconsistent.

We have mm_init(), which is supposed to init/nullify mm's internals, but it doesn't init all the fields it should:

 - on fork, ->mmap, mm_rb, vmacache_seqnum, map_count, mm_cpumask, locked_vm are zeroed in dup_mmap();
 - on fork, ->pmd_huge_pte is zeroed in dup_mm(), immediately before calling mm_init();
 - the ->cpu_vm_mask_var ptr is initialized by mm_init_cpumask(), which is called before mm_init() on both fork and exec;
 - ->context is initialized by init_new_context(), which is called after mm_init() on both fork and exec.

Let's consolidate all the initializations in mm_init() to make the code look cleaner.

Signed-off-by: Vladimir Davydov <vdavydov@parallels.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Christoph Lameter <cl@linux.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
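For orientation, here is a condensed sketch of mm_init() as it looks after this patch, reduced to its allocation and error-unwind steps (illustrative only, not the verbatim kernel source; the consolidated field initializations are elided):

static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
{
	/* field initialization (mmap, mm_rb, vmacache_seqnum, ...) now lives here */

	if (mm_alloc_pgd(mm))
		goto fail_nopgd;

	if (init_new_context(p, mm))
		goto fail_nocontext;

	return mm;

fail_nocontext:
	/*
	 * init_new_context() failed, so mmput() (which would call
	 * destroy_context()) cannot be used; free the pgd and the mm
	 * directly instead.
	 */
	mm_free_pgd(mm);
fail_nopgd:
	free_mm(mm);
	return NULL;
}

With the arch context set up inside mm_init(), dup_mm() no longer needs its own fail_nocontext unwind path, which is why that label and its comment are removed in the last hunk below.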
Diffstat (limited to 'kernel/fork.c')
-rw-r--r--    kernel/fork.c    47
1 file changed, 20 insertions(+), 27 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index f6f5086c9e7d..418b52a9ec6a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -374,12 +374,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	 */
 	down_write_nested(&mm->mmap_sem, SINGLE_DEPTH_NESTING);
 
-	mm->locked_vm = 0;
-	mm->mmap = NULL;
-	mm->vmacache_seqnum = 0;
-	mm->map_count = 0;
-	cpumask_clear(mm_cpumask(mm));
-	mm->mm_rb = RB_ROOT;
 	rb_link = &mm->mm_rb.rb_node;
 	rb_parent = NULL;
 	pprev = &mm->mmap;
@@ -538,17 +532,27 @@ static void mm_init_aio(struct mm_struct *mm)
 
 static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 {
+	mm->mmap = NULL;
+	mm->mm_rb = RB_ROOT;
+	mm->vmacache_seqnum = 0;
 	atomic_set(&mm->mm_users, 1);
 	atomic_set(&mm->mm_count, 1);
 	init_rwsem(&mm->mmap_sem);
 	INIT_LIST_HEAD(&mm->mmlist);
 	mm->core_state = NULL;
 	atomic_long_set(&mm->nr_ptes, 0);
+	mm->map_count = 0;
+	mm->locked_vm = 0;
 	memset(&mm->rss_stat, 0, sizeof(mm->rss_stat));
 	spin_lock_init(&mm->page_table_lock);
+	mm_init_cpumask(mm);
 	mm_init_aio(mm);
 	mm_init_owner(mm, p);
+	mmu_notifier_mm_init(mm);
 	clear_tlb_flush_pending(mm);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
+	mm->pmd_huge_pte = NULL;
+#endif
 
 	if (current->mm) {
 		mm->flags = current->mm->flags & MMF_INIT_MASK;
@@ -558,11 +562,17 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p)
 		mm->def_flags = 0;
 	}
 
-	if (likely(!mm_alloc_pgd(mm))) {
-		mmu_notifier_mm_init(mm);
-		return mm;
-	}
+	if (mm_alloc_pgd(mm))
+		goto fail_nopgd;
+
+	if (init_new_context(p, mm))
+		goto fail_nocontext;
 
+	return mm;
+
+fail_nocontext:
+	mm_free_pgd(mm);
+fail_nopgd:
 	free_mm(mm);
 	return NULL;
 }
@@ -596,7 +606,6 @@ struct mm_struct *mm_alloc(void)
 		return NULL;
 
 	memset(mm, 0, sizeof(*mm));
-	mm_init_cpumask(mm);
 	return mm_init(mm, current);
 }
 
@@ -828,17 +837,10 @@ static struct mm_struct *dup_mm(struct task_struct *tsk)
 		goto fail_nomem;
 
 	memcpy(mm, oldmm, sizeof(*mm));
-	mm_init_cpumask(mm);
 
-#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
-	mm->pmd_huge_pte = NULL;
-#endif
 	if (!mm_init(mm, tsk))
 		goto fail_nomem;
 
-	if (init_new_context(tsk, mm))
-		goto fail_nocontext;
-
 	dup_mm_exe_file(oldmm, mm);
 
 	err = dup_mmap(mm, oldmm);
@@ -860,15 +862,6 @@ free_pt:
 
 fail_nomem:
 	return NULL;
-
-fail_nocontext:
-	/*
-	 * If init_new_context() failed, we cannot use mmput() to free the mm
-	 * because it calls destroy_context()
-	 */
-	mm_free_pgd(mm);
-	free_mm(mm);
-	return NULL;
 }
 
 static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)