diff options
 mm/memory.c | 11 +++++++++++
 1 file changed, 11 insertions(+), 0 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index e046b7e4b530..a596c1172248 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -498,6 +498,17 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	unsigned long addr = vma->vm_start;
 	unsigned long end = vma->vm_end;
 
+	/*
+	 * Don't copy ptes where a page fault will fill them correctly.
+	 * Fork becomes much lighter when there are big shared or private
+	 * readonly mappings. The tradeoff is that copy_page_range is more
+	 * efficient than faulting.
+	 */
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_RESERVED))) {
+		if (!vma->anon_vma)
+			return 0;
+	}
+
 	if (is_vm_hugetlb_page(vma))
 		return copy_hugetlb_page_range(dst_mm, src_mm, vma);
 