author	Linus Torvalds <torvalds@g5.osdl.org>	2005-12-16 13:21:23 -0500
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-12-16 13:21:23 -0500
commit	4d7672b46244abffea1953e55688c0ea143dd617 (patch)
tree	9f3bdf438bcb0d5f6e723665ced23308fffb8368 /mm/memory.c
parent	281ab031a8c9e5b593142eb4ec59a87faae8676a (diff)
Make sure we copy pages inserted with "vm_insert_page()" on fork
The logic that decides that a fork() might be able to avoid copying a
VM area when it can be re-created by page faults didn't know about the
new vm_insert_page() case.

Also make some things a bit more anal wrt VM_PFNMAP.

Pointed out by Hugh Dickins <hugh@veritas.com>

Signed-off-by: Linus Torvalds <torvalds@osdl.org>
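For context, a minimal, hypothetical sketch (not part of this commit) of the kind of mapping the patch is about: a driver mmap handler that hands a kernel-allocated page to user space with vm_insert_page(). Such pages have no file or anonymous backing that a fault could re-create, so after this change vm_insert_page() also tags the VMA with VM_INSERTPAGE and fork() copies the page tables instead of skipping them. The driver name and allocation strategy below are assumptions for illustration only.

#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/mm.h>

/*
 * Hypothetical example, not from the kernel tree: map one zeroed kernel
 * page into user space.  A real driver would keep the page pointer and
 * free it in its release handler; that bookkeeping is omitted here.
 */
static int exampledrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

	if (!page)
		return -ENOMEM;

	/*
	 * Installs the page at vma->vm_start; with this patch applied it
	 * also sets VM_INSERTPAGE, so copy_page_range() copies the mapping
	 * on fork() rather than assuming it can be re-created by faults.
	 */
	return vm_insert_page(vma, vma->vm_start, page);
}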
Diffstat (limited to 'mm/memory.c')
-rw-r--r--	mm/memory.c	3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index d22f78c8a381..d8dde07a3656 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -574,7 +574,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 	 * readonly mappings. The tradeoff is that copy_page_range is more
 	 * efficient than faulting.
 	 */
-	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP))) {
+	if (!(vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))) {
 		if (!vma->anon_vma)
 			return 0;
 	}
@@ -1228,6 +1228,7 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr, struct page *
 		return -EFAULT;
 	if (!page_count(page))
 		return -EINVAL;
+	vma->vm_flags |= VM_INSERTPAGE;
 	return insert_page(vma->vm_mm, addr, page, vma->vm_page_prot);
 }
 EXPORT_SYMBOL(vm_insert_page);
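Taken together, the two hunks implement the decision sketched below. This is a paraphrase for readability, not code from the tree: after the patch, a VMA marked VM_HUGETLB, VM_NONLINEAR, VM_PFNMAP or VM_INSERTPAGE, or one that already has anonymous pages, gets its page tables copied at fork(), while an ordinary mapping can simply be re-populated by page faults in the child.

/*
 * Paraphrase of the post-patch copy_page_range() test: may fork() skip
 * copying this VMA's page tables and rely on faults to rebuild it?
 */
static inline int fork_can_skip_copy(struct vm_area_struct *vma)
{
	/* Special mappings, now including vm_insert_page() users, must be copied. */
	if (vma->vm_flags & (VM_HUGETLB|VM_NONLINEAR|VM_PFNMAP|VM_INSERTPAGE))
		return 0;

	/* Anonymous pages cannot be re-created by faulting. */
	if (vma->anon_vma)
		return 0;

	return 1;
}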