-rw-r--r--  include/linux/rmap.h   1
-rw-r--r--  mm/memory.c            7
-rw-r--r--  mm/rmap.c             24
3 files changed, 32 insertions, 0 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 62da2001d55c..72be23b1480a 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -125,6 +125,7 @@ static inline void anon_vma_merge(struct vm_area_struct *vma,
 /*
  * rmap interfaces called when adding or removing pte of page
  */
+void page_move_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_new_anon_rmap(struct page *, struct vm_area_struct *, unsigned long);
 void page_add_file_rmap(struct page *);
diff --git a/mm/memory.c b/mm/memory.c
index dc785b438d70..d1153e37e9ba 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2138,6 +2138,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 			page_cache_release(old_page);
 		}
 		reuse = reuse_swap_page(old_page);
+		if (reuse)
+			/*
+			 * The page is all ours. Move it to our anon_vma so
+			 * the rmap code will not search our parent or siblings.
+			 * Protected against the rmap code by the page lock.
+			 */
+			page_move_anon_rmap(old_page, vma, address);
 		unlock_page(old_page);
 	} else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) ==
 					(VM_WRITE|VM_SHARED))) {
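Side note on the helper this hunk calls: as the mm/rmap.c change below shows, page->mapping for an anonymous page holds the anon_vma pointer with the low PAGE_MAPPING_ANON bit set, so page_move_anon_rmap() retargets the rmap walk with a single tagged-pointer store. A minimal standalone sketch of that tagging scheme, using hypothetical stand-in types rather than the kernel's, is:

	#include <assert.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel structures. */
	struct anon_vma { int dummy; };
	struct address_space { int dummy; };
	struct page { struct address_space *mapping; };

	/* Low-bit tag marking an anonymous mapping, as in the kernel. */
	#define PAGE_MAPPING_ANON 1

	/* Mirror of the store done by page_move_anon_rmap(): point the
	 * page at the new anon_vma, keeping the ANON tag bit set. */
	static void move_anon_rmap(struct page *page, struct anon_vma *anon_vma)
	{
		unsigned long tagged = (unsigned long)anon_vma + PAGE_MAPPING_ANON;
		page->mapping = (struct address_space *)tagged;
	}

	/* Recover the anon_vma by stripping the tag, as the rmap walk does. */
	static struct anon_vma *page_anon_vma(struct page *page)
	{
		unsigned long m = (unsigned long)page->mapping;
		assert(m & PAGE_MAPPING_ANON);
		return (struct anon_vma *)(m - PAGE_MAPPING_ANON);
	}

	int main(void)
	{
		static struct anon_vma child_avma;	/* aligned, low bit clear */
		struct page page = { 0 };

		move_anon_rmap(&page, &child_avma);
		printf("anon_vma recovered: %d\n", page_anon_vma(&page) == &child_avma);
		return 0;
	}

This is only an illustration of the pointer-tagging trick; the real helper additionally asserts the page lock and the linear index, as seen in the mm/rmap.c hunk.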
diff --git a/mm/rmap.c b/mm/rmap.c
index 23ecd0a892df..28bcdc433d88 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -716,6 +716,30 @@ int page_mkclean(struct page *page)
 EXPORT_SYMBOL_GPL(page_mkclean);
 
 /**
+ * page_move_anon_rmap - move a page to our anon_vma
+ * @page: the page to move to our anon_vma
+ * @vma: the vma the page belongs to
+ * @address: the user virtual address mapped
+ *
+ * When a page belongs exclusively to one process after a COW event,
+ * that page can be moved into the anon_vma that belongs to just that
+ * process, so the rmap code will not search the parent or sibling
+ * processes.
+ */
+void page_move_anon_rmap(struct page *page,
+	struct vm_area_struct *vma, unsigned long address)
+{
+	struct anon_vma *anon_vma = vma->anon_vma;
+
+	VM_BUG_ON(!PageLocked(page));
+	VM_BUG_ON(!anon_vma);
+	VM_BUG_ON(page->index != linear_page_index(vma, address));
+
+	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
+	page->mapping = (struct address_space *) anon_vma;
+}
+
+/**
  * __page_set_anon_rmap - setup new anonymous rmap
  * @page: the page to add the mapping to
  * @vma: the vm area in which the mapping is added