diff options
author | Rik van Riel <riel@redhat.com> | 2010-03-05 16:42:09 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-03-06 14:26:26 -0500 |
commit | c44b674323f4a2480dbeb65d4b487fa5f06f49e0 (patch) | |
tree | b753050e6752eb2fc961ad3ea5dfdf88ef88364d /mm | |
parent | 033a64b56aed798991de18d226085dfb1ccd858d (diff) |
rmap: move exclusively owned pages to own anon_vma in do_wp_page()
When the parent process breaks the COW on a page, both the original page,
which is mapped in the child, and the new page, which is mapped in the
parent, end up in that same anon_vma. Generally this won't be a problem,
but for some workloads it could preserve the O(N) rmap scanning complexity.
A simple fix is to ensure that, when a page which is mapped in the child
gets reused in do_wp_page(), it gets moved to our own exclusive anon_vma,
because we are already the exclusive owner of the page.
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hugh.dickins@tiscali.co.uk>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/memory.c | 7 | ||||
-rw-r--r-- | mm/rmap.c | 24 |
2 files changed, 31 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c index dc785b438d70..d1153e37e9ba 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -2138,6 +2138,13 @@ static int do_wp_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2138 | page_cache_release(old_page); | 2138 | page_cache_release(old_page); |
2139 | } | 2139 | } |
2140 | reuse = reuse_swap_page(old_page); | 2140 | reuse = reuse_swap_page(old_page); |
2141 | if (reuse) | ||
2142 | /* | ||
2143 | * The page is all ours. Move it to our anon_vma so | ||
2144 | * the rmap code will not search our parent or siblings. | ||
2145 | * Protected against the rmap code by the page lock. | ||
2146 | */ | ||
2147 | page_move_anon_rmap(old_page, vma, address); | ||
2141 | unlock_page(old_page); | 2148 | unlock_page(old_page); |
2142 | } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == | 2149 | } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == |
2143 | (VM_WRITE|VM_SHARED))) { | 2150 | (VM_WRITE|VM_SHARED))) { |
@@ -716,6 +716,30 @@ int page_mkclean(struct page *page) | |||
716 | EXPORT_SYMBOL_GPL(page_mkclean); | 716 | EXPORT_SYMBOL_GPL(page_mkclean); |
717 | 717 | ||
/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	/*
	 * Caller must hold the page lock (this protects us against the
	 * rmap code walking page->mapping concurrently), and the vma
	 * must already have an anon_vma covering @address.
	 */
	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON(page->index != linear_page_index(vma, address));

	/*
	 * page->mapping doubles as the anon_vma pointer for anonymous
	 * pages; the low PAGE_MAPPING_ANON bit tags it so page_mapping()
	 * and the rmap code can tell it apart from a file mapping.
	 */
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}
741 | |||
742 | /** | ||
719 | * __page_set_anon_rmap - setup new anonymous rmap | 743 | * __page_set_anon_rmap - setup new anonymous rmap |
720 | * @page: the page to add the mapping to | 744 | * @page: the page to add the mapping to |
721 | * @vma: the vm area in which the mapping is added | 745 | * @vma: the vm area in which the mapping is added |