author    Michel Lespinasse <walken@google.com>    2012-10-08 19:31:36 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2012-10-09 03:22:41 -0400
commit    108d6642ad81bb1d62b401490a334d2c12397517 (patch)
tree      27df7d1777d80b9dddeaefaac928b726ff82a816 /mm/mremap.c
parent    9826a516ff77c5820e591211e4f3e58ff36f46be (diff)
mm anon rmap: remove anon_vma_moveto_tail
mremap() had a clever optimization where move_ptes() did not take the anon_vma lock to avoid a race with anon rmap users such as page migration. Instead, the avc's were ordered in such a way that the origin vma was always visited by rmap before the destination. This ordering, together with the use of page table locks, made the rmap usage safe.

However, we want to replace the use of linked lists in anon rmap with an interval tree, and this will make it harder to impose such an ordering as the interval tree will always be sorted by the avc->vma->vm_pgoff value. For now, let's replace the anon_vma_moveto_tail() ordering function with proper anon_vma locking in move_ptes(). Once we have the anon interval tree in place, we will re-introduce an optimization to avoid taking these locks in the most common cases.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Daniel Santos <daniel.santos@pobox.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
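For reference, the locking pattern this patch introduces in move_ptes() condenses to the sketch below. This is not the full function, just the lock/unlock bracketing lifted from the hunks that follow; the PTE copy loop, the page table spinlocks, and the rest of the parameter list are elided:

    static void move_ptes(struct vm_area_struct *vma, ... /* elided */)
    {
            struct address_space *mapping = NULL;
            struct anon_vma *anon_vma = vma->anon_vma;

            if (vma->vm_file) {
                    /* file rmap: serialize against i_mmap users */
                    mapping = vma->vm_file->f_mapping;
                    mutex_lock(&mapping->i_mmap_mutex);
            }
            /*
             * anon rmap: a concurrent rmap_walk() now blocks here
             * instead of relying on the old avc ordering trick.
             */
            if (anon_vma)
                    anon_vma_lock(anon_vma);

            /* ... move each pte from the old range to the new ... */

            if (anon_vma)
                    anon_vma_unlock(anon_vma);
            if (mapping)
                    mutex_unlock(&mapping->i_mmap_mutex);
    }

With the planned interval tree, the avc list order can no longer be controlled, so holding the anon_vma lock across the move is the straightforward way to keep rmap walkers from missing ptes while they are in flight between the two ranges.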
Diffstat (limited to 'mm/mremap.c')
-rw-r--r--    mm/mremap.c    14
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index cc06d0e48d05..5588bb6e9295 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -74,6 +74,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		unsigned long new_addr)
 {
 	struct address_space *mapping = NULL;
+	struct anon_vma *anon_vma = vma->anon_vma;
 	struct mm_struct *mm = vma->vm_mm;
 	pte_t *old_pte, *new_pte, pte;
 	spinlock_t *old_ptl, *new_ptl;
@@ -88,6 +89,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 		mapping = vma->vm_file->f_mapping;
 		mutex_lock(&mapping->i_mmap_mutex);
 	}
+	if (anon_vma)
+		anon_vma_lock(anon_vma);
 
 	/*
 	 * We don't have to worry about the ordering of src and dst
@@ -114,6 +117,8 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	spin_unlock(new_ptl);
 	pte_unmap(new_pte - 1);
 	pte_unmap_unlock(old_pte - 1, old_ptl);
+	if (anon_vma)
+		anon_vma_unlock(anon_vma);
 	if (mapping)
 		mutex_unlock(&mapping->i_mmap_mutex);
 }
@@ -221,15 +226,6 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
 	if (moved_len < old_len) {
 		/*
-		 * Before moving the page tables from the new vma to
-		 * the old vma, we need to be sure the old vma is
-		 * queued after new vma in the same_anon_vma list to
-		 * prevent SMP races with rmap_walk (that could lead
-		 * rmap_walk to miss some page table).
-		 */
-		anon_vma_moveto_tail(vma);
-
-		/*
 		 * On error, move entries back from new area to old,
 		 * which will succeed since page tables still there,
 		 * and then proceed to unmap new area instead of old.