author	Michel Lespinasse <walken@google.com>	2012-10-08 19:31:50 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:42 -0400
commit	38a76013ad809beb0b52f60d365c960d035bd83c (patch)
tree	c63ba707ab17dd1ff1e90650faf74570daa3cf9f	/include/linux/mm.h
parent	523d4e2008fd4a68b1a164e63e8c75b7b20f07e0 (diff)
mm: avoid taking rmap locks in move_ptes()
During mremap(), the destination VMA is generally placed after the
original vma in rmap traversal order: in move_vma(), we always have
new_pgoff >= vma->vm_pgoff, and as a result new_vma->vm_pgoff >=
vma->vm_pgoff unless vma_merge() merged the new vma with an adjacent one.

When the destination VMA is placed after the original in rmap traversal
order, we can avoid taking the rmap locks in move_ptes().

Essentially, this reintroduces the optimization that had been disabled in
"mm anon rmap: remove anon_vma_moveto_tail". The difference is that we
don't try to impose the rmap traversal order; instead we just rely on
things being in the desired order in the common case and fall back to
taking locks in the uncommon case.

Also we skip the i_mmap_mutex in addition to the anon_vma lock: in both
cases, the vmas are traversed in increasing vm_pgoff order with ties
resolved in tree insertion order.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Daniel Santos <daniel.santos@pobox.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
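As context for the prototype changes below: the ordering rule described
above is evaluated by the callers (copy_vma() in mm/mmap.c), whose bodies
fall outside this header-only diffstat. A minimal C sketch of that rule
follows; the helper name is invented here for illustration and is not the
kernel's:

#include <linux/mm.h>

/*
 * Illustrative sketch only -- the real decision lives in copy_vma(),
 * which is not part of this header diff.
 *
 * Rmap trees (the anon_vma interval tree and i_mmap) are walked in
 * increasing vm_pgoff order, with ties resolved by tree insertion
 * order. A freshly created destination vma is inserted after the
 * source and has new_pgoff >= vma->vm_pgoff, so rmap walkers visit
 * the source first and move_ptes() may skip the locks. If vma_merge()
 * reused an existing vma, that ordering is no longer guaranteed.
 */
static bool sketch_need_rmap_locks(struct vm_area_struct *vma,
				   struct vm_area_struct *new_vma)
{
	/* ties (equal vm_pgoff) conservatively require locking */
	return new_vma->vm_pgoff <= vma->vm_pgoff;
}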
Diffstat (limited to 'include/linux/mm.h')
-rw-r--r--	include/linux/mm.h	6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 0e6f9c9f2123..0d5f823ce3fc 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1060,7 +1060,8 @@ vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
 
 extern unsigned long move_page_tables(struct vm_area_struct *vma,
 		unsigned long old_addr, struct vm_area_struct *new_vma,
-		unsigned long new_addr, unsigned long len);
+		unsigned long new_addr, unsigned long len,
+		bool need_rmap_locks);
 extern unsigned long do_mremap(unsigned long addr,
 		unsigned long old_len, unsigned long new_len,
 		unsigned long flags, unsigned long new_addr);
@@ -1410,7 +1411,8 @@ extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
 			struct rb_node **, struct rb_node *);
 extern void unlink_file_vma(struct vm_area_struct *);
 extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
-	unsigned long addr, unsigned long len, pgoff_t pgoff);
+	unsigned long addr, unsigned long len, pgoff_t pgoff,
+	bool *need_rmap_locks);
 extern void exit_mmap(struct mm_struct *);
 
 extern int mm_take_all_locks(struct mm_struct *mm);
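
For completeness, a hedged sketch of how a caller like move_vma() in
mm/mremap.c would thread the new flag from copy_vma() into
move_page_tables(); this is a paraphrase under the new prototypes above,
not the literal mm/mremap.c hunk, which this page does not show:

	/* inside move_vma(): */
	bool need_rmap_locks;
	struct vm_area_struct *new_vma;
	unsigned long moved_len;

	/* copy_vma() reports whether rmap traversal order is guaranteed */
	new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
			   &need_rmap_locks);
	if (!new_vma)
		return -ENOMEM;

	/* move_page_tables() only takes the rmap locks when required */
	moved_len = move_page_tables(vma, old_addr, new_vma, new_addr,
				     old_len, need_rmap_locks);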