author: Rik van Riel <riel@redhat.com>, 2010-03-05 16:42:10 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org>, 2010-03-06 14:26:26 -0500
commit: fc148a5f7e0532750c312385c7ee9fa3e9311f34
tree: dfd132ed225a113f73c61f5e2018e5644bb3f677 /mm/mmap.c
parent: c44b674323f4a2480dbeb65d4b487fa5f06f49e0
mm: remove VM_LOCK_RMAP code
When a VMA is in an inconsistent state during setup or teardown, the worst that can happen is that the rmap code will not be able to find the page. The mapping is in the process of being torn down (PTEs just got invalidated by munmap), or set up (no PTEs have been instantiated yet).

It is also impossible for the rmap code to follow a pointer to an already freed VMA, because the rmap code holds the anon_vma->lock, which the VMA teardown code needs to take before the VMA is removed from the anon_vma chain.

Hence, we should not need the VM_LOCK_RMAP locking at all.

Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Nick Piggin <npiggin@suse.de>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
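The argument above boils down to lock ordering: an rmap walker holds anon_vma->lock for the duration of its walk, and VMA teardown must take that same lock before unlinking the VMA from the anon_vma chain, so a walker can never see a freed VMA. Below is a minimal sketch of that ordering against the anon_vma_chain structures of this kernel series; it is illustrative only, not the kernel's actual rmap code, and examine_vma() is a hypothetical stand-in for the walker's per-VMA work:

	#include <linux/mm.h>
	#include <linux/rmap.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* Hypothetical per-VMA work done by the walker. */
	static void examine_vma(struct vm_area_struct *vma);

	/* rmap side: every VMA observed on the chain is still alive. */
	static void rmap_walk_sketch(struct anon_vma *anon_vma)
	{
		struct anon_vma_chain *avc;

		spin_lock(&anon_vma->lock);
		list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
			/*
			 * A VMA mid-setup or mid-teardown may simply not
			 * map the page yet (or any more); the walk then
			 * fails to find it, which is harmless.
			 */
			examine_vma(avc->vma);
		}
		spin_unlock(&anon_vma->lock);
	}

	/* teardown side: the VMA leaves the chain only under the same lock,
	 * so it cannot be freed while a walker holds anon_vma->lock. */
	static void vma_unlink_sketch(struct anon_vma_chain *avc)
	{
		spin_lock(&avc->anon_vma->lock);
		list_del(&avc->same_anon_vma);	/* invisible to rmap walks */
		spin_unlock(&avc->anon_vma->lock);
	}

Since both sides serialize on anon_vma->lock, the VM_LOCK_RMAP flag added no extra protection, which is why this patch can drop it.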
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  15
1 file changed, 0 insertions, 15 deletions
diff --git a/mm/mmap.c b/mm/mmap.c
index 6a0c15db7f60..f1b4448626bf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -554,9 +554,7 @@ again: remove_next = 1 + (end > next->vm_end);
 		 */
 		if (importer && !importer->anon_vma) {
 			/* Block reverse map lookups until things are set up. */
-			importer->vm_flags |= VM_LOCK_RMAP;
 			if (anon_vma_clone(importer, vma)) {
-				importer->vm_flags &= ~VM_LOCK_RMAP;
 				return -ENOMEM;
 			}
 			importer->anon_vma = anon_vma;
@@ -618,11 +616,6 @@ again: remove_next = 1 + (end > next->vm_end);
 			__vma_unlink(mm, next, vma);
 		if (file)
 			__remove_shared_vm_struct(next, file, mapping);
-		/*
-		 * This VMA is now dead, no need for rmap to follow it.
-		 * Call anon_vma_merge below, outside of i_mmap_lock.
-		 */
-		next->vm_flags |= VM_LOCK_RMAP;
 	} else if (insert) {
 		/*
 		 * split_vma has split insert from vma, and needs
@@ -635,20 +628,12 @@ again: remove_next = 1 + (end > next->vm_end);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);

-	/*
-	 * The current VMA has been set up.  It is now safe for the
-	 * rmap code to get from the pages to the ptes.
-	 */
-	if (anon_vma && importer)
-		importer->vm_flags &= ~VM_LOCK_RMAP;
-
 	if (remove_next) {
 		if (file) {
 			fput(file);
 			if (next->vm_flags & VM_EXECUTABLE)
 				removed_exe_file_vma(mm);
 		}
-		/* Protected by mmap_sem and VM_LOCK_RMAP. */
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;