 include/linux/mm.h |  4 ----
 mm/mmap.c          | 15 ---------------
 mm/rmap.c          | 12 ------------
 3 files changed, 0 insertions(+), 31 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 8e2841a2f441..3899395a03de 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -97,11 +97,7 @@ extern unsigned int kobjsize(const void *objp);
 #define VM_NORESERVE	0x00200000	/* should the VM suppress accounting */
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
-#ifdef CONFIG_MMU
-#define VM_LOCK_RMAP	0x01000000	/* Do not follow this rmap (mmu mmap) */
-#else
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
-#endif
 #define VM_INSERTPAGE	0x02000000	/* The vma has had "vm_insert_page()" done on it */
 #define VM_ALWAYSDUMP	0x04000000	/* Always include in core dumps */
 
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -554,9 +554,7 @@ again:			remove_next = 1 + (end > next->vm_end);
 		 */
 		if (importer && !importer->anon_vma) {
 			/* Block reverse map lookups until things are set up. */
-			importer->vm_flags |= VM_LOCK_RMAP;
 			if (anon_vma_clone(importer, vma)) {
-				importer->vm_flags &= ~VM_LOCK_RMAP;
 				return -ENOMEM;
 			}
 			importer->anon_vma = anon_vma;
@@ -618,11 +616,6 @@ again:			remove_next = 1 + (end > next->vm_end);
 		__vma_unlink(mm, next, vma);
 		if (file)
 			__remove_shared_vm_struct(next, file, mapping);
-		/*
-		 * This VMA is now dead, no need for rmap to follow it.
-		 * Call anon_vma_merge below, outside of i_mmap_lock.
-		 */
-		next->vm_flags |= VM_LOCK_RMAP;
 	} else if (insert) {
 		/*
 		 * split_vma has split insert from vma, and needs
@@ -635,20 +628,12 @@ again:			remove_next = 1 + (end > next->vm_end);
 	if (mapping)
 		spin_unlock(&mapping->i_mmap_lock);
 
-	/*
-	 * The current VMA has been set up. It is now safe for the
-	 * rmap code to get from the pages to the ptes.
-	 */
-	if (anon_vma && importer)
-		importer->vm_flags &= ~VM_LOCK_RMAP;
-
 	if (remove_next) {
 		if (file) {
 			fput(file);
 			if (next->vm_flags & VM_EXECUTABLE)
 				removed_exe_file_vma(mm);
 		}
-		/* Protected by mmap_sem and VM_LOCK_RMAP. */
 		if (next->anon_vma)
 			anon_vma_merge(vma, next);
 		mm->map_count--;
diff --git a/mm/rmap.c b/mm/rmap.c
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -329,18 +329,6 @@ vma_address(struct page *page, struct vm_area_struct *vma)
 		/* page should be within @vma mapping range */
 		return -EFAULT;
 	}
-	if (unlikely(vma->vm_flags & VM_LOCK_RMAP)) {
-		/*
-		 * This VMA is being unlinked or is not yet linked into the
-		 * VMA tree.  Do not try to follow this rmap.  This race
-		 * condition can result in page_referenced() ignoring a
-		 * reference or in try_to_unmap() failing to unmap a page.
-		 * The VMA cannot be freed under us because we hold the
-		 * anon_vma->lock, which the munmap code takes while
-		 * unlinking the anon_vmas from the VMA.
-		 */
-		return -EFAULT;
-	}
 	return address;
 }
 