about summary refs log tree commit diff stats
path: root/mm/mmap.c
diff options
context:
space:
mode:
authorKirill A. Shutemov <kirill.shutemov@linux.intel.com>2015-02-10 17:09:59 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-10 17:30:31 -0500
commit27ba0644ea9dfe6e7693abc85837b60e40583b96 (patch)
treea1e04ba5026728711bde87cb2f336d2444ee6ffe /mm/mmap.c
parent1da4b35b001481df99a6dcab12d5d39a876f7056 (diff)
rmap: drop support of non-linear mappings
We don't create non-linear mappings anymore. Let's drop code which handles them in rmap.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--mm/mmap.c24
1 file changed, 7 insertions(+), 17 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index e023dc5e59a8..14d84666e8ba 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -243,10 +243,7 @@ static void __remove_shared_vm_struct(struct vm_area_struct *vma,
243 mapping_unmap_writable(mapping); 243 mapping_unmap_writable(mapping);
244 244
245 flush_dcache_mmap_lock(mapping); 245 flush_dcache_mmap_lock(mapping);
246 if (unlikely(vma->vm_flags & VM_NONLINEAR)) 246 vma_interval_tree_remove(vma, &mapping->i_mmap);
247 list_del_init(&vma->shared.nonlinear);
248 else
249 vma_interval_tree_remove(vma, &mapping->i_mmap);
250 flush_dcache_mmap_unlock(mapping); 247 flush_dcache_mmap_unlock(mapping);
251} 248}
252 249
@@ -649,10 +646,7 @@ static void __vma_link_file(struct vm_area_struct *vma)
649 atomic_inc(&mapping->i_mmap_writable); 646 atomic_inc(&mapping->i_mmap_writable);
650 647
651 flush_dcache_mmap_lock(mapping); 648 flush_dcache_mmap_lock(mapping);
652 if (unlikely(vma->vm_flags & VM_NONLINEAR)) 649 vma_interval_tree_insert(vma, &mapping->i_mmap);
653 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
654 else
655 vma_interval_tree_insert(vma, &mapping->i_mmap);
656 flush_dcache_mmap_unlock(mapping); 650 flush_dcache_mmap_unlock(mapping);
657 } 651 }
658} 652}
@@ -789,14 +783,11 @@ again: remove_next = 1 + (end > next->vm_end);
789 783
790 if (file) { 784 if (file) {
791 mapping = file->f_mapping; 785 mapping = file->f_mapping;
792 if (!(vma->vm_flags & VM_NONLINEAR)) { 786 root = &mapping->i_mmap;
793 root = &mapping->i_mmap; 787 uprobe_munmap(vma, vma->vm_start, vma->vm_end);
794 uprobe_munmap(vma, vma->vm_start, vma->vm_end);
795 788
796 if (adjust_next) 789 if (adjust_next)
797 uprobe_munmap(next, next->vm_start, 790 uprobe_munmap(next, next->vm_start, next->vm_end);
798 next->vm_end);
799 }
800 791
801 i_mmap_lock_write(mapping); 792 i_mmap_lock_write(mapping);
802 if (insert) { 793 if (insert) {
@@ -3177,8 +3168,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
3177 * 3168 *
3178 * mmap_sem in write mode is required in order to block all operations 3169 * mmap_sem in write mode is required in order to block all operations
3179 * that could modify pagetables and free pages without need of 3170 * that could modify pagetables and free pages without need of
3180 * altering the vma layout (for example populate_range() with 3171 * altering the vma layout. It's also needed in write mode to avoid new
3181 * nonlinear vmas). It's also needed in write mode to avoid new
3182 * anon_vmas to be associated with existing vmas. 3172 * anon_vmas to be associated with existing vmas.
3183 * 3173 *
3184 * A single task can't take more than one mm_take_all_locks() in a row 3174 * A single task can't take more than one mm_take_all_locks() in a row