author	Davidlohr Bueso <dave@stgolabs.net>	2014-12-12 19:54:21 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-13 15:42:45 -0500
commit	83cde9e8ba95d180eaefefe834958fbf7008cf39 (patch)
tree	041dd6f0bc4e41baec1a46422683596111e1e2a8
parent	8b28f621bea6f84d44adf7e804b73aff1e09105b (diff)
mm: use new helper functions around the i_mmap_mutex
Convert all open coded mutex_lock/unlock calls to the
i_mmap_[lock/unlock]_write() helpers.

Signed-off-by: Davidlohr Bueso <dbueso@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: "Kirill A. Shutemov" <kirill@shutemov.name>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
Acked-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
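The i_mmap_lock_write()/i_mmap_unlock_write() helpers come from the parent
commit (8b28f621bea6, "mm,fs: introduce helpers around the i_mmap_mutex").
At this point in the series they are thin inline wrappers over the existing
mutex; roughly the following sketch (an approximation of the definitions in
include/linux/fs.h from that patch, not a verbatim quote):

/* Sketch of the wrappers this patch converts all callers to use. */
static inline void i_mmap_lock_write(struct address_space *mapping)
{
	mutex_lock(&mapping->i_mmap_mutex);
}

static inline void i_mmap_unlock_write(struct address_space *mapping)
{
	mutex_unlock(&mapping->i_mmap_mutex);
}

Centralizing the lock/unlock sites behind these helpers lets a later patch
swap the underlying primitive (e.g. for a rwsem) without touching every
caller again.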
-rw-r--r--	fs/hugetlbfs/inode.c	4
-rw-r--r--	kernel/events/uprobes.c	4
-rw-r--r--	kernel/fork.c	4
-rw-r--r--	mm/filemap_xip.c	4
-rw-r--r--	mm/fremap.c	4
-rw-r--r--	mm/hugetlb.c	12
-rw-r--r--	mm/memory-failure.c	4
-rw-r--r--	mm/memory.c	8
-rw-r--r--	mm/mmap.c	14
-rw-r--r--	mm/mremap.c	4
-rw-r--r--	mm/nommu.c	14
-rw-r--r--	mm/rmap.c	4
12 files changed, 40 insertions, 40 deletions
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 1e2872b25343..a082709aa427 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -412,10 +412,10 @@ static int hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 	pgoff = offset >> PAGE_SHIFT;
 
 	i_size_write(inode, offset);
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	if (!RB_EMPTY_ROOT(&mapping->i_mmap))
 		hugetlb_vmtruncate_list(&mapping->i_mmap, pgoff);
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 	truncate_hugepages(inode, offset);
 	return 0;
 }
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index ed8f2cde34c5..aac81bf9df09 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -724,7 +724,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 	int more = 0;
 
  again:
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		if (!valid_vma(vma, is_register))
 			continue;
@@ -755,7 +755,7 @@ build_map_info(struct address_space *mapping, loff_t offset, bool is_register)
 		info->mm = vma->vm_mm;
 		info->vaddr = offset_to_vaddr(vma, offset);
 	}
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 
 	if (!more)
 		goto out;
diff --git a/kernel/fork.c b/kernel/fork.c
index 9ca84189cfc2..4dc2ddade9f1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -433,7 +433,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			get_file(file);
 			if (tmp->vm_flags & VM_DENYWRITE)
 				atomic_dec(&inode->i_writecount);
-			mutex_lock(&mapping->i_mmap_mutex);
+			i_mmap_lock_write(mapping);
 			if (tmp->vm_flags & VM_SHARED)
 				atomic_inc(&mapping->i_mmap_writable);
 			flush_dcache_mmap_lock(mapping);
@@ -445,7 +445,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 				vma_interval_tree_insert_after(tmp, mpnt,
 							&mapping->i_mmap);
 			flush_dcache_mmap_unlock(mapping);
-			mutex_unlock(&mapping->i_mmap_mutex);
+			i_mmap_unlock_write(mapping);
 		}
 
 		/*
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index d8d9fe3f685c..bad746bde4a2 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -182,7 +182,7 @@ __xip_unmap (struct address_space * mapping,
 		return;
 
 retry:
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		mm = vma->vm_mm;
 		address = vma->vm_start +
@@ -202,7 +202,7 @@ retry:
 			page_cache_release(page);
 		}
 	}
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 
 	if (locked) {
 		mutex_unlock(&xip_sparse_mutex);
diff --git a/mm/fremap.c b/mm/fremap.c
index 72b8fa361433..11ef7ec40d13 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -238,13 +238,13 @@ get_write_lock:
 			}
 			goto out_freed;
 		}
-		mutex_lock(&mapping->i_mmap_mutex);
+		i_mmap_lock_write(mapping);
 		flush_dcache_mmap_lock(mapping);
 		vma->vm_flags |= VM_NONLINEAR;
 		vma_interval_tree_remove(vma, &mapping->i_mmap);
 		vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
 		flush_dcache_mmap_unlock(mapping);
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 	}
 
 	if (vma->vm_flags & VM_LOCKED) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 919b86a2164d..ffe19304cc09 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2774,7 +2774,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * this mapping should be shared between all the VMAs,
 	 * __unmap_hugepage_range() is called as the lock is already held
 	 */
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
 		/* Do not unmap the current VMA */
 		if (iter_vma == vma)
@@ -2791,7 +2791,7 @@ static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
 			unmap_hugepage_range(iter_vma, address,
 					     address + huge_page_size(h), page);
 	}
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 }
 
 /*
@@ -3348,7 +3348,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	flush_cache_range(vma, address, end);
 
 	mmu_notifier_invalidate_range_start(mm, start, end);
-	mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+	i_mmap_lock_write(vma->vm_file->f_mapping);
 	for (; address < end; address += huge_page_size(h)) {
 		spinlock_t *ptl;
 		ptep = huge_pte_offset(mm, address);
@@ -3376,7 +3376,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * and that page table be reused and filled with junk.
 	 */
 	flush_tlb_range(vma, start, end);
-	mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
 
 	return pages << h->order;
@@ -3544,7 +3544,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	if (!vma_shareable(vma, addr))
 		return (pte_t *)pmd_alloc(mm, pud, addr);
 
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
 		if (svma == vma)
 			continue;
@@ -3572,7 +3572,7 @@ pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud)
 	spin_unlock(ptl);
 out:
 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 	return pte;
 }
 
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index e5ee0ca7ae85..5e2b26dab8dc 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -466,7 +466,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 	struct task_struct *tsk;
 	struct address_space *mapping = page->mapping;
 
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	read_lock(&tasklist_lock);
 	for_each_process(tsk) {
 		pgoff_t pgoff = page_to_pgoff(page);
@@ -488,7 +488,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
 		}
 	}
 	read_unlock(&tasklist_lock);
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 }
 
 /*
diff --git a/mm/memory.c b/mm/memory.c
index 4b5a282e1107..039fab699a1a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1326,9 +1326,9 @@ static void unmap_single_vma(struct mmu_gather *tlb,
 		 * safe to do nothing in this case.
 		 */
 		if (vma->vm_file) {
-			mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
+			i_mmap_lock_write(vma->vm_file->f_mapping);
 			__unmap_hugepage_range_final(tlb, vma, start, end, NULL);
-			mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
+			i_mmap_unlock_write(vma->vm_file->f_mapping);
 		}
 	} else
 		unmap_page_range(tlb, vma, start, end, details);
@@ -2377,12 +2377,12 @@ void unmap_mapping_range(struct address_space *mapping,
 		details.last_index = ULONG_MAX;
 
 
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	if (unlikely(!RB_EMPTY_ROOT(&mapping->i_mmap)))
 		unmap_mapping_range_tree(&mapping->i_mmap, &details);
 	if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
 		unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 }
 EXPORT_SYMBOL(unmap_mapping_range);
 
diff --git a/mm/mmap.c b/mm/mmap.c
index b6c0a77fc1c8..ecd6ecf48778 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -260,9 +260,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
 
 	if (file) {
 		struct address_space *mapping = file->f_mapping;
-		mutex_lock(&mapping->i_mmap_mutex);
+		i_mmap_lock_write(mapping);
 		__remove_shared_vm_struct(vma, file, mapping);
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 	}
 }
 
@@ -674,14 +674,14 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
-		mutex_lock(&mapping->i_mmap_mutex);
+		i_mmap_lock_write(mapping);
 	}
 
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	__vma_link_file(vma);
 
 	if (mapping)
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 
 	mm->map_count++;
 	validate_mm(mm);
@@ -796,7 +796,7 @@ again: remove_next = 1 + (end > next->vm_end);
 						next->vm_end);
 	}
 
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	if (insert) {
 		/*
 		 * Put into interval tree now, so instantiated pages
@@ -883,7 +883,7 @@ again: remove_next = 1 + (end > next->vm_end);
 		anon_vma_unlock_write(anon_vma);
 	}
 	if (mapping)
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 
 	if (root) {
 		uprobe_mmap(vma);
@@ -3182,7 +3182,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
 					&mapping->flags))
 			BUG();
diff --git a/mm/mremap.c b/mm/mremap.c
index b147f66f4c40..426b448d6447 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -119,7 +119,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	if (need_rmap_locks) {
 		if (vma->vm_file) {
 			mapping = vma->vm_file->f_mapping;
-			mutex_lock(&mapping->i_mmap_mutex);
+			i_mmap_lock_write(mapping);
 		}
 		if (vma->anon_vma) {
 			anon_vma = vma->anon_vma;
@@ -156,7 +156,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	if (anon_vma)
 		anon_vma_unlock_write(anon_vma);
 	if (mapping)
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 }
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
diff --git a/mm/nommu.c b/mm/nommu.c
index bd1808e194a7..52a576553581 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -722,11 +722,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
-		mutex_lock(&mapping->i_mmap_mutex);
+		i_mmap_lock_write(mapping);
 		flush_dcache_mmap_lock(mapping);
 		vma_interval_tree_insert(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 	}
 
 	/* add the VMA to the tree */
@@ -795,11 +795,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
-		mutex_lock(&mapping->i_mmap_mutex);
+		i_mmap_lock_write(mapping);
 		flush_dcache_mmap_lock(mapping);
 		vma_interval_tree_remove(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 	}
 
 	/* remove from the MM's tree and list */
@@ -2094,14 +2094,14 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	down_write(&nommu_region_sem);
-	mutex_lock(&inode->i_mapping->i_mmap_mutex);
+	i_mmap_lock_write(inode->i_mapping);
 
 	/* search for VMAs that fall within the dead zone */
 	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
 		/* found one - only interested if it's shared out of the page
 		 * cache */
 		if (vma->vm_flags & VM_SHARED) {
-			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+			i_mmap_unlock_write(inode->i_mapping);
 			up_write(&nommu_region_sem);
 			return -ETXTBSY; /* not quite true, but near enough */
 		}
@@ -2129,7 +2129,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		}
 	}
 
-	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
+	i_mmap_unlock_write(inode->i_mapping);
 	up_write(&nommu_region_sem);
 	return 0;
 }
diff --git a/mm/rmap.c b/mm/rmap.c
index 45eba36fd673..bea03f6bec61 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1690,7 +1690,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 
 	if (!mapping)
 		return ret;
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
 		unsigned long address = vma_address(page, vma);
 
@@ -1713,7 +1713,7 @@ static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 	ret = rwc->file_nonlinear(page, mapping, rwc->arg);
 
 done:
-	mutex_unlock(&mapping->i_mmap_mutex);
+	i_mmap_unlock_write(mapping);
 	return ret;
 }
 