author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2011-05-24 20:12:06 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-05-25 11:39:18 -0400
commit     3d48ae45e72390ddf8cc5256ac32ed6f7a19cbea (patch)
tree       1f46db3a8424090dd8e0b58991fa5acc1a73e680 /mm
parent     97a894136f29802da19a15541de3c019e1ca147e (diff)
mm: Convert i_mmap_lock to a mutex
Straightforward conversion of i_mmap_lock to a mutex.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
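Note for readers skimming the diff: the change is mechanical. The i_mmap protection in struct address_space moves from the spinlock i_mmap_lock to the sleeping lock i_mmap_mutex, and every call site in mm/ switches from the spin_lock family to the corresponding mutex call. A minimal sketch of the before/after pattern follows; the function below is illustrative only and is not part of this patch.

/* Illustrative sketch only -- not code from this patch. */
#include <linux/fs.h>      /* struct address_space */
#include <linux/mutex.h>

static void example_walk_i_mmap(struct address_space *mapping)
{
        /* before this patch: spin_lock(&mapping->i_mmap_lock); */
        mutex_lock(&mapping->i_mmap_mutex);     /* after: a sleeping lock */
        /* ... walk mapping->i_mmap under the lock ... */
        mutex_unlock(&mapping->i_mmap_mutex);
        /* before this patch: spin_unlock(&mapping->i_mmap_lock); */
}

Because the lock can now sleep, the lock-break helper cond_resched_lock(&mapping->i_mmap_lock) in try_to_unmap_file() becomes a plain cond_resched() while the mutex stays held, and spin_lock_nest_lock() in mm_take_all_locks() becomes mutex_lock_nest_lock(), as the mm/rmap.c and mm/mmap.c hunks below show.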
Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         10
-rw-r--r--  mm/filemap_xip.c      4
-rw-r--r--  mm/fremap.c           4
-rw-r--r--  mm/hugetlb.c         14
-rw-r--r--  mm/memory-failure.c   4
-rw-r--r--  mm/memory.c           4
-rw-r--r--  mm/mmap.c            22
-rw-r--r--  mm/mremap.c           4
-rw-r--r--  mm/rmap.c            28
9 files changed, 47 insertions, 47 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 8144f87dcbb4..88354ae0b1fd 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -58,16 +58,16 @@
 /*
  * Lock ordering:
  *
- * ->i_mmap_lock (truncate_pagecache)
+ * ->i_mmap_mutex (truncate_pagecache)
  * ->private_lock (__free_pte->__set_page_dirty_buffers)
  * ->swap_lock (exclusive_swap_page, others)
  * ->mapping->tree_lock
  *
  * ->i_mutex
- * ->i_mmap_lock (truncate->unmap_mapping_range)
+ * ->i_mmap_mutex (truncate->unmap_mapping_range)
  *
  * ->mmap_sem
- * ->i_mmap_lock
+ * ->i_mmap_mutex
  * ->page_table_lock or pte_lock (various, mainly in memory.c)
  * ->mapping->tree_lock (arch-dependent flush_dcache_mmap_lock)
  *
@@ -84,7 +84,7 @@
  * sb_lock (fs/fs-writeback.c)
  * ->mapping->tree_lock (__sync_single_inode)
  *
- * ->i_mmap_lock
+ * ->i_mmap_mutex
  * ->anon_vma.lock (vma_adjust)
  *
  * ->anon_vma.lock
@@ -106,7 +106,7 @@
  *
  * (code doesn't rely on that order, so you could switch it around)
  * ->tasklist_lock (memory_failure, collect_procs_ao)
- * ->i_mmap_lock
+ * ->i_mmap_mutex
  */

 /*
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index 83364df74a33..93356cd12828 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -183,7 +183,7 @@ __xip_unmap (struct address_space * mapping,
                 return;

 retry:
-        spin_lock(&mapping->i_mmap_lock);
+        mutex_lock(&mapping->i_mmap_mutex);
         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                 mm = vma->vm_mm;
                 address = vma->vm_start +
@@ -201,7 +201,7 @@ retry:
                         page_cache_release(page);
                 }
         }
-        spin_unlock(&mapping->i_mmap_lock);
+        mutex_unlock(&mapping->i_mmap_mutex);

         if (locked) {
                 mutex_unlock(&xip_sparse_mutex);
diff --git a/mm/fremap.c b/mm/fremap.c
index ec520c7b28df..7f4123056e06 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -211,13 +211,13 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
                         }
                         goto out;
                 }
-                spin_lock(&mapping->i_mmap_lock);
+                mutex_lock(&mapping->i_mmap_mutex);
                 flush_dcache_mmap_lock(mapping);
                 vma->vm_flags |= VM_NONLINEAR;
                 vma_prio_tree_remove(vma, &mapping->i_mmap);
                 vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
                 flush_dcache_mmap_unlock(mapping);
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);
         }

         if (vma->vm_flags & VM_LOCKED) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index bbb4a5bbb958..5fd68b95c671 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2205,7 +2205,7 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
         unsigned long sz = huge_page_size(h);

         /*
-         * A page gathering list, protected by per file i_mmap_lock. The
+         * A page gathering list, protected by per file i_mmap_mutex. The
          * lock is used to avoid list corruption from multiple unmapping
          * of the same page since we are using page->lru.
          */
@@ -2274,9 +2274,9 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
                           unsigned long end, struct page *ref_page)
 {
-        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+        mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
         __unmap_hugepage_range(vma, start, end, ref_page);
-        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
 }

 /*
@@ -2308,7 +2308,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
          * this mapping should be shared between all the VMAs,
          * __unmap_hugepage_range() is called as the lock is already held
          */
-        spin_lock(&mapping->i_mmap_lock);
+        mutex_lock(&mapping->i_mmap_mutex);
         vma_prio_tree_foreach(iter_vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                 /* Do not unmap the current VMA */
                 if (iter_vma == vma)
@@ -2326,7 +2326,7 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
                                 address, address + huge_page_size(h),
                                 page);
         }
-        spin_unlock(&mapping->i_mmap_lock);
+        mutex_unlock(&mapping->i_mmap_mutex);

         return 1;
 }
@@ -2810,7 +2810,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
         BUG_ON(address >= end);
         flush_cache_range(vma, address, end);

-        spin_lock(&vma->vm_file->f_mapping->i_mmap_lock);
+        mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
         spin_lock(&mm->page_table_lock);
         for (; address < end; address += huge_page_size(h)) {
                 ptep = huge_pte_offset(mm, address);
@@ -2825,7 +2825,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
                 }
         }
         spin_unlock(&mm->page_table_lock);
-        spin_unlock(&vma->vm_file->f_mapping->i_mmap_lock);
+        mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);

         flush_tlb_range(vma, start, end);
 }
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 2b9a5eef39e0..12178ec32ab5 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -429,7 +429,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
          */

         read_lock(&tasklist_lock);
-        spin_lock(&mapping->i_mmap_lock);
+        mutex_lock(&mapping->i_mmap_mutex);
         for_each_process(tsk) {
                 pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);

@@ -449,7 +449,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
                                 add_to_kill(tsk, page, vma, to_kill, tkc);
                 }
         }
-        spin_unlock(&mapping->i_mmap_lock);
+        mutex_unlock(&mapping->i_mmap_mutex);
         read_unlock(&tasklist_lock);
 }

diff --git a/mm/memory.c b/mm/memory.c
index 18655878b9f8..7bbe4d3df756 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2667,12 +2667,12 @@ void unmap_mapping_range(struct address_space *mapping,
         details.last_index = ULONG_MAX;


-        spin_lock(&mapping->i_mmap_lock);
+        mutex_lock(&mapping->i_mmap_mutex);
         if (unlikely(!prio_tree_empty(&mapping->i_mmap)))
                 unmap_mapping_range_tree(&mapping->i_mmap, &details);
         if (unlikely(!list_empty(&mapping->i_mmap_nonlinear)))
                 unmap_mapping_range_list(&mapping->i_mmap_nonlinear, &details);
-        spin_unlock(&mapping->i_mmap_lock);
+        mutex_unlock(&mapping->i_mmap_mutex);
 }
 EXPORT_SYMBOL(unmap_mapping_range);

diff --git a/mm/mmap.c b/mm/mmap.c
index 50cb04bb56bf..26efbfca0b20 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -194,7 +194,7 @@ error:
 }

 /*
- * Requires inode->i_mapping->i_mmap_lock
+ * Requires inode->i_mapping->i_mmap_mutex
  */
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
                 struct file *file, struct address_space *mapping)
@@ -222,9 +222,9 @@ void unlink_file_vma(struct vm_area_struct *vma)

         if (file) {
                 struct address_space *mapping = file->f_mapping;
-                spin_lock(&mapping->i_mmap_lock);
+                mutex_lock(&mapping->i_mmap_mutex);
                 __remove_shared_vm_struct(vma, file, mapping);
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);
         }
 }

@@ -446,13 +446,13 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
                 mapping = vma->vm_file->f_mapping;

         if (mapping)
-                spin_lock(&mapping->i_mmap_lock);
+                mutex_lock(&mapping->i_mmap_mutex);

         __vma_link(mm, vma, prev, rb_link, rb_parent);
         __vma_link_file(vma);

         if (mapping)
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);

         mm->map_count++;
         validate_mm(mm);
@@ -555,7 +555,7 @@ again: remove_next = 1 + (end > next->vm_end);
                 mapping = file->f_mapping;
                 if (!(vma->vm_flags & VM_NONLINEAR))
                         root = &mapping->i_mmap;
-                spin_lock(&mapping->i_mmap_lock);
+                mutex_lock(&mapping->i_mmap_mutex);
                 if (insert) {
                         /*
                          * Put into prio_tree now, so instantiated pages
@@ -622,7 +622,7 @@ again: remove_next = 1 + (end > next->vm_end);
         if (anon_vma)
                 anon_vma_unlock(anon_vma);
         if (mapping)
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);

         if (remove_next) {
                 if (file) {
@@ -2290,7 +2290,7 @@ void exit_mmap(struct mm_struct *mm)

 /* Insert vm structure into process list sorted by address
  * and into the inode's i_mmap tree. If vm_file is non-NULL
- * then i_mmap_lock is taken here.
+ * then i_mmap_mutex is taken here.
  */
 int insert_vm_struct(struct mm_struct * mm, struct vm_area_struct * vma)
 {
@@ -2532,7 +2532,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
                  */
                 if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
                         BUG();
-                spin_lock_nest_lock(&mapping->i_mmap_lock, &mm->mmap_sem);
+                mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
         }
 }

@@ -2559,7 +2559,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_lock or anon_vma->lock outside the mmap_sem never
+ * taking i_mmap_mutex or anon_vma->lock outside the mmap_sem never
  * takes more than one of them in a row. Secondly we're protected
  * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
  *
@@ -2631,7 +2631,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
                  * AS_MM_ALL_LOCKS can't change to 0 from under us
                  * because we hold the mm_all_locks_mutex.
                  */
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);
                 if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
                                         &mapping->flags))
                         BUG();
diff --git a/mm/mremap.c b/mm/mremap.c
index 909e1e1e99b1..506fa44403df 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -93,7 +93,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                  * and we propagate stale pages into the dst afterward.
                  */
                 mapping = vma->vm_file->f_mapping;
-                spin_lock(&mapping->i_mmap_lock);
+                mutex_lock(&mapping->i_mmap_mutex);
         }

         /*
@@ -122,7 +122,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
         pte_unmap(new_pte - 1);
         pte_unmap_unlock(old_pte - 1, old_ptl);
         if (mapping)
-                spin_unlock(&mapping->i_mmap_lock);
+                mutex_unlock(&mapping->i_mmap_mutex);
         mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
 }

diff --git a/mm/rmap.c b/mm/rmap.c
index 522e4a93cadd..f0ef7ea5423a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -24,7 +24,7 @@
  * inode->i_alloc_sem (vmtruncate_range)
  * mm->mmap_sem
  * page->flags PG_locked (lock_page)
- * mapping->i_mmap_lock
+ * mapping->i_mmap_mutex
  * anon_vma->lock
  * mm->page_table_lock or pte_lock
  * zone->lru_lock (in mark_page_accessed, isolate_lru_page)
@@ -646,14 +646,14 @@ static int page_referenced_file(struct page *page,
          * The page lock not only makes sure that page->mapping cannot
          * suddenly be NULLified by truncation, it makes sure that the
          * structure at mapping cannot be freed and reused yet,
-         * so we can safely take mapping->i_mmap_lock.
+         * so we can safely take mapping->i_mmap_mutex.
          */
         BUG_ON(!PageLocked(page));

-        spin_lock(&mapping->i_mmap_lock);
+        mutex_lock(&mapping->i_mmap_mutex);

         /*
-         * i_mmap_lock does not stabilize mapcount at all, but mapcount
+         * i_mmap_mutex does not stabilize mapcount at all, but mapcount
          * is more likely to be accurate if we note it after spinning.
          */
         mapcount = page_mapcount(page);
@@ -675,7 +675,7 @@ static int page_referenced_file(struct page *page,
                         break;
         }

-        spin_unlock(&mapping->i_mmap_lock);
+        mutex_unlock(&mapping->i_mmap_mutex);
         return referenced;
 }

@@ -762,7 +762,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)

         BUG_ON(PageAnon(page));

-        spin_lock(&mapping->i_mmap_lock);
+        mutex_lock(&mapping->i_mmap_mutex);
         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                 if (vma->vm_flags & VM_SHARED) {
                         unsigned long address = vma_address(page, vma);
@@ -771,7 +771,7 @@ static int page_mkclean_file(struct address_space *mapping, struct page *page)
                         ret += page_mkclean_one(page, vma, address);
                 }
         }
-        spin_unlock(&mapping->i_mmap_lock);
+        mutex_unlock(&mapping->i_mmap_mutex);
         return ret;
 }

@@ -1119,7 +1119,7 @@ out_mlock:
         /*
          * We need mmap_sem locking, Otherwise VM_LOCKED check makes
          * unstable result and race. Plus, We can't wait here because
-         * we now hold anon_vma->lock or mapping->i_mmap_lock.
+         * we now hold anon_vma->lock or mapping->i_mmap_mutex.
          * if trylock failed, the page remain in evictable lru and later
          * vmscan could retry to move the page to unevictable lru if the
          * page is actually mlocked.
@@ -1345,7 +1345,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
         unsigned long max_nl_size = 0;
         unsigned int mapcount;

-        spin_lock(&mapping->i_mmap_lock);
+        mutex_lock(&mapping->i_mmap_mutex);
         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                 unsigned long address = vma_address(page, vma);
                 if (address == -EFAULT)
@@ -1391,7 +1391,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
         mapcount = page_mapcount(page);
         if (!mapcount)
                 goto out;
-        cond_resched_lock(&mapping->i_mmap_lock);
+        cond_resched();

         max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
         if (max_nl_cursor == 0)
@@ -1413,7 +1413,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
                         }
                         vma->vm_private_data = (void *) max_nl_cursor;
                 }
-                cond_resched_lock(&mapping->i_mmap_lock);
+                cond_resched();
                 max_nl_cursor += CLUSTER_SIZE;
         } while (max_nl_cursor <= max_nl_size);

@@ -1425,7 +1425,7 @@ static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
         list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
                 vma->vm_private_data = NULL;
 out:
-        spin_unlock(&mapping->i_mmap_lock);
+        mutex_unlock(&mapping->i_mmap_mutex);
         return ret;
 }

@@ -1544,7 +1544,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,

         if (!mapping)
                 return ret;
-        spin_lock(&mapping->i_mmap_lock);
+        mutex_lock(&mapping->i_mmap_mutex);
         vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
                 unsigned long address = vma_address(page, vma);
                 if (address == -EFAULT)
@@ -1558,7 +1558,7 @@ static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
          * never contain migration ptes. Decide what to do about this
          * limitation to linear when we need rmap_walk() on nonlinear.
          */
-        spin_unlock(&mapping->i_mmap_lock);
+        mutex_unlock(&mapping->i_mmap_mutex);
         return ret;
 }
