Diffstat (limited to 'mm/mremap.c')
-rw-r--r--   mm/mremap.c   |   6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/mremap.c b/mm/mremap.c
index b147f66f4c40..84aa36f9f308 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -99,7 +99,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	spinlock_t *old_ptl, *new_ptl;
 
 	/*
-	 * When need_rmap_locks is true, we take the i_mmap_mutex and anon_vma
+	 * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
 	 * locks to ensure that rmap will always observe either the old or the
 	 * new ptes. This is the easiest way to avoid races with
 	 * truncate_pagecache(), page migration, etc...
@@ -119,7 +119,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	if (need_rmap_locks) {
 		if (vma->vm_file) {
 			mapping = vma->vm_file->f_mapping;
-			mutex_lock(&mapping->i_mmap_mutex);
+			i_mmap_lock_write(mapping);
 		}
 		if (vma->anon_vma) {
 			anon_vma = vma->anon_vma;
@@ -156,7 +156,7 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
 	if (anon_vma)
 		anon_vma_unlock_write(anon_vma);
 	if (mapping)
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 }
 
 #define LATENCY_LIMIT	(64 * PAGE_SIZE)
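
For context, the i_mmap_lock_write()/i_mmap_unlock_write() helpers this diff switches to are thin wrappers around the address_space's i_mmap_rwsem, which replaced the old i_mmap_mutex. A minimal sketch of what they look like (cf. include/linux/fs.h; exact definitions may vary by kernel version):

	/* Sketch of the i_mmap locking helpers: take the mapping's
	 * i_mmap_rwsem for writing, mirroring the exclusive semantics
	 * of the old mutex_lock(&mapping->i_mmap_mutex).
	 */
	static inline void i_mmap_lock_write(struct address_space *mapping)
	{
		down_write(&mapping->i_mmap_rwsem);
	}

	static inline void i_mmap_unlock_write(struct address_space *mapping)
	{
		up_write(&mapping->i_mmap_rwsem);
	}

Using the helpers in move_ptes() keeps the same exclusive locking behaviour while hiding the lock type, so callers do not need to change again if read-mostly rmap paths later take the rwsem for reading.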