Diffstat (limited to 'mm/mmap.c')
 mm/mmap.c | 25 ++++++++++++++++++++-----
 1 file changed, 20 insertions(+), 5 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index b3f00b616b81..84b12624ceb0 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3184,10 +3184,16 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * mapping->flags avoid to take the same lock twice, if more than one
  * vma in this mm is backed by the same anon_vma or address_space.
  *
- * We can take all the locks in random order because the VM code
- * taking i_mmap_rwsem or anon_vma->rwsem outside the mmap_sem never
- * takes more than one of them in a row. Secondly we're protected
- * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
+ * We take locks in following order, accordingly to comment at beginning
+ * of mm/rmap.c:
+ *   - all hugetlbfs_i_mmap_rwsem_key locks (aka mapping->i_mmap_rwsem for
+ *     hugetlb mapping);
+ *   - all i_mmap_rwsem locks;
+ *   - all anon_vma->rwseml
+ *
+ * We can take all locks within these types randomly because the VM code
+ * doesn't nest them and we protected from parallel mm_take_all_locks() by
+ * mm_all_locks_mutex.
  *
  * mm_take_all_locks() and mm_drop_all_locks are expensive operations
  * that may have to take thousand of locks.
@@ -3206,7 +3212,16 @@ int mm_take_all_locks(struct mm_struct *mm)
 	for (vma = mm->mmap; vma; vma = vma->vm_next) {
 		if (signal_pending(current))
 			goto out_unlock;
-		if (vma->vm_file && vma->vm_file->f_mapping)
+		if (vma->vm_file && vma->vm_file->f_mapping &&
+				is_vm_hugetlb_page(vma))
+			vm_lock_mapping(mm, vma->vm_file->f_mapping);
+	}
+
+	for (vma = mm->mmap; vma; vma = vma->vm_next) {
+		if (signal_pending(current))
+			goto out_unlock;
+		if (vma->vm_file && vma->vm_file->f_mapping &&
+				!is_vm_hugetlb_page(vma))
 			vm_lock_mapping(mm, vma->vm_file->f_mapping);
 	}
 
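For illustration only, not part of the patch: the second hunk enforces the documented lock order by walking the vma list twice, taking every lock of one class before any lock of the next. The standalone userspace sketch below shows the same two-pass pattern with pthread rwlocks standing in for i_mmap_rwsem; all names here (struct mapping, maps[], take_all_locks) are hypothetical stand-ins, not kernel interfaces.

/*
 * Illustrative userspace sketch of the two-pass lock ordering that
 * mm_take_all_locks() uses after this patch. Every name is a hypothetical
 * stand-in; this is not kernel code. Build: cc two_pass.c -lpthread
 */
#include <pthread.h>
#include <stdio.h>

struct mapping {			/* stand-in for vma->vm_file->f_mapping */
	pthread_rwlock_t rwsem;		/* stand-in for mapping->i_mmap_rwsem */
	int is_hugetlb;			/* stand-in for is_vm_hugetlb_page() */
};

static struct mapping maps[] = {
	{ PTHREAD_RWLOCK_INITIALIZER, 0 },
	{ PTHREAD_RWLOCK_INITIALIZER, 1 },
	{ PTHREAD_RWLOCK_INITIALIZER, 0 },
};
#define NMAPS (sizeof(maps) / sizeof(maps[0]))

static void take_all_locks(void)
{
	size_t i;

	/* Pass 1: all hugetlb mapping locks, before any regular one. */
	for (i = 0; i < NMAPS; i++)
		if (maps[i].is_hugetlb)
			pthread_rwlock_wrlock(&maps[i].rwsem);

	/* Pass 2: all remaining (non-hugetlb) mapping locks. */
	for (i = 0; i < NMAPS; i++)
		if (!maps[i].is_hugetlb)
			pthread_rwlock_wrlock(&maps[i].rwsem);
}

static void drop_all_locks(void)
{
	size_t i;

	for (i = 0; i < NMAPS; i++)
		pthread_rwlock_unlock(&maps[i].rwsem);
}

int main(void)
{
	take_all_locks();
	printf("took %zu mapping locks in class order\n", (size_t)NMAPS);
	drop_all_locks();
	return 0;
}

Within each pass the acquisition order is arbitrary, which is exactly the property the updated comment relies on: locks of the same class are never nested by other VM code, so only the order between classes has to be fixed.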