diff options
Diffstat (limited to 'mm/mmap.c')
-rw-r--r-- | mm/mmap.c | 24 |
1 file changed, 13 insertions, 11 deletions
@@ -232,7 +232,7 @@ error: | |||
232 | } | 232 | } |
233 | 233 | ||
234 | /* | 234 | /* |
235 | * Requires inode->i_mapping->i_mmap_mutex | 235 | * Requires inode->i_mapping->i_mmap_rwsem |
236 | */ | 236 | */ |
237 | static void __remove_shared_vm_struct(struct vm_area_struct *vma, | 237 | static void __remove_shared_vm_struct(struct vm_area_struct *vma, |
238 | struct file *file, struct address_space *mapping) | 238 | struct file *file, struct address_space *mapping) |
@@ -260,9 +260,9 @@ void unlink_file_vma(struct vm_area_struct *vma) | |||
260 | 260 | ||
261 | if (file) { | 261 | if (file) { |
262 | struct address_space *mapping = file->f_mapping; | 262 | struct address_space *mapping = file->f_mapping; |
263 | mutex_lock(&mapping->i_mmap_mutex); | 263 | i_mmap_lock_write(mapping); |
264 | __remove_shared_vm_struct(vma, file, mapping); | 264 | __remove_shared_vm_struct(vma, file, mapping); |
265 | mutex_unlock(&mapping->i_mmap_mutex); | 265 | i_mmap_unlock_write(mapping); |
266 | } | 266 | } |
267 | } | 267 | } |
268 | 268 | ||
@@ -674,14 +674,14 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma, | |||
674 | 674 | ||
675 | if (vma->vm_file) { | 675 | if (vma->vm_file) { |
676 | mapping = vma->vm_file->f_mapping; | 676 | mapping = vma->vm_file->f_mapping; |
677 | mutex_lock(&mapping->i_mmap_mutex); | 677 | i_mmap_lock_write(mapping); |
678 | } | 678 | } |
679 | 679 | ||
680 | __vma_link(mm, vma, prev, rb_link, rb_parent); | 680 | __vma_link(mm, vma, prev, rb_link, rb_parent); |
681 | __vma_link_file(vma); | 681 | __vma_link_file(vma); |
682 | 682 | ||
683 | if (mapping) | 683 | if (mapping) |
684 | mutex_unlock(&mapping->i_mmap_mutex); | 684 | i_mmap_unlock_write(mapping); |
685 | 685 | ||
686 | mm->map_count++; | 686 | mm->map_count++; |
687 | validate_mm(mm); | 687 | validate_mm(mm); |
@@ -796,7 +796,7 @@ again: remove_next = 1 + (end > next->vm_end); | |||
796 | next->vm_end); | 796 | next->vm_end); |
797 | } | 797 | } |
798 | 798 | ||
799 | mutex_lock(&mapping->i_mmap_mutex); | 799 | i_mmap_lock_write(mapping); |
800 | if (insert) { | 800 | if (insert) { |
801 | /* | 801 | /* |
802 | * Put into interval tree now, so instantiated pages | 802 | * Put into interval tree now, so instantiated pages |
@@ -883,7 +883,7 @@ again: remove_next = 1 + (end > next->vm_end); | |||
883 | anon_vma_unlock_write(anon_vma); | 883 | anon_vma_unlock_write(anon_vma); |
884 | } | 884 | } |
885 | if (mapping) | 885 | if (mapping) |
886 | mutex_unlock(&mapping->i_mmap_mutex); | 886 | i_mmap_unlock_write(mapping); |
887 | 887 | ||
888 | if (root) { | 888 | if (root) { |
889 | uprobe_mmap(vma); | 889 | uprobe_mmap(vma); |
@@ -2362,6 +2362,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr) | |||
2362 | } | 2362 | } |
2363 | #endif | 2363 | #endif |
2364 | 2364 | ||
2365 | EXPORT_SYMBOL_GPL(find_extend_vma); | ||
2366 | |||
2365 | /* | 2367 | /* |
2366 | * Ok - we have the memory areas we should free on the vma list, | 2368 | * Ok - we have the memory areas we should free on the vma list, |
2367 | * so release them, and do the vma updates. | 2369 | * so release them, and do the vma updates. |
@@ -2791,7 +2793,7 @@ void exit_mmap(struct mm_struct *mm) | |||
2791 | 2793 | ||
2792 | /* Insert vm structure into process list sorted by address | 2794 | /* Insert vm structure into process list sorted by address |
2793 | * and into the inode's i_mmap tree. If vm_file is non-NULL | 2795 | * and into the inode's i_mmap tree. If vm_file is non-NULL |
2794 | * then i_mmap_mutex is taken here. | 2796 | * then i_mmap_rwsem is taken here. |
2795 | */ | 2797 | */ |
2796 | int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) | 2798 | int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma) |
2797 | { | 2799 | { |
@@ -3086,7 +3088,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) | |||
3086 | */ | 3088 | */ |
3087 | if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) | 3089 | if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags)) |
3088 | BUG(); | 3090 | BUG(); |
3089 | mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem); | 3091 | down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem); |
3090 | } | 3092 | } |
3091 | } | 3093 | } |
3092 | 3094 | ||
@@ -3113,7 +3115,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping) | |||
3113 | * vma in this mm is backed by the same anon_vma or address_space. | 3115 | * vma in this mm is backed by the same anon_vma or address_space. |
3114 | * | 3116 | * |
3115 | * We can take all the locks in random order because the VM code | 3117 | * We can take all the locks in random order because the VM code |
3116 | * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never | 3118 | * taking i_mmap_rwsem or anon_vma->rwsem outside the mmap_sem never |
3117 | * takes more than one of them in a row. Secondly we're protected | 3119 | * takes more than one of them in a row. Secondly we're protected |
3118 | * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. | 3120 | * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex. |
3119 | * | 3121 | * |
@@ -3182,7 +3184,7 @@ static void vm_unlock_mapping(struct address_space *mapping) | |||
3182 | * AS_MM_ALL_LOCKS can't change to 0 from under us | 3184 | * AS_MM_ALL_LOCKS can't change to 0 from under us |
3183 | * because we hold the mm_all_locks_mutex. | 3185 | * because we hold the mm_all_locks_mutex. |
3184 | */ | 3186 | */ |
3185 | mutex_unlock(&mapping->i_mmap_mutex); | 3187 | i_mmap_unlock_write(mapping); |
3186 | if (!test_and_clear_bit(AS_MM_ALL_LOCKS, | 3188 | if (!test_and_clear_bit(AS_MM_ALL_LOCKS, |
3187 | &mapping->flags)) | 3189 | &mapping->flags)) |
3188 | BUG(); | 3190 | BUG(); |