author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 16:00:36 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-13 16:00:36 -0500
commit    78a45c6f067824cf5d0a9fedea7339ac2e28603c
tree      b4f78c8b6b9059ddace0a18c11629b8d2045f793  /mm/mmap.c
parent    f96fe225677b3efb74346ebd56fafe3997b02afa
parent    29d293b6007b91a4463f05bc8d0b26e0e65c5816
Merge branch 'akpm' (second patch-bomb from Andrew)
Merge second patchbomb from Andrew Morton:

 - the rest of MM
 - misc fs fixes
 - add execveat() syscall
 - new ratelimit feature for fault-injection
 - decompressor updates
 - ipc/ updates
 - fallocate feature creep
 - fsnotify cleanups
 - a few other misc things

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (99 commits)
  cgroups: Documentation: fix trivial typos and wrong paragraph numberings
  parisc: percpu: update comments referring to __get_cpu_var
  percpu: update local_ops.txt to reflect this_cpu operations
  percpu: remove __get_cpu_var and __raw_get_cpu_var macros
  fsnotify: remove destroy_list from fsnotify_mark
  fsnotify: unify inode and mount marks handling
  fallocate: create FAN_MODIFY and IN_MODIFY events
  mm/cma: make kmemleak ignore CMA regions
  slub: fix cpuset check in get_any_partial
  slab: fix cpuset check in fallback_alloc
  shmdt: use i_size_read() instead of ->i_size
  ipc/shm.c: fix overly aggressive shmdt() when calls span multiple segments
  ipc/msg: increase MSGMNI, remove scaling
  ipc/sem.c: increase SEMMSL, SEMMNI, SEMOPM
  ipc/sem.c: change memory barrier in sem_lock() to smp_rmb()
  lib/decompress.c: consistency of compress formats for kernel image
  decompress_bunzip2: off by one in get_next_block()
  usr/Kconfig: make initrd compression algorithm selection not expert
  fault-inject: add ratelimit option
  ratelimit: add initialization macro
  ...
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--  mm/mmap.c  24
1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index b6c0a77fc1c8..7b36aa7cc89a 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -232,7 +232,7 @@ error:
 }
 
 /*
- * Requires inode->i_mapping->i_mmap_mutex
+ * Requires inode->i_mapping->i_mmap_rwsem
  */
 static void __remove_shared_vm_struct(struct vm_area_struct *vma,
 		struct file *file, struct address_space *mapping)
@@ -260,9 +260,9 @@ void unlink_file_vma(struct vm_area_struct *vma)
 
 	if (file) {
 		struct address_space *mapping = file->f_mapping;
-		mutex_lock(&mapping->i_mmap_mutex);
+		i_mmap_lock_write(mapping);
 		__remove_shared_vm_struct(vma, file, mapping);
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 	}
 }
 
@@ -674,14 +674,14 @@ static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
 
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
-		mutex_lock(&mapping->i_mmap_mutex);
+		i_mmap_lock_write(mapping);
 	}
 
 	__vma_link(mm, vma, prev, rb_link, rb_parent);
 	__vma_link_file(vma);
 
 	if (mapping)
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 
 	mm->map_count++;
 	validate_mm(mm);
@@ -796,7 +796,7 @@ again:	remove_next = 1 + (end > next->vm_end);
 			next->vm_end);
 	}
 
-	mutex_lock(&mapping->i_mmap_mutex);
+	i_mmap_lock_write(mapping);
 	if (insert) {
 		/*
 		 * Put into interval tree now, so instantiated pages
@@ -883,7 +883,7 @@ again:	remove_next = 1 + (end > next->vm_end);
 		anon_vma_unlock_write(anon_vma);
 	}
 	if (mapping)
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 
 	if (root) {
 		uprobe_mmap(vma);
@@ -2362,6 +2362,8 @@ find_extend_vma(struct mm_struct *mm, unsigned long addr)
 }
 #endif
 
+EXPORT_SYMBOL_GPL(find_extend_vma);
+
 /*
  * Ok - we have the memory areas we should free on the vma list,
  * so release them, and do the vma updates.
@@ -2791,7 +2793,7 @@ void exit_mmap(struct mm_struct *mm)
 
 /* Insert vm structure into process list sorted by address
  * and into the inode's i_mmap tree.  If vm_file is non-NULL
- * then i_mmap_mutex is taken here.
+ * then i_mmap_rwsem is taken here.
  */
 int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
 {
@@ -3086,7 +3088,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
 		 */
 		if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
 			BUG();
-		mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
+		down_write_nest_lock(&mapping->i_mmap_rwsem, &mm->mmap_sem);
 	}
 }
 
@@ -3113,7 +3115,7 @@ static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
  * vma in this mm is backed by the same anon_vma or address_space.
  *
  * We can take all the locks in random order because the VM code
- * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
+ * taking i_mmap_rwsem or anon_vma->rwsem outside the mmap_sem never
 * takes more than one of them in a row.  Secondly we're protected
 * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
 *
@@ -3182,7 +3184,7 @@ static void vm_unlock_mapping(struct address_space *mapping)
 		 * AS_MM_ALL_LOCKS can't change to 0 from under us
 		 * because we hold the mm_all_locks_mutex.
 		 */
-		mutex_unlock(&mapping->i_mmap_mutex);
+		i_mmap_unlock_write(mapping);
 		if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
 					&mapping->flags))
 			BUG();
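
Note on the conversion above: this merge window turns the i_mmap_mutex in struct address_space into an rw_semaphore (i_mmap_rwsem), and most call sites go through wrapper helpers rather than raw lock calls. A minimal sketch of what the write-side wrappers look like, modeled on include/linux/fs.h as of this merge (the definitions there are authoritative; this is for orientation only):

	/* The interval tree is now guarded by an rwsem, not a mutex. */
	static inline void i_mmap_lock_write(struct address_space *mapping)
	{
		down_write(&mapping->i_mmap_rwsem);
	}

	static inline void i_mmap_unlock_write(struct address_space *mapping)
	{
		up_write(&mapping->i_mmap_rwsem);
	}

Hiding the primitive behind helpers lets follow-up work add read-side locking (down_read() on the same rwsem) without touching every caller again. vm_lock_mapping() is the one place in this diff that bypasses the wrapper: it calls down_write_nest_lock() directly because it needs the lockdep annotation nesting i_mmap_rwsem under mmap_sem for mm_take_all_locks().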