summaryrefslogtreecommitdiffstats
path: root/mm/nommu.c
diff options
context:
space:
mode:
authorDavidlohr Bueso <dave@stgolabs.net>2014-12-12 19:54:39 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-12-13 15:42:46 -0500
commit1acf2e040721564d579297646862b8ea3dd4511b (patch)
treec51ec6206f123b9d09caed4878d4958314d497db /mm/nommu.c
parentd28eb9c861f41aa2af4cfcc5eeeddff42b13d31e (diff)
mm/nommu: share the i_mmap_rwsem
Shrinking/truncate logic can call nommu_shrink_inode_mappings() to verify that any shared mappings of the inode in question aren't broken (dead zone). AFAICT the only user is ramfs, which uses it to handle the size-change attribute. Pretty much a no-brainer to share the lock. Signed-off-by: Davidlohr Bueso <dbueso@suse.de> Acked-by: "Kirill A. Shutemov" <kirill@shutemov.name> Acked-by: Hugh Dickins <hughd@google.com> Cc: Oleg Nesterov <oleg@redhat.com> Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Rik van Riel <riel@redhat.com> Cc: Srikar Dronamraju <srikar@linux.vnet.ibm.com> Acked-by: Mel Gorman <mgorman@suse.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/nommu.c')
-rw-r--r--mm/nommu.c9
1 file changed, 4 insertions, 5 deletions
diff --git a/mm/nommu.c b/mm/nommu.c
index 52a576553581..cd519e1cd8a7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -2094,14 +2094,14 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	down_write(&nommu_region_sem);
-	i_mmap_lock_write(inode->i_mapping);
+	i_mmap_lock_read(inode->i_mapping);
 
 	/* search for VMAs that fall within the dead zone */
 	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, low, high) {
 		/* found one - only interested if it's shared out of the page
 		 * cache */
 		if (vma->vm_flags & VM_SHARED) {
-			i_mmap_unlock_write(inode->i_mapping);
+			i_mmap_unlock_read(inode->i_mapping);
 			up_write(&nommu_region_sem);
 			return -ETXTBSY; /* not quite true, but near enough */
 		}
@@ -2113,8 +2113,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	 * we don't check for any regions that start beyond the EOF as there
 	 * shouldn't be any
 	 */
-	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap,
-				  0, ULONG_MAX) {
+	vma_interval_tree_foreach(vma, &inode->i_mapping->i_mmap, 0, ULONG_MAX) {
 		if (!(vma->vm_flags & VM_SHARED))
 			continue;
 
@@ -2129,7 +2128,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		}
 	}
 
-	i_mmap_unlock_write(inode->i_mapping);
+	i_mmap_unlock_read(inode->i_mapping);
 	up_write(&nommu_region_sem);
 	return 0;
 }