about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author	David Howells <dhowells@redhat.com>	2012-02-23 08:50:35 -0500
committer	Luis Henriques <luis.henriques@canonical.com>	2012-03-26 05:26:34 -0400
commit	5ea05711c36f9d5971c8d4be1497cc3dc086c8b8 (patch)
tree	d45f384570b0c9f4e215a67094d73d7143d5d06f
parent	8db72667de592d78dc36d7befb5a6b9403ed3521 (diff)
NOMMU: Lock i_mmap_mutex for access to the VMA prio list
BugLink: http://bugs.launchpad.net/bugs/954576

commit 918e556ec214ed2f584e4cac56d7b29e4bb6bf27 upstream.

Lock i_mmap_mutex for access to the VMA prio list to prevent concurrent
access.  Currently, certain parts of the mmap handling are protected by
the region mutex, but not all.

Reported-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: David Howells <dhowells@redhat.com>
Acked-by: Al Viro <viro@zeniv.linux.org.uk>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-rw-r--r--	mm/nommu.c	7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/mm/nommu.c b/mm/nommu.c
index 9edc897a397..839775875e7 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -697,9 +697,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_insert(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* add the VMA to the tree */
@@ -761,9 +763,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
 	if (vma->vm_file) {
 		mapping = vma->vm_file->f_mapping;
 
+		mutex_lock(&mapping->i_mmap_mutex);
 		flush_dcache_mmap_lock(mapping);
 		vma_prio_tree_remove(vma, &mapping->i_mmap);
 		flush_dcache_mmap_unlock(mapping);
+		mutex_unlock(&mapping->i_mmap_mutex);
 	}
 
 	/* remove from the MM's tree and list */
@@ -2061,6 +2065,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 	high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	down_write(&nommu_region_sem);
+	mutex_lock(&inode->i_mapping->i_mmap_mutex);
 
 	/* search for VMAs that fall within the dead zone */
 	vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap,
@@ -2068,6 +2073,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		/* found one - only interested if it's shared out of the page
 		 * cache */
 		if (vma->vm_flags & VM_SHARED) {
+			mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 			up_write(&nommu_region_sem);
 			return -ETXTBSY; /* not quite true, but near enough */
 		}
@@ -2095,6 +2101,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size,
 		}
 	}
 
+	mutex_unlock(&inode->i_mapping->i_mmap_mutex);
 	up_write(&nommu_region_sem);
 	return 0;
 }