author		Michel Lespinasse <walken@google.com>	2012-12-12 16:52:25 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-12 20:38:35 -0500
commit		4128997b5f0e7ad583a5f3990051b8188b39055c
tree		239c6c1b6c79fab5b42fd5927614229a57df5de5 /mm/mmap.c
parent		c95d26c2ffd3931123d6a7a2489530ccbd36da7a
mm: protect against concurrent vma expansion
expand_stack() runs with a shared mmap_sem lock. Because of this, there
could be multiple concurrent stack expansions in the same mm, which may
cause problems in the vma gap update code.

I propose to solve this by taking the mm->page_table_lock around such
vma expansions, in order to avoid the concurrency issue. We only have
to worry about concurrent expand_stack() calls here, since we hold a
shared mmap_sem lock and all vma modifications other than
expand_stack() are done under an exclusive mmap_sem lock.

I previously tried to achieve the same effect by making sure all
growable vmas in a given mm would share the same anon_vma, which we
already lock here. However this turned out to be difficult - all of
the schemes I tried for refcounting the growable anon_vma and clearing
it turned out ugly. So, I'm now proposing only the minimal fix.

The overhead of taking the page table lock during stack expansion is
expected to be small: glibc doesn't use expandable stacks for the
threads it creates, so having multiple growable stacks is actually
uncommon and we don't expect the page table lock to get bounced
between threads.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
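As a rough userspace analogue of the locking pattern described above (a
minimal sketch: the mm_t and vma_t types and the function body below are
made up for illustration and are not kernel code), many threads may hold
a rwlock shared, like the shared mmap_sem, while a small mutex standing
in for mm->page_table_lock serializes the one update path that does not
tolerate concurrency:

#include <pthread.h>
#include <stddef.h>

typedef struct mm {
	pthread_rwlock_t mmap_sem;	 /* held shared by all expanders */
	pthread_mutex_t page_table_lock; /* serializes the expansions */
	size_t highest_vm_end;		 /* bookkeeping updated on growth */
} mm_t;

typedef struct vma {
	mm_t *mm;
	size_t vm_start, vm_end;
} vma_t;

/* Grow vma upward to 'address'; the caller already holds mm->mmap_sem
 * for read, so other threads may be in this function for other vmas of
 * the same mm at the same time. The mutex makes the update atomic with
 * respect to those concurrent expansions. */
static int expand_upwards(vma_t *vma, size_t address)
{
	mm_t *mm = vma->mm;

	pthread_mutex_lock(&mm->page_table_lock);
	vma->vm_end = address;			/* the expansion itself */
	if (address > mm->highest_vm_end)
		mm->highest_vm_end = address;	/* derived bookkeeping */
	pthread_mutex_unlock(&mm->page_table_lock);
	return 0;
}

The design choice mirrors the patch: rather than promoting every stack
expansion to the exclusive lock, only the small non-concurrency-safe
update is serialized, so readers of the address space are not blocked.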
Diffstat (limited to 'mm/mmap.c')
-rw-r--r--	mm/mmap.c	28
1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 9ed3a06242a0..2b7d9e78a569 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2069,6 +2069,18 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
 			error = acct_stack_growth(vma, size, grow);
 			if (!error) {
+				/*
+				 * vma_gap_update() doesn't support concurrent
+				 * updates, but we only hold a shared mmap_sem
+				 * lock here, so we need to protect against
+				 * concurrent vma expansions.
+				 * vma_lock_anon_vma() doesn't help here, as
+				 * we don't guarantee that all growable vmas
+				 * in a mm share the same root anon vma.
+				 * So, we reuse mm->page_table_lock to guard
+				 * against concurrent vma expansions.
+				 */
+				spin_lock(&vma->vm_mm->page_table_lock);
 				anon_vma_interval_tree_pre_update_vma(vma);
 				vma->vm_end = address;
 				anon_vma_interval_tree_post_update_vma(vma);
@@ -2076,6 +2088,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 					vma_gap_update(vma->vm_next);
 				else
 					vma->vm_mm->highest_vm_end = address;
+				spin_unlock(&vma->vm_mm->page_table_lock);
+
 				perf_event_mmap(vma);
 			}
 		}
@@ -2126,11 +2140,25 @@ int expand_downwards(struct vm_area_struct *vma,
 		if (grow <= vma->vm_pgoff) {
 			error = acct_stack_growth(vma, size, grow);
 			if (!error) {
+				/*
+				 * vma_gap_update() doesn't support concurrent
+				 * updates, but we only hold a shared mmap_sem
+				 * lock here, so we need to protect against
+				 * concurrent vma expansions.
+				 * vma_lock_anon_vma() doesn't help here, as
+				 * we don't guarantee that all growable vmas
+				 * in a mm share the same root anon vma.
+				 * So, we reuse mm->page_table_lock to guard
+				 * against concurrent vma expansions.
+				 */
+				spin_lock(&vma->vm_mm->page_table_lock);
 				anon_vma_interval_tree_pre_update_vma(vma);
 				vma->vm_start = address;
 				vma->vm_pgoff -= grow;
 				anon_vma_interval_tree_post_update_vma(vma);
 				vma_gap_update(vma);
+				spin_unlock(&vma->vm_mm->page_table_lock);
+
 				perf_event_mmap(vma);
 			}
 		}
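Why vma_gap_update() cannot tolerate concurrent callers: it propagates
a cached maximum-gap value from the changed vma up through its rbtree
ancestors, and two racing propagations can each read a stale child
value and publish it upward, losing the other's update. A minimal
sketch of that propagation loop (hypothetical node layout, not the
kernel's struct vm_area_struct):

#include <stddef.h>

struct node {
	struct node *parent, *left, *right;
	size_t gap;	/* this node's own gap to its predecessor */
	size_t max_gap;	/* cached largest gap in this subtree */
};

static size_t subtree_max_gap(struct node *n)
{
	return n ? n->max_gap : 0;
}

/* Recompute the cached values from 'n' up to the root. If two threads
 * run this concurrently after changing different gaps, one can read a
 * child's max_gap before the other has refreshed it and then publish
 * that stale value higher up - a classic lost update. Serializing the
 * expansion (here, via mm->page_table_lock) prevents that. */
static void gap_update(struct node *n)
{
	while (n) {
		size_t m = n->gap;

		if (subtree_max_gap(n->left) > m)
			m = subtree_max_gap(n->left);
		if (subtree_max_gap(n->right) > m)
			m = subtree_max_gap(n->right);
		n->max_gap = m;
		n = n->parent;
	}
}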