aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2009-01-29 20:46:42 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-01-29 20:46:42 -0500
commitde33c8db5910cda599899dd431cc30d7c1018cbf (patch)
tree4ee9ba8685bae5d1719a3158284d0c197c83afde
parent18e352e4a73465349711a9324767e1b2453383e2 (diff)
Fix OOPS in mmap_region() when merging adjacent VM_LOCKED file segments
As of commit ba470de43188cdbff795b5da43a1474523c6c2fb ("mmap: handle mlocked pages during map, remap, unmap") we now use the 'vma' variable at the end of mmap_region() to handle the page-in of newly mapped mlocked pages. However, if we merged adjacent vma's together, the vma we're using may be stale. We historically consciously avoided using it after the merge operation, but that got overlooked when redoing the locked page handling. This commit simplifies mmap_region() by doing any vma merges early, avoiding the issue entirely, and 'vma' will always be valid. As pointed out by Hugh Dickins, this depends on any drivers that change the page offset or flags to have set one of the VM_SPECIAL bits (so that they cannot trigger the early merge logic), but that's true in general. Reported-and-tested-by: Maksim Yevmenkin <maksim.yevmenkin@gmail.com> Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com> Cc: Nick Piggin <npiggin@suse.de> Cc: Andrew Morton <akpm@linux-foundation.org> Cc: Hugh Dickins <hugh@veritas.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- mm/mmap.c | 26
1 file changed, 6 insertions(+), 20 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 8d95902e9a38..d3fa10a726cf 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1134,16 +1134,11 @@ munmap_back:
 	}
 
 	/*
-	 * Can we just expand an old private anonymous mapping?
-	 * The VM_SHARED test is necessary because shmem_zero_setup
-	 * will create the file object for a shared anonymous map below.
+	 * Can we just expand an old mapping?
 	 */
-	if (!file && !(vm_flags & VM_SHARED)) {
-		vma = vma_merge(mm, prev, addr, addr + len, vm_flags,
-					NULL, NULL, pgoff, NULL);
-		if (vma)
-			goto out;
-	}
+	vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff, NULL);
+	if (vma)
+		goto out;
 
 	/*
 	 * Determine the object being mapped and call the appropriate
@@ -1206,17 +1201,8 @@ munmap_back:
 	if (vma_wants_writenotify(vma))
 		vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
 
-	if (file && vma_merge(mm, prev, addr, vma->vm_end,
-			vma->vm_flags, NULL, file, pgoff, vma_policy(vma))) {
-		mpol_put(vma_policy(vma));
-		kmem_cache_free(vm_area_cachep, vma);
-		fput(file);
-		if (vm_flags & VM_EXECUTABLE)
-			removed_exe_file_vma(mm);
-	} else {
-		vma_link(mm, vma, prev, rb_link, rb_parent);
-		file = vma->vm_file;
-	}
+	vma_link(mm, vma, prev, rb_link, rb_parent);
+	file = vma->vm_file;
 
 	/* Once vma denies write, undo our temporary denial count */
 	if (correct_wcount)