author     Hugh Dickins <hugh@veritas.com>        2005-10-29 21:16:08 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-10-30 00:40:38 -0400
commit     7ee78232501ea9de2b6c8f10d32c9a0fee541357
tree       2041a36a13bdd8b096dfbf52b63a87739ea97d6b
parent     fd3e42fcc888a773572282575d2fdbf5cfd6216e
[PATCH] mm: dup_mmap down new mmap_sem
One anomaly remains from when Andrea rationalized the responsibilities of mmap_sem and page_table_lock: in dup_mmap we add vmas to the child holding its page_table_lock, but not the mmap_sem which normally guards the vma list and rbtree. Which could be an issue for unuse_mm: though since it just walks down the list (today with page_table_lock, tomorrow not), it's probably okay. Will need a memory barrier? Oh, keep it simple, Nick and I agreed, no harm in taking child's mmap_sem here.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
 kernel/fork.c | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/kernel/fork.c b/kernel/fork.c
index 0e7fe4a8a8df..2a587b3224e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -192,6 +192,8 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 
 	down_write(&oldmm->mmap_sem);
 	flush_cache_mm(oldmm);
+	down_write(&mm->mmap_sem);
+
 	mm->locked_vm = 0;
 	mm->mmap = NULL;
 	mm->mmap_cache = NULL;
@@ -251,10 +253,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		}
 
 		/*
-		 * Link in the new vma and copy the page table entries:
-		 * link in first so that swapoff can see swap entries.
-		 * Note that, exceptionally, here the vma is inserted
-		 * without holding mm->mmap_sem.
+		 * Link in the new vma and copy the page table entries.
 		 */
 		spin_lock(&mm->page_table_lock);
 		*pprev = tmp;
@@ -275,8 +274,8 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			goto out;
 	}
 	retval = 0;
-
 out:
+	up_write(&mm->mmap_sem);
 	flush_tlb_mm(oldmm);
 	up_write(&oldmm->mmap_sem);
 	return retval;
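
A minimal userspace sketch of the locking order the patch establishes (an assumption-laden illustration, not kernel code: pthread primitives stand in for the kernel's rwsem and spinlock, and mm_struct/vm_area_struct are reduced to what the example needs). The parent's mmap_sem is taken first, then the child's, and each copied vma is linked in under the child's page_table_lock, with the semaphores released in reverse order:

/*
 * Hypothetical sketch of the dup_mmap locking order after this patch.
 * Names mirror the kernel's, but everything here is plain userspace C.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct vm_area_struct {
	unsigned long vm_start, vm_end;
	struct vm_area_struct *vm_next;
};

struct mm_struct {
	pthread_rwlock_t mmap_sem;	/* stands in for the kernel rw_semaphore */
	pthread_mutex_t page_table_lock;	/* stands in for the spinlock */
	struct vm_area_struct *mmap;	/* head of the vma list */
};

static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
{
	struct vm_area_struct *src, *tmp, **pprev = &mm->mmap;
	int retval = 0;

	pthread_rwlock_wrlock(&oldmm->mmap_sem);	/* parent's mmap_sem first */
	pthread_rwlock_wrlock(&mm->mmap_sem);		/* then the child's (the new step) */

	for (src = oldmm->mmap; src; src = src->vm_next) {
		tmp = malloc(sizeof(*tmp));
		if (!tmp) {
			retval = -1;
			goto out;
		}
		*tmp = *src;
		tmp->vm_next = NULL;

		/* link each copy into the child's list under its page_table_lock */
		pthread_mutex_lock(&mm->page_table_lock);
		*pprev = tmp;
		pprev = &tmp->vm_next;
		pthread_mutex_unlock(&mm->page_table_lock);
	}
out:
	pthread_rwlock_unlock(&mm->mmap_sem);		/* release child before parent */
	pthread_rwlock_unlock(&oldmm->mmap_sem);
	return retval;
}

int main(void)
{
	struct mm_struct parent = { .mmap = NULL }, child = { .mmap = NULL };
	struct vm_area_struct a = { 0x1000, 0x2000, NULL };

	pthread_rwlock_init(&parent.mmap_sem, NULL);
	pthread_rwlock_init(&child.mmap_sem, NULL);
	pthread_mutex_init(&parent.page_table_lock, NULL);
	pthread_mutex_init(&child.page_table_lock, NULL);

	parent.mmap = &a;
	if (dup_mmap(&child, &parent) == 0)
		printf("copied vma %lx-%lx\n", child.mmap->vm_start, child.mmap->vm_end);
	free(child.mmap);
	return 0;
}

Dropping the child's mmap_sem before the parent's simply reverses the acquisition order, which keeps the ordering trivially deadlock-free, in the "keep it simple" spirit of the commit message.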