author	Rik van Riel <riel@redhat.com>	2010-08-09 20:18:41 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-09 23:44:55 -0400
commit	76545066c8521f3e32c849744744842b4df25b79 (patch)
tree	978b6b003f63e1e22618586b7d9c2dd8ef363614
parent	012f18004da33ba672e3c60838cc4898126174d3 (diff)
mm: extend KSM refcounts to the anon_vma root
KSM reference counts can cause an anon_vma to exist after the process it
belongs to has already exited.  Because the anon_vma lock now lives in the
root anon_vma, we need to ensure that the root anon_vma stays around until
after all the "child" anon_vmas have been freed.

The obvious way to do this is to have a "child" anon_vma take a reference
to the root in anon_vma_fork.  When the anon_vma is freed at munmap or
process exit, we drop the refcount in anon_vma_unlink and possibly free
the root anon_vma.

The KSM anon_vma reference count function also needs to be modified to
deal with the possibility of freeing 2 levels of anon_vma.  The easiest
way to do this is to break out the KSM magic and make it generic.  When
compiling without CONFIG_KSM, this code is compiled out.

Signed-off-by: Rik van Riel <riel@redhat.com>
Tested-by: Larry Woodman <lwoodman@redhat.com>
Acked-by: Larry Woodman <lwoodman@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Tested-by: Dave Young <hidave.darkstar@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
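To make the lifetime rule easier to follow, here is a small standalone userspace
sketch of the two-level drop described above.  It is not kernel code: the
fake_anon_vma struct, the fake_* helpers and the plain integer counter are
stand-ins for anon_vma, get_anon_vma()/drop_anon_vma() and the atomic
external_refcount, and it leaves out the locking and list_empty() checks
entirely.  It only models the ordering the patch relies on: a child pins its
root when it is created, and the root is freed only once its own reference and
every child's pin are gone.

/* Toy model only -- simplified stand-in for the anon_vma refcounting. */
#include <stdlib.h>

struct fake_anon_vma {
	struct fake_anon_vma *root;	/* root of the tree; points to itself for a root */
	int refcount;			/* stand-in for the atomic external_refcount */
};

static struct fake_anon_vma *fake_root_alloc(void)
{
	struct fake_anon_vma *root = calloc(1, sizeof(*root));

	root->root = root;
	root->refcount = 1;		/* the owning process's own reference */
	return root;
}

/* Mirrors the get_anon_vma(anon_vma->root) added to anon_vma_fork(). */
static struct fake_anon_vma *fake_fork(struct fake_anon_vma *parent)
{
	struct fake_anon_vma *child = calloc(1, sizeof(*child));

	child->root = parent->root;
	child->root->refcount++;	/* pin the root for as long as the child exists */
	child->refcount = 1;
	return child;
}

/* Mirrors the shape of drop_anon_vma(): freeing the last child may free the root. */
static void fake_drop(struct fake_anon_vma *av)
{
	struct fake_anon_vma *root = av->root;

	if (--av->refcount > 0)
		return;
	if (av != root) {
		free(av);			/* the child itself goes first */
		if (--root->refcount == 0)
			free(root);		/* its pin was the last reference on the root */
	} else {
		free(root);			/* dropping the root's own last reference */
	}
}

int main(void)
{
	struct fake_anon_vma *root = fake_root_alloc();
	struct fake_anon_vma *child = fake_fork(root);

	fake_drop(root);	/* refcount 2 -> 1: the root outlives the parent */
	fake_drop(child);	/* frees the child, then the now-unreferenced root */
	return 0;
}

In the real patch the decrement, the list_empty() tests and the frees all
happen under the root anon_vma's lock (see drop_anon_vma() in mm/rmap.c
below), which is exactly why the root must stay pinned: the lock that
protects the teardown lives in the root.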
-rw-r--r--	include/linux/rmap.h	15
-rw-r--r--	mm/ksm.c	17
-rw-r--r--	mm/migrate.c	10
-rw-r--r--	mm/rmap.c	46
4 files changed, 69 insertions, 19 deletions
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index af43cb9a0506..dc9b3c0bf5d4 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -81,6 +81,13 @@ static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
 	return atomic_read(&anon_vma->external_refcount);
 }
+
+static inline void get_anon_vma(struct anon_vma *anon_vma)
+{
+	atomic_inc(&anon_vma->external_refcount);
+}
+
+void drop_anon_vma(struct anon_vma *);
 #else
 static inline void anonvma_external_refcount_init(struct anon_vma *anon_vma)
 {
@@ -90,6 +97,14 @@ static inline int anonvma_external_refcount(struct anon_vma *anon_vma)
 {
 	return 0;
 }
+
+static inline void get_anon_vma(struct anon_vma *anon_vma)
+{
+}
+
+static inline void drop_anon_vma(struct anon_vma *anon_vma)
+{
+}
 #endif /* CONFIG_KSM */
 
 static inline struct anon_vma *page_anon_vma(struct page *page)
diff --git a/mm/ksm.c b/mm/ksm.c
index da6037c261f1..9f2acc998a37 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -318,19 +318,14 @@ static void hold_anon_vma(struct rmap_item *rmap_item,
 			  struct anon_vma *anon_vma)
 {
 	rmap_item->anon_vma = anon_vma;
-	atomic_inc(&anon_vma->external_refcount);
+	get_anon_vma(anon_vma);
 }
 
-static void drop_anon_vma(struct rmap_item *rmap_item)
+static void ksm_drop_anon_vma(struct rmap_item *rmap_item)
 {
 	struct anon_vma *anon_vma = rmap_item->anon_vma;
 
-	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
-		int empty = list_empty(&anon_vma->head);
-		anon_vma_unlock(anon_vma);
-		if (empty)
-			anon_vma_free(anon_vma);
-	}
+	drop_anon_vma(anon_vma);
 }
 
 /*
@@ -415,7 +410,7 @@ static void break_cow(struct rmap_item *rmap_item)
 	 * It is not an accident that whenever we want to break COW
 	 * to undo, we also need to drop a reference to the anon_vma.
 	 */
-	drop_anon_vma(rmap_item);
+	ksm_drop_anon_vma(rmap_item);
 
 	down_read(&mm->mmap_sem);
 	if (ksm_test_exit(mm))
@@ -470,7 +465,7 @@ static void remove_node_from_stable_tree(struct stable_node *stable_node)
 			ksm_pages_sharing--;
 		else
 			ksm_pages_shared--;
-		drop_anon_vma(rmap_item);
+		ksm_drop_anon_vma(rmap_item);
 		rmap_item->address &= PAGE_MASK;
 		cond_resched();
 	}
@@ -558,7 +553,7 @@ static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
 		else
 			ksm_pages_shared--;
 
-		drop_anon_vma(rmap_item);
+		ksm_drop_anon_vma(rmap_item);
 		rmap_item->address &= PAGE_MASK;
 
 	} else if (rmap_item->address & UNSTABLE_FLAG) {
diff --git a/mm/migrate.c b/mm/migrate.c
index 5208fa1d9712..38e7cad782f4 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -639,7 +639,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 			 * exist when the page is remapped later
 			 */
 			anon_vma = page_anon_vma(page);
-			atomic_inc(&anon_vma->external_refcount);
+			get_anon_vma(anon_vma);
 		}
 	}
 
@@ -682,12 +682,8 @@ skip_unmap:
 rcu_unlock:
 
 	/* Drop an anon_vma reference if we took one */
-	if (anon_vma && atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
-		int empty = list_empty(&anon_vma->head);
-		anon_vma_unlock(anon_vma);
-		if (empty)
-			anon_vma_free(anon_vma);
-	}
+	if (anon_vma)
+		drop_anon_vma(anon_vma);
 
 	if (rcu_locked)
 		rcu_read_unlock();
diff --git a/mm/rmap.c b/mm/rmap.c
index caa48b27371b..07e9814c7a41 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -235,6 +235,12 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
 	 * lock any of the anon_vmas in this anon_vma tree.
 	 */
 	anon_vma->root = pvma->anon_vma->root;
+	/*
+	 * With KSM refcounts, an anon_vma can stay around longer than the
+	 * process it belongs to.  The root anon_vma needs to be pinned
+	 * until this anon_vma is freed, because the lock lives in the root.
+	 */
+	get_anon_vma(anon_vma->root);
 	/* Mark this anon_vma as the one where our new (COWed) pages go. */
 	vma->anon_vma = anon_vma;
 	anon_vma_chain_link(vma, avc, anon_vma);
@@ -264,8 +270,12 @@ static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
 	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
 	anon_vma_unlock(anon_vma);
 
-	if (empty)
+	if (empty) {
+		/* We no longer need the root anon_vma */
+		if (anon_vma->root != anon_vma)
+			drop_anon_vma(anon_vma->root);
 		anon_vma_free(anon_vma);
+	}
 }
 
 void unlink_anon_vmas(struct vm_area_struct *vma)
@@ -1382,6 +1392,40 @@ int try_to_munlock(struct page *page)
 	return try_to_unmap_file(page, TTU_MUNLOCK);
 }
 
+#if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
+/*
+ * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root
+ * if necessary.  Be careful to do all the tests under the lock.  Once
+ * we know we are the last user, nobody else can get a reference and we
+ * can do the freeing without the lock.
+ */
+void drop_anon_vma(struct anon_vma *anon_vma)
+{
+	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
+		struct anon_vma *root = anon_vma->root;
+		int empty = list_empty(&anon_vma->head);
+		int last_root_user = 0;
+		int root_empty = 0;
+
+		/*
+		 * The refcount on a non-root anon_vma got dropped.  Drop
+		 * the refcount on the root and check if we need to free it.
+		 */
+		if (empty && anon_vma != root) {
+			last_root_user = atomic_dec_and_test(&root->external_refcount);
+			root_empty = list_empty(&root->head);
+		}
+		anon_vma_unlock(anon_vma);
+
+		if (empty) {
+			anon_vma_free(anon_vma);
+			if (root_empty && last_root_user)
+				anon_vma_free(root);
+		}
+	}
+}
+#endif
+
 #ifdef CONFIG_MIGRATION
 /*
  * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():