path: root/mm
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-05-24 20:12:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:19 -0400
commit	746b18d421da7f27e948e8af1ad82b6d0309324d (patch)
tree	d4e756977673b7b8166c00e34a21d85c5e26ea6e /mm
parent	6111e4ca6829a0e8b092b8e5eeb6b5366091f29c (diff)
mm: use refcounts for page_lock_anon_vma()
Convert page_lock_anon_vma() over to use refcounts. This is done to prepare for the conversion of anon_vma from spinlock to mutex.

Sadly this increases the cost of page_lock_anon_vma() from one to two atomics; a follow-up patch addresses this, so let's keep it simple for now.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: David Miller <davem@davemloft.net>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Russell King <rmk@arm.linux.org.uk>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Namhyung Kim <namhyung@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/migrate.c	17
-rw-r--r--	mm/rmap.c	42
2 files changed, 31 insertions, 28 deletions
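Before the diff itself, here is a minimal userspace sketch (C11 atomics; not kernel code, all names hypothetical) of the "inc-not-zero" idiom the patch switches to: a lookup may only take a reference while the count is still nonzero, so it can safely race with the final put. The kernel version, atomic_inc_not_zero() attempted under rcu_read_lock() with SLAB_DESTROY_BY_RCU keeping the slab memory type-stable, adds the guarantees that make the attempt itself safe; those pieces are omitted below.

/*
 * Hypothetical userspace model of the "inc-not-zero" refcount idiom.
 * In the kernel this is atomic_inc_not_zero(), done under
 * rcu_read_lock(); here we only model the refcount itself.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;		/* 0 means the object is dying */
};

/* Take a reference only if the count is still nonzero. */
static bool obj_tryget(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0)
		/* CAS so we never resurrect a count that already hit zero;
		 * on failure, 'old' is reloaded and the loop re-checks it. */
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;	/* pinned: object cannot be freed */

	return false;			/* lost the race with the last put */
}

/* Drop a reference; the last one frees the object. */
static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1)
		free(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);	/* creation reference */
	if (obj_tryget(o))		/* succeeds: count was 1 */
		obj_put(o);
	obj_put(o);			/* last put frees o */
	return 0;
}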
diff --git a/mm/migrate.c b/mm/migrate.c
index 34132f8e9109..e4a5c912983d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -721,15 +721,11 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	 * Only page_lock_anon_vma() understands the subtleties of
 	 * getting a hold on an anon_vma from outside one of its mms.
 	 */
-	anon_vma = page_lock_anon_vma(page);
+	anon_vma = page_get_anon_vma(page);
 	if (anon_vma) {
 		/*
-		 * Take a reference count on the anon_vma if the
-		 * page is mapped so that it is guaranteed to
-		 * exist when the page is remapped later
+		 * Anon page
 		 */
-		get_anon_vma(anon_vma);
-		page_unlock_anon_vma(anon_vma);
 	} else if (PageSwapCache(page)) {
 		/*
 		 * We cannot be sure that the anon_vma of an unmapped
@@ -857,13 +853,8 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 		lock_page(hpage);
 	}
 
-	if (PageAnon(hpage)) {
-		anon_vma = page_lock_anon_vma(hpage);
-		if (anon_vma) {
-			get_anon_vma(anon_vma);
-			page_unlock_anon_vma(anon_vma);
-		}
-	}
+	if (PageAnon(hpage))
+		anon_vma = page_get_anon_vma(hpage);
 
 	try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
 
diff --git a/mm/rmap.c b/mm/rmap.c
index cc140811af56..d271845d7d15 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -337,9 +337,9 @@ void __init anon_vma_init(void)
  * that the anon_vma pointer from page->mapping is valid if there is a
  * mapcount, we can dereference the anon_vma after observing those.
  */
-struct anon_vma *page_lock_anon_vma(struct page *page)
+struct anon_vma *page_get_anon_vma(struct page *page)
 {
-	struct anon_vma *anon_vma, *root_anon_vma;
+	struct anon_vma *anon_vma = NULL;
 	unsigned long anon_mapping;
 
 	rcu_read_lock();
@@ -350,30 +350,42 @@ struct anon_vma *page_lock_anon_vma(struct page *page)
 		goto out;
 
 	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
-	root_anon_vma = ACCESS_ONCE(anon_vma->root);
-	spin_lock(&root_anon_vma->lock);
+	if (!atomic_inc_not_zero(&anon_vma->refcount)) {
+		anon_vma = NULL;
+		goto out;
+	}
 
 	/*
 	 * If this page is still mapped, then its anon_vma cannot have been
-	 * freed.  But if it has been unmapped, we have no security against
-	 * the anon_vma structure being freed and reused (for another anon_vma:
-	 * SLAB_DESTROY_BY_RCU guarantees that - so the spin_lock above cannot
-	 * corrupt): with anon_vma_prepare() or anon_vma_fork() redirecting
-	 * anon_vma->root before page_unlock_anon_vma() is called to unlock.
+	 * freed.  But if it has been unmapped, we have no security against the
+	 * anon_vma structure being freed and reused (for another anon_vma:
+	 * SLAB_DESTROY_BY_RCU guarantees that - so the atomic_inc_not_zero()
+	 * above cannot corrupt).
 	 */
-	if (page_mapped(page))
-		return anon_vma;
-
-	spin_unlock(&root_anon_vma->lock);
+	if (!page_mapped(page)) {
+		put_anon_vma(anon_vma);
+		anon_vma = NULL;
+	}
 out:
 	rcu_read_unlock();
-	return NULL;
+
+	return anon_vma;
+}
+
+struct anon_vma *page_lock_anon_vma(struct page *page)
+{
+	struct anon_vma *anon_vma = page_get_anon_vma(page);
+
+	if (anon_vma)
+		anon_vma_lock(anon_vma);
+
+	return anon_vma;
 }
 
 void page_unlock_anon_vma(struct anon_vma *anon_vma)
 {
 	anon_vma_unlock(anon_vma);
-	rcu_read_unlock();
+	put_anon_vma(anon_vma);
 }
 
 /*
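One detail worth noting in the new page_unlock_anon_vma(): the unlock happens strictly before put_anon_vma(), presumably because the put may drop the last reference and free the anon_vma that the lock is reached through. A minimal userspace model of that lock-plus-reference pairing (C11 plus pthreads; not kernel code, all names hypothetical; the real page_lock_anon_vma() uses the fallible page_get_anon_vma() where this sketch takes an unconditional reference):

/*
 * Hypothetical model of the lock/unlock pairing introduced above:
 * "lock" = pin the object, then take its lock;
 * "unlock" = drop the lock, then drop the pin.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;
	pthread_mutex_t lock;	/* lives inside the refcounted object */
};

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		pthread_mutex_destroy(&o->lock);
		free(o);
	}
}

/* Model of page_lock_anon_vma(): pin first, then lock. */
static void obj_lock(struct obj *o)
{
	atomic_fetch_add(&o->refcount, 1);	/* pin before locking */
	pthread_mutex_lock(&o->lock);
}

/*
 * Model of page_unlock_anon_vma(): unlock strictly before the put --
 * the put may be the last reference and free the object, lock included.
 */
static void obj_unlock(struct obj *o)
{
	pthread_mutex_unlock(&o->lock);
	obj_put(o);
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	atomic_init(&o->refcount, 1);		/* creation reference */
	pthread_mutex_init(&o->lock, NULL);

	obj_lock(o);
	obj_unlock(o);	/* object still alive: creation reference remains */
	obj_put(o);	/* last reference: destroys the lock, frees o */
	return 0;
}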