author     Mel Gorman <mel@csn.ul.ie>                      2010-05-24 17:32:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-05-25 11:06:59 -0400
commit     3fe2011ff51e92500010a495df4be86745fbbda9
tree       f3dc07b9d41ce5ba15805c2d3b4bc0a1a7916832  /mm/migrate.c
parent     67b9509b2c68ae38cecb83a239881cb0ddf087dc
mm: migration: allow the migration of PageSwapCache pages
PageAnon pages that are unmapped may or may not have an anon_vma, so they are
not currently migrated. However, a swap cache page can be migrated and fits
this description. This patch identifies swap cache pages and allows them to be
migrated, but ensures that no attempt is made to remap pages in a way that
could access an already freed anon_vma.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  53
1 file changed, 36 insertions(+), 17 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index b114635962dc..4afd6fe3c074 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -485,7 +485,8 @@ static int fallback_migrate_page(struct address_space *mapping,
  *   < 0 - error code
  *  == 0 - success
  */
-static int move_to_new_page(struct page *newpage, struct page *page)
+static int move_to_new_page(struct page *newpage, struct page *page,
+						int remap_swapcache)
 {
 	struct address_space *mapping;
 	int rc;
@@ -520,10 +521,12 @@ static int move_to_new_page(struct page *newpage, struct page *page)
 	else
 		rc = fallback_migrate_page(mapping, newpage, page);
 
-	if (!rc)
-		remove_migration_ptes(page, newpage);
-	else
+	if (rc) {
 		newpage->mapping = NULL;
+	} else {
+		if (remap_swapcache)
+			remove_migration_ptes(page, newpage);
+	}
 
 	unlock_page(newpage);
 
@@ -540,6 +543,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	int rc = 0;
 	int *result = NULL;
 	struct page *newpage = get_new_page(page, private, &result);
+	int remap_swapcache = 1;
 	int rcu_locked = 0;
 	int charge = 0;
 	struct mem_cgroup *mem = NULL;
@@ -601,18 +605,33 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		rcu_read_lock();
 		rcu_locked = 1;
 
-		/*
-		 * If the page has no mappings any more, just bail. An
-		 * unmapped anon page is likely to be freed soon but worse,
-		 * it's possible its anon_vma disappeared between when
-		 * the page was isolated and when we reached here while
-		 * the RCU lock was not held
-		 */
-		if (!page_mapped(page))
-			goto rcu_unlock;
+		/* Determine how to safely use anon_vma */
+		if (!page_mapped(page)) {
+			if (!PageSwapCache(page))
+				goto rcu_unlock;
 
-		anon_vma = page_anon_vma(page);
-		atomic_inc(&anon_vma->external_refcount);
+			/*
+			 * We cannot be sure that the anon_vma of an unmapped
+			 * swapcache page is safe to use because we don't
+			 * know in advance if the VMA that this page belonged
+			 * to still exists. If the VMA and others sharing the
+			 * data have been freed, then the anon_vma could
+			 * already be invalid.
+			 *
+			 * To avoid this possibility, swapcache pages get
+			 * migrated but are not remapped when migration
+			 * completes
+			 */
+			remap_swapcache = 0;
+		} else {
+			/*
+			 * Take a reference count on the anon_vma if the
+			 * page is mapped so that it is guaranteed to
+			 * exist when the page is remapped later
+			 */
+			anon_vma = page_anon_vma(page);
+			atomic_inc(&anon_vma->external_refcount);
+		}
 	}
 
 	/*
@@ -647,9 +666,9 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 
 skip_unmap:
 	if (!page_mapped(page))
-		rc = move_to_new_page(newpage, page);
+		rc = move_to_new_page(newpage, page, remap_swapcache);
 
-	if (rc)
+	if (rc && remap_swapcache)
 		remove_migration_ptes(page, page);
 rcu_unlock:
 
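Read together, the hunks above amount to one decision in unmap_and_move(): an unmapped anon page is migrated only if it sits in the swap cache, and in that case the migration PTEs are never re-established because the page's anon_vma may already have been freed. The condensed C sketch below illustrates that flow only; it is not the kernel function itself, and it omits the RCU locking, memcg charging, writeback handling and the other error paths that surround this logic in mm/migrate.c. The helper names (PageAnon, page_mapped, PageSwapCache, page_anon_vma, move_to_new_page, remove_migration_ptes) are the ones used in the diff; the skeleton around them is an illustrative assumption, not the real call sequence.

/*
 * Condensed sketch of the anon_vma handling introduced by this patch.
 * Not compilable kernel code: locking, memcg and most error handling omitted.
 */
static int unmap_and_move_sketch(struct page *page, struct page *newpage)
{
	struct anon_vma *anon_vma = NULL;
	int remap_swapcache = 1;	/* default: re-establish PTEs afterwards */
	int rc = -EAGAIN;

	if (PageAnon(page)) {
		if (!page_mapped(page)) {
			/* Unmapped and not in swap cache: still not migrated */
			if (!PageSwapCache(page))
				goto out;
			/*
			 * Unmapped swapcache page: its anon_vma may already be
			 * gone, so migrate it but skip the remap step entirely.
			 */
			remap_swapcache = 0;
		} else {
			/* Mapped: pin the anon_vma so it survives until remap */
			anon_vma = page_anon_vma(page);
			atomic_inc(&anon_vma->external_refcount);
		}
	}

	/* ... the page is unmapped here (try_to_unmap) ... */

	if (!page_mapped(page))
		rc = move_to_new_page(newpage, page, remap_swapcache);

	/* On failure, only restore migration PTEs when it is safe to do so */
	if (rc && remap_swapcache)
		remove_migration_ptes(page, page);
out:
	return rc;
}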