author		Minchan Kim <minchan.kim@gmail.com>	2011-10-31 20:06:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-10-31 20:30:45 -0400
commit		0dabec93de633a87adfbbe1d800a4c56cd19d73b
tree		51850bc562f8f95d284dbd7baeecfaefd573fccf
parent		f80c0673610e36ae29d63e3297175e22f70dde5f
mm: migration: clean up unmap_and_move()
unmap_and_move() is one big, messy function. Clean it up: split the locked migration core out into a new __unmap_and_move(), leaving unmap_and_move() to handle newpage allocation, the cheap early-exit checks, and the final putback of both pages.
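Condensed from the diff below, the resulting shape looks roughly like this (cgroup, anon_vma and writeback handling elided; an illustration, not the full source):

	static int __unmap_and_move(struct page *page, struct page *newpage,
					int force, bool offlining, bool sync)
	{
		int rc = -EAGAIN;

		if (!trylock_page(page)) {
			if (!force || !sync)
				goto out;	/* rc stays -EAGAIN: caller retries */
			lock_page(page);
		}
		/* ... write back, unmap ptes, move_to_new_page(), remap on failure ... */
		unlock_page(page);
	out:
		return rc;
	}

	static int unmap_and_move(new_page_t get_new_page, unsigned long private,
				struct page *page, int force, bool offlining, bool sync)
	{
		int rc = 0;
		int *result = NULL;
		struct page *newpage = get_new_page(page, private, &result);

		if (!newpage)
			return -ENOMEM;
		if (page_count(page) == 1)
			goto out;	/* page was freed from under us */
		if (unlikely(PageTransHuge(page)))
			if (unlikely(split_huge_page(page)))
				goto out;

		rc = __unmap_and_move(page, newpage, force, offlining, sync);
	out:
		/* ... put back page and newpage, report rc via *result ... */
		return rc;
	}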
Signed-off-by: Minchan Kim <minchan.kim@gmail.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/migrate.c	75	++++++++++++++++++++++++++++++++++++++++-----------------------------------
1 file changed, 40 insertions, 35 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 14d0a6a632f6..33358f878111 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -621,38 +621,18 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	return rc;
 }
 
-/*
- * Obtain the lock on page, remove all ptes and migrate the page
- * to the newly allocated page in newpage.
- */
-static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force, bool offlining, bool sync)
+static int __unmap_and_move(struct page *page, struct page *newpage,
+				int force, bool offlining, bool sync)
 {
-	int rc = 0;
-	int *result = NULL;
-	struct page *newpage = get_new_page(page, private, &result);
+	int rc = -EAGAIN;
 	int remap_swapcache = 1;
 	int charge = 0;
 	struct mem_cgroup *mem;
 	struct anon_vma *anon_vma = NULL;
 
-	if (!newpage)
-		return -ENOMEM;
-
-	if (page_count(page) == 1) {
-		/* page was freed from under us. So we are done. */
-		goto move_newpage;
-	}
-	if (unlikely(PageTransHuge(page)))
-		if (unlikely(split_huge_page(page)))
-			goto move_newpage;
-
-	/* prepare cgroup just returns 0 or -ENOMEM */
-	rc = -EAGAIN;
-
 	if (!trylock_page(page)) {
 		if (!force || !sync)
-			goto move_newpage;
+			goto out;
 
 		/*
 		 * It's not safe for direct compaction to call lock_page.
@@ -668,7 +648,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		 * altogether.
 		 */
 		if (current->flags & PF_MEMALLOC)
-			goto move_newpage;
+			goto out;
 
 		lock_page(page);
 	}
@@ -785,27 +765,52 @@ uncharge:
 		mem_cgroup_end_migration(mem, page, newpage, rc == 0);
 unlock:
 	unlock_page(page);
+out:
+	return rc;
+}
 
-move_newpage:
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+			struct page *page, int force, bool offlining, bool sync)
+{
+	int rc = 0;
+	int *result = NULL;
+	struct page *newpage = get_new_page(page, private, &result);
+
+	if (!newpage)
+		return -ENOMEM;
+
+	if (page_count(page) == 1) {
+		/* page was freed from under us. So we are done. */
+		goto out;
+	}
+
+	if (unlikely(PageTransHuge(page)))
+		if (unlikely(split_huge_page(page)))
+			goto out;
+
+	rc = __unmap_and_move(page, newpage, force, offlining, sync);
+out:
 	if (rc != -EAGAIN) {
 		/*
 		 * A page that has been migrated has all references
 		 * removed and will be freed. A page that has not been
 		 * migrated will have kepts its references and be
 		 * restored.
 		 */
 		list_del(&page->lru);
 		dec_zone_page_state(page, NR_ISOLATED_ANON +
 				page_is_file_cache(page));
 		putback_lru_page(page);
 	}
-
 	/*
 	 * Move the new page to the LRU. If migration was not successful
 	 * then this will free the page.
 	 */
 	putback_lru_page(newpage);
-
 	if (result) {
 		if (rc)
 			*result = rc;
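For context on the new rc = -EAGAIN default: the caller, migrate_pages(), retries a page on a later pass when unmap_and_move() returns -EAGAIN. A simplified sketch of that retry loop as it looked in kernels of this era (reconstructed for illustration; not part of this patch):

	/* Inside migrate_pages(): up to 10 passes over the page list.
	 * force (pass > 2) makes later passes block on the page lock.
	 */
	for (pass = 0; pass < 10 && retry; pass++) {
		retry = 0;
		list_for_each_entry_safe(page, page2, from, lru) {
			cond_resched();
			rc = unmap_and_move(get_new_page, private,
					page, pass > 2, offlining, sync);
			switch (rc) {
			case -ENOMEM:
				goto out;	/* no target page; give up */
			case -EAGAIN:
				retry++;	/* e.g. trylock_page() failed */
				break;
			case 0:
				break;		/* migrated successfully */
			default:
				nr_failed++;	/* permanent failure */
				break;
			}
		}
	}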