author		Mel Gorman <mel@csn.ul.ie>	2011-01-13 18:45:57 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:34 -0500
commit		77f1fe6b08b13a87391549c8a820ddc817b6f50e
tree		720865bd0994da3787b6f37d33b2ee4c26a2de6c /mm/migrate.c
parent		3e7d344970673c5334cf7b5bb27c8c0942b06126
mm: migration: allow migration to operate asynchronously and avoid synchronous compaction in the faster path
Migration synchronously waits for writeback if the initial passes fail.
Callers of memory compaction do not necessarily want this behaviour if the
caller is latency-sensitive or does not expect synchronous migration to
have a significantly better success rate.
This patch adds a sync parameter to migrate_pages(), allowing the caller to
indicate whether wait_on_page_writeback() is allowed within migration.
For reclaim/compaction, try_to_compact_pages() is first called
asynchronously, direct reclaim runs, and then try_to_compact_pages() is
called synchronously, as there is a greater expectation that it will
succeed; a sketch of this ordering follows the tags below.
[akpm@linux-foundation.org: build/merge fix]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
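
A minimal sketch of the ordering described above, for reference only; it is
not code from this patch. Compaction is attempted first with asynchronous
migration, direct reclaim runs, and only the retry uses synchronous
migration. The helpers try_compact() and try_reclaim() are hypothetical
stand-ins for the allocator's direct-compaction and direct-reclaim entry
points (__alloc_pages_direct_compact() and __alloc_pages_direct_reclaim()
at the time of this patch).

	#include <stdbool.h>

	struct page;

	/* Hypothetical stand-ins for the allocator slow-path helpers. */
	struct page *try_compact(unsigned int order, bool sync_migration);
	struct page *try_reclaim(unsigned int order);

	static struct page *slowpath_sketch(unsigned int order)
	{
		struct page *page;
		bool sync_migration = false;

		/* First attempt: asynchronous migration never waits on
		 * page writeback, so latency stays low. */
		page = try_compact(order, sync_migration);
		if (page)
			return page;

		/* Reclaim frees pages so that a compaction retry is more
		 * likely to succeed. */
		page = try_reclaim(order);
		if (page)
			return page;

		/* Retry: synchronous migration is allowed to call
		 * wait_on_page_writeback(). */
		sync_migration = true;
		return try_compact(order, sync_migration);
	}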
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	22	+++++++++++++---------
1 file changed, 13 insertions(+), 9 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 94875b265928..dc47f6c40353 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -614,7 +614,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force, int offlining)
+			struct page *page, int force, int offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -682,7 +682,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	BUG_ON(charge);
 
 	if (PageWriteback(page)) {
-		if (!force)
+		if (!force || !sync)
 			goto uncharge;
 		wait_on_page_writeback(page);
 	}
@@ -827,7 +827,7 @@ move_newpage:
  */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
 				unsigned long private, struct page *hpage,
-				int force, int offlining)
+				int force, int offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -841,7 +841,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	rc = -EAGAIN;
 
 	if (!trylock_page(hpage)) {
-		if (!force)
+		if (!force || !sync)
 			goto out;
 		lock_page(hpage);
 	}
@@ -909,7 +909,8 @@ out:
  * Return: Number of pages not migrated or error code.
  */
 int migrate_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, int offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -929,7 +930,8 @@ int migrate_pages(struct list_head *from,
 			cond_resched();
 
 			rc = unmap_and_move(get_new_page, private,
-					page, pass > 2, offlining);
+					page, pass > 2, offlining,
+					sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -958,7 +960,8 @@ out:
 }
 
 int migrate_huge_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, int offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -974,7 +977,8 @@ int migrate_huge_pages(struct list_head *from,
 			cond_resched();
 
 			rc = unmap_and_move_huge_page(get_new_page,
-					private, page, pass > 2, offlining);
+					private, page, pass > 2, offlining,
+					sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -1107,7 +1111,7 @@ set_status:
 	err = 0;
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_page_node,
-				(unsigned long)pm, 0);
+				(unsigned long)pm, 0, true);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
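
The last hunk shows the pattern existing callers follow: correctness-driven
users pass true and keep the old blocking behaviour. A hedged sketch of both
sides of the choice; only the move_pages() call is part of the mm/migrate.c
diff above, and the compaction-side caller shown second is an assumption
based on the changelog (its update lives outside this file view):

	/* Correctness-driven caller (the move_pages() path above):
	 * sync == true preserves the old behaviour of waiting on
	 * writeback when force is set. */
	err = migrate_pages(&pagelist, new_page_node,
			(unsigned long)pm, 0, true);

	/* Latency-sensitive caller such as the first, asynchronous
	 * compaction attempt: sync == false makes migration skip pages
	 * under writeback instead of blocking on them.  Illustrative
	 * only; migratepages and cc name compaction-side state that is
	 * not shown in this diff. */
	nr_failed = migrate_pages(&migratepages, compaction_alloc,
			(unsigned long)cc, 0, false);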