diff options
author | Hugh Dickins <hughd@google.com> | 2013-02-22 19:35:14 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2013-02-23 20:50:19 -0500 |
commit | 9c620e2bc5aa4256c102ada34e6c76204ed5898b (patch) | |
tree | 667dc829d7c5fa0261658fa6488f67cbd52bfb7e /mm/migrate.c | |
parent | b79bc0a0c79e06cc87e17530e9c1c56c6f297e17 (diff) |
mm: remove offlining arg to migrate_pages
No functional change, but the only purpose of the offlining argument to
migrate_pages() etc. was to ensure that __unmap_and_move() could migrate a
KSM page for memory hotremove (which took ksm_thread_mutex) but not for
other callers. Now all cases are safe, so remove the argument.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/migrate.c')
-rw-r--r-- | mm/migrate.c | 35 |
1 file changed, 13 insertions, 22 deletions
diff --git a/mm/migrate.c b/mm/migrate.c index 20a03eb0667f..3bbaf5d230b0 100644 --- a/mm/migrate.c +++ b/mm/migrate.c | |||
@@ -701,7 +701,7 @@ static int move_to_new_page(struct page *newpage, struct page *page, | |||
701 | } | 701 | } |
702 | 702 | ||
703 | static int __unmap_and_move(struct page *page, struct page *newpage, | 703 | static int __unmap_and_move(struct page *page, struct page *newpage, |
704 | int force, bool offlining, enum migrate_mode mode) | 704 | int force, enum migrate_mode mode) |
705 | { | 705 | { |
706 | int rc = -EAGAIN; | 706 | int rc = -EAGAIN; |
707 | int remap_swapcache = 1; | 707 | int remap_swapcache = 1; |
@@ -847,8 +847,7 @@ out: | |||
847 | * to the newly allocated page in newpage. | 847 | * to the newly allocated page in newpage. |
848 | */ | 848 | */ |
849 | static int unmap_and_move(new_page_t get_new_page, unsigned long private, | 849 | static int unmap_and_move(new_page_t get_new_page, unsigned long private, |
850 | struct page *page, int force, bool offlining, | 850 | struct page *page, int force, enum migrate_mode mode) |
851 | enum migrate_mode mode) | ||
852 | { | 851 | { |
853 | int rc = 0; | 852 | int rc = 0; |
854 | int *result = NULL; | 853 | int *result = NULL; |
@@ -866,7 +865,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private, | |||
866 | if (unlikely(split_huge_page(page))) | 865 | if (unlikely(split_huge_page(page))) |
867 | goto out; | 866 | goto out; |
868 | 867 | ||
869 | rc = __unmap_and_move(page, newpage, force, offlining, mode); | 868 | rc = __unmap_and_move(page, newpage, force, mode); |
870 | 869 | ||
871 | if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) { | 870 | if (unlikely(rc == MIGRATEPAGE_BALLOON_SUCCESS)) { |
872 | /* | 871 | /* |
@@ -926,8 +925,7 @@ out: | |||
926 | */ | 925 | */ |
927 | static int unmap_and_move_huge_page(new_page_t get_new_page, | 926 | static int unmap_and_move_huge_page(new_page_t get_new_page, |
928 | unsigned long private, struct page *hpage, | 927 | unsigned long private, struct page *hpage, |
929 | int force, bool offlining, | 928 | int force, enum migrate_mode mode) |
930 | enum migrate_mode mode) | ||
931 | { | 929 | { |
932 | int rc = 0; | 930 | int rc = 0; |
933 | int *result = NULL; | 931 | int *result = NULL; |
@@ -989,9 +987,8 @@ out: | |||
989 | * | 987 | * |
990 | * Return: Number of pages not migrated or error code. | 988 | * Return: Number of pages not migrated or error code. |
991 | */ | 989 | */ |
992 | int migrate_pages(struct list_head *from, | 990 | int migrate_pages(struct list_head *from, new_page_t get_new_page, |
993 | new_page_t get_new_page, unsigned long private, bool offlining, | 991 | unsigned long private, enum migrate_mode mode, int reason) |
994 | enum migrate_mode mode, int reason) | ||
995 | { | 992 | { |
996 | int retry = 1; | 993 | int retry = 1; |
997 | int nr_failed = 0; | 994 | int nr_failed = 0; |
@@ -1012,8 +1009,7 @@ int migrate_pages(struct list_head *from, | |||
1012 | cond_resched(); | 1009 | cond_resched(); |
1013 | 1010 | ||
1014 | rc = unmap_and_move(get_new_page, private, | 1011 | rc = unmap_and_move(get_new_page, private, |
1015 | page, pass > 2, offlining, | 1012 | page, pass > 2, mode); |
1016 | mode); | ||
1017 | 1013 | ||
1018 | switch(rc) { | 1014 | switch(rc) { |
1019 | case -ENOMEM: | 1015 | case -ENOMEM: |
@@ -1046,15 +1042,13 @@ out: | |||
1046 | } | 1042 | } |
1047 | 1043 | ||
1048 | int migrate_huge_page(struct page *hpage, new_page_t get_new_page, | 1044 | int migrate_huge_page(struct page *hpage, new_page_t get_new_page, |
1049 | unsigned long private, bool offlining, | 1045 | unsigned long private, enum migrate_mode mode) |
1050 | enum migrate_mode mode) | ||
1051 | { | 1046 | { |
1052 | int pass, rc; | 1047 | int pass, rc; |
1053 | 1048 | ||
1054 | for (pass = 0; pass < 10; pass++) { | 1049 | for (pass = 0; pass < 10; pass++) { |
1055 | rc = unmap_and_move_huge_page(get_new_page, | 1050 | rc = unmap_and_move_huge_page(get_new_page, private, |
1056 | private, hpage, pass > 2, offlining, | 1051 | hpage, pass > 2, mode); |
1057 | mode); | ||
1058 | switch (rc) { | 1052 | switch (rc) { |
1059 | case -ENOMEM: | 1053 | case -ENOMEM: |
1060 | goto out; | 1054 | goto out; |
@@ -1177,8 +1171,7 @@ set_status: | |||
1177 | err = 0; | 1171 | err = 0; |
1178 | if (!list_empty(&pagelist)) { | 1172 | if (!list_empty(&pagelist)) { |
1179 | err = migrate_pages(&pagelist, new_page_node, | 1173 | err = migrate_pages(&pagelist, new_page_node, |
1180 | (unsigned long)pm, 0, MIGRATE_SYNC, | 1174 | (unsigned long)pm, MIGRATE_SYNC, MR_SYSCALL); |
1181 | MR_SYSCALL); | ||
1182 | if (err) | 1175 | if (err) |
1183 | putback_lru_pages(&pagelist); | 1176 | putback_lru_pages(&pagelist); |
1184 | } | 1177 | } |
@@ -1613,10 +1606,8 @@ int migrate_misplaced_page(struct page *page, int node) | |||
1613 | goto out; | 1606 | goto out; |
1614 | 1607 | ||
1615 | list_add(&page->lru, &migratepages); | 1608 | list_add(&page->lru, &migratepages); |
1616 | nr_remaining = migrate_pages(&migratepages, | 1609 | nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_page, |
1617 | alloc_misplaced_dst_page, | 1610 | node, MIGRATE_ASYNC, MR_NUMA_MISPLACED); |
1618 | node, false, MIGRATE_ASYNC, | ||
1619 | MR_NUMA_MISPLACED); | ||
1620 | if (nr_remaining) { | 1611 | if (nr_remaining) { |
1621 | putback_lru_pages(&migratepages); | 1612 | putback_lru_pages(&migratepages); |
1622 | isolated = 0; | 1613 | isolated = 0; |