path: root/mm
author		Hugh Dickins <hughd@google.com>	2013-02-22 19:35:13 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-02-23 20:50:19 -0500
commit		b79bc0a0c79e06cc87e17530e9c1c56c6f297e17 (patch)
tree		617d8e3b2d8be512373c3351a92630c28248ecee /mm
parent		4146d2d673e8d6abf9b30a5b5dd8cd95f29632eb (diff)
ksm: enable KSM page migration
Migration of KSM pages is now safe: remove the PageKsm restrictions
from mempolicy.c and migrate.c.

But keep PageKsm out of __unmap_and_move()'s anon_vma contortions,
which are irrelevant to KSM: it looks as if that code was preventing
hotremove migration of KSM pages, unless they happened to be in
swapcache.

There is some question as to whether enforcing a NUMA mempolicy
migration ought to migrate KSM pages, mapped into entirely unrelated
processes; but moving page_mapcount > 1 is only permitted with
MPOL_MF_MOVE_ALL anyway, and it seems reasonable to assume that you
wouldn't set MADV_MERGEABLE on any area where this is a worry.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Petr Holasek <pholasek@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Izik Eidus <izik.eidus@ravellosystems.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
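For context, the page_mapcount > 1 restriction the message refers to is
enforced by migrate_page_add() in mm/mempolicy.c, which this patch does
not touch. As a rough sketch of how that gate reads in the kernel of this
era (illustrative only, not part of this patch):

static void migrate_page_add(struct page *page, struct list_head *pagelist,
				unsigned long flags)
{
	/*
	 * Avoid migrating a page that is shared with others: a page
	 * with page_mapcount > 1 is only queued for migration when
	 * MPOL_MF_MOVE_ALL was requested.
	 */
	if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
		if (!isolate_lru_page(page)) {
			list_add_tail(&page->lru, pagelist);
			inc_zone_page_state(page, NR_ISOLATED_ANON +
					    page_is_file_cache(page));
		}
	}
}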
Diffstat (limited to 'mm')
-rw-r--r--	mm/mempolicy.c	3
-rw-r--r--	mm/migrate.c	21
2 files changed, 4 insertions(+), 20 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 2ae78e255e08..d344c36db63f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -496,9 +496,8 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		/*
 		 * vm_normal_page() filters out zero pages, but there might
 		 * still be PageReserved pages to skip, perhaps in a VDSO.
-		 * And we cannot move PageKsm pages sensibly or safely yet.
 		 */
-		if (PageReserved(page) || PageKsm(page))
+		if (PageReserved(page))
 			continue;
 		nid = page_to_nid(page);
 		if (node_isset(nid, *nodes) == !!(flags & MPOL_MF_INVERT))
diff --git a/mm/migrate.c b/mm/migrate.c
index e545ce7ddc17..20a03eb0667f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -731,20 +731,6 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		lock_page(page);
 	}
 
-	/*
-	 * Only memory hotplug's offline_pages() caller has locked out KSM,
-	 * and can safely migrate a KSM page. The other cases have skipped
-	 * PageKsm along with PageReserved - but it is only now when we have
-	 * the page lock that we can be certain it will not go KSM beneath us
-	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
-	 * its pagecount raised, but only here do we take the page lock which
-	 * serializes that).
-	 */
-	if (PageKsm(page) && !offlining) {
-		rc = -EBUSY;
-		goto unlock;
-	}
-
 	/* charge against new page */
 	mem_cgroup_prepare_migration(page, newpage, &mem);
 
@@ -771,7 +757,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 	 * File Caches may use write_page() or lock_page() in migration, then,
 	 * just care Anon page here.
 	 */
-	if (PageAnon(page)) {
+	if (PageAnon(page) && !PageKsm(page)) {
 		/*
 		 * Only page_lock_anon_vma_read() understands the subtleties of
 		 * getting a hold on an anon_vma from outside one of its mms.
@@ -851,7 +837,6 @@ uncharge:
 	mem_cgroup_end_migration(mem, page, newpage,
 				 (rc == MIGRATEPAGE_SUCCESS ||
 				  rc == MIGRATEPAGE_BALLOON_SUCCESS));
-unlock:
 	unlock_page(page);
 out:
 	return rc;
@@ -1155,7 +1140,7 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 			goto set_status;
 
 		/* Use PageReserved to check for zero page */
-		if (PageReserved(page) || PageKsm(page))
+		if (PageReserved(page))
 			goto put_and_set;
 
 		pp->page = page;
@@ -1317,7 +1302,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
 
 		err = -ENOENT;
 		/* Use PageReserved to check for zero page */
-		if (!page || PageReserved(page) || PageKsm(page))
+		if (!page || PageReserved(page))
 			goto set_status;
 
 		err = page_to_nid(page);