about summary refs log tree commit diff stats
path: root/mm/migrate.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--  mm/migrate.c  27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 0b714747c028..2a0ea3ef509e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -543,7 +543,7 @@ static int move_to_new_page(struct page *newpage, struct page *page)
  * to the newly allocated page in newpage.
  */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force)
+			struct page *page, int force, int offlining)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -569,6 +569,20 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 		lock_page(page);
 	}
 
+	/*
+	 * Only memory hotplug's offline_pages() caller has locked out KSM,
+	 * and can safely migrate a KSM page. The other cases have skipped
+	 * PageKsm along with PageReserved - but it is only now when we have
+	 * the page lock that we can be certain it will not go KSM beneath us
+	 * (KSM will not upgrade a page from PageAnon to PageKsm when it sees
+	 * its pagecount raised, but only here do we take the page lock which
+	 * serializes that).
+	 */
+	if (PageKsm(page) && !offlining) {
+		rc = -EBUSY;
+		goto unlock;
+	}
+
 	/* charge against new page */
 	charge = mem_cgroup_prepare_migration(page, &mem);
 	if (charge == -ENOMEM) {
@@ -685,7 +699,7 @@ move_newpage:
  * Return: Number of pages not migrated or error code.
  */
 int migrate_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private)
+		new_page_t get_new_page, unsigned long private, int offlining)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -705,7 +719,7 @@ int migrate_pages(struct list_head *from,
 		cond_resched();
 
 		rc = unmap_and_move(get_new_page, private,
-				page, pass > 2);
+				page, pass > 2, offlining);
 
 		switch(rc) {
 		case -ENOMEM:
@@ -801,7 +815,8 @@ static int do_move_page_to_node_array(struct mm_struct *mm,
 		if (!page)
 			goto set_status;
 
-		if (PageReserved(page))		/* Check for zero page */
+		/* Use PageReserved to check for zero page */
+		if (PageReserved(page) || PageKsm(page))
 			goto put_and_set;
 
 		pp->page = page;
@@ -838,7 +853,7 @@ set_status:
 	err = 0;
 	if (!list_empty(&pagelist))
 		err = migrate_pages(&pagelist, new_page_node,
-				(unsigned long)pm);
+				(unsigned long)pm, 0);
 
 	up_read(&mm->mmap_sem);
 	return err;
@@ -959,7 +974,7 @@ static void do_pages_stat_array(struct mm_struct *mm, unsigned long nr_pages,
 
 	err = -ENOENT;
 	/* Use PageReserved to check for zero page */
-	if (!page || PageReserved(page))
+	if (!page || PageReserved(page) || PageKsm(page))
 		goto set_status;
 
 	err = page_to_nid(page);