summaryrefslogtreecommitdiffstats
path: root/mm/migrate.c
diff options
context:
space:
mode:
authorNaoya Horiguchi <n-horiguchi@ah.jp.nec.com>2018-04-20 17:55:45 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-04-20 20:18:35 -0400
commite71769ae52609ea0044a9901709042e5634c2306 (patch)
treefb590bf62ea72a883da02c555b1833c2db1d45e8 /mm/migrate.c
parent2e898e4c0a3897ccd434adac5abb8330194f527b (diff)
mm: enable thp migration for shmem thp
My testing for the latest kernel supporting thp migration showed an infinite loop in offlining the memory block that is filled with shmem thps. We can get out of the loop with a signal, but kernel should return with failure in this case. What happens in the loop is that scan_movable_pages() repeats returning the same pfn without any progress. That's because page migration always fails for shmem thps. In memory offline code, memory blocks containing unmovable pages should be prevented from being offline targets by has_unmovable_pages() inside start_isolate_page_range(). So it's possible to change migratability for non-anonymous thps to avoid the issue, but it introduces more complex and thp-specific handling in migration code, so it might not good. So this patch is suggesting to fix the issue by enabling thp migration for shmem thp. Both of anon/shmem thp are migratable so we don't need precheck about the type of thps. Link: http://lkml.kernel.org/r/20180406030706.GA2434@hori1.linux.bs1.fc.nec.co.jp Fixes: commit 72b39cfc4d75 ("mm, memory_hotplug: do not fail offlining too early") Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Cc: Zi Yan <zi.yan@sent.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Michal Hocko <mhocko@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--mm/migrate.c19
1 file changed, 16 insertions, 3 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 70ef794cccae..568433023831 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -472,7 +472,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	pslot = radix_tree_lookup_slot(&mapping->i_pages,
 					page_index(page));

-	expected_count += 1 + page_has_private(page);
+	expected_count += hpage_nr_pages(page) + page_has_private(page);
 	if (page_count(page) != expected_count ||
 		radix_tree_deref_slot_protected(pslot,
 					&mapping->i_pages.xa_lock) != page) {
@@ -505,7 +505,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	 */
 	newpage->index = page->index;
 	newpage->mapping = page->mapping;
-	get_page(newpage);	/* add cache reference */
+	page_ref_add(newpage, hpage_nr_pages(page)); /* add cache reference */
 	if (PageSwapBacked(page)) {
 		__SetPageSwapBacked(newpage);
 		if (PageSwapCache(page)) {
@@ -524,13 +524,26 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	}

-	radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	if (PageTransHuge(page)) {
+		int i;
+		int index = page_index(page);
+
+		for (i = 0; i < HPAGE_PMD_NR; i++) {
+			pslot = radix_tree_lookup_slot(&mapping->i_pages,
+						       index + i);
+			radix_tree_replace_slot(&mapping->i_pages, pslot,
+						newpage + i);
+		}
+	} else {
+		radix_tree_replace_slot(&mapping->i_pages, pslot, newpage);
+	}

 	/*
 	 * Drop cache reference from old page by unfreezing
 	 * to one less reference.
 	 * We know this isn't the last reference.
 	 */
-	page_ref_unfreeze(page, expected_count - 1);
+	page_ref_unfreeze(page, expected_count - hpage_nr_pages(page));

 	xa_unlock(&mapping->i_pages);
 	/* Leave irq disabled to prevent preemption while updating stats */