path: root/mm/migrate.c
author	Hugh Dickins <hughd@google.com>	2015-11-05 21:50:02 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-11-05 22:34:48 -0500
commit	cf4b769abb8aef01f887543cb8308c0d8671367c (patch)
tree	9b9c0f65cd1b83cadf99581c944b513f20bcd8e8	/mm/migrate.c
parent	470f119f012068e5d94458c98dc4eec102f88cd3 (diff)
mm: page migration avoid touching newpage until no going back
We have had trouble in the past from the way in which page migration's
newpage is initialized in dribs and drabs - see commit 8bdd63809160 ("mm:
fix direct reclaim writeback regression") which proposed a cleanup.

We have no actual problem now, but I think the procedure would be clearer
(and alternative get_new_page pools safer to implement) if we assert that
newpage is not touched until we are sure that it's going to be used -
except for taking the trylock on it in __unmap_and_move().

So shift the early initializations from move_to_new_page() into
migrate_page_move_mapping(), mapping and NULL-mapping paths.  Similarly
migrate_huge_page_move_mapping(), but its NULL-mapping path can just be
deleted: you cannot reach hugetlbfs_migrate_page() with a NULL mapping.

Adjust stages 3 to 8 in the Documentation file accordingly.

Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Davidlohr Bueso <dave@stgolabs.net>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Sasha Levin <sasha.levin@oracle.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
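To make the intent concrete, here is a rough sketch of how the reordered
NULL-mapping (anonymous page) branch of migrate_page_move_mapping() reads
once the patch below is applied. It is not a verbatim excerpt: the enclosing
if (!mapping) test and the comment on the -EAGAIN return are added here for
context and are not part of the diff hunks.

	if (!mapping) {
		/* Anonymous page without mapping */
		if (page_count(page) != expected_count)
			return -EAGAIN;	/* newpage still untouched */

		/* No turning back from here */
		set_page_memcg(newpage, page_memcg(page));
		newpage->index = page->index;
		newpage->mapping = page->mapping;
		if (PageSwapBacked(page))
			SetPageSwapBacked(newpage);

		return MIGRATEPAGE_SUCCESS;
	}

The point of the reordering is that a failed migration returns -EAGAIN
before newpage has gained any state, which is what makes alternative
get_new_page pools safer to implement.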
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--	mm/migrate.c	49
1 file changed, 21 insertions(+), 28 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 08a7b6c4c266..3067e40e7be9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -320,6 +320,14 @@ int migrate_page_move_mapping(struct address_space *mapping,
 		/* Anonymous page without mapping */
 		if (page_count(page) != expected_count)
 			return -EAGAIN;
+
+		/* No turning back from here */
+		set_page_memcg(newpage, page_memcg(page));
+		newpage->index = page->index;
+		newpage->mapping = page->mapping;
+		if (PageSwapBacked(page))
+			SetPageSwapBacked(newpage);
+
 		return MIGRATEPAGE_SUCCESS;
 	}
 
@@ -355,8 +363,15 @@ int migrate_page_move_mapping(struct address_space *mapping,
 	}
 
 	/*
-	 * Now we know that no one else is looking at the page.
+	 * Now we know that no one else is looking at the page:
+	 * no turning back from here.
 	 */
+	set_page_memcg(newpage, page_memcg(page));
+	newpage->index = page->index;
+	newpage->mapping = page->mapping;
+	if (PageSwapBacked(page))
+		SetPageSwapBacked(newpage);
+
 	get_page(newpage);	/* add cache reference */
 	if (PageSwapCache(page)) {
 		SetPageSwapCache(newpage);
@@ -403,12 +418,6 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 	int expected_count;
 	void **pslot;
 
-	if (!mapping) {
-		if (page_count(page) != 1)
-			return -EAGAIN;
-		return MIGRATEPAGE_SUCCESS;
-	}
-
 	spin_lock_irq(&mapping->tree_lock);
 
 	pslot = radix_tree_lookup_slot(&mapping->page_tree,
@@ -426,6 +435,9 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
 		return -EAGAIN;
 	}
 
+	set_page_memcg(newpage, page_memcg(page));
+	newpage->index = page->index;
+	newpage->mapping = page->mapping;
 	get_page(newpage);
 
 	radix_tree_replace_slot(pslot, newpage);
@@ -730,21 +742,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 	VM_BUG_ON_PAGE(!PageLocked(page), page);
 	VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
 
-	/* Prepare mapping for the new page.*/
-	newpage->index = page->index;
-	newpage->mapping = page->mapping;
-	if (PageSwapBacked(page))
-		SetPageSwapBacked(newpage);
-
-	/*
-	 * Indirectly called below, migrate_page_copy() copies PG_dirty and thus
-	 * needs newpage's memcg set to transfer memcg dirty page accounting.
-	 * So perform memcg migration in two steps:
-	 * 1. set newpage->mem_cgroup (here)
-	 * 2. clear page->mem_cgroup (below)
-	 */
-	set_page_memcg(newpage, page_memcg(page));
-
 	mapping = page_mapping(page);
 	if (!mapping)
 		rc = migrate_page(mapping, newpage, page, mode);
@@ -767,9 +764,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 		set_page_memcg(page, NULL);
 		if (!PageAnon(page))
 			page->mapping = NULL;
-	} else {
-		set_page_memcg(newpage, NULL);
-		newpage->mapping = NULL;
 	}
 	return rc;
 }
@@ -971,10 +965,9 @@ out:
 	 * it. Otherwise, putback_lru_page() will drop the reference grabbed
 	 * during isolation.
 	 */
-	if (put_new_page) {
-		ClearPageSwapBacked(newpage);
+	if (put_new_page)
 		put_new_page(newpage, private);
-	} else if (unlikely(__is_movable_balloon_page(newpage))) {
+	else if (unlikely(__is_movable_balloon_page(newpage))) {
 		/* drop our reference, page already in the balloon */
 		put_page(newpage);
 	} else