summaryrefslogtreecommitdiffstats
path: root/mm/migrate.c
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2016-03-15 17:57:19 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-03-15 19:55:16 -0400
commit6a93ca8fde3cfce0f00f02281139a377c83e8d8c (patch)
tree63846308d4ab27360afdb31330cb8dbf4251cbfe /mm/migrate.c
parent23047a96d7cfcfca1a6d026ecaec526ea4803e9e (diff)
mm: migrate: do not touch page->mem_cgroup of live pages
Changing a page's memcg association complicates dealing with the page, so we want to limit this as much as possible. Page migration e.g. does not have to do that. Just like page cache replacement, it can forcibly charge a replacement page, and then uncharge the old page when it gets freed. Temporarily overcharging the cgroup by a single page is not an issue in practice, and charging is so cheap nowadays that this is much preferrable to the headache of messing with live pages. The only place that still changes the page->mem_cgroup binding of live pages is when pages move along with a task to another cgroup. But that path isolates the page from the LRU, takes the page lock, and the move lock (lock_page_memcg()). That means page->mem_cgroup is always stable in callers that have the page isolated from the LRU or locked. Lighter unlocked paths, like writeback accounting, can use lock_page_memcg(). [akpm@linux-foundation.org: fix build] [vdavydov@virtuozzo.com: fix lockdep splat] Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Acked-by: Vladimir Davydov <vdavydov@virtuozzo.com> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Michal Hocko <mhocko@suse.cz> Cc: Greg Thelen <gthelen@google.com> Cc: Hugh Dickins <hughd@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--mm/migrate.c15
1 file changed, 9 insertions, 6 deletions
diff --git a/mm/migrate.c b/mm/migrate.c
index 432ecd0172cd..848327d4a7ed 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -326,12 +326,13 @@ int migrate_page_move_mapping(struct address_space *mapping,
326 return -EAGAIN; 326 return -EAGAIN;
327 327
328 /* No turning back from here */ 328 /* No turning back from here */
329 set_page_memcg(newpage, page_memcg(page));
330 newpage->index = page->index; 329 newpage->index = page->index;
331 newpage->mapping = page->mapping; 330 newpage->mapping = page->mapping;
332 if (PageSwapBacked(page)) 331 if (PageSwapBacked(page))
333 SetPageSwapBacked(newpage); 332 SetPageSwapBacked(newpage);
334 333
334 mem_cgroup_migrate(page, newpage);
335
335 return MIGRATEPAGE_SUCCESS; 336 return MIGRATEPAGE_SUCCESS;
336 } 337 }
337 338
@@ -373,7 +374,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
373 * Now we know that no one else is looking at the page: 374 * Now we know that no one else is looking at the page:
374 * no turning back from here. 375 * no turning back from here.
375 */ 376 */
376 set_page_memcg(newpage, page_memcg(page));
377 newpage->index = page->index; 377 newpage->index = page->index;
378 newpage->mapping = page->mapping; 378 newpage->mapping = page->mapping;
379 if (PageSwapBacked(page)) 379 if (PageSwapBacked(page))
@@ -428,6 +428,8 @@ int migrate_page_move_mapping(struct address_space *mapping,
428 } 428 }
429 local_irq_enable(); 429 local_irq_enable();
430 430
431 mem_cgroup_migrate(page, newpage);
432
431 return MIGRATEPAGE_SUCCESS; 433 return MIGRATEPAGE_SUCCESS;
432} 434}
433 435
@@ -458,9 +460,9 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
458 return -EAGAIN; 460 return -EAGAIN;
459 } 461 }
460 462
461 set_page_memcg(newpage, page_memcg(page));
462 newpage->index = page->index; 463 newpage->index = page->index;
463 newpage->mapping = page->mapping; 464 newpage->mapping = page->mapping;
465
464 get_page(newpage); 466 get_page(newpage);
465 467
466 radix_tree_replace_slot(pslot, newpage); 468 radix_tree_replace_slot(pslot, newpage);
@@ -468,6 +470,9 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
468 page_unfreeze_refs(page, expected_count - 1); 470 page_unfreeze_refs(page, expected_count - 1);
469 471
470 spin_unlock_irq(&mapping->tree_lock); 472 spin_unlock_irq(&mapping->tree_lock);
473
474 mem_cgroup_migrate(page, newpage);
475
471 return MIGRATEPAGE_SUCCESS; 476 return MIGRATEPAGE_SUCCESS;
472} 477}
473 478
@@ -775,7 +780,6 @@ static int move_to_new_page(struct page *newpage, struct page *page,
775 * page is freed; but stats require that PageAnon be left as PageAnon. 780 * page is freed; but stats require that PageAnon be left as PageAnon.
776 */ 781 */
777 if (rc == MIGRATEPAGE_SUCCESS) { 782 if (rc == MIGRATEPAGE_SUCCESS) {
778 set_page_memcg(page, NULL);
779 if (!PageAnon(page)) 783 if (!PageAnon(page))
780 page->mapping = NULL; 784 page->mapping = NULL;
781 } 785 }
@@ -1842,8 +1846,7 @@ fail_putback:
1842 } 1846 }
1843 1847
1844 mlock_migrate_page(new_page, page); 1848 mlock_migrate_page(new_page, page);
1845 set_page_memcg(new_page, page_memcg(page)); 1849 mem_cgroup_migrate(page, new_page);
1846 set_page_memcg(page, NULL);
1847 page_remove_rmap(page, true); 1850 page_remove_rmap(page, true);
1848 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED); 1851 set_page_owner_migrate_reason(new_page, MR_NUMA_MISPLACED);
1849 1852