author    Mel Gorman <mgorman@suse.de>    2013-02-22 19:34:27 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2013-02-23 20:50:16 -0500
commit    3abef4e6c23feef4aa9ab161ae138d6d39ae69f3 (patch)
tree      af0648448caa7715fca89ba78c9bca606a1d9e74 /mm/migrate.c
parent    34f0315adb58af3b01f59d05b2bce267474e71cb (diff)
mm: numa: take THP into account when migrating pages for NUMA balancing
Wanpeng Li pointed out that numamigrate_isolate_page() assumes that only a single base page is being migrated when in fact it can also be handling a THP. The consequence is that a migration will be attempted when the target node is nearly full and will then fail later. It is unlikely to be user-visible but it should be fixed. While we are there, migrate_balanced_pgdat() should treat nr_migrate_pages as an unsigned long as it is compared against a watermark.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Suggested-by: Wanpeng Li <liwanp@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Simon Jeons <simon.jeons@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
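For illustration only, the following is a minimal userspace sketch (not kernel code) of the arithmetic the fix relies on: a page is backed by 1UL << compound_order(page) base pages, which is 1 for an ordinary 4K page and 512 for a 2MB THP on x86-64 (order 9). Passing a literal 1 therefore under-reports the demand placed on the target node by a factor of 512 when a THP is migrated. The order_of() helper and the hard-coded order are assumptions made for this example, not part of the kernel API.

    #include <stdio.h>

    /* Hypothetical stand-in for compound_order(): 0 for a base page,
     * 9 (HPAGE_PMD_ORDER on x86-64 with 4K pages) for a 2MB THP. */
    static unsigned int order_of(int is_thp)
    {
            return is_thp ? 9 : 0;
    }

    int main(void)
    {
            /* Before the fix: demand reported to migrate_balanced_pgdat() */
            unsigned long old_demand = 1;

            /* After the fix: demand scales with the compound page size */
            unsigned long base_page = 1UL << order_of(0);   /* 1 page    */
            unsigned long thp       = 1UL << order_of(1);   /* 512 pages */

            printf("old demand: %lu page(s)\n", old_demand);
            printf("base page : %lu page(s)\n", base_page);
            printf("2MB THP   : %lu page(s)\n", thp);
            return 0;
    }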
Diffstat (limited to 'mm/migrate.c')
-rw-r--r--    mm/migrate.c    6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index 2fd8b4af4744..77f4e70df24d 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1461,7 +1461,7 @@ int migrate_vmas(struct mm_struct *mm, const nodemask_t *to,
  * pages. Currently it only checks the watermarks which crude
  */
 static bool migrate_balanced_pgdat(struct pglist_data *pgdat,
-					int nr_migrate_pages)
+					unsigned long nr_migrate_pages)
 {
 	int z;
 	for (z = pgdat->nr_zones - 1; z >= 0; z--) {
@@ -1559,8 +1559,10 @@ int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page)
 {
 	int ret = 0;
 
+	VM_BUG_ON(compound_order(page) && !PageTransHuge(page));
+
 	/* Avoid migrating to a node that is nearly full */
-	if (migrate_balanced_pgdat(pgdat, 1)) {
+	if (migrate_balanced_pgdat(pgdat, 1UL << compound_order(page))) {
 		int page_lru;
 
 		if (isolate_lru_page(page)) {
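The body of migrate_balanced_pgdat() is not part of this hunk. As a rough, self-contained model of why the demand value matters, the check amounts to asking whether the target node would still sit above its high watermark after receiving nr_migrate_pages pages; watermarks are unsigned long page counts, which is also why the parameter type is widened. The names node_has_room, free_pages and high_wmark below are assumptions for illustration, not the kernel implementation.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified model of the "is this node nearly full?" test. */
    static bool node_has_room(unsigned long free_pages,
                              unsigned long high_wmark,
                              unsigned long nr_migrate_pages)
    {
            return free_pages > high_wmark + nr_migrate_pages;
    }

    int main(void)
    {
            unsigned long free_pages = 600, high_wmark = 256;

            /* A single base page still fits ...                           */
            printf("base page: %d\n", node_has_room(free_pages, high_wmark, 1));
            /* ... but a 512-page THP would push the node below its
             * watermark, so the migration is rejected up front instead of
             * being attempted and failing later.                          */
            printf("2MB THP  : %d\n", node_has_room(free_pages, high_wmark, 512));
            return 0;
    }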