summaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
author: David Rientjes <rientjes@google.com>  2014-06-04 19:08:28 -0400
committer: Linus Torvalds <torvalds@linux-foundation.org>  2014-06-04 19:54:06 -0400
commite0b9daeb453e602a95ea43853dc12d385558ce1f (patch)
treefebaa53d23065ef97ab8608ca4e9ec9c47e36f2c /mm/page_alloc.c
parent35979ef3393110ff3c12c6b94552208d3bdf1a36 (diff)
mm, compaction: embed migration mode in compact_control
We're going to want to manipulate the migration mode for compaction in the page allocator, and currently compact_control's sync field is only a bool.  Currently, we only do MIGRATE_ASYNC or MIGRATE_SYNC_LIGHT compaction depending on the value of this bool.  Convert the bool to enum migrate_mode and pass the migration mode in directly.  Later, we'll want to avoid MIGRATE_SYNC_LIGHT for thp allocations in the page fault path to avoid unnecessary latency.

This also alters compaction triggered from sysfs, either for the entire system or for a node, to force MIGRATE_SYNC.

[akpm@linux-foundation.org: fix build]
[iamjoonsoo.kim@lge.com: use MIGRATE_SYNC in alloc_contig_range()]
Signed-off-by: David Rientjes <rientjes@google.com>
Suggested-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  39
1 file changed, 17 insertions(+), 22 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 027d0294413a..afb29da0576c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2217,7 +2217,7 @@ static struct page *
2217__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2217__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2218 struct zonelist *zonelist, enum zone_type high_zoneidx, 2218 struct zonelist *zonelist, enum zone_type high_zoneidx,
2219 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2219 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2220 int migratetype, bool sync_migration, 2220 int migratetype, enum migrate_mode mode,
2221 bool *contended_compaction, bool *deferred_compaction, 2221 bool *contended_compaction, bool *deferred_compaction,
2222 unsigned long *did_some_progress) 2222 unsigned long *did_some_progress)
2223{ 2223{
@@ -2231,7 +2231,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2231 2231
2232 current->flags |= PF_MEMALLOC; 2232 current->flags |= PF_MEMALLOC;
2233 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask, 2233 *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
2234 nodemask, sync_migration, 2234 nodemask, mode,
2235 contended_compaction); 2235 contended_compaction);
2236 current->flags &= ~PF_MEMALLOC; 2236 current->flags &= ~PF_MEMALLOC;
2237 2237
@@ -2264,7 +2264,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2264 * As async compaction considers a subset of pageblocks, only 2264 * As async compaction considers a subset of pageblocks, only
2265 * defer if the failure was a sync compaction failure. 2265 * defer if the failure was a sync compaction failure.
2266 */ 2266 */
2267 if (sync_migration) 2267 if (mode != MIGRATE_ASYNC)
2268 defer_compaction(preferred_zone, order); 2268 defer_compaction(preferred_zone, order);
2269 2269
2270 cond_resched(); 2270 cond_resched();
@@ -2277,9 +2277,8 @@ static inline struct page *
2277__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 2277__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2278 struct zonelist *zonelist, enum zone_type high_zoneidx, 2278 struct zonelist *zonelist, enum zone_type high_zoneidx,
2279 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone, 2279 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2280 int migratetype, bool sync_migration, 2280 int migratetype, enum migrate_mode mode, bool *contended_compaction,
2281 bool *contended_compaction, bool *deferred_compaction, 2281 bool *deferred_compaction, unsigned long *did_some_progress)
2282 unsigned long *did_some_progress)
2283{ 2282{
2284 return NULL; 2283 return NULL;
2285} 2284}
@@ -2474,7 +2473,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
2474 int alloc_flags; 2473 int alloc_flags;
2475 unsigned long pages_reclaimed = 0; 2474 unsigned long pages_reclaimed = 0;
2476 unsigned long did_some_progress; 2475 unsigned long did_some_progress;
2477 bool sync_migration = false; 2476 enum migrate_mode migration_mode = MIGRATE_ASYNC;
2478 bool deferred_compaction = false; 2477 bool deferred_compaction = false;
2479 bool contended_compaction = false; 2478 bool contended_compaction = false;
2480 2479
@@ -2568,17 +2567,15 @@ rebalance:
2568 * Try direct compaction. The first pass is asynchronous. Subsequent 2567 * Try direct compaction. The first pass is asynchronous. Subsequent
2569 * attempts after direct reclaim are synchronous 2568 * attempts after direct reclaim are synchronous
2570 */ 2569 */
2571 page = __alloc_pages_direct_compact(gfp_mask, order, 2570 page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
2572 zonelist, high_zoneidx, 2571 high_zoneidx, nodemask, alloc_flags,
2573 nodemask, 2572 preferred_zone, migratetype,
2574 alloc_flags, preferred_zone, 2573 migration_mode, &contended_compaction,
2575 migratetype, sync_migration,
2576 &contended_compaction,
2577 &deferred_compaction, 2574 &deferred_compaction,
2578 &did_some_progress); 2575 &did_some_progress);
2579 if (page) 2576 if (page)
2580 goto got_pg; 2577 goto got_pg;
2581 sync_migration = true; 2578 migration_mode = MIGRATE_SYNC_LIGHT;
2582 2579
2583 /* 2580 /*
2584 * If compaction is deferred for high-order allocations, it is because 2581 * If compaction is deferred for high-order allocations, it is because
@@ -2653,12 +2650,10 @@ rebalance:
2653 * direct reclaim and reclaim/compaction depends on compaction 2650 * direct reclaim and reclaim/compaction depends on compaction
2654 * being called after reclaim so call directly if necessary 2651 * being called after reclaim so call directly if necessary
2655 */ 2652 */
2656 page = __alloc_pages_direct_compact(gfp_mask, order, 2653 page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
2657 zonelist, high_zoneidx, 2654 high_zoneidx, nodemask, alloc_flags,
2658 nodemask, 2655 preferred_zone, migratetype,
2659 alloc_flags, preferred_zone, 2656 migration_mode, &contended_compaction,
2660 migratetype, sync_migration,
2661 &contended_compaction,
2662 &deferred_compaction, 2657 &deferred_compaction,
2663 &did_some_progress); 2658 &did_some_progress);
2664 if (page) 2659 if (page)
@@ -6218,7 +6213,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
6218 cc->nr_migratepages -= nr_reclaimed; 6213 cc->nr_migratepages -= nr_reclaimed;
6219 6214
6220 ret = migrate_pages(&cc->migratepages, alloc_migrate_target, 6215 ret = migrate_pages(&cc->migratepages, alloc_migrate_target,
6221 NULL, 0, MIGRATE_SYNC, MR_CMA); 6216 NULL, 0, cc->mode, MR_CMA);
6222 } 6217 }
6223 if (ret < 0) { 6218 if (ret < 0) {
6224 putback_movable_pages(&cc->migratepages); 6219 putback_movable_pages(&cc->migratepages);
@@ -6257,7 +6252,7 @@ int alloc_contig_range(unsigned long start, unsigned long end,
6257 .nr_migratepages = 0, 6252 .nr_migratepages = 0,
6258 .order = -1, 6253 .order = -1,
6259 .zone = page_zone(pfn_to_page(start)), 6254 .zone = page_zone(pfn_to_page(start)),
6260 .sync = true, 6255 .mode = MIGRATE_SYNC,
6261 .ignore_skip_hint = true, 6256 .ignore_skip_hint = true,
6262 }; 6257 };
6263 INIT_LIST_HEAD(&cc.migratepages); 6258 INIT_LIST_HEAD(&cc.migratepages);