author		Mel Gorman <mel@csn.ul.ie>	2011-01-13 18:45:57 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:34 -0500
commit		77f1fe6b08b13a87391549c8a820ddc817b6f50e (patch)
tree		720865bd0994da3787b6f37d33b2ee4c26a2de6c /mm/compaction.c
parent		3e7d344970673c5334cf7b5bb27c8c0942b06126 (diff)
mm: migration: allow migration to operate asynchronously and avoid synchronous compaction in the fast path
Migration synchronously waits for writeback if the initial passes fail.
Callers of memory compaction do not necessarily want this behaviour if the
caller is latency sensitive or expects that synchronous migration is not
going to have a significantly better success rate.

This patch adds a sync parameter to migrate_pages() allowing the caller to
indicate if wait_on_page_writeback() is allowed within migration or not.
For reclaim/compaction, try_to_compact_pages() is first called
asynchronously, direct reclaim runs, and then try_to_compact_pages() is
called synchronously as there is a greater expectation that it'll succeed.

[akpm@linux-foundation.org: build/merge fix]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
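[Editor's note: the wait_on_page_writeback() gate itself lands in mm/migrate.c,
which this view (limited to mm/compaction.c) does not show. A minimal sketch of
its shape inside unmap_and_move(), based on this patch series but not a verbatim
excerpt; the uncharge label is the error path of that function:]

	/*
	 * Sketch: a page under writeback is only waited on when the caller
	 * asked for synchronous migration. Asynchronous callers
	 * (sync == false) skip the page instead of blocking on it.
	 */
	if (PageWriteback(page)) {
		if (!force || !sync)
			goto uncharge;
		wait_on_page_writeback(page);
	}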
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	14
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 8fe917ec7c1..47fca106934 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -33,6 +33,7 @@ struct compact_control {
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
 
 	/* Account for isolated anon and file pages */
 	unsigned long nr_anon;
@@ -455,7 +456,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 		nr_migrate = cc->nr_migratepages;
 		migrate_pages(&cc->migratepages, compaction_alloc,
-				(unsigned long)cc, 0);
+				(unsigned long)cc, 0,
+				cc->sync);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
 
@@ -482,7 +484,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 }
 
 unsigned long compact_zone_order(struct zone *zone,
-				 int order, gfp_t gfp_mask)
+				 int order, gfp_t gfp_mask,
+				 bool sync)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -490,6 +493,7 @@ unsigned long compact_zone_order(struct zone *zone,
 		.order = order,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
+		.sync = sync,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -505,11 +509,13 @@ int sysctl_extfrag_threshold = 500;
  * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
  * @nodemask: The allowed nodes to allocate from
+ * @sync: Whether migration is synchronous or not
  *
  * This is the main entry point for direct page compaction.
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
@@ -533,7 +539,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 			nodemask) {
 		int status;
 
-		status = compact_zone_order(zone, order, gfp_mask);
+		status = compact_zone_order(zone, order, gfp_mask, sync);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
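[Editor's note: the asynchronous-then-synchronous ordering the commit message
describes lives in the allocator slow path in mm/page_alloc.c, outside this
diffstat-limited view. A hedged sketch of that ordering follows; the helper
and variable names match the same patch but the surrounding code is elided,
not reproduced verbatim:]

	/* In __alloc_pages_slowpath(): start with non-blocking migration. */
	bool sync_migration = false;

	/*
	 * First compaction attempt is asynchronous: migration will not
	 * call wait_on_page_writeback(), so a latency-sensitive
	 * allocation does not stall on pages under writeback.
	 */
	page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
					high_zoneidx, nodemask, alloc_flags,
					preferred_zone, migratetype,
					&did_some_progress, sync_migration);
	if (page)
		goto got_pg;
	sync_migration = true;

	/*
	 * ... direct reclaim runs here; a later compaction retry passes
	 * sync_migration == true, trading latency for the higher success
	 * rate expected once reclaim has made progress.
	 */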