diff options
author | David Rientjes <rientjes@google.com> | 2014-06-04 19:08:28 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-06-04 19:54:06 -0400 |
commit | e0b9daeb453e602a95ea43853dc12d385558ce1f (patch) | |
tree | febaa53d23065ef97ab8608ca4e9ec9c47e36f2c /mm/compaction.c | |
parent | 35979ef3393110ff3c12c6b94552208d3bdf1a36 (diff) |
mm, compaction: embed migration mode in compact_control
We're going to want to manipulate the migration mode for compaction in the
page allocator, and currently compact_control's sync field is only a bool.
Currently, we only do MIGRATE_ASYNC or MIGRATE_SYNC_LIGHT compaction
depending on the value of this bool. Convert the bool to enum
migrate_mode and pass the migration mode in directly. Later, we'll want
to avoid MIGRATE_SYNC_LIGHT for thp allocations in the pagefault patch to
avoid unnecessary latency.
This also alters compaction triggered from sysfs, either for the entire
system or for a node, to force MIGRATE_SYNC.
[akpm@linux-foundation.org: fix build]
[iamjoonsoo.kim@lge.com: use MIGRATE_SYNC in alloc_contig_range()]
Signed-off-by: David Rientjes <rientjes@google.com>
Suggested-by: Mel Gorman <mgorman@suse.de>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Greg Thelen <gthelen@google.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r-- | mm/compaction.c | 36 |
1 file changed, 19 insertions, 17 deletions
diff --git a/mm/compaction.c b/mm/compaction.c index 70c0f8cda33f..217a6ad9a20e 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -161,7 +161,8 @@ static void update_pageblock_skip(struct compact_control *cc, | |||
161 | return; | 161 | return; |
162 | if (pfn > zone->compact_cached_migrate_pfn[0]) | 162 | if (pfn > zone->compact_cached_migrate_pfn[0]) |
163 | zone->compact_cached_migrate_pfn[0] = pfn; | 163 | zone->compact_cached_migrate_pfn[0] = pfn; |
164 | if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1]) | 164 | if (cc->mode != MIGRATE_ASYNC && |
165 | pfn > zone->compact_cached_migrate_pfn[1]) | ||
165 | zone->compact_cached_migrate_pfn[1] = pfn; | 166 | zone->compact_cached_migrate_pfn[1] = pfn; |
166 | } else { | 167 | } else { |
167 | if (cc->finished_update_free) | 168 | if (cc->finished_update_free) |
@@ -208,7 +209,7 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags, | |||
208 | } | 209 | } |
209 | 210 | ||
210 | /* async aborts if taking too long or contended */ | 211 | /* async aborts if taking too long or contended */ |
211 | if (!cc->sync) { | 212 | if (cc->mode == MIGRATE_ASYNC) { |
212 | cc->contended = true; | 213 | cc->contended = true; |
213 | return false; | 214 | return false; |
214 | } | 215 | } |
@@ -473,7 +474,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
473 | bool locked = false; | 474 | bool locked = false; |
474 | struct page *page = NULL, *valid_page = NULL; | 475 | struct page *page = NULL, *valid_page = NULL; |
475 | bool set_unsuitable = true; | 476 | bool set_unsuitable = true; |
476 | const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) | | 477 | const isolate_mode_t mode = (cc->mode == MIGRATE_ASYNC ? |
478 | ISOLATE_ASYNC_MIGRATE : 0) | | ||
477 | (unevictable ? ISOLATE_UNEVICTABLE : 0); | 479 | (unevictable ? ISOLATE_UNEVICTABLE : 0); |
478 | 480 | ||
479 | /* | 481 | /* |
@@ -483,7 +485,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
483 | */ | 485 | */ |
484 | while (unlikely(too_many_isolated(zone))) { | 486 | while (unlikely(too_many_isolated(zone))) { |
485 | /* async migration should just abort */ | 487 | /* async migration should just abort */ |
486 | if (!cc->sync) | 488 | if (cc->mode == MIGRATE_ASYNC) |
487 | return 0; | 489 | return 0; |
488 | 490 | ||
489 | congestion_wait(BLK_RW_ASYNC, HZ/10); | 491 | congestion_wait(BLK_RW_ASYNC, HZ/10); |
@@ -548,7 +550,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc, | |||
548 | * the minimum amount of work satisfies the allocation | 550 | * the minimum amount of work satisfies the allocation |
549 | */ | 551 | */ |
550 | mt = get_pageblock_migratetype(page); | 552 | mt = get_pageblock_migratetype(page); |
551 | if (!cc->sync && !migrate_async_suitable(mt)) { | 553 | if (cc->mode == MIGRATE_ASYNC && |
554 | !migrate_async_suitable(mt)) { | ||
552 | set_unsuitable = false; | 555 | set_unsuitable = false; |
553 | goto next_pageblock; | 556 | goto next_pageblock; |
554 | } | 557 | } |
@@ -981,6 +984,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) | |||
981 | int ret; | 984 | int ret; |
982 | unsigned long start_pfn = zone->zone_start_pfn; | 985 | unsigned long start_pfn = zone->zone_start_pfn; |
983 | unsigned long end_pfn = zone_end_pfn(zone); | 986 | unsigned long end_pfn = zone_end_pfn(zone); |
987 | const bool sync = cc->mode != MIGRATE_ASYNC; | ||
984 | 988 | ||
985 | ret = compaction_suitable(zone, cc->order); | 989 | ret = compaction_suitable(zone, cc->order); |
986 | switch (ret) { | 990 | switch (ret) { |
@@ -1006,7 +1010,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) | |||
1006 | * information on where the scanners should start but check that it | 1010 | * information on where the scanners should start but check that it |
1007 | * is initialised by ensuring the values are within zone boundaries. | 1011 | * is initialised by ensuring the values are within zone boundaries. |
1008 | */ | 1012 | */ |
1009 | cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync]; | 1013 | cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync]; |
1010 | cc->free_pfn = zone->compact_cached_free_pfn; | 1014 | cc->free_pfn = zone->compact_cached_free_pfn; |
1011 | if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { | 1015 | if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) { |
1012 | cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); | 1016 | cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1); |
@@ -1040,8 +1044,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc) | |||
1040 | 1044 | ||
1041 | nr_migrate = cc->nr_migratepages; | 1045 | nr_migrate = cc->nr_migratepages; |
1042 | err = migrate_pages(&cc->migratepages, compaction_alloc, | 1046 | err = migrate_pages(&cc->migratepages, compaction_alloc, |
1043 | compaction_free, (unsigned long)cc, | 1047 | compaction_free, (unsigned long)cc, cc->mode, |
1044 | cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC, | ||
1045 | MR_COMPACTION); | 1048 | MR_COMPACTION); |
1046 | update_nr_listpages(cc); | 1049 | update_nr_listpages(cc); |
1047 | nr_remaining = cc->nr_migratepages; | 1050 | nr_remaining = cc->nr_migratepages; |
@@ -1074,9 +1077,8 @@ out: | |||
1074 | return ret; | 1077 | return ret; |
1075 | } | 1078 | } |
1076 | 1079 | ||
1077 | static unsigned long compact_zone_order(struct zone *zone, | 1080 | static unsigned long compact_zone_order(struct zone *zone, int order, |
1078 | int order, gfp_t gfp_mask, | 1081 | gfp_t gfp_mask, enum migrate_mode mode, bool *contended) |
1079 | bool sync, bool *contended) | ||
1080 | { | 1082 | { |
1081 | unsigned long ret; | 1083 | unsigned long ret; |
1082 | struct compact_control cc = { | 1084 | struct compact_control cc = { |
@@ -1085,7 +1087,7 @@ static unsigned long compact_zone_order(struct zone *zone, | |||
1085 | .order = order, | 1087 | .order = order, |
1086 | .migratetype = allocflags_to_migratetype(gfp_mask), | 1088 | .migratetype = allocflags_to_migratetype(gfp_mask), |
1087 | .zone = zone, | 1089 | .zone = zone, |
1088 | .sync = sync, | 1090 | .mode = mode, |
1089 | }; | 1091 | }; |
1090 | INIT_LIST_HEAD(&cc.freepages); | 1092 | INIT_LIST_HEAD(&cc.freepages); |
1091 | INIT_LIST_HEAD(&cc.migratepages); | 1093 | INIT_LIST_HEAD(&cc.migratepages); |
@@ -1107,7 +1109,7 @@ int sysctl_extfrag_threshold = 500; | |||
1107 | * @order: The order of the current allocation | 1109 | * @order: The order of the current allocation |
1108 | * @gfp_mask: The GFP mask of the current allocation | 1110 | * @gfp_mask: The GFP mask of the current allocation |
1109 | * @nodemask: The allowed nodes to allocate from | 1111 | * @nodemask: The allowed nodes to allocate from |
1110 | * @sync: Whether migration is synchronous or not | 1112 | * @mode: The migration mode for async, sync light, or sync migration |
1111 | * @contended: Return value that is true if compaction was aborted due to lock contention | 1113 | * @contended: Return value that is true if compaction was aborted due to lock contention |
1112 | * @page: Optionally capture a free page of the requested order during compaction | 1114 | * @page: Optionally capture a free page of the requested order during compaction |
1113 | * | 1115 | * |
@@ -1115,7 +1117,7 @@ int sysctl_extfrag_threshold = 500; | |||
1115 | */ | 1117 | */ |
1116 | unsigned long try_to_compact_pages(struct zonelist *zonelist, | 1118 | unsigned long try_to_compact_pages(struct zonelist *zonelist, |
1117 | int order, gfp_t gfp_mask, nodemask_t *nodemask, | 1119 | int order, gfp_t gfp_mask, nodemask_t *nodemask, |
1118 | bool sync, bool *contended) | 1120 | enum migrate_mode mode, bool *contended) |
1119 | { | 1121 | { |
1120 | enum zone_type high_zoneidx = gfp_zone(gfp_mask); | 1122 | enum zone_type high_zoneidx = gfp_zone(gfp_mask); |
1121 | int may_enter_fs = gfp_mask & __GFP_FS; | 1123 | int may_enter_fs = gfp_mask & __GFP_FS; |
@@ -1140,7 +1142,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist, | |||
1140 | nodemask) { | 1142 | nodemask) { |
1141 | int status; | 1143 | int status; |
1142 | 1144 | ||
1143 | status = compact_zone_order(zone, order, gfp_mask, sync, | 1145 | status = compact_zone_order(zone, order, gfp_mask, mode, |
1144 | contended); | 1146 | contended); |
1145 | rc = max(status, rc); | 1147 | rc = max(status, rc); |
1146 | 1148 | ||
@@ -1190,7 +1192,7 @@ void compact_pgdat(pg_data_t *pgdat, int order) | |||
1190 | { | 1192 | { |
1191 | struct compact_control cc = { | 1193 | struct compact_control cc = { |
1192 | .order = order, | 1194 | .order = order, |
1193 | .sync = false, | 1195 | .mode = MIGRATE_ASYNC, |
1194 | }; | 1196 | }; |
1195 | 1197 | ||
1196 | if (!order) | 1198 | if (!order) |
@@ -1203,7 +1205,7 @@ static void compact_node(int nid) | |||
1203 | { | 1205 | { |
1204 | struct compact_control cc = { | 1206 | struct compact_control cc = { |
1205 | .order = -1, | 1207 | .order = -1, |
1206 | .sync = true, | 1208 | .mode = MIGRATE_SYNC, |
1207 | .ignore_skip_hint = true, | 1209 | .ignore_skip_hint = true, |
1208 | }; | 1210 | }; |
1209 | 1211 | ||