author		Vlastimil Babka <vbabka@suse.cz>	2014-12-10 18:43:34 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-12-10 20:41:06 -0500
commit		fdaf7f5c40f3d20690c236298418acf72eb664b5
tree		fe409c2e2da6fe339b40b65d763613e020c59e7a
parent		6bace090a25455cb1dffaa9ab4aabc36dbd44d4a
mm, compaction: more focused lru and pcplists draining
The goal of memory compaction is to create high-order freepages through
page migration. Page migration, however, puts pages on the per-cpu lru_add
cache, which is later flushed to the per-cpu pcplists, and only after the
pcplists are drained can the pages actually merge. That draining can happen
implicitly, when the per-cpu caches fill up through further freeing, or
explicitly.
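To make the two-level caching concrete, here is a deliberately simplified
userspace toy model (an illustration only, not the kernel's data structures):
pages freed by "migration" first land in a per-cpu cache, and a high-order
block only becomes mergeable once that cache is drained to the global free
list.

/*
 * Toy model (illustration only, not the kernel's implementation): freed
 * pages first sit in a per-cpu cache and only count as buddy-mergeable
 * once the cache is drained to the global free list.
 */
#include <stdbool.h>
#include <stdio.h>

#define NPAGES 8	/* pretend pfns 0..7 form one order-3 block */

static bool pcp_cached[NPAGES];	/* freed, but stuck in the per-cpu cache */
static bool buddy_free[NPAGES];	/* freed and visible to the buddy allocator */

static void free_to_pcp(int pfn)
{
	pcp_cached[pfn] = true;
}

static void drain_pcp(void)
{
	for (int pfn = 0; pfn < NPAGES; pfn++) {
		if (pcp_cached[pfn]) {
			pcp_cached[pfn] = false;
			buddy_free[pfn] = true;
		}
	}
}

/* The order-3 block can merge only if every page is in the buddy free list. */
static bool order3_block_available(void)
{
	for (int pfn = 0; pfn < NPAGES; pfn++)
		if (!buddy_free[pfn])
			return false;
	return true;
}

int main(void)
{
	for (int pfn = 0; pfn < NPAGES; pfn++)
		free_to_pcp(pfn);	/* "migration" freed the whole block */

	printf("before drain: high-order block available? %d\n",
	       order3_block_available());	/* prints 0 */
	drain_pcp();
	printf("after drain:  high-order block available? %d\n",
	       order3_block_available());	/* prints 1 */
	return 0;
}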
During direct compaction, it is useful to do the draining explicitly so that
pages merge as soon as possible, compaction can detect success immediately,
and the latency impact is kept to a minimum. However, the current
implementation is far from ideal. Draining is done only in
__alloc_pages_direct_compact(), after all zones have already been compacted,
so the decisions to continue or stop compaction in individual zones were made
without the last batch of migrations having been merged. It also omits
draining the lru_add cache before the pcplists.
This patch moves the draining for direct compaction into compact_zone().
It adds the missing lru_add cache draining and uses the newly introduced
single-zone pcplists draining to reduce overhead and avoid impact on
unrelated zones. Draining is only performed when it can actually lead to
merging of a page of desired order (passed by cc->order). This means it
is only done when migration occurred in the previously scanned cc->order
aligned block(s) and the migration scanner is now pointing to the next
cc->order aligned block.
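A minimal standalone sketch of that check (plain userspace C; the order and
pfn values are made up for demonstration, and the real kernel logic is in the
compact_zone() hunk below):

/*
 * Standalone sketch of the aligned-block test: drain only once the
 * migration scanner has left the cc->order aligned block it last
 * migrated from. Values are hypothetical.
 */
#include <stdio.h>

int main(void)
{
	int order = 4;					/* stand-in for cc->order */
	unsigned long last_migrated_pfn = 0x1234;	/* where the last migrated block started */
	unsigned long migrate_pfn = 0x1242;		/* current migration scanner position */

	/* Round the scanner position down to its order-aligned block start. */
	unsigned long current_block_start =
		migrate_pfn & ~((1UL << order) - 1);

	if (last_migrated_pfn && last_migrated_pfn < current_block_start)
		printf("scanner left the old block: drain lru_add cache and pcplists\n");
	else
		printf("still inside the same aligned block: defer draining\n");

	return 0;
}

The mask arithmetic rounds the scanner position down to the start of its
cc->order aligned block; draining is deferred until that block start lies
above the pfn where the last batch of migrations began.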
The patch has been tested with the stress-highalloc benchmark from mmtests.
Although the overall allocation success rates of the benchmark were not
affected, the number of detected compaction successes has doubled. This
suggests that allocations previously succeeded thanks to implicit merging
caused by background activity, which made a later allocation attempt succeed
immediately but did not attribute the success to compaction. Since
stress-highalloc always tries to allocate almost all of memory, it cannot show
the improvement in its reported success-rate metric. However, after this
patch, compaction should detect success and terminate earlier, reducing direct
compaction latencies in real scenarios.
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Christoph Lameter <cl@linux.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	mm/compaction.c	43
-rw-r--r--	mm/page_alloc.c	 4
2 files changed, 42 insertions, 5 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 8f211bd2ea0d..546e571e9d60 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1158,6 +1158,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	unsigned long end_pfn = zone_end_pfn(zone);
 	const int migratetype = gfpflags_to_migratetype(cc->gfp_mask);
 	const bool sync = cc->mode != MIGRATE_ASYNC;
+	unsigned long last_migrated_pfn = 0;
 
 	ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
 							cc->classzone_idx);
@@ -1203,6 +1204,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	while ((ret = compact_finished(zone, cc, migratetype)) ==
 						COMPACT_CONTINUE) {
 		int err;
+		unsigned long isolate_start_pfn = cc->migrate_pfn;
 
 		switch (isolate_migratepages(zone, cc)) {
 		case ISOLATE_ABORT:
@@ -1211,7 +1213,12 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 			cc->nr_migratepages = 0;
 			goto out;
 		case ISOLATE_NONE:
-			continue;
+			/*
+			 * We haven't isolated and migrated anything, but
+			 * there might still be unflushed migrations from
+			 * previous cc->order aligned block.
+			 */
+			goto check_drain;
 		case ISOLATE_SUCCESS:
 			;
 		}
@@ -1236,6 +1243,40 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 				goto out;
 			}
 		}
+
+		/*
+		 * Record where we could have freed pages by migration and not
+		 * yet flushed them to buddy allocator. We use the pfn that
+		 * isolate_migratepages() started from in this loop iteration
+		 * - this is the lowest page that could have been isolated and
+		 * then freed by migration.
+		 */
+		if (!last_migrated_pfn)
+			last_migrated_pfn = isolate_start_pfn;
+
+check_drain:
+		/*
+		 * Has the migration scanner moved away from the previous
+		 * cc->order aligned block where we migrated from? If yes,
+		 * flush the pages that were freed, so that they can merge and
+		 * compact_finished() can detect immediately if allocation
+		 * would succeed.
+		 */
+		if (cc->order > 0 && last_migrated_pfn) {
+			int cpu;
+			unsigned long current_block_start =
+				cc->migrate_pfn & ~((1UL << cc->order) - 1);
+
+			if (last_migrated_pfn < current_block_start) {
+				cpu = get_cpu();
+				lru_add_drain_cpu(cpu);
+				drain_local_pages(zone);
+				put_cpu();
+				/* No more flushing until we migrate again */
+				last_migrated_pfn = 0;
+			}
+		}
+
 	}
 
 out:
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index edb0ce1e7cf3..7352aa45a335 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2359,10 +2359,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 */
 	count_vm_event(COMPACTSTALL);
 
-	/* Page migration frees to the PCP lists but we want merging */
-	drain_pages(get_cpu());
-	put_cpu();
-
 	page = get_page_from_freelist(gfp_mask, nodemask,
 			order, zonelist, high_zoneidx,
 			alloc_flags & ~ALLOC_NO_WATERMARKS,