diff options
Diffstat (limited to 'mm/compaction.c')
-rw-r--r-- | mm/compaction.c | 89 |
1 file changed, 61 insertions, 28 deletions
diff --git a/mm/compaction.c b/mm/compaction.c index 81e1eaa2a2cf..613c59e928cb 100644 --- a/mm/compaction.c +++ b/mm/compaction.c | |||
@@ -89,11 +89,6 @@ static void map_pages(struct list_head *list) | |||
89 | list_splice(&tmp_list, list); | 89 | list_splice(&tmp_list, list); |
90 | } | 90 | } |
91 | 91 | ||
92 | static inline bool migrate_async_suitable(int migratetype) | ||
93 | { | ||
94 | return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE; | ||
95 | } | ||
96 | |||
97 | #ifdef CONFIG_COMPACTION | 92 | #ifdef CONFIG_COMPACTION |
98 | 93 | ||
99 | int PageMovable(struct page *page) | 94 | int PageMovable(struct page *page) |
@@ -988,13 +983,26 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn, | |||
988 | #endif /* CONFIG_COMPACTION || CONFIG_CMA */ | 983 | #endif /* CONFIG_COMPACTION || CONFIG_CMA */ |
989 | #ifdef CONFIG_COMPACTION | 984 | #ifdef CONFIG_COMPACTION |
990 | 985 | ||
991 | /* Returns true if the page is within a block suitable for migration to */ | 986 | static bool suitable_migration_source(struct compact_control *cc, |
992 | static bool suitable_migration_target(struct compact_control *cc, | ||
993 | struct page *page) | 987 | struct page *page) |
994 | { | 988 | { |
995 | if (cc->ignore_block_suitable) | 989 | int block_mt; |
990 | |||
991 | if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction) | ||
996 | return true; | 992 | return true; |
997 | 993 | ||
994 | block_mt = get_pageblock_migratetype(page); | ||
995 | |||
996 | if (cc->migratetype == MIGRATE_MOVABLE) | ||
997 | return is_migrate_movable(block_mt); | ||
998 | else | ||
999 | return block_mt == cc->migratetype; | ||
1000 | } | ||
1001 | |||
1002 | /* Returns true if the page is within a block suitable for migration to */ | ||
1003 | static bool suitable_migration_target(struct compact_control *cc, | ||
1004 | struct page *page) | ||
1005 | { | ||
998 | /* If the page is a large free page, then disallow migration */ | 1006 | /* If the page is a large free page, then disallow migration */ |
999 | if (PageBuddy(page)) { | 1007 | if (PageBuddy(page)) { |
1000 | /* | 1008 | /* |
@@ -1006,8 +1014,11 @@ static bool suitable_migration_target(struct compact_control *cc, | |||
1006 | return false; | 1014 | return false; |
1007 | } | 1015 | } |
1008 | 1016 | ||
1017 | if (cc->ignore_block_suitable) | ||
1018 | return true; | ||
1019 | |||
1009 | /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ | 1020 | /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ |
1010 | if (migrate_async_suitable(get_pageblock_migratetype(page))) | 1021 | if (is_migrate_movable(get_pageblock_migratetype(page))) |
1011 | return true; | 1022 | return true; |
1012 | 1023 | ||
1013 | /* Otherwise skip the block */ | 1024 | /* Otherwise skip the block */ |
@@ -1242,8 +1253,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone, | |||
1242 | * Async compaction is optimistic to see if the minimum amount | 1253 | * Async compaction is optimistic to see if the minimum amount |
1243 | * of work satisfies the allocation. | 1254 | * of work satisfies the allocation. |
1244 | */ | 1255 | */ |
1245 | if (cc->mode == MIGRATE_ASYNC && | 1256 | if (!suitable_migration_source(cc, page)) |
1246 | !migrate_async_suitable(get_pageblock_migratetype(page))) | ||
1247 | continue; | 1257 | continue; |
1248 | 1258 | ||
1249 | /* Perform the isolation */ | 1259 | /* Perform the isolation */ |
@@ -1276,11 +1286,11 @@ static inline bool is_via_compact_memory(int order) | |||
1276 | return order == -1; | 1286 | return order == -1; |
1277 | } | 1287 | } |
1278 | 1288 | ||
1279 | static enum compact_result __compact_finished(struct zone *zone, struct compact_control *cc, | 1289 | static enum compact_result __compact_finished(struct zone *zone, |
1280 | const int migratetype) | 1290 | struct compact_control *cc) |
1281 | { | 1291 | { |
1282 | unsigned int order; | 1292 | unsigned int order; |
1283 | unsigned long watermark; | 1293 | const int migratetype = cc->migratetype; |
1284 | 1294 | ||
1285 | if (cc->contended || fatal_signal_pending(current)) | 1295 | if (cc->contended || fatal_signal_pending(current)) |
1286 | return COMPACT_CONTENDED; | 1296 | return COMPACT_CONTENDED; |
@@ -1308,12 +1318,16 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_ | |||
1308 | if (is_via_compact_memory(cc->order)) | 1318 | if (is_via_compact_memory(cc->order)) |
1309 | return COMPACT_CONTINUE; | 1319 | return COMPACT_CONTINUE; |
1310 | 1320 | ||
1311 | /* Compaction run is not finished if the watermark is not met */ | 1321 | if (cc->finishing_block) { |
1312 | watermark = zone->watermark[cc->alloc_flags & ALLOC_WMARK_MASK]; | 1322 | /* |
1313 | 1323 | * We have finished the pageblock, but better check again that | |
1314 | if (!zone_watermark_ok(zone, cc->order, watermark, cc->classzone_idx, | 1324 | * we really succeeded. |
1315 | cc->alloc_flags)) | 1325 | */ |
1316 | return COMPACT_CONTINUE; | 1326 | if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages)) |
1327 | cc->finishing_block = false; | ||
1328 | else | ||
1329 | return COMPACT_CONTINUE; | ||
1330 | } | ||
1317 | 1331 | ||
1318 | /* Direct compactor: Is a suitable page free? */ | 1332 | /* Direct compactor: Is a suitable page free? */ |
1319 | for (order = cc->order; order < MAX_ORDER; order++) { | 1333 | for (order = cc->order; order < MAX_ORDER; order++) { |
@@ -1335,20 +1349,40 @@ static enum compact_result __compact_finished(struct zone *zone, struct compact_ | |||
1335 | * other migratetype buddy lists. | 1349 | * other migratetype buddy lists. |
1336 | */ | 1350 | */ |
1337 | if (find_suitable_fallback(area, order, migratetype, | 1351 | if (find_suitable_fallback(area, order, migratetype, |
1338 | true, &can_steal) != -1) | 1352 | true, &can_steal) != -1) { |
1339 | return COMPACT_SUCCESS; | 1353 | |
1354 | /* movable pages are OK in any pageblock */ | ||
1355 | if (migratetype == MIGRATE_MOVABLE) | ||
1356 | return COMPACT_SUCCESS; | ||
1357 | |||
1358 | /* | ||
1359 | * We are stealing for a non-movable allocation. Make | ||
1360 | * sure we finish compacting the current pageblock | ||
1361 | * first so it is as free as possible and we won't | ||
1362 | * have to steal another one soon. This only applies | ||
1363 | * to sync compaction, as async compaction operates | ||
1364 | * on pageblocks of the same migratetype. | ||
1365 | */ | ||
1366 | if (cc->mode == MIGRATE_ASYNC || | ||
1367 | IS_ALIGNED(cc->migrate_pfn, | ||
1368 | pageblock_nr_pages)) { | ||
1369 | return COMPACT_SUCCESS; | ||
1370 | } | ||
1371 | |||
1372 | cc->finishing_block = true; | ||
1373 | return COMPACT_CONTINUE; | ||
1374 | } | ||
1340 | } | 1375 | } |
1341 | 1376 | ||
1342 | return COMPACT_NO_SUITABLE_PAGE; | 1377 | return COMPACT_NO_SUITABLE_PAGE; |
1343 | } | 1378 | } |
1344 | 1379 | ||
1345 | static enum compact_result compact_finished(struct zone *zone, | 1380 | static enum compact_result compact_finished(struct zone *zone, |
1346 | struct compact_control *cc, | 1381 | struct compact_control *cc) |
1347 | const int migratetype) | ||
1348 | { | 1382 | { |
1349 | int ret; | 1383 | int ret; |
1350 | 1384 | ||
1351 | ret = __compact_finished(zone, cc, migratetype); | 1385 | ret = __compact_finished(zone, cc); |
1352 | trace_mm_compaction_finished(zone, cc->order, ret); | 1386 | trace_mm_compaction_finished(zone, cc->order, ret); |
1353 | if (ret == COMPACT_NO_SUITABLE_PAGE) | 1387 | if (ret == COMPACT_NO_SUITABLE_PAGE) |
1354 | ret = COMPACT_CONTINUE; | 1388 | ret = COMPACT_CONTINUE; |
@@ -1481,9 +1515,9 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro | |||
1481 | enum compact_result ret; | 1515 | enum compact_result ret; |
1482 | unsigned long start_pfn = zone->zone_start_pfn; | 1516 | unsigned long start_pfn = zone->zone_start_pfn; |
1483 | unsigned long end_pfn = zone_end_pfn(zone); | 1517 | unsigned long end_pfn = zone_end_pfn(zone); |
1484 | const int migratetype = gfpflags_to_migratetype(cc->gfp_mask); | ||
1485 | const bool sync = cc->mode != MIGRATE_ASYNC; | 1518 | const bool sync = cc->mode != MIGRATE_ASYNC; |
1486 | 1519 | ||
1520 | cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask); | ||
1487 | ret = compaction_suitable(zone, cc->order, cc->alloc_flags, | 1521 | ret = compaction_suitable(zone, cc->order, cc->alloc_flags, |
1488 | cc->classzone_idx); | 1522 | cc->classzone_idx); |
1489 | /* Compaction is likely to fail */ | 1523 | /* Compaction is likely to fail */ |
@@ -1533,8 +1567,7 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro | |||
1533 | 1567 | ||
1534 | migrate_prep_local(); | 1568 | migrate_prep_local(); |
1535 | 1569 | ||
1536 | while ((ret = compact_finished(zone, cc, migratetype)) == | 1570 | while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) { |
1537 | COMPACT_CONTINUE) { | ||
1538 | int err; | 1571 | int err; |
1539 | 1572 | ||
1540 | switch (isolate_migratepages(zone, cc)) { | 1573 | switch (isolate_migratepages(zone, cc)) { |