author     Mel Gorman <mgorman@techsingularity.net>          2019-03-05 18:44:36 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>    2019-03-06 00:07:16 -0500
commit     40cacbcb324036233a927418441323459d28d19b (patch)
tree       2e6d9f1b5cca32ca9d0b7f6cbfcf3237dc4c7473 /mm/compaction.c
parent     566e54e113eb2b669f9300db2c2df400cbb06646 (diff)
mm, compaction: remove unnecessary zone parameter in some instances
A zone parameter is passed into a number of top-level compaction
functions despite the fact that it's already in compact_control. This
is harmless but it did need an audit to check if zone actually ever
changes meaningfully. This patch removes the parameter from a number of
top-level functions. The change could go much deeper, but this was
enough to clarify the flow.
No functional change.
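The pattern can be illustrated with a small, self-contained C sketch (the
struct layout, the do_work_* names and main() below are illustrative only,
not kernel code): once the control struct carries the zone pointer, passing
the zone as a separate argument duplicates state the callee can read from
the struct. This mirrors the change below, where compact_zone(),
compact_finished() and __compact_finished() now take only the
compact_control and dereference cc->zone internally.

    /*
     * Illustrative sketch only -- hypothetical names, not the kernel code.
     * When the control struct already carries the zone pointer, a separate
     * zone parameter is redundant.
     */
    #include <stdio.h>

    struct zone { unsigned long zone_start_pfn; };

    struct compact_control {
            struct zone *zone;      /* set once by the caller; never changes mid-run */
            int order;
    };

    /* Before: zone passed alongside cc even though cc->zone is the same pointer */
    static void do_work_old(struct zone *zone, struct compact_control *cc)
    {
            printf("old: start pfn %lu, order %d\n", zone->zone_start_pfn, cc->order);
    }

    /* After: the callee derives the zone from compact_control */
    static void do_work_new(struct compact_control *cc)
    {
            printf("new: start pfn %lu, order %d\n", cc->zone->zone_start_pfn, cc->order);
    }

    int main(void)
    {
            struct zone z = { .zone_start_pfn = 4096 };
            struct compact_control cc = { .zone = &z, .order = 3 };

            do_work_old(&z, &cc);   /* caller must keep both arguments consistent */
            do_work_new(&cc);       /* only the control struct crosses the call */
            return 0;
    }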
Link: http://lkml.kernel.org/r/20190118175136.31341-5-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--   mm/compaction.c   54
1 file changed, 26 insertions(+), 28 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index e59dd7a7564c..163841e1b167 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1300,8 +1300,7 @@ static inline bool is_via_compact_memory(int order)
         return order == -1;
 }
 
-static enum compact_result __compact_finished(struct zone *zone,
-                                                struct compact_control *cc)
+static enum compact_result __compact_finished(struct compact_control *cc)
 {
         unsigned int order;
         const int migratetype = cc->migratetype;
@@ -1312,7 +1311,7 @@ static enum compact_result __compact_finished(struct zone *zone,
         /* Compaction run completes if the migrate and free scanner meet */
         if (compact_scanners_met(cc)) {
                 /* Let the next compaction start anew. */
-                reset_cached_positions(zone);
+                reset_cached_positions(cc->zone);
 
                 /*
                  * Mark that the PG_migrate_skip information should be cleared
@@ -1321,7 +1320,7 @@ static enum compact_result __compact_finished(struct zone *zone,
                  * based on an allocation request.
                  */
                 if (cc->direct_compaction)
-                        zone->compact_blockskip_flush = true;
+                        cc->zone->compact_blockskip_flush = true;
 
                 if (cc->whole_zone)
                         return COMPACT_COMPLETE;
@@ -1345,7 +1344,7 @@ static enum compact_result __compact_finished(struct zone *zone,
 
         /* Direct compactor: Is a suitable page free? */
         for (order = cc->order; order < MAX_ORDER; order++) {
-                struct free_area *area = &zone->free_area[order];
+                struct free_area *area = &cc->zone->free_area[order];
                 bool can_steal;
 
                 /* Job done if page is free of the right migratetype */
@@ -1391,13 +1390,12 @@ static enum compact_result __compact_finished(struct zone *zone,
         return COMPACT_NO_SUITABLE_PAGE;
 }
 
-static enum compact_result compact_finished(struct zone *zone,
-                        struct compact_control *cc)
+static enum compact_result compact_finished(struct compact_control *cc)
 {
         int ret;
 
-        ret = __compact_finished(zone, cc);
-        trace_mm_compaction_finished(zone, cc->order, ret);
+        ret = __compact_finished(cc);
+        trace_mm_compaction_finished(cc->zone, cc->order, ret);
         if (ret == COMPACT_NO_SUITABLE_PAGE)
                 ret = COMPACT_CONTINUE;
 
@@ -1524,16 +1522,16 @@ bool compaction_zonelist_suitable(struct alloc_context *ac, int order,
         return false;
 }
 
-static enum compact_result compact_zone(struct zone *zone, struct compact_control *cc)
+static enum compact_result compact_zone(struct compact_control *cc)
 {
         enum compact_result ret;
-        unsigned long start_pfn = zone->zone_start_pfn;
-        unsigned long end_pfn = zone_end_pfn(zone);
+        unsigned long start_pfn = cc->zone->zone_start_pfn;
+        unsigned long end_pfn = zone_end_pfn(cc->zone);
         unsigned long last_migrated_pfn;
         const bool sync = cc->mode != MIGRATE_ASYNC;
 
         cc->migratetype = gfpflags_to_migratetype(cc->gfp_mask);
-        ret = compaction_suitable(zone, cc->order, cc->alloc_flags,
+        ret = compaction_suitable(cc->zone, cc->order, cc->alloc_flags,
                                                         cc->classzone_idx);
         /* Compaction is likely to fail */
         if (ret == COMPACT_SUCCESS || ret == COMPACT_SKIPPED)
@@ -1546,8 +1544,8 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
          * Clear pageblock skip if there were failures recently and compaction
          * is about to be retried after being deferred.
          */
-        if (compaction_restarting(zone, cc->order))
-                __reset_isolation_suitable(zone);
+        if (compaction_restarting(cc->zone, cc->order))
+                __reset_isolation_suitable(cc->zone);
 
         /*
          * Setup to move all movable pages to the end of the zone. Used cached
@@ -1559,16 +1557,16 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
                 cc->migrate_pfn = start_pfn;
                 cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
         } else {
-                cc->migrate_pfn = zone->compact_cached_migrate_pfn[sync];
-                cc->free_pfn = zone->compact_cached_free_pfn;
+                cc->migrate_pfn = cc->zone->compact_cached_migrate_pfn[sync];
+                cc->free_pfn = cc->zone->compact_cached_free_pfn;
                 if (cc->free_pfn < start_pfn || cc->free_pfn >= end_pfn) {
                         cc->free_pfn = pageblock_start_pfn(end_pfn - 1);
-                        zone->compact_cached_free_pfn = cc->free_pfn;
+                        cc->zone->compact_cached_free_pfn = cc->free_pfn;
                 }
                 if (cc->migrate_pfn < start_pfn || cc->migrate_pfn >= end_pfn) {
                         cc->migrate_pfn = start_pfn;
-                        zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
-                        zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
+                        cc->zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+                        cc->zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
                 }
 
                 if (cc->migrate_pfn == start_pfn)
@@ -1582,11 +1580,11 @@ static enum compact_result compact_zone(struct zone *zone, struct compact_contro
 
         migrate_prep_local();
 
-        while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
+        while ((ret = compact_finished(cc)) == COMPACT_CONTINUE) {
                 int err;
                 unsigned long start_pfn = cc->migrate_pfn;
 
-                switch (isolate_migratepages(zone, cc)) {
+                switch (isolate_migratepages(cc->zone, cc)) {
                 case ISOLATE_ABORT:
                         ret = COMPACT_CONTENDED;
                         putback_movable_pages(&cc->migratepages);
@@ -1653,7 +1651,7 @@ check_drain:
                         if (last_migrated_pfn < current_block_start) {
                                 cpu = get_cpu();
                                 lru_add_drain_cpu(cpu);
-                                drain_local_pages(zone);
+                                drain_local_pages(cc->zone);
                                 put_cpu();
                                 /* No more flushing until we migrate again */
                                 last_migrated_pfn = 0;
@@ -1678,8 +1676,8 @@ out:
                  * Only go back, not forward. The cached pfn might have been
                  * already reset to zone end in compact_finished()
                  */
-                if (free_pfn > zone->compact_cached_free_pfn)
-                        zone->compact_cached_free_pfn = free_pfn;
+                if (free_pfn > cc->zone->compact_cached_free_pfn)
+                        cc->zone->compact_cached_free_pfn = free_pfn;
         }
 
         count_compact_events(COMPACTMIGRATE_SCANNED, cc->total_migrate_scanned);
@@ -1716,7 +1714,7 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
         INIT_LIST_HEAD(&cc.freepages);
         INIT_LIST_HEAD(&cc.migratepages);
 
-        ret = compact_zone(zone, &cc);
+        ret = compact_zone(&cc);
 
         VM_BUG_ON(!list_empty(&cc.freepages));
         VM_BUG_ON(!list_empty(&cc.migratepages));
@@ -1834,7 +1832,7 @@ static void compact_node(int nid)
                 INIT_LIST_HEAD(&cc.freepages);
                 INIT_LIST_HEAD(&cc.migratepages);
 
-                compact_zone(zone, &cc);
+                compact_zone(&cc);
 
                 VM_BUG_ON(!list_empty(&cc.freepages));
                 VM_BUG_ON(!list_empty(&cc.migratepages));
@@ -1968,7 +1966,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
 
                 if (kthread_should_stop())
                         return;
-                status = compact_zone(zone, &cc);
+                status = compact_zone(&cc);
 
                 if (status == COMPACT_SUCCESS) {
                         compaction_defer_reset(zone, cc.order, false);