summaryrefslogtreecommitdiffstats
path: root/mm/compaction.c
diff options
context:
space:
mode:
author Vlastimil Babka <vbabka@suse.cz> 2016-07-28 18:49:28 -0400
committer Linus Torvalds <torvalds@linux-foundation.org> 2016-07-28 19:07:41 -0400
commit a5508cd83f10f663e05d212cb81f600a3af46e40 (patch)
tree b52a09bd0be8e756a48b6673c7746b3b0e1b6726 /mm/compaction.c
parent 2516035499b9555f6acd373c9f12e44bcb50dbec (diff)
mm, compaction: introduce direct compaction priority
In the context of direct compaction, for some types of allocations we would like the compaction to either succeed or definitely fail while trying as hard as possible. Current async/sync_light migration mode is insufficient, as there are heuristics such as caching scanner positions, marking pageblocks as unsuitable or deferring compaction for a zone. At least the final compaction attempt should be able to override these heuristics.

To communicate how hard compaction should try, we replace migration mode with a new enum compact_priority and change the relevant function signatures. In compact_zone_order() where struct compact_control is constructed, the priority is mapped to suitable control flags. This patch itself has no functional change, as the current priority levels are mapped back to the same migration modes as before. Expanding them will be done next.

Note that !CONFIG_COMPACTION variant of try_to_compact_pages() is removed, as the only caller exists under CONFIG_COMPACTION.

Link: http://lkml.kernel.org/r/20160721073614.24395-8-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r-- mm/compaction.c | 13
1 file changed, 7 insertions, 6 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index fee1118c8b94..4719a391242f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1626,7 +1626,7 @@ out:
1626} 1626}
1627 1627
1628static enum compact_result compact_zone_order(struct zone *zone, int order, 1628static enum compact_result compact_zone_order(struct zone *zone, int order,
1629 gfp_t gfp_mask, enum migrate_mode mode, int *contended, 1629 gfp_t gfp_mask, enum compact_priority prio, int *contended,
1630 unsigned int alloc_flags, int classzone_idx) 1630 unsigned int alloc_flags, int classzone_idx)
1631{ 1631{
1632 enum compact_result ret; 1632 enum compact_result ret;
@@ -1636,7 +1636,8 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
1636 .order = order, 1636 .order = order,
1637 .gfp_mask = gfp_mask, 1637 .gfp_mask = gfp_mask,
1638 .zone = zone, 1638 .zone = zone,
1639 .mode = mode, 1639 .mode = (prio == COMPACT_PRIO_ASYNC) ?
1640 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
1640 .alloc_flags = alloc_flags, 1641 .alloc_flags = alloc_flags,
1641 .classzone_idx = classzone_idx, 1642 .classzone_idx = classzone_idx,
1642 .direct_compaction = true, 1643 .direct_compaction = true,
@@ -1669,7 +1670,7 @@ int sysctl_extfrag_threshold = 500;
1669 */ 1670 */
1670enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 1671enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1671 unsigned int alloc_flags, const struct alloc_context *ac, 1672 unsigned int alloc_flags, const struct alloc_context *ac,
1672 enum migrate_mode mode, int *contended) 1673 enum compact_priority prio, int *contended)
1673{ 1674{
1674 int may_enter_fs = gfp_mask & __GFP_FS; 1675 int may_enter_fs = gfp_mask & __GFP_FS;
1675 int may_perform_io = gfp_mask & __GFP_IO; 1676 int may_perform_io = gfp_mask & __GFP_IO;
@@ -1684,7 +1685,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1684 if (!may_enter_fs || !may_perform_io) 1685 if (!may_enter_fs || !may_perform_io)
1685 return COMPACT_SKIPPED; 1686 return COMPACT_SKIPPED;
1686 1687
1687 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); 1688 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
1688 1689
1689 /* Compact each zone in the list */ 1690 /* Compact each zone in the list */
1690 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 1691 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
@@ -1697,7 +1698,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1697 continue; 1698 continue;
1698 } 1699 }
1699 1700
1700 status = compact_zone_order(zone, order, gfp_mask, mode, 1701 status = compact_zone_order(zone, order, gfp_mask, prio,
1701 &zone_contended, alloc_flags, 1702 &zone_contended, alloc_flags,
1702 ac_classzone_idx(ac)); 1703 ac_classzone_idx(ac));
1703 rc = max(status, rc); 1704 rc = max(status, rc);
@@ -1731,7 +1732,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1731 goto break_loop; 1732 goto break_loop;
1732 } 1733 }
1733 1734
1734 if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE || 1735 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
1735 status == COMPACT_PARTIAL_SKIPPED)) { 1736 status == COMPACT_PARTIAL_SKIPPED)) {
1736 /* 1737 /*
1737 * We think that allocation won't succeed in this zone 1738 * We think that allocation won't succeed in this zone