aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2016-07-28 18:49:28 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-07-28 19:07:41 -0400
commita5508cd83f10f663e05d212cb81f600a3af46e40 (patch)
treeb52a09bd0be8e756a48b6673c7746b3b0e1b6726
parent2516035499b9555f6acd373c9f12e44bcb50dbec (diff)
mm, compaction: introduce direct compaction priority
In the context of direct compaction, for some types of allocations we would like the compaction to either succeed or definitely fail while trying as hard as possible. Current async/sync_light migration mode is insufficient, as there are heuristics such as caching scanner positions, marking pageblocks as unsuitable or deferring compaction for a zone. At least the final compaction attempt should be able to override these heuristics. To communicate how hard compaction should try, we replace migration mode with a new enum compact_priority and change the relevant function signatures. In compact_zone_order() where struct compact_control is constructed, the priority is mapped to suitable control flags. This patch itself has no functional change, as the current priority levels are mapped back to the same migration modes as before. Expanding them will be done next. Note that !CONFIG_COMPACTION variant of try_to_compact_pages() is removed, as the only caller exists under CONFIG_COMPACTION. Link: http://lkml.kernel.org/r/20160721073614.24395-8-vbabka@suse.cz Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Acked-by: Michal Hocko <mhocko@suse.com> Acked-by: Mel Gorman <mgorman@techsingularity.net> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/compaction.h22
-rw-r--r--include/trace/events/compaction.h12
-rw-r--r--mm/compaction.c13
-rw-r--r--mm/page_alloc.c28
4 files changed, 40 insertions(+), 35 deletions(-)
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 1a02dab16646..0980a6ce4436 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,6 +1,18 @@
1#ifndef _LINUX_COMPACTION_H 1#ifndef _LINUX_COMPACTION_H
2#define _LINUX_COMPACTION_H 2#define _LINUX_COMPACTION_H
3 3
4/*
5 * Determines how hard direct compaction should try to succeed.
6 * Lower value means higher priority, analogically to reclaim priority.
7 */
8enum compact_priority {
9 COMPACT_PRIO_SYNC_LIGHT,
10 MIN_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
11 DEF_COMPACT_PRIORITY = COMPACT_PRIO_SYNC_LIGHT,
12 COMPACT_PRIO_ASYNC,
13 INIT_COMPACT_PRIORITY = COMPACT_PRIO_ASYNC
14};
15
4/* Return values for compact_zone() and try_to_compact_pages() */ 16/* Return values for compact_zone() and try_to_compact_pages() */
5/* When adding new states, please adjust include/trace/events/compaction.h */ 17/* When adding new states, please adjust include/trace/events/compaction.h */
6enum compact_result { 18enum compact_result {
@@ -66,7 +78,7 @@ extern int fragmentation_index(struct zone *zone, unsigned int order);
66extern enum compact_result try_to_compact_pages(gfp_t gfp_mask, 78extern enum compact_result try_to_compact_pages(gfp_t gfp_mask,
67 unsigned int order, 79 unsigned int order,
68 unsigned int alloc_flags, const struct alloc_context *ac, 80 unsigned int alloc_flags, const struct alloc_context *ac,
69 enum migrate_mode mode, int *contended); 81 enum compact_priority prio, int *contended);
70extern void compact_pgdat(pg_data_t *pgdat, int order); 82extern void compact_pgdat(pg_data_t *pgdat, int order);
71extern void reset_isolation_suitable(pg_data_t *pgdat); 83extern void reset_isolation_suitable(pg_data_t *pgdat);
72extern enum compact_result compaction_suitable(struct zone *zone, int order, 84extern enum compact_result compaction_suitable(struct zone *zone, int order,
@@ -151,14 +163,6 @@ extern void kcompactd_stop(int nid);
151extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx); 163extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
152 164
153#else 165#else
154static inline enum compact_result try_to_compact_pages(gfp_t gfp_mask,
155 unsigned int order, int alloc_flags,
156 const struct alloc_context *ac,
157 enum migrate_mode mode, int *contended)
158{
159 return COMPACT_CONTINUE;
160}
161
162static inline void compact_pgdat(pg_data_t *pgdat, int order) 166static inline void compact_pgdat(pg_data_t *pgdat, int order)
163{ 167{
164} 168}
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index 36e2d6fb1360..c2ba402ab256 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -226,26 +226,26 @@ TRACE_EVENT(mm_compaction_try_to_compact_pages,
226 TP_PROTO( 226 TP_PROTO(
227 int order, 227 int order,
228 gfp_t gfp_mask, 228 gfp_t gfp_mask,
229 enum migrate_mode mode), 229 int prio),
230 230
231 TP_ARGS(order, gfp_mask, mode), 231 TP_ARGS(order, gfp_mask, prio),
232 232
233 TP_STRUCT__entry( 233 TP_STRUCT__entry(
234 __field(int, order) 234 __field(int, order)
235 __field(gfp_t, gfp_mask) 235 __field(gfp_t, gfp_mask)
236 __field(enum migrate_mode, mode) 236 __field(int, prio)
237 ), 237 ),
238 238
239 TP_fast_assign( 239 TP_fast_assign(
240 __entry->order = order; 240 __entry->order = order;
241 __entry->gfp_mask = gfp_mask; 241 __entry->gfp_mask = gfp_mask;
242 __entry->mode = mode; 242 __entry->prio = prio;
243 ), 243 ),
244 244
245 TP_printk("order=%d gfp_mask=0x%x mode=%d", 245 TP_printk("order=%d gfp_mask=0x%x priority=%d",
246 __entry->order, 246 __entry->order,
247 __entry->gfp_mask, 247 __entry->gfp_mask,
248 (int)__entry->mode) 248 __entry->prio)
249); 249);
250 250
251DECLARE_EVENT_CLASS(mm_compaction_suitable_template, 251DECLARE_EVENT_CLASS(mm_compaction_suitable_template,
diff --git a/mm/compaction.c b/mm/compaction.c
index fee1118c8b94..4719a391242f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1626,7 +1626,7 @@ out:
1626} 1626}
1627 1627
1628static enum compact_result compact_zone_order(struct zone *zone, int order, 1628static enum compact_result compact_zone_order(struct zone *zone, int order,
1629 gfp_t gfp_mask, enum migrate_mode mode, int *contended, 1629 gfp_t gfp_mask, enum compact_priority prio, int *contended,
1630 unsigned int alloc_flags, int classzone_idx) 1630 unsigned int alloc_flags, int classzone_idx)
1631{ 1631{
1632 enum compact_result ret; 1632 enum compact_result ret;
@@ -1636,7 +1636,8 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
1636 .order = order, 1636 .order = order,
1637 .gfp_mask = gfp_mask, 1637 .gfp_mask = gfp_mask,
1638 .zone = zone, 1638 .zone = zone,
1639 .mode = mode, 1639 .mode = (prio == COMPACT_PRIO_ASYNC) ?
1640 MIGRATE_ASYNC : MIGRATE_SYNC_LIGHT,
1640 .alloc_flags = alloc_flags, 1641 .alloc_flags = alloc_flags,
1641 .classzone_idx = classzone_idx, 1642 .classzone_idx = classzone_idx,
1642 .direct_compaction = true, 1643 .direct_compaction = true,
@@ -1669,7 +1670,7 @@ int sysctl_extfrag_threshold = 500;
1669 */ 1670 */
1670enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order, 1671enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1671 unsigned int alloc_flags, const struct alloc_context *ac, 1672 unsigned int alloc_flags, const struct alloc_context *ac,
1672 enum migrate_mode mode, int *contended) 1673 enum compact_priority prio, int *contended)
1673{ 1674{
1674 int may_enter_fs = gfp_mask & __GFP_FS; 1675 int may_enter_fs = gfp_mask & __GFP_FS;
1675 int may_perform_io = gfp_mask & __GFP_IO; 1676 int may_perform_io = gfp_mask & __GFP_IO;
@@ -1684,7 +1685,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1684 if (!may_enter_fs || !may_perform_io) 1685 if (!may_enter_fs || !may_perform_io)
1685 return COMPACT_SKIPPED; 1686 return COMPACT_SKIPPED;
1686 1687
1687 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, mode); 1688 trace_mm_compaction_try_to_compact_pages(order, gfp_mask, prio);
1688 1689
1689 /* Compact each zone in the list */ 1690 /* Compact each zone in the list */
1690 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx, 1691 for_each_zone_zonelist_nodemask(zone, z, ac->zonelist, ac->high_zoneidx,
@@ -1697,7 +1698,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1697 continue; 1698 continue;
1698 } 1699 }
1699 1700
1700 status = compact_zone_order(zone, order, gfp_mask, mode, 1701 status = compact_zone_order(zone, order, gfp_mask, prio,
1701 &zone_contended, alloc_flags, 1702 &zone_contended, alloc_flags,
1702 ac_classzone_idx(ac)); 1703 ac_classzone_idx(ac));
1703 rc = max(status, rc); 1704 rc = max(status, rc);
@@ -1731,7 +1732,7 @@ enum compact_result try_to_compact_pages(gfp_t gfp_mask, unsigned int order,
1731 goto break_loop; 1732 goto break_loop;
1732 } 1733 }
1733 1734
1734 if (mode != MIGRATE_ASYNC && (status == COMPACT_COMPLETE || 1735 if (prio != COMPACT_PRIO_ASYNC && (status == COMPACT_COMPLETE ||
1735 status == COMPACT_PARTIAL_SKIPPED)) { 1736 status == COMPACT_PARTIAL_SKIPPED)) {
1736 /* 1737 /*
1737 * We think that allocation won't succeed in this zone 1738 * We think that allocation won't succeed in this zone
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 09ba67487897..26c6fe74f5c5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3096,7 +3096,7 @@ out:
3096static struct page * 3096static struct page *
3097__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3097__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3098 unsigned int alloc_flags, const struct alloc_context *ac, 3098 unsigned int alloc_flags, const struct alloc_context *ac,
3099 enum migrate_mode mode, enum compact_result *compact_result) 3099 enum compact_priority prio, enum compact_result *compact_result)
3100{ 3100{
3101 struct page *page; 3101 struct page *page;
3102 int contended_compaction; 3102 int contended_compaction;
@@ -3106,7 +3106,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3106 3106
3107 current->flags |= PF_MEMALLOC; 3107 current->flags |= PF_MEMALLOC;
3108 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac, 3108 *compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
3109 mode, &contended_compaction); 3109 prio, &contended_compaction);
3110 current->flags &= ~PF_MEMALLOC; 3110 current->flags &= ~PF_MEMALLOC;
3111 3111
3112 if (*compact_result <= COMPACT_INACTIVE) 3112 if (*compact_result <= COMPACT_INACTIVE)
@@ -3160,7 +3160,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3160 3160
3161static inline bool 3161static inline bool
3162should_compact_retry(struct alloc_context *ac, int order, int alloc_flags, 3162should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3163 enum compact_result compact_result, enum migrate_mode *migrate_mode, 3163 enum compact_result compact_result,
3164 enum compact_priority *compact_priority,
3164 int compaction_retries) 3165 int compaction_retries)
3165{ 3166{
3166 int max_retries = MAX_COMPACT_RETRIES; 3167 int max_retries = MAX_COMPACT_RETRIES;
@@ -3171,11 +3172,11 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3171 /* 3172 /*
3172 * compaction considers all the zone as desperately out of memory 3173 * compaction considers all the zone as desperately out of memory
3173 * so it doesn't really make much sense to retry except when the 3174 * so it doesn't really make much sense to retry except when the
3174 * failure could be caused by weak migration mode. 3175 * failure could be caused by insufficient priority
3175 */ 3176 */
3176 if (compaction_failed(compact_result)) { 3177 if (compaction_failed(compact_result)) {
3177 if (*migrate_mode == MIGRATE_ASYNC) { 3178 if (*compact_priority > MIN_COMPACT_PRIORITY) {
3178 *migrate_mode = MIGRATE_SYNC_LIGHT; 3179 (*compact_priority)--;
3179 return true; 3180 return true;
3180 } 3181 }
3181 return false; 3182 return false;
@@ -3209,7 +3210,7 @@ should_compact_retry(struct alloc_context *ac, int order, int alloc_flags,
3209static inline struct page * 3210static inline struct page *
3210__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order, 3211__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3211 unsigned int alloc_flags, const struct alloc_context *ac, 3212 unsigned int alloc_flags, const struct alloc_context *ac,
3212 enum migrate_mode mode, enum compact_result *compact_result) 3213 enum compact_priority prio, enum compact_result *compact_result)
3213{ 3214{
3214 *compact_result = COMPACT_SKIPPED; 3215 *compact_result = COMPACT_SKIPPED;
3215 return NULL; 3216 return NULL;
@@ -3218,7 +3219,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
3218static inline bool 3219static inline bool
3219should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags, 3220should_compact_retry(struct alloc_context *ac, unsigned int order, int alloc_flags,
3220 enum compact_result compact_result, 3221 enum compact_result compact_result,
3221 enum migrate_mode *migrate_mode, 3222 enum compact_priority *compact_priority,
3222 int compaction_retries) 3223 int compaction_retries)
3223{ 3224{
3224 struct zone *zone; 3225 struct zone *zone;
@@ -3473,7 +3474,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3473 struct page *page = NULL; 3474 struct page *page = NULL;
3474 unsigned int alloc_flags; 3475 unsigned int alloc_flags;
3475 unsigned long did_some_progress; 3476 unsigned long did_some_progress;
3476 enum migrate_mode migration_mode = MIGRATE_SYNC_LIGHT; 3477 enum compact_priority compact_priority = DEF_COMPACT_PRIORITY;
3477 enum compact_result compact_result; 3478 enum compact_result compact_result;
3478 int compaction_retries = 0; 3479 int compaction_retries = 0;
3479 int no_progress_loops = 0; 3480 int no_progress_loops = 0;
@@ -3525,7 +3526,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3525 !gfp_pfmemalloc_allowed(gfp_mask)) { 3526 !gfp_pfmemalloc_allowed(gfp_mask)) {
3526 page = __alloc_pages_direct_compact(gfp_mask, order, 3527 page = __alloc_pages_direct_compact(gfp_mask, order,
3527 alloc_flags, ac, 3528 alloc_flags, ac,
3528 MIGRATE_ASYNC, 3529 INIT_COMPACT_PRIORITY,
3529 &compact_result); 3530 &compact_result);
3530 if (page) 3531 if (page)
3531 goto got_pg; 3532 goto got_pg;
@@ -3558,7 +3559,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
3558 * sync compaction could be very expensive, so keep 3559 * sync compaction could be very expensive, so keep
3559 * using async compaction. 3560 * using async compaction.
3560 */ 3561 */
3561 migration_mode = MIGRATE_ASYNC; 3562 compact_priority = INIT_COMPACT_PRIORITY;
3562 } 3563 }
3563 } 3564 }
3564 3565
@@ -3624,8 +3625,7 @@ retry:
3624 3625
3625 /* Try direct compaction and then allocating */ 3626 /* Try direct compaction and then allocating */
3626 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac, 3627 page = __alloc_pages_direct_compact(gfp_mask, order, alloc_flags, ac,
3627 migration_mode, 3628 compact_priority, &compact_result);
3628 &compact_result);
3629 if (page) 3629 if (page)
3630 goto got_pg; 3630 goto got_pg;
3631 3631
@@ -3665,7 +3665,7 @@ retry:
3665 */ 3665 */
3666 if (did_some_progress > 0 && 3666 if (did_some_progress > 0 &&
3667 should_compact_retry(ac, order, alloc_flags, 3667 should_compact_retry(ac, order, alloc_flags,
3668 compact_result, &migration_mode, 3668 compact_result, &compact_priority,
3669 compaction_retries)) 3669 compaction_retries))
3670 goto retry; 3670 goto retry;
3671 3671