Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c	71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index b6ede459c1bb..b68736c8a1ce 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -124,6 +124,77 @@ static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
 }
 
 #ifdef CONFIG_COMPACTION
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_limit compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ */
+void defer_compaction(struct zone *zone, int order)
+{
+	zone->compact_considered = 0;
+	zone->compact_defer_shift++;
+
+	if (order < zone->compact_order_failed)
+		zone->compact_order_failed = order;
+
+	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+
+	trace_mm_compaction_defer_compaction(zone, order);
+}
+
+/* Returns true if compaction should be skipped this time */
+bool compaction_deferred(struct zone *zone, int order)
+{
+	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+	if (order < zone->compact_order_failed)
+		return false;
+
+	/* Avoid possible overflow */
+	if (++zone->compact_considered > defer_limit)
+		zone->compact_considered = defer_limit;
+
+	if (zone->compact_considered >= defer_limit)
+		return false;
+
+	trace_mm_compaction_deferred(zone, order);
+
+	return true;
+}
+
+/*
+ * Update defer tracking counters after successful compaction of given order,
+ * which means an allocation either succeeded (alloc_success == true) or is
+ * expected to succeed.
+ */
+void compaction_defer_reset(struct zone *zone, int order,
+		bool alloc_success)
+{
+	if (alloc_success) {
+		zone->compact_considered = 0;
+		zone->compact_defer_shift = 0;
+	}
+	if (order >= zone->compact_order_failed)
+		zone->compact_order_failed = order + 1;
+
+	trace_mm_compaction_defer_reset(zone, order);
+}
+
+/* Returns true if restarting compaction after many failures */
+bool compaction_restarting(struct zone *zone, int order)
+{
+	if (order < zone->compact_order_failed)
+		return false;
+
+	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
+		zone->compact_considered >= 1UL << zone->compact_defer_shift;
+}
+
 /* Returns true if the pageblock should be scanned for pages to isolate. */
 static inline bool isolation_suitable(struct compact_control *cc,
 					struct page *page)
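
The hunk above implements an exponential backoff for compaction: each failure bumps compact_defer_shift (capped at COMPACT_MAX_DEFER_SHIFT), so compaction_deferred() cheaply skips 1 << compact_defer_shift - 1 subsequent attempts before letting compaction run again. The standalone userspace sketch below replays that behaviour; the trimmed-down struct zone and the main() driver are illustrative stand-ins and the tracepoints are dropped, but the counter logic mirrors the hunk.

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6

/* Stand-in for the kernel's struct zone: only the deferral fields. */
struct zone {
	unsigned int compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
};

/* Same logic as the hunk above, tracepoints dropped. */
static void defer_compaction(struct zone *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

static bool compaction_deferred(struct zone *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

int main(void)
{
	struct zone z = { 0, 0, 0 };
	int attempt, skipped = 0;

	for (attempt = 1; attempt <= 150; attempt++) {
		if (compaction_deferred(&z, 2)) {
			skipped++;	/* backoff: attempt skipped */
			continue;
		}
		printf("attempt %3d: compaction runs after %d skips "
		       "(defer_shift=%u)\n",
		       attempt, skipped, z.compact_defer_shift);
		skipped = 0;
		defer_compaction(&z, 2);	/* pretend it failed again */
	}
	return 0;
}

The driver prints runs after 0, 1, 3, 7, 15, 31 and then a steady 63 skips, showing the cap. On an allocation success the real caller would invoke compaction_defer_reset() instead, zeroing both counters so the next failure restarts the backoff from a single-attempt window.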