author		Joonsoo Kim <iamjoonsoo.kim@lge.com>	2015-02-11 18:27:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-02-11 20:06:04 -0500
commit		24e2716f63e613cf15d3beba3faa0711bcacc427 (patch)
tree		7ab8b73ff2977b76e4a3486730614999ea42e4d3 /mm/compaction.c
parent		837d026d560c5ef26abeca0441713d82e4e82cad (diff)
mm/compaction: add tracepoint to observe behaviour of compaction defer
The compaction deferring logic is a heavy hammer that blocks the way to compaction. It doesn't consider overall system state, so it can falsely prevent a user from running compaction: even if the system has enough memory to compact, compaction may be skipped due to the deferring logic. This patch adds new tracepoints to observe the work of the deferring logic. They will also help to check compaction success and failure.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
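To make the backoff concrete, below is a minimal user-space sketch of the defer state machine this patch instruments. struct zone_sim and the main() driver are hypothetical stand-ins, not kernel code; the two helpers mirror defer_compaction() and compaction_deferred() from the diff further down, with the tracepoints left out.

/* defer_sim.c - user-space sketch of the compaction defer state machine.
 * struct zone_sim is a hypothetical stand-in for the three struct zone
 * fields the patch traces; it is not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* do not skip more than 64 times */

struct zone_sim {
	unsigned long compact_considered;
	unsigned int compact_defer_shift;
	int compact_order_failed;
};

/* Mirrors defer_compaction() from the diff, minus the tracepoint. */
static void defer_compaction(struct zone_sim *zone, int order)
{
	zone->compact_considered = 0;
	zone->compact_defer_shift++;

	if (order < zone->compact_order_failed)
		zone->compact_order_failed = order;

	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Mirrors compaction_deferred() from the diff, minus the tracepoint. */
static bool compaction_deferred(struct zone_sim *zone, int order)
{
	unsigned long defer_limit = 1UL << zone->compact_defer_shift;

	if (order < zone->compact_order_failed)
		return false;

	/* Avoid possible overflow */
	if (++zone->compact_considered > defer_limit)
		zone->compact_considered = defer_limit;

	return zone->compact_considered < defer_limit;
}

int main(void)
{
	struct zone_sim zone = { 0, 0, 10 };	/* no order has failed yet */
	int attempt;

	for (attempt = 1; attempt <= 40; attempt++) {
		if (compaction_deferred(&zone, 9))
			continue;	/* skipped: the "deferred" tracepoint would fire here */

		/* Compaction ran; pretend it failed to satisfy the allocation. */
		defer_compaction(&zone, 9);
		printf("attempt %2d: compaction ran and failed, defer_shift now %u\n",
		       attempt, zone.compact_defer_shift);
	}
	return 0;
}

If every run is assumed to fail its allocation again, the sketch reruns compaction on attempts 1, 3, 7, 15 and 31: each failure doubles the skip window, up to the 1 << COMPACT_MAX_DEFER_SHIFT ceiling of 64, which is exactly the pattern the new tracepoints expose.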
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	71
1 file changed, 71 insertions(+), 0 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index b6ede459c1bb..b68736c8a1ce 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -124,6 +124,77 @@ static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
 }
 
 #ifdef CONFIG_COMPACTION
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_shift compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ */
+void defer_compaction(struct zone *zone, int order)
+{
+	zone->compact_considered = 0;
+	zone->compact_defer_shift++;
+
+	if (order < zone->compact_order_failed)
+		zone->compact_order_failed = order;
+
+	if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+		zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+
+	trace_mm_compaction_defer_compaction(zone, order);
+}
+
+/* Returns true if compaction should be skipped this time */
+bool compaction_deferred(struct zone *zone, int order)
+{
+	unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+	if (order < zone->compact_order_failed)
+		return false;
+
+	/* Avoid possible overflow */
+	if (++zone->compact_considered > defer_limit)
+		zone->compact_considered = defer_limit;
+
+	if (zone->compact_considered >= defer_limit)
+		return false;
+
+	trace_mm_compaction_deferred(zone, order);
+
+	return true;
+}
+
+/*
+ * Update defer tracking counters after successful compaction of given order,
+ * which means an allocation either succeeded (alloc_success == true) or is
+ * expected to succeed.
+ */
+void compaction_defer_reset(struct zone *zone, int order,
+		bool alloc_success)
+{
+	if (alloc_success) {
+		zone->compact_considered = 0;
+		zone->compact_defer_shift = 0;
+	}
+	if (order >= zone->compact_order_failed)
+		zone->compact_order_failed = order + 1;
+
+	trace_mm_compaction_defer_reset(zone, order);
+}
+
+/* Returns true if restarting compaction after many failures */
+bool compaction_restarting(struct zone *zone, int order)
+{
+	if (order < zone->compact_order_failed)
+		return false;
+
+	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
+		zone->compact_considered >= 1UL << zone->compact_defer_shift;
+}
+
 /* Returns true if the pageblock should be scanned for pages to isolate. */
 static inline bool isolation_suitable(struct compact_control *cc,
 					struct page *page)
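Assuming the matching TRACE_EVENT definitions land with this patch (they live in include/trace/events/compaction.h, outside this diffstat's scope), the three new events should appear in the tracefs compaction group, e.g. under /sys/kernel/debug/tracing/events/compaction/, as mm_compaction_defer_compaction, mm_compaction_deferred and mm_compaction_defer_reset. Enabling them makes it possible to watch a zone enter deferral, count how many requests are skipped, and see the counters reset after a success.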