aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorJoonsoo Kim <iamjoonsoo.kim@lge.com>2015-02-11 18:27:09 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-02-11 20:06:04 -0500
commit24e2716f63e613cf15d3beba3faa0711bcacc427 (patch)
tree7ab8b73ff2977b76e4a3486730614999ea42e4d3
parent837d026d560c5ef26abeca0441713d82e4e82cad (diff)
mm/compaction: add tracepoint to observe behaviour of compaction defer
Compaction deferring logic is a heavy hammer that blocks the way to compaction. It doesn't consider overall system state, so it could falsely prevent a user from performing compaction. In other words, even if the system has enough memory to compact, compaction would be skipped due to the compaction deferring logic. This patch adds a new tracepoint to help understand the behaviour of the deferring logic. It will also help to check compaction success and failure. Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: Vlastimil Babka <vbabka@suse.cz> Cc: Mel Gorman <mgorman@suse.de> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--include/linux/compaction.h65
-rw-r--r--include/trace/events/compaction.h56
-rw-r--r--mm/compaction.c71
3 files changed, 132 insertions, 60 deletions
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 501d7513aac1..a014559e4a49 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -44,66 +44,11 @@ extern void reset_isolation_suitable(pg_data_t *pgdat);
44extern unsigned long compaction_suitable(struct zone *zone, int order, 44extern unsigned long compaction_suitable(struct zone *zone, int order,
45 int alloc_flags, int classzone_idx); 45 int alloc_flags, int classzone_idx);
46 46
47/* Do not skip compaction more than 64 times */ 47extern void defer_compaction(struct zone *zone, int order);
48#define COMPACT_MAX_DEFER_SHIFT 6 48extern bool compaction_deferred(struct zone *zone, int order);
49 49extern void compaction_defer_reset(struct zone *zone, int order,
50/* 50 bool alloc_success);
51 * Compaction is deferred when compaction fails to result in a page 51extern bool compaction_restarting(struct zone *zone, int order);
52 * allocation success. 1 << compact_defer_limit compactions are skipped up
53 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
54 */
55static inline void defer_compaction(struct zone *zone, int order)
56{
57 zone->compact_considered = 0;
58 zone->compact_defer_shift++;
59
60 if (order < zone->compact_order_failed)
61 zone->compact_order_failed = order;
62
63 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
64 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
65}
66
67/* Returns true if compaction should be skipped this time */
68static inline bool compaction_deferred(struct zone *zone, int order)
69{
70 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
71
72 if (order < zone->compact_order_failed)
73 return false;
74
75 /* Avoid possible overflow */
76 if (++zone->compact_considered > defer_limit)
77 zone->compact_considered = defer_limit;
78
79 return zone->compact_considered < defer_limit;
80}
81
82/*
83 * Update defer tracking counters after successful compaction of given order,
84 * which means an allocation either succeeded (alloc_success == true) or is
85 * expected to succeed.
86 */
87static inline void compaction_defer_reset(struct zone *zone, int order,
88 bool alloc_success)
89{
90 if (alloc_success) {
91 zone->compact_considered = 0;
92 zone->compact_defer_shift = 0;
93 }
94 if (order >= zone->compact_order_failed)
95 zone->compact_order_failed = order + 1;
96}
97
98/* Returns true if restarting compaction after many failures */
99static inline bool compaction_restarting(struct zone *zone, int order)
100{
101 if (order < zone->compact_order_failed)
102 return false;
103
104 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
105 zone->compact_considered >= 1UL << zone->compact_defer_shift;
106}
107 52
108#else 53#else
109static inline unsigned long try_to_compact_pages(gfp_t gfp_mask, 54static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index d46535801f63..9a6a3fe0fb51 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -238,6 +238,62 @@ DEFINE_EVENT(mm_compaction_suitable_template, mm_compaction_suitable,
238 TP_ARGS(zone, order, ret) 238 TP_ARGS(zone, order, ret)
239); 239);
240 240
241#ifdef CONFIG_COMPACTION
242DECLARE_EVENT_CLASS(mm_compaction_defer_template,
243
244 TP_PROTO(struct zone *zone, int order),
245
246 TP_ARGS(zone, order),
247
248 TP_STRUCT__entry(
249 __field(int, nid)
250 __field(char *, name)
251 __field(int, order)
252 __field(unsigned int, considered)
253 __field(unsigned int, defer_shift)
254 __field(int, order_failed)
255 ),
256
257 TP_fast_assign(
258 __entry->nid = zone_to_nid(zone);
259 __entry->name = (char *)zone->name;
260 __entry->order = order;
261 __entry->considered = zone->compact_considered;
262 __entry->defer_shift = zone->compact_defer_shift;
263 __entry->order_failed = zone->compact_order_failed;
264 ),
265
266 TP_printk("node=%d zone=%-8s order=%d order_failed=%d consider=%u limit=%lu",
267 __entry->nid,
268 __entry->name,
269 __entry->order,
270 __entry->order_failed,
271 __entry->considered,
272 1UL << __entry->defer_shift)
273);
274
275DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_deferred,
276
277 TP_PROTO(struct zone *zone, int order),
278
279 TP_ARGS(zone, order)
280);
281
282DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_compaction,
283
284 TP_PROTO(struct zone *zone, int order),
285
286 TP_ARGS(zone, order)
287);
288
289DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
290
291 TP_PROTO(struct zone *zone, int order),
292
293 TP_ARGS(zone, order)
294);
295#endif
296
241#endif /* _TRACE_COMPACTION_H */ 297#endif /* _TRACE_COMPACTION_H */
242 298
243/* This part must be outside protection */ 299/* This part must be outside protection */
diff --git a/mm/compaction.c b/mm/compaction.c
index b6ede459c1bb..b68736c8a1ce 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -124,6 +124,77 @@ static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
124} 124}
125 125
126#ifdef CONFIG_COMPACTION 126#ifdef CONFIG_COMPACTION
127
128/* Do not skip compaction more than 64 times */
129#define COMPACT_MAX_DEFER_SHIFT 6
130
131/*
132 * Compaction is deferred when compaction fails to result in a page
133 * allocation success. 1 << compact_defer_limit compactions are skipped up
134 * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
135 */
136void defer_compaction(struct zone *zone, int order)
137{
138 zone->compact_considered = 0;
139 zone->compact_defer_shift++;
140
141 if (order < zone->compact_order_failed)
142 zone->compact_order_failed = order;
143
144 if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
145 zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
146
147 trace_mm_compaction_defer_compaction(zone, order);
148}
149
150/* Returns true if compaction should be skipped this time */
151bool compaction_deferred(struct zone *zone, int order)
152{
153 unsigned long defer_limit = 1UL << zone->compact_defer_shift;
154
155 if (order < zone->compact_order_failed)
156 return false;
157
158 /* Avoid possible overflow */
159 if (++zone->compact_considered > defer_limit)
160 zone->compact_considered = defer_limit;
161
162 if (zone->compact_considered >= defer_limit)
163 return false;
164
165 trace_mm_compaction_deferred(zone, order);
166
167 return true;
168}
169
170/*
171 * Update defer tracking counters after successful compaction of given order,
172 * which means an allocation either succeeded (alloc_success == true) or is
173 * expected to succeed.
174 */
175void compaction_defer_reset(struct zone *zone, int order,
176 bool alloc_success)
177{
178 if (alloc_success) {
179 zone->compact_considered = 0;
180 zone->compact_defer_shift = 0;
181 }
182 if (order >= zone->compact_order_failed)
183 zone->compact_order_failed = order + 1;
184
185 trace_mm_compaction_defer_reset(zone, order);
186}
187
188/* Returns true if restarting compaction after many failures */
189bool compaction_restarting(struct zone *zone, int order)
190{
191 if (order < zone->compact_order_failed)
192 return false;
193
194 return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
195 zone->compact_considered >= 1UL << zone->compact_defer_shift;
196}
197
127/* Returns true if the pageblock should be scanned for pages to isolate. */ 198/* Returns true if the pageblock should be scanned for pages to isolate. */
128static inline bool isolation_suitable(struct compact_control *cc, 199static inline bool isolation_suitable(struct compact_control *cc,
129 struct page *page) 200 struct page *page)