author		Mel Gorman <mgorman@suse.de>	2012-10-08 19:32:36 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-10-09 03:22:49 -0400
commit		f40d1e42bb988d2a26e8e111ea4c4c7bac819b7e (patch)
tree		71c930024b943dfd5dee0b5bd912f73747e4478e /mm
parent		2a1402aa044b55c2d30ab0ed9405693ef06fb07c (diff)
mm: compaction: acquire the zone->lock as late as possible
Compaction's free scanner acquires the zone->lock when checking for PageBuddy
pages and isolating them. It does this even if there are no PageBuddy pages in
the range. This patch defers acquiring the zone lock for as long as possible.
In the event there are no free pages in the pageblock then the lock will not
be acquired at all which reduces contention on zone->lock.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Richard Davies <richard@arachsys.com>
Cc: Shaohua Li <shli@kernel.org>
Cc: Avi Kivity <avi@redhat.com>
Acked-by: Rafael Aquini <aquini@redhat.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Tested-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
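
The core idea is to do the cheap per-page checks without the lock, take zone->lock only once a candidate free page is actually found, and recheck the page under the lock before isolating it. A rough user-space sketch of that pattern follows; it is not kernel code, and the pthread mutex plus the free_map[] and try_isolate_block() names are stand-ins invented for illustration, in place of zone->lock and the PageBuddy() checks.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

#define BLOCK_PAGES 512

/* Stand-ins for zone->lock and the per-page "is this page free?" state. */
static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;
static bool free_map[BLOCK_PAGES];

/*
 * Scan one block and "isolate" the free pages in it.  The lock is taken
 * only after the first page that looks free is found, so a block with no
 * free pages never touches the lock at all.
 */
static int try_isolate_block(void)
{
	bool locked = false;
	int isolated = 0;

	for (int i = 0; i < BLOCK_PAGES; i++) {
		/* Cheap check without the lock, like the unlocked PageBuddy() test. */
		if (!free_map[i])
			continue;

		/* Acquire the lock as late as possible, and only once per block. */
		if (!locked) {
			pthread_mutex_lock(&zone_lock);
			locked = true;
		}

		/* Recheck under the lock; the page may have changed meanwhile. */
		if (!free_map[i])
			continue;

		free_map[i] = false;	/* "isolate" the page */
		isolated++;
	}

	if (locked)
		pthread_mutex_unlock(&zone_lock);

	return isolated;
}

int main(void)
{
	free_map[3] = free_map[4] = true;
	printf("isolated %d pages\n", try_isolate_block());
	return 0;
}

In the patch itself the lock acquisition additionally goes through compact_checklock_irqsave(), which for async compaction may decline to spin on the lock and abort the scan (the "if (!locked) break;" in the diff below) rather than block.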
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c	140
1 file changed, 76 insertions(+), 64 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 832c4183dccc..bdf6e13045ea 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -93,6 +93,27 @@ static inline bool compact_trylock_irqsave(spinlock_t *lock,
 	return compact_checklock_irqsave(lock, flags, false, cc);
 }
 
+/* Returns true if the page is within a block suitable for migration to */
+static bool suitable_migration_target(struct page *page)
+{
+	int migratetype = get_pageblock_migratetype(page);
+
+	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
+	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
+		return false;
+
+	/* If the page is a large free page, then allow migration */
+	if (PageBuddy(page) && page_order(page) >= pageblock_order)
+		return true;
+
+	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+	if (migrate_async_suitable(migratetype))
+		return true;
+
+	/* Otherwise skip the block */
+	return false;
+}
+
 static void compact_capture_page(struct compact_control *cc)
 {
 	unsigned long flags;
@@ -153,38 +174,56 @@ static void compact_capture_page(struct compact_control *cc)
  * pages inside of the pageblock (even though it may still end up isolating
  * some pages).
  */
-static unsigned long isolate_freepages_block(unsigned long blockpfn,
+static unsigned long isolate_freepages_block(struct compact_control *cc,
+				unsigned long blockpfn,
 				unsigned long end_pfn,
 				struct list_head *freelist,
 				bool strict)
 {
 	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
+	unsigned long nr_strict_required = end_pfn - blockpfn;
+	unsigned long flags;
+	bool locked = false;
 
 	cursor = pfn_to_page(blockpfn);
 
-	/* Isolate free pages. This assumes the block is valid */
+	/* Isolate free pages. */
 	for (; blockpfn < end_pfn; blockpfn++, cursor++) {
 		int isolated, i;
 		struct page *page = cursor;
 
-		if (!pfn_valid_within(blockpfn)) {
-			if (strict)
-				return 0;
-			continue;
-		}
 		nr_scanned++;
+		if (!pfn_valid_within(blockpfn))
+			continue;
+		if (!PageBuddy(page))
+			continue;
+
+		/*
+		 * The zone lock must be held to isolate freepages.
+		 * Unfortunately this is a very coarse lock and can be
+		 * heavily contended if there are parallel allocations
+		 * or parallel compactions. For async compaction do not
+		 * spin on the lock and we acquire the lock as late as
+		 * possible.
+		 */
+		locked = compact_checklock_irqsave(&cc->zone->lock, &flags,
+								locked, cc);
+		if (!locked)
+			break;
+
+		/* Recheck this is a suitable migration target under lock */
+		if (!strict && !suitable_migration_target(page))
+			break;
 
-		if (!PageBuddy(page)) {
-			if (strict)
-				return 0;
+		/* Recheck this is a buddy page under lock */
+		if (!PageBuddy(page))
 			continue;
-		}
 
 		/* Found a free page, break it into order-0 pages */
 		isolated = split_free_page(page);
 		if (!isolated && strict)
-			return 0;
+			break;
 		total_isolated += isolated;
 		for (i = 0; i < isolated; i++) {
 			list_add(&page->lru, freelist);
@@ -199,6 +238,18 @@ static unsigned long isolate_freepages_block(unsigned long blockpfn,
 	}
 
 	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
+
+	/*
+	 * If strict isolation is requested by CMA then check that all the
+	 * pages requested were isolated. If there were any failures, 0 is
+	 * returned and CMA will fail.
+	 */
+	if (strict && nr_strict_required != total_isolated)
+		total_isolated = 0;
+
+	if (locked)
+		spin_unlock_irqrestore(&cc->zone->lock, flags);
+
 	return total_isolated;
 }
 
@@ -218,12 +269,17 @@ static unsigned long isolate_freepages_block(unsigned long blockpfn,
 unsigned long
 isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long isolated, pfn, block_end_pfn, flags;
+	unsigned long isolated, pfn, block_end_pfn;
 	struct zone *zone = NULL;
 	LIST_HEAD(freelist);
 
+	/* cc needed for isolate_freepages_block to acquire zone->lock */
+	struct compact_control cc = {
+		.sync = true,
+	};
+
 	if (pfn_valid(start_pfn))
-		zone = page_zone(pfn_to_page(start_pfn));
+		cc.zone = zone = page_zone(pfn_to_page(start_pfn));
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
 		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
@@ -236,10 +292,8 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
-		spin_lock_irqsave(&zone->lock, flags);
-		isolated = isolate_freepages_block(pfn, block_end_pfn,
+		isolated = isolate_freepages_block(&cc, pfn, block_end_pfn,
 						   &freelist, true);
-		spin_unlock_irqrestore(&zone->lock, flags);
 
 		/*
 		 * In strict mode, isolate_freepages_block() returns 0 if
@@ -483,29 +537,6 @@ next_pageblock:
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
-
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
-{
-
-	int migratetype = get_pageblock_migratetype(page);
-
-	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
-	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-		return false;
-
-	/* If the page is a large free page, then allow migration */
-	if (PageBuddy(page) && page_order(page) >= pageblock_order)
-		return true;
-
-	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-	if (migrate_async_suitable(migratetype))
-		return true;
-
-	/* Otherwise skip the block */
-	return false;
-}
-
 /*
  * Returns the start pfn of the last page block in a zone. This is the starting
  * point for full compaction of a zone. Compaction searches for free pages from
@@ -529,7 +560,6 @@ static void isolate_freepages(struct zone *zone,
 {
 	struct page *page;
 	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
-	unsigned long flags;
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;
 
@@ -577,30 +607,12 @@ static void isolate_freepages(struct zone *zone,
 		if (!suitable_migration_target(page))
 			continue;
 
-		/*
-		 * Found a block suitable for isolating free pages from. Now
-		 * we disabled interrupts, double check things are ok and
-		 * isolate the pages. This is to minimise the time IRQs
-		 * are disabled
-		 */
+		/* Found a block suitable for isolating free pages from */
 		isolated = 0;
-
-		/*
-		 * The zone lock must be held to isolate freepages. This
-		 * unfortunately this is a very coarse lock and can be
-		 * heavily contended if there are parallel allocations
-		 * or parallel compactions. For async compaction do not
-		 * spin on the lock
-		 */
-		if (!compact_trylock_irqsave(&zone->lock, &flags, cc))
-			break;
-		if (suitable_migration_target(page)) {
-			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
-			isolated = isolate_freepages_block(pfn, end_pfn,
-							   freelist, false);
-			nr_freepages += isolated;
-		}
-		spin_unlock_irqrestore(&zone->lock, flags);
+		end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
+		isolated = isolate_freepages_block(cc, pfn, end_pfn,
+							freelist, false);
+		nr_freepages += isolated;
 
 		/*
 		 * Record the highest PFN we isolated pages from. When next