author	Vlastimil Babka <vbabka@suse.cz>	2014-10-09 18:27:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:25:54 -0400
commit	69b7189f12e0064237630e8c6bb64cad710bb268
tree	5d486d214c347cba828477f8597db05617515c12 /mm
parent	8b44d2791f912566a7ef58c71a7f9cbd16c3eeae
mm, compaction: skip rechecks when lock was already held
Compaction scanners try to take the zone locks as late as possible by checking many page or pageblock properties opportunistically without the lock and skipping them if not suitable.  For pages that pass the initial checks, some properties have to be checked again safely under lock.  However, if the lock was already held from a previous iteration in the initial checks, the rechecks are unnecessary.

This patch therefore skips the rechecks when the lock was already held.  This is now possible to do, since we don't (potentially) drop and reacquire the lock between the initial checks and the safe rechecks anymore.

Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Minchan Kim <minchan@kernel.org>
Acked-by: Mel Gorman <mgorman@suse.de>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
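The locking pattern the patch applies can be seen in isolation in the following minimal user-space C sketch. It is not kernel code: struct item, scan_items(), item_looks_free() and item_is_free() are hypothetical stand-ins for the pageblock/page checks, and pthread_mutex_trylock() stands in for compact_trylock_irqsave() on zone->lock. The point is that the cheap unlocked check runs on every iteration, while the safe recheck runs only on the iteration that actually acquires the lock; once the lock is held, later iterations skip the recheck entirely.

/* Minimal standalone sketch of "recheck only when the lock was just taken". */
#include <stdbool.h>
#include <pthread.h>

struct item { bool free; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Opportunistic check without the lock; may race with concurrent updates. */
static bool item_looks_free(const struct item *it) { return it->free; }
/* Authoritative check; caller must hold the lock. */
static bool item_is_free(const struct item *it)    { return it->free; }

static int scan_items(struct item *items, int n)
{
	bool locked = false;
	int isolated = 0;

	for (int i = 0; i < n; i++) {
		/* Cheap, unlocked check on every iteration. */
		if (!item_looks_free(&items[i]))
			continue;

		if (!locked) {
			/* Take the lock as late as possible; give up if contended. */
			if (pthread_mutex_trylock(&lock) != 0)
				break;
			locked = true;

			/*
			 * Recheck under the lock only on the iteration that
			 * acquired it. On later iterations the lock is still
			 * held, so the unlocked check above is already
			 * reliable and the recheck is skipped.
			 */
			if (!item_is_free(&items[i]))
				continue;
		}

		isolated++;
	}

	if (locked)
		pthread_mutex_unlock(&lock);
	return isolated;
}

int main(void)
{
	struct item items[4] = { { true }, { false }, { true }, { true } };
	return scan_items(items, 4) == 3 ? 0 : 1;
}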
Diffstat (limited to 'mm')
-rw-r--r--	mm/compaction.c	53
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 74770e40cfe5..5039c964f5c8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -367,22 +367,30 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 			goto isolate_fail;
 
 		/*
-		 * The zone lock must be held to isolate freepages.
-		 * Unfortunately this is a very coarse lock and can be
-		 * heavily contended if there are parallel allocations
-		 * or parallel compactions. For async compaction do not
-		 * spin on the lock and we acquire the lock as late as
-		 * possible.
+		 * If we already hold the lock, we can skip some rechecking.
+		 * Note that if we hold the lock now, checked_pageblock was
+		 * already set in some previous iteration (or strict is true),
+		 * so it is correct to skip the suitable migration target
+		 * recheck as well.
 		 */
-		if (!locked)
+		if (!locked) {
+			/*
+			 * The zone lock must be held to isolate freepages.
+			 * Unfortunately this is a very coarse lock and can be
+			 * heavily contended if there are parallel allocations
+			 * or parallel compactions. For async compaction do not
+			 * spin on the lock and we acquire the lock as late as
+			 * possible.
+			 */
 			locked = compact_trylock_irqsave(&cc->zone->lock,
 								&flags, cc);
-		if (!locked)
-			break;
+			if (!locked)
+				break;
 
-		/* Recheck this is a buddy page under lock */
-		if (!PageBuddy(page))
-			goto isolate_fail;
+			/* Recheck this is a buddy page under lock */
+			if (!PageBuddy(page))
+				goto isolate_fail;
+		}
 
 		/* Found a free page, break it into order-0 pages */
 		isolated = split_free_page(page);
@@ -644,19 +652,20 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		    page_count(page) > page_mapcount(page))
 			continue;
 
-		/* If the lock is not held, try to take it */
-		if (!locked)
+		/* If we already hold the lock, we can skip some rechecking */
+		if (!locked) {
 			locked = compact_trylock_irqsave(&zone->lru_lock,
 								&flags, cc);
-		if (!locked)
-			break;
+			if (!locked)
+				break;
 
-		/* Recheck PageLRU and PageTransHuge under lock */
-		if (!PageLRU(page))
-			continue;
-		if (PageTransHuge(page)) {
-			low_pfn += (1 << compound_order(page)) - 1;
-			continue;
+			/* Recheck PageLRU and PageTransHuge under lock */
+			if (!PageLRU(page))
+				continue;
+			if (PageTransHuge(page)) {
+				low_pfn += (1 << compound_order(page)) - 1;
+				continue;
+			}
 		}
 
 		lruvec = mem_cgroup_page_lruvec(page, zone);