diff options
Diffstat (limited to 'mm')
-rw-r--r-- | mm/compaction.c | 53 |
1 file changed, 31 insertions(+), 22 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 74770e40cfe5..5039c964f5c8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -367,22 +367,30 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
367 | goto isolate_fail; | 367 | goto isolate_fail; |
368 | 368 | ||
369 | /* | 369 | /* |
370 | * The zone lock must be held to isolate freepages. | 370 | * If we already hold the lock, we can skip some rechecking. |
371 | * Unfortunately this is a very coarse lock and can be | 371 | * Note that if we hold the lock now, checked_pageblock was |
372 | * heavily contended if there are parallel allocations | 372 | * already set in some previous iteration (or strict is true), |
373 | * or parallel compactions. For async compaction do not | 373 | * so it is correct to skip the suitable migration target |
374 | * spin on the lock and we acquire the lock as late as | 374 | * recheck as well. |
375 | * possible. | ||
376 | */ | 375 | */ |
377 | if (!locked) | 376 | if (!locked) { |
377 | /* | ||
378 | * The zone lock must be held to isolate freepages. | ||
379 | * Unfortunately this is a very coarse lock and can be | ||
380 | * heavily contended if there are parallel allocations | ||
381 | * or parallel compactions. For async compaction do not | ||
382 | * spin on the lock and we acquire the lock as late as | ||
383 | * possible. | ||
384 | */ | ||
378 | locked = compact_trylock_irqsave(&cc->zone->lock, | 385 | locked = compact_trylock_irqsave(&cc->zone->lock, |
379 | &flags, cc); | 386 | &flags, cc); |
380 | if (!locked) | 387 | if (!locked) |
381 | break; | 388 | break; |
382 | 389 | ||
383 | /* Recheck this is a buddy page under lock */ | 390 | /* Recheck this is a buddy page under lock */ |
384 | if (!PageBuddy(page)) | 391 | if (!PageBuddy(page)) |
385 | goto isolate_fail; | 392 | goto isolate_fail; |
393 | } | ||
386 | 394 | ||
387 | /* Found a free page, break it into order-0 pages */ | 395 | /* Found a free page, break it into order-0 pages */ |
388 | isolated = split_free_page(page); | 396 | isolated = split_free_page(page); |
@@ -644,19 +652,20 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
644 | page_count(page) > page_mapcount(page)) | 652 | page_count(page) > page_mapcount(page)) |
645 | continue; | 653 | continue; |
646 | 654 | ||
647 | /* If the lock is not held, try to take it */ | 655 | /* If we already hold the lock, we can skip some rechecking */ |
648 | if (!locked) | 656 | if (!locked) { |
649 | locked = compact_trylock_irqsave(&zone->lru_lock, | 657 | locked = compact_trylock_irqsave(&zone->lru_lock, |
650 | &flags, cc); | 658 | &flags, cc); |
651 | if (!locked) | 659 | if (!locked) |
652 | break; | 660 | break; |
653 | 661 | ||
654 | /* Recheck PageLRU and PageTransHuge under lock */ | 662 | /* Recheck PageLRU and PageTransHuge under lock */ |
655 | if (!PageLRU(page)) | 663 | if (!PageLRU(page)) |
656 | continue; | 664 | continue; |
657 | if (PageTransHuge(page)) { | 665 | if (PageTransHuge(page)) { |
658 | low_pfn += (1 << compound_order(page)) - 1; | 666 | low_pfn += (1 << compound_order(page)) - 1; |
659 | continue; | 667 | continue; |
668 | } | ||
660 | } | 669 | } |
661 | 670 | ||
662 | lruvec = mem_cgroup_page_lruvec(page, zone); | 671 | lruvec = mem_cgroup_page_lruvec(page, zone); |