Diffstat (limited to 'mm/compaction.c')
 mm/compaction.c | 142 ++++++++++++++++++++++++++++++++++++++++++-----------
 1 file changed, 119 insertions(+), 23 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index da7d35ea510..840ee288e29 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -235,7 +235,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	 */
 	while (unlikely(too_many_isolated(zone))) {
 		/* async migration should just abort */
-		if (!cc->sync)
+		if (cc->mode != COMPACT_SYNC)
 			return 0;
 
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -303,7 +303,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		 * satisfies the allocation
 		 */
 		pageblock_nr = low_pfn >> pageblock_order;
-		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
+		if (cc->mode != COMPACT_SYNC &&
+		    last_pageblock_nr != pageblock_nr &&
 		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			low_pfn += pageblock_nr_pages;
 			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
@@ -324,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			continue;
 		}
 
-		if (!cc->sync)
+		if (cc->mode != COMPACT_SYNC)
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
 		/* Try isolate the page */
@@ -357,27 +358,90 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
+/*
+ * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
+ * converted to MIGRATE_MOVABLE type, false otherwise.
+ */
+static bool rescue_unmovable_pageblock(struct page *page)
+{
+	unsigned long pfn, start_pfn, end_pfn;
+	struct page *start_page, *end_page;
+
+	pfn = page_to_pfn(page);
+	start_pfn = pfn & ~(pageblock_nr_pages - 1);
+	end_pfn = start_pfn + pageblock_nr_pages;
+
+	start_page = pfn_to_page(start_pfn);
+	end_page = pfn_to_page(end_pfn);
+
+	/* Do not deal with pageblocks that overlap zones */
+	if (page_zone(start_page) != page_zone(end_page))
+		return false;
+
+	for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
+								  page++) {
+		if (!pfn_valid_within(pfn))
+			continue;
+
+		if (PageBuddy(page)) {
+			int order = page_order(page);
+
+			pfn += (1 << order) - 1;
+			page += (1 << order) - 1;
+
+			continue;
+		} else if (page_count(page) == 0 || PageLRU(page))
+			continue;
+
+		return false;
+	}
+
+	set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+	move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
+	return true;
+}
 
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
+enum smt_result {
+	GOOD_AS_MIGRATION_TARGET,
+	FAIL_UNMOVABLE_TARGET,
+	FAIL_BAD_TARGET,
+};
+
+/*
+ * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
+ * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
+ * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
+ */
+static enum smt_result suitable_migration_target(struct page *page,
+				struct compact_control *cc)
 {
 
 	int migratetype = get_pageblock_migratetype(page);
 
 	/* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
 	if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-		return false;
+		return FAIL_BAD_TARGET;
 
 	/* If the page is a large free page, then allow migration */
 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
-		return true;
+		return GOOD_AS_MIGRATION_TARGET;
 
 	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-	if (migrate_async_suitable(migratetype))
-		return true;
+	if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
+	    migrate_async_suitable(migratetype))
+		return GOOD_AS_MIGRATION_TARGET;
+
+	if (cc->mode == COMPACT_ASYNC_MOVABLE &&
+	    migratetype == MIGRATE_UNMOVABLE)
+		return FAIL_UNMOVABLE_TARGET;
+
+	if (cc->mode != COMPACT_ASYNC_MOVABLE &&
+	    migratetype == MIGRATE_UNMOVABLE &&
+	    rescue_unmovable_pageblock(page))
+		return GOOD_AS_MIGRATION_TARGET;
 
 	/* Otherwise skip the block */
-	return false;
+	return FAIL_BAD_TARGET;
 }
 
 /*
@@ -411,6 +475,13 @@ static void isolate_freepages(struct zone *zone,
 	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
 	/*
+	 * isolate_freepages() may be called more than once during
+	 * compact_zone_order() run and we want only the most recent
+	 * count.
+	 */
+	cc->nr_pageblocks_skipped = 0;
+
+	/*
 	 * Isolate free pages until enough are available to migrate the
 	 * pages on cc->migratepages. We stop searching if the migrate
 	 * and free page scanners meet or enough free pages are isolated.
@@ -418,6 +489,7 @@ static void isolate_freepages(struct zone *zone,
 	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
+		enum smt_result ret;
 
 		if (!pfn_valid(pfn))
 			continue;
@@ -434,9 +506,12 @@ static void isolate_freepages(struct zone *zone,
 			continue;
 
 		/* Check the block is suitable for migration */
-		if (!suitable_migration_target(page))
+		ret = suitable_migration_target(page, cc);
+		if (ret != GOOD_AS_MIGRATION_TARGET) {
+			if (ret == FAIL_UNMOVABLE_TARGET)
+				cc->nr_pageblocks_skipped++;
 			continue;
-
+		}
 		/*
 		 * Found a block suitable for isolating free pages from. Now
 		 * we disabled interrupts, double check things are ok and
@@ -445,12 +520,14 @@ static void isolate_freepages(struct zone *zone,
 		 */
 		isolated = 0;
 		spin_lock_irqsave(&zone->lock, flags);
-		if (suitable_migration_target(page)) {
+		ret = suitable_migration_target(page, cc);
+		if (ret == GOOD_AS_MIGRATION_TARGET) {
 			end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
 			isolated = isolate_freepages_block(pfn, end_pfn,
 							   freelist, false);
 			nr_freepages += isolated;
-		}
+		} else if (ret == FAIL_UNMOVABLE_TARGET)
+			cc->nr_pageblocks_skipped++;
 		spin_unlock_irqrestore(&zone->lock, flags);
 
 		/*
@@ -682,8 +759,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 		nr_migrate = cc->nr_migratepages;
 		err = migrate_pages(&cc->migratepages, compaction_alloc,
-				(unsigned long)cc, false,
-				cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+				(unsigned long)&cc->freepages, false,
+				(cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
+							   : MIGRATE_ASYNC);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
@@ -712,7 +790,8 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
 				 int order, gfp_t gfp_mask,
-				 bool sync)
+				 enum compact_mode mode,
+				 unsigned long *nr_pageblocks_skipped)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -720,12 +799,17 @@ static unsigned long compact_zone_order(struct zone *zone,
 		.order = order,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
-		.sync = sync,
+		.mode = mode,
 	};
+	unsigned long rc;
+
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
 
-	return compact_zone(zone, &cc);
+	rc = compact_zone(zone, &cc);
+	*nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
+
+	return rc;
 }
 
 int sysctl_extfrag_threshold = 500;
@@ -750,6 +834,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	struct zoneref *z;
 	struct zone *zone;
 	int rc = COMPACT_SKIPPED;
+	unsigned long nr_pageblocks_skipped;
+	enum compact_mode mode;
 
 	/*
 	 * Check whether it is worth even starting compaction. The order check is
@@ -766,12 +852,22 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 								nodemask) {
 		int status;
 
-		status = compact_zone_order(zone, order, gfp_mask, sync);
+		mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
+retry:
+		status = compact_zone_order(zone, order, gfp_mask, mode,
+						&nr_pageblocks_skipped);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
 		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
 			break;
+
+		if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
+			if (nr_pageblocks_skipped) {
+				mode = COMPACT_ASYNC_UNMOVABLE;
+				goto retry;
+			}
+		}
 	}
 
 	return rc;
@@ -805,7 +901,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
 		if (ok && cc->order > zone->compact_order_failed)
 			zone->compact_order_failed = cc->order + 1;
 		/* Currently async compaction is never deferred. */
-		else if (!ok && cc->sync)
+		else if (!ok && cc->mode == COMPACT_SYNC)
 			defer_compaction(zone, cc->order);
 	}
 
@@ -820,7 +916,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 {
 	struct compact_control cc = {
 		.order = order,
-		.sync = false,
+		.mode = COMPACT_ASYNC_MOVABLE,
 	};
 
 	return __compact_pgdat(pgdat, &cc);
@@ -830,7 +926,7 @@ static int compact_node(int nid)
 {
 	struct compact_control cc = {
 		.order = -1,
-		.sync = true,
+		.mode = COMPACT_SYNC,
 	};
 
 	return __compact_pgdat(NODE_DATA(nid), &cc);
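The retry flow this patch adds to try_to_compact_pages() is easiest to see end to end. Below is a minimal userspace sketch, not kernel code: it models how a completed COMPACT_ASYNC_MOVABLE pass that reports skipped MIGRATE_UNMOVABLE pageblocks is retried once in COMPACT_ASYNC_UNMOVABLE mode. The zone scan is stubbed out; compact_zone_order_stub() and its pageblock counts are hypothetical stand-ins, and only the enum values and the retry logic mirror the patch.

#include <stdbool.h>
#include <stdio.h>

/* Mirror of the enum the patch introduces. */
enum compact_mode {
	COMPACT_ASYNC_MOVABLE,
	COMPACT_ASYNC_UNMOVABLE,
	COMPACT_SYNC,
};

/* Stand-ins for the kernel's compaction status codes. */
enum { COMPACT_SKIPPED, COMPACT_PARTIAL, COMPACT_COMPLETE };

/*
 * Stubbed zone pass: pretend the zone holds three MIGRATE_UNMOVABLE
 * pageblocks. A movable-only pass must skip and count them; an
 * unmovable-mode pass would instead try rescue_unmovable_pageblock()
 * on each, so it reports none skipped.
 */
static int compact_zone_order_stub(enum compact_mode mode,
				   unsigned long *nr_pageblocks_skipped)
{
	*nr_pageblocks_skipped = (mode == COMPACT_ASYNC_MOVABLE) ? 3 : 0;
	return COMPACT_COMPLETE;
}

int main(void)
{
	bool sync = false;	/* async allocation path */
	unsigned long nr_pageblocks_skipped;
	int status, rc = COMPACT_SKIPPED;
	enum compact_mode mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;

retry:
	status = compact_zone_order_stub(mode, &nr_pageblocks_skipped);
	if (status > rc)	/* rc = max(status, rc) */
		rc = status;

	/*
	 * The escalation added by the patch: a "complete" async pass
	 * that still skipped unmovable pageblocks is retried once in
	 * COMPACT_ASYNC_UNMOVABLE mode.
	 */
	if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE &&
	    nr_pageblocks_skipped) {
		mode = COMPACT_ASYNC_UNMOVABLE;
		goto retry;
	}

	printf("finished in mode %d, rc %d\n", mode, rc);
	return 0;
}

Note that the escalation fires at most once per zone: the second pass already runs with mode == COMPACT_ASYNC_MOVABLE ruled out, so the retry condition cannot trigger again.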