author     Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>   2012-05-29 18:06:37 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>         2012-05-29 19:22:22 -0400
commit     5ceb9ce6fe9462a298bb2cd5c9f1ca6cb80a0199 (patch)
tree       52a6d3c27eceb8848a8e8d7e3de9e39ff8cc680c
parent     238305bb4d418c95977162ba13c11880685fc731 (diff)
mm: compaction: handle incorrect MIGRATE_UNMOVABLE type pageblocks
When MIGRATE_UNMOVABLE pages are freed from a MIGRATE_UNMOVABLE type pageblock (and some MIGRATE_MOVABLE pages are left in it), waiting until an allocation takes ownership of the block may take too long.  The type of the pageblock remains unchanged, so the pageblock cannot be used as a migration target during compaction.

Fix it by:

* Adding enum compact_mode (COMPACT_ASYNC_[MOVABLE,UNMOVABLE] and COMPACT_SYNC) and converting the sync field in struct compact_control to use it.

* Adding an nr_pageblocks_skipped field to struct compact_control and tracking how many destination pageblocks were of MIGRATE_UNMOVABLE type.  If COMPACT_ASYNC_MOVABLE mode compaction ran fully in try_to_compact_pages() (COMPACT_COMPLETE), it implies that there is no suitable page for the allocation.  In this case, check whether any MIGRATE_UNMOVABLE pageblocks were skipped and, if so, try a second pass in COMPACT_ASYNC_UNMOVABLE mode.

* Scanning the MIGRATE_UNMOVABLE pageblocks (during COMPACT_SYNC and COMPACT_ASYNC_UNMOVABLE compaction modes) and building a count based on finding PageBuddy pages, pages with page_count(page) == 0, or PageLRU pages.  If all pages within the MIGRATE_UNMOVABLE pageblock fall into one of those three sets, change the whole pageblock type to MIGRATE_MOVABLE.

My particular test case (on an ARM EXYNOS4 device with 512 MiB, which means 131072 standard 4 KiB pages in the 'Normal' zone) is to:

- allocate 120000 pages for kernel's usage
- free every second page (60000 pages) of the memory just allocated
- allocate and use 60000 pages from user space
- free the remaining 60000 pages of kernel memory
  (now we have fragmented memory occupied mostly by user space pages)
- try to allocate 100 order-9 (2048 KiB) pages for kernel's usage

The results:

- with compaction disabled I get 11 successful allocations
- with compaction enabled - 14 successful allocations
- with this patch I'm able to get all 100 successful allocations

NOTE: If we can make kswapd aware of order-0 requests during compaction, we can enhance kswapd with changing mode to COMPACT_ASYNC_FULL (COMPACT_ASYNC_MOVABLE + COMPACT_ASYNC_UNMOVABLE).  Please see the following thread: http://marc.info/?l=linux-mm&m=133552069417068&w=2

[minchan@kernel.org: minor cleanups]
Cc: Mel Gorman <mgorman@suse.de>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/compaction.h |  19
-rw-r--r--  mm/compaction.c            | 142
-rw-r--r--  mm/internal.h              |   9
-rw-r--r--  mm/page_alloc.c            |   8
4 files changed, 150 insertions(+), 28 deletions(-)
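Before the diff itself, here is a minimal, self-contained sketch (not the kernel code) of the two-pass retry this patch adds to try_to_compact_pages(): the async-movable pass reports how many MIGRATE_UNMOVABLE destination pageblocks it skipped, and if a fully completed pass still skipped some, one more pass runs in COMPACT_ASYNC_UNMOVABLE mode.  run_compaction() and compact_one_zone() below are hypothetical stand-ins for compact_zone_order() and the per-zone loop, invented only for illustration; just the retry control flow mirrors the actual change.

/* Standalone model of the two-pass compaction retry; compiles as plain C. */
#include <stdio.h>

#define COMPACT_COMPLETE        3

enum compact_mode {
        COMPACT_ASYNC_MOVABLE,
        COMPACT_ASYNC_UNMOVABLE,
        COMPACT_SYNC,
};

/*
 * Stand-in for compact_zone_order(): pretend the first async-movable pass
 * completes but has to skip two MIGRATE_UNMOVABLE destination pageblocks.
 */
static int run_compaction(enum compact_mode mode,
                          unsigned long *nr_pageblocks_skipped)
{
        *nr_pageblocks_skipped = (mode == COMPACT_ASYNC_MOVABLE) ? 2 : 0;
        return COMPACT_COMPLETE;
}

static int compact_one_zone(int sync)
{
        enum compact_mode mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
        unsigned long nr_pageblocks_skipped;
        int rc;

retry:
        rc = run_compaction(mode, &nr_pageblocks_skipped);

        /*
         * A fully completed async-movable pass that still skipped
         * MIGRATE_UNMOVABLE pageblocks gets exactly one more pass; that
         * pass may rescue those blocks by converting them to
         * MIGRATE_MOVABLE.
         */
        if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE &&
            nr_pageblocks_skipped) {
                mode = COMPACT_ASYNC_UNMOVABLE;
                goto retry;
        }

        return rc;
}

int main(void)
{
        printf("compaction result: %d\n", compact_one_zone(0));
        return 0;
}

Built with any C compiler, the model retries once in COMPACT_ASYNC_UNMOVABLE mode and then stops, which is the bounded two-pass behaviour the patched kernel code relies on to avoid looping.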
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 51a90b7f2d60..e988037abd2a 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,6 +1,8 @@
 #ifndef _LINUX_COMPACTION_H
 #define _LINUX_COMPACTION_H
 
+#include <linux/node.h>
+
 /* Return values for compact_zone() and try_to_compact_pages() */
 /* compaction didn't start as it was not possible or direct reclaim was more suitable */
 #define COMPACT_SKIPPED         0
@@ -11,6 +13,23 @@
 /* The full zone was compacted */
 #define COMPACT_COMPLETE        3
 
+/*
+ * compaction supports three modes
+ *
+ * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
+ *    MIGRATE_MOVABLE pageblocks as migration sources and targets.
+ * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
+ *    MIGRATE_MOVABLE pageblocks as migration sources.
+ *    MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
+ *    targets and converts them to MIGRATE_MOVABLE if possible
+ * COMPACT_SYNC uses synchronous migration and scans all pageblocks
+ */
+enum compact_mode {
+        COMPACT_ASYNC_MOVABLE,
+        COMPACT_ASYNC_UNMOVABLE,
+        COMPACT_SYNC,
+};
+
 #ifdef CONFIG_COMPACTION
 extern int sysctl_compact_memory;
 extern int sysctl_compaction_handler(struct ctl_table *table, int write,
diff --git a/mm/compaction.c b/mm/compaction.c
index da7d35ea5103..840ee288e296 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -235,7 +235,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
          */
         while (unlikely(too_many_isolated(zone))) {
                 /* async migration should just abort */
-                if (!cc->sync)
+                if (cc->mode != COMPACT_SYNC)
                         return 0;
 
                 congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -303,7 +303,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                  * satisfies the allocation
                  */
                 pageblock_nr = low_pfn >> pageblock_order;
-                if (!cc->sync && last_pageblock_nr != pageblock_nr &&
+                if (cc->mode != COMPACT_SYNC &&
+                    last_pageblock_nr != pageblock_nr &&
                     !migrate_async_suitable(get_pageblock_migratetype(page))) {
                         low_pfn += pageblock_nr_pages;
                         low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
@@ -324,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                         continue;
                 }
 
-                if (!cc->sync)
+                if (cc->mode != COMPACT_SYNC)
                         mode |= ISOLATE_ASYNC_MIGRATE;
 
                 /* Try isolate the page */
@@ -357,27 +358,90 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 
 #endif /* CONFIG_COMPACTION || CONFIG_CMA */
 #ifdef CONFIG_COMPACTION
+/*
+ * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
+ * converted to MIGRATE_MOVABLE type, false otherwise.
+ */
+static bool rescue_unmovable_pageblock(struct page *page)
+{
+        unsigned long pfn, start_pfn, end_pfn;
+        struct page *start_page, *end_page;
+
+        pfn = page_to_pfn(page);
+        start_pfn = pfn & ~(pageblock_nr_pages - 1);
+        end_pfn = start_pfn + pageblock_nr_pages;
+
+        start_page = pfn_to_page(start_pfn);
+        end_page = pfn_to_page(end_pfn);
+
+        /* Do not deal with pageblocks that overlap zones */
+        if (page_zone(start_page) != page_zone(end_page))
+                return false;
+
+        for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
+                                                                   page++) {
+                if (!pfn_valid_within(pfn))
+                        continue;
+
+                if (PageBuddy(page)) {
+                        int order = page_order(page);
+
+                        pfn += (1 << order) - 1;
+                        page += (1 << order) - 1;
+
+                        continue;
+                } else if (page_count(page) == 0 || PageLRU(page))
+                        continue;
+
+                return false;
+        }
+
+        set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+        move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
+        return true;
+}
 
-/* Returns true if the page is within a block suitable for migration to */
-static bool suitable_migration_target(struct page *page)
+enum smt_result {
+        GOOD_AS_MIGRATION_TARGET,
+        FAIL_UNMOVABLE_TARGET,
+        FAIL_BAD_TARGET,
+};
+
+/*
+ * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
+ * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
+ * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
+ */
+static enum smt_result suitable_migration_target(struct page *page,
+                                struct compact_control *cc)
 {
 
         int migratetype = get_pageblock_migratetype(page);
 
         /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
         if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
-                return false;
+                return FAIL_BAD_TARGET;
 
         /* If the page is a large free page, then allow migration */
         if (PageBuddy(page) && page_order(page) >= pageblock_order)
-                return true;
+                return GOOD_AS_MIGRATION_TARGET;
 
         /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
-        if (migrate_async_suitable(migratetype))
-                return true;
+        if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
+            migrate_async_suitable(migratetype))
+                return GOOD_AS_MIGRATION_TARGET;
+
+        if (cc->mode == COMPACT_ASYNC_MOVABLE &&
+            migratetype == MIGRATE_UNMOVABLE)
+                return FAIL_UNMOVABLE_TARGET;
+
+        if (cc->mode != COMPACT_ASYNC_MOVABLE &&
+            migratetype == MIGRATE_UNMOVABLE &&
+            rescue_unmovable_pageblock(page))
+                return GOOD_AS_MIGRATION_TARGET;
 
         /* Otherwise skip the block */
-        return false;
+        return FAIL_BAD_TARGET;
 }
 
 /*
@@ -411,6 +475,13 @@ static void isolate_freepages(struct zone *zone,
         zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 
         /*
+         * isolate_freepages() may be called more than once during
+         * compact_zone_order() run and we want only the most recent
+         * count.
+         */
+        cc->nr_pageblocks_skipped = 0;
+
+        /*
          * Isolate free pages until enough are available to migrate the
          * pages on cc->migratepages. We stop searching if the migrate
          * and free page scanners meet or enough free pages are isolated.
@@ -418,6 +489,7 @@ static void isolate_freepages(struct zone *zone,
         for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
                                         pfn -= pageblock_nr_pages) {
                 unsigned long isolated;
+                enum smt_result ret;
 
                 if (!pfn_valid(pfn))
                         continue;
@@ -434,9 +506,12 @@ static void isolate_freepages(struct zone *zone,
                         continue;
 
                 /* Check the block is suitable for migration */
-                if (!suitable_migration_target(page))
+                ret = suitable_migration_target(page, cc);
+                if (ret != GOOD_AS_MIGRATION_TARGET) {
+                        if (ret == FAIL_UNMOVABLE_TARGET)
+                                cc->nr_pageblocks_skipped++;
                         continue;
-
+                }
                 /*
                  * Found a block suitable for isolating free pages from. Now
                  * we disabled interrupts, double check things are ok and
@@ -445,12 +520,14 @@ static void isolate_freepages(struct zone *zone,
                  */
                 isolated = 0;
                 spin_lock_irqsave(&zone->lock, flags);
-                if (suitable_migration_target(page)) {
+                ret = suitable_migration_target(page, cc);
+                if (ret == GOOD_AS_MIGRATION_TARGET) {
                         end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
                         isolated = isolate_freepages_block(pfn, end_pfn,
                                                            freelist, false);
                         nr_freepages += isolated;
-                }
+                } else if (ret == FAIL_UNMOVABLE_TARGET)
+                        cc->nr_pageblocks_skipped++;
                 spin_unlock_irqrestore(&zone->lock, flags);
 
                 /*
@@ -682,8 +759,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
                 nr_migrate = cc->nr_migratepages;
                 err = migrate_pages(&cc->migratepages, compaction_alloc,
-                                (unsigned long)cc, false,
-                                cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
+                                (unsigned long)&cc->freepages, false,
+                                (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
+                                                           : MIGRATE_ASYNC);
                 update_nr_listpages(cc);
                 nr_remaining = cc->nr_migratepages;
 
@@ -712,7 +790,8 @@ out:
 
 static unsigned long compact_zone_order(struct zone *zone,
                                  int order, gfp_t gfp_mask,
-                                 bool sync)
+                                 enum compact_mode mode,
+                                 unsigned long *nr_pageblocks_skipped)
 {
         struct compact_control cc = {
                 .nr_freepages = 0,
@@ -720,12 +799,17 @@ static unsigned long compact_zone_order(struct zone *zone,
                 .order = order,
                 .migratetype = allocflags_to_migratetype(gfp_mask),
                 .zone = zone,
-                .sync = sync,
+                .mode = mode,
         };
+        unsigned long rc;
+
         INIT_LIST_HEAD(&cc.freepages);
         INIT_LIST_HEAD(&cc.migratepages);
 
-        return compact_zone(zone, &cc);
+        rc = compact_zone(zone, &cc);
+        *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
+
+        return rc;
 }
 
 int sysctl_extfrag_threshold = 500;
@@ -750,6 +834,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
         struct zoneref *z;
         struct zone *zone;
         int rc = COMPACT_SKIPPED;
+        unsigned long nr_pageblocks_skipped;
+        enum compact_mode mode;
 
         /*
          * Check whether it is worth even starting compaction. The order check is
@@ -766,12 +852,22 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
                                                                 nodemask) {
                 int status;
 
-                status = compact_zone_order(zone, order, gfp_mask, sync);
+                mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
+retry:
+                status = compact_zone_order(zone, order, gfp_mask, mode,
+                                            &nr_pageblocks_skipped);
                 rc = max(status, rc);
 
                 /* If a normal allocation would succeed, stop compacting */
                 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
                         break;
+
+                if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
+                        if (nr_pageblocks_skipped) {
+                                mode = COMPACT_ASYNC_UNMOVABLE;
+                                goto retry;
+                        }
+                }
         }
 
         return rc;
@@ -805,7 +901,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                 if (ok && cc->order > zone->compact_order_failed)
                         zone->compact_order_failed = cc->order + 1;
                 /* Currently async compaction is never deferred. */
-                else if (!ok && cc->sync)
+                else if (!ok && cc->mode == COMPACT_SYNC)
                         defer_compaction(zone, cc->order);
         }
 
@@ -820,7 +916,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
 {
         struct compact_control cc = {
                 .order = order,
-                .sync = false,
+                .mode = COMPACT_ASYNC_MOVABLE,
         };
 
         return __compact_pgdat(pgdat, &cc);
@@ -830,7 +926,7 @@ static int compact_node(int nid)
 {
         struct compact_control cc = {
                 .order = -1,
-                .sync = true,
+                .mode = COMPACT_SYNC,
         };
 
         return __compact_pgdat(NODE_DATA(nid), &cc);
diff --git a/mm/internal.h b/mm/internal.h
index 8b0fc8da8028..4194ab9dc19b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -94,6 +94,9 @@ extern void putback_lru_page(struct page *page);
 /*
  * in mm/page_alloc.c
  */
+extern void set_pageblock_migratetype(struct page *page, int migratetype);
+extern int move_freepages_block(struct zone *zone, struct page *page,
+                                int migratetype);
 extern void __free_pages_bootmem(struct page *page, unsigned int order);
 extern void prep_compound_page(struct page *page, unsigned long order);
 #ifdef CONFIG_MEMORY_FAILURE
@@ -101,6 +104,7 @@ extern bool is_free_buddy_page(struct page *page);
 #endif
 
 #if defined CONFIG_COMPACTION || defined CONFIG_CMA
+#include <linux/compaction.h>
 
 /*
  * in mm/compaction.c
@@ -119,11 +123,14 @@ struct compact_control {
         unsigned long nr_migratepages;  /* Number of pages to migrate */
         unsigned long free_pfn;         /* isolate_freepages search base */
         unsigned long migrate_pfn;      /* isolate_migratepages search base */
-        bool sync;                      /* Synchronous migration */
+        enum compact_mode mode;         /* Compaction mode */
 
         int order;                      /* order a direct compactor needs */
         int migratetype;                /* MOVABLE, RECLAIMABLE etc */
         struct zone *zone;
+
+        /* Number of UNMOVABLE destination pageblocks skipped during scan */
+        unsigned long nr_pageblocks_skipped;
 };
 
 unsigned long
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 84f2c599d5d4..457b4de122f4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
 
 int page_group_by_mobility_disabled __read_mostly;
 
-static void set_pageblock_migratetype(struct page *page, int migratetype)
+void set_pageblock_migratetype(struct page *page, int migratetype)
 {
 
         if (unlikely(page_group_by_mobility_disabled))
@@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone,
         return pages_moved;
 }
 
-static int move_freepages_block(struct zone *zone, struct page *page,
-                                int migratetype)
+int move_freepages_block(struct zone *zone, struct page *page,
+                         int migratetype)
 {
         unsigned long start_pfn, end_pfn;
         struct page *start_page, *end_page;
@@ -5657,7 +5657,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
                 .nr_migratepages = 0,
                 .order = -1,
                 .zone = page_zone(pfn_to_page(start)),
-                .sync = true,
+                .mode = COMPACT_SYNC,
         };
         INIT_LIST_HEAD(&cc.migratepages);
 