author    Mel Gorman <mel@csn.ul.ie>  2011-01-13 18:45:57 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 20:32:34 -0500
commit    77f1fe6b08b13a87391549c8a820ddc817b6f50e (patch)
tree      720865bd0994da3787b6f37d33b2ee4c26a2de6c
parent    3e7d344970673c5334cf7b5bb27c8c0942b06126 (diff)
mm: migration: allow migration to operate asynchronously and avoid synchronous compaction in the faster path
Migration synchronously waits for writeback if the initial passes fail.
Callers of memory compaction do not necessarily want this behaviour if the
caller is latency sensitive or expects that synchronous migration is not
going to have a significantly better success rate.

This patch adds a sync parameter to migrate_pages() allowing the caller to
indicate if wait_on_page_writeback() is allowed within migration or not.
For reclaim/compaction, try_to_compact_pages() is first called
asynchronously, direct reclaim runs and then try_to_compact_pages() is
called synchronously as there is a greater expectation that it'll succeed.

[akpm@linux-foundation.org: build/merge fix]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
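In outline, the allocator slowpath behaviour after this patch is: one
asynchronous compaction pass, then direct reclaim, then a synchronous
compaction retry. A minimal userspace sketch of that ordering follows; the
*_model functions are illustrative stand-ins for the kernel paths, not
kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for try_to_compact_pages(); returns success. */
static bool compact_model(bool sync)
{
	printf("compaction pass, sync=%s\n", sync ? "true" : "false");
	return false;	/* pretend compaction freed no suitable page */
}

/* Illustrative stand-in for direct reclaim. */
static bool reclaim_model(void)
{
	printf("direct reclaim\n");
	return false;
}

int main(void)
{
	bool sync_migration = false;	/* mirrors __alloc_pages_slowpath() */

	/* The first compaction attempt is asynchronous: it never waits on
	 * page writeback, so a latency-sensitive caller is not stalled. */
	if (compact_model(sync_migration))
		return 0;
	sync_migration = true;

	/* After reclaim has run, a synchronous retry is more likely to
	 * succeed, so the potential stall is considered worth it. */
	if (reclaim_model())
		return 0;
	compact_model(sync_migration);
	return 0;
}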
 include/linux/compaction.h | 10
 include/linux/migrate.h    | 12
 mm/compaction.c            | 14
 mm/memory-failure.c        |  8
 mm/memory_hotplug.c        |  3
 mm/mempolicy.c             |  4
 mm/migrate.c               | 22
 mm/page_alloc.c            | 21
 mm/vmscan.c                |  3
 9 files changed, 63 insertions(+), 34 deletions(-)
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 2592883d862..72cba403478 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -21,10 +21,11 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
 
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *mask);
+			int order, gfp_t gfp_mask, nodemask_t *mask,
+			bool sync);
 extern unsigned long compaction_suitable(struct zone *zone, int order);
 extern unsigned long compact_zone_order(struct zone *zone, int order,
-			gfp_t gfp_mask);
+			gfp_t gfp_mask, bool sync);
 
 /* Do not skip compaction more than 64 times */
 #define COMPACT_MAX_DEFER_SHIFT 6
@@ -57,7 +58,8 @@ static inline bool compaction_deferred(struct zone *zone)
 
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
 {
 	return COMPACT_CONTINUE;
 }
@@ -68,7 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
 }
 
 static inline unsigned long compact_zone_order(struct zone *zone, int order,
-			gfp_t gfp_mask)
+			gfp_t gfp_mask, bool sync)
 {
 	return 0;
 }
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 085527fb826..fa31902803f 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -13,9 +13,11 @@ extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, int offlining,
+			bool sync);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, int offlining,
+			bool sync);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -33,9 +35,11 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, int offlining,
+		bool sync) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, int offlining,
+		bool sync) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }
diff --git a/mm/compaction.c b/mm/compaction.c
index 8fe917ec7c1..47fca106934 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -33,6 +33,7 @@ struct compact_control {
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
 
 	/* Account for isolated anon and file pages */
 	unsigned long nr_anon;
@@ -455,7 +456,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 		nr_migrate = cc->nr_migratepages;
 		migrate_pages(&cc->migratepages, compaction_alloc,
-				(unsigned long)cc, 0);
+				(unsigned long)cc, 0,
+				cc->sync);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
 
@@ -482,7 +484,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 }
 
 unsigned long compact_zone_order(struct zone *zone,
-				int order, gfp_t gfp_mask)
+				int order, gfp_t gfp_mask,
+				bool sync)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -490,6 +493,7 @@ unsigned long compact_zone_order(struct zone *zone,
 		.order = order,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
+		.sync = sync,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -505,11 +509,13 @@ int sysctl_extfrag_threshold = 500;
 * @order: The order of the current allocation
 * @gfp_mask: The GFP mask of the current allocation
 * @nodemask: The allowed nodes to allocate from
+ * @sync: Whether migration is synchronous or not
 *
 * This is the main entry point for direct page compaction.
 */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
@@ -533,7 +539,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 								nodemask) {
 		int status;
 
-		status = compact_zone_order(zone, order, gfp_mask);
+		status = compact_zone_order(zone, order, gfp_mask, sync);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
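The pattern above in miniature: the flag is stored in the control structure
once, so compact_zone() and its callees never need an extra parameter. A
compilable userspace model (all *_model types and names are illustrative,
not the kernel's):

#include <stdbool.h>
#include <stdio.h>

/* Userspace model of struct compact_control carrying the sync flag. */
struct compact_control_model {
	int order;
	bool sync;		/* Synchronous migration */
};

static void compact_zone_model(const struct compact_control_model *cc)
{
	/* The inner loop reads cc->sync rather than taking a new argument. */
	printf("migrate_pages(..., sync=%s)\n", cc->sync ? "true" : "false");
}

static void compact_zone_order_model(int order, bool sync)
{
	struct compact_control_model cc = {
		.order = order,
		.sync = sync,	/* plumbed exactly once, at construction */
	};
	compact_zone_model(&cc);
}

int main(void)
{
	compact_zone_order_model(3, false);	/* asynchronous pass */
	compact_zone_order_model(3, true);	/* synchronous retry */
	return 0;
}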
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 46ab2c044b0..2323a8039a9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1290,9 +1290,10 @@ static int soft_offline_huge_page(struct page *page, int flags)
 	/* Keep page count to indicate a given hugepage is isolated. */
 
 	list_add(&hpage->lru, &pagelist);
-	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+	ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
+				true);
 	if (ret) {
 		putback_lru_pages(&pagelist);
 		pr_debug("soft offline: %#lx: migration failed %d, type %lx\n",
 			pfn, ret, page->flags);
 		if (ret > 0)
@@ -1413,7 +1414,8 @@ int soft_offline_page(struct page *page, int flags)
 	LIST_HEAD(pagelist);
 
 	list_add(&page->lru, &pagelist);
-	ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0);
+	ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
+							0, true);
 	if (ret) {
 		pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
 			pfn, ret, page->flags);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 2c6523af547..584fc5588fd 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -733,7 +733,8 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			goto out;
 		}
 		/* this function returns # of failed pages */
-		ret = migrate_pages(&source, hotremove_migrate_alloc, 0, 1);
+		ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
+								1, true);
 		if (ret)
 			putback_lru_pages(&source);
 	}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 11ff260fb28..9db27459308 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -935,7 +935,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
 		return PTR_ERR(vma);
 
 	if (!list_empty(&pagelist)) {
-		err = migrate_pages(&pagelist, new_node_page, dest, 0);
+		err = migrate_pages(&pagelist, new_node_page, dest, 0, true);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
@@ -1155,7 +1155,7 @@ static long do_mbind(unsigned long start, unsigned long len,
 
 	if (!list_empty(&pagelist)) {
 		nr_failed = migrate_pages(&pagelist, new_vma_page,
-						(unsigned long)vma, 0);
+					(unsigned long)vma, 0, true);
 		if (nr_failed)
 			putback_lru_pages(&pagelist);
 	}
diff --git a/mm/migrate.c b/mm/migrate.c
index 94875b26592..dc47f6c4035 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -614,7 +614,7 @@ static int move_to_new_page(struct page *newpage, struct page *page,
 * to the newly allocated page in newpage.
 */
 static int unmap_and_move(new_page_t get_new_page, unsigned long private,
-			struct page *page, int force, int offlining)
+			struct page *page, int force, int offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -682,7 +682,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
 	BUG_ON(charge);
 
 	if (PageWriteback(page)) {
-		if (!force)
+		if (!force || !sync)
 			goto uncharge;
 		wait_on_page_writeback(page);
 	}
@@ -827,7 +827,7 @@ move_newpage:
 */
 static int unmap_and_move_huge_page(new_page_t get_new_page,
 				unsigned long private, struct page *hpage,
-				int force, int offlining)
+				int force, int offlining, bool sync)
 {
 	int rc = 0;
 	int *result = NULL;
@@ -841,7 +841,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	rc = -EAGAIN;
 
 	if (!trylock_page(hpage)) {
-		if (!force)
+		if (!force || !sync)
 			goto out;
 		lock_page(hpage);
 	}
@@ -909,7 +909,8 @@ out:
 * Return: Number of pages not migrated or error code.
 */
 int migrate_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, int offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -929,7 +930,8 @@ int migrate_pages(struct list_head *from,
 			cond_resched();
 
 			rc = unmap_and_move(get_new_page, private,
-						page, pass > 2, offlining);
+						page, pass > 2, offlining,
+						sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -958,7 +960,8 @@ out:
 }
 
 int migrate_huge_pages(struct list_head *from,
-		new_page_t get_new_page, unsigned long private, int offlining)
+		new_page_t get_new_page, unsigned long private, int offlining,
+		bool sync)
 {
 	int retry = 1;
 	int nr_failed = 0;
@@ -974,7 +977,8 @@ int migrate_huge_pages(struct list_head *from,
 			cond_resched();
 
 			rc = unmap_and_move_huge_page(get_new_page,
-					private, page, pass > 2, offlining);
+					private, page, pass > 2, offlining,
+					sync);
 
 			switch(rc) {
 			case -ENOMEM:
@@ -1107,7 +1111,7 @@ set_status:
 	err = 0;
 	if (!list_empty(&pagelist)) {
 		err = migrate_pages(&pagelist, new_page_node,
-				(unsigned long)pm, 0);
+				(unsigned long)pm, 0, true);
 		if (err)
 			putback_lru_pages(&pagelist);
 	}
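The behavioural heart of the mm/migrate.c hunks is the new "!force || !sync"
test: an asynchronous caller now backs off instead of sleeping in
wait_on_page_writeback() or lock_page(). A small standalone model of that
decision; may_block() is an illustrative name, not a kernel function:

#include <stdbool.h>
#include <stdio.h>

/*
 * Models the bail-out added to unmap_and_move(): migration may only
 * sleep (waiting for writeback or the page lock) when the caller both
 * forces the attempt (a late retry pass) and permits synchronous work.
 */
static bool may_block(bool force, bool sync)
{
	if (!force || !sync)
		return false;	/* back off; caller retries or gives up */
	return true;		/* waiting/locking allowed */
}

int main(void)
{
	for (int force = 0; force <= 1; force++)
		for (int sync = 0; sync <= 1; sync++)
			printf("force=%d sync=%d -> %s\n", force, sync,
			       may_block(force, sync) ? "may block" : "bail out");
	return 0;
}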
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03a66a31bfc..0fd486467b4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1812,7 +1812,8 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	struct page *page;
 	struct task_struct *tsk = current;
@@ -1822,7 +1823,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 	tsk->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-						nodemask);
+						nodemask, sync_migration);
 	tsk->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
@@ -1859,7 +1860,8 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	return NULL;
 }
@@ -2001,6 +2003,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
 	struct task_struct *p = current;
+	bool sync_migration = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2063,14 +2066,19 @@ rebalance:
 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
 		goto nopage;
 
-	/* Try direct compaction */
+	/*
+	 * Try direct compaction. The first pass is asynchronous. Subsequent
+	 * attempts after direct reclaim are synchronous
+	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order,
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					migratetype, &did_some_progress,
+					sync_migration);
 	if (page)
 		goto got_pg;
+	sync_migration = true;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2134,7 +2142,8 @@ rebalance:
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					migratetype, &did_some_progress,
+					sync_migration);
 	if (page)
 		goto got_pg;
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 10ebd74a423..8320d115c85 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2377,7 +2377,8 @@ loop_again:
 				 * would ordinarily call try_to_compact_pages()
 				 */
 				if (sc.order > PAGE_ALLOC_COSTLY_ORDER)
-					compact_zone_order(zone, sc.order, sc.gfp_mask);
+					compact_zone_order(zone, sc.order, sc.gfp_mask,
+								false);
 
 				if (!zone_watermark_ok_safe(zone, order,
 					    high_wmark_pages(zone), end_zone, 0)) {