Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	| 259
1 file changed, 200 insertions(+), 59 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 4d709ee59013..6cc604bd5649 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,6 +16,9 @@
 #include <linux/sysfs.h>
 #include "internal.h"
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/compaction.h>
+
 /*
  * compact_control is used to track pages being migrated and the free pages
  * they are being migrated to during memory compaction. The free_pfn starts
@@ -30,6 +33,7 @@ struct compact_control {
 	unsigned long nr_migratepages;	/* Number of pages to migrate */
 	unsigned long free_pfn;		/* isolate_freepages search base */
 	unsigned long migrate_pfn;	/* isolate_migratepages search base */
+	bool sync;			/* Synchronous migration */
 
 	/* Account for isolated anon and file pages */
 	unsigned long nr_anon;
@@ -60,7 +64,7 @@ static unsigned long isolate_freepages_block(struct zone *zone,
 					struct list_head *freelist)
 {
 	unsigned long zone_end_pfn, end_pfn;
-	int total_isolated = 0;
+	int nr_scanned = 0, total_isolated = 0;
 	struct page *cursor;
 
 	/* Get the last PFN we should scan for free pages at */
@@ -81,6 +85,7 @@ static unsigned long isolate_freepages_block(struct zone *zone,
 
 		if (!pfn_valid_within(blockpfn))
 			continue;
+		nr_scanned++;
 
 		if (!PageBuddy(page))
 			continue;
@@ -100,6 +105,7 @@ static unsigned long isolate_freepages_block(struct zone *zone,
 		}
 	}
 
+	trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
 	return total_isolated;
 }
 
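For background on the two tracepoints added by this diff: #define CREATE_TRACE_POINTS before including trace/events/compaction.h instantiates tracepoints whose definitions live in that header, which is not shown here. The sketch below shows the general TRACE_EVENT() shape such a definition takes; the field names and printk format are assumptions for illustration, not the header's actual contents.

/* Illustrative TRACE_EVENT() sketch only; the real definition is in
 * include/trace/events/compaction.h and may differ in layout. */
TRACE_EVENT(mm_compaction_isolate_freepages,
	TP_PROTO(unsigned long nr_scanned, unsigned long nr_taken),
	TP_ARGS(nr_scanned, nr_taken),
	TP_STRUCT__entry(
		__field(unsigned long, nr_scanned)
		__field(unsigned long, nr_taken)
	),
	TP_fast_assign(
		__entry->nr_scanned = nr_scanned;
		__entry->nr_taken = nr_taken;
	),
	TP_printk("nr_scanned=%lu nr_taken=%lu",
		__entry->nr_scanned, __entry->nr_taken)
);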
@@ -138,16 +144,26 @@ static void isolate_freepages(struct zone *zone,
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;
 
+	/*
+	 * Initialise the free scanner. The starting point is where we last
+	 * scanned from (or the end of the zone if starting). The low point
+	 * is the end of the pageblock the migration scanner is using.
+	 */
 	pfn = cc->free_pfn;
 	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
-	high_pfn = low_pfn;
+
+	/*
+	 * Take care that if the migration scanner is at the end of the zone
+	 * that the free scanner does not accidentally move to the next zone
+	 * in the next isolation cycle.
+	 */
+	high_pfn = min(low_pfn, pfn);
 
 	/*
 	 * Isolate free pages until enough are available to migrate the
 	 * pages on cc->migratepages. We stop searching if the migrate
 	 * and free page scanners meet or enough free pages are isolated.
 	 */
-	spin_lock_irqsave(&zone->lock, flags);
 	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
@@ -170,9 +186,19 @@ static void isolate_freepages(struct zone *zone,
 		if (!suitable_migration_target(page))
 			continue;
 
-		/* Found a block suitable for isolating free pages from */
-		isolated = isolate_freepages_block(zone, pfn, freelist);
-		nr_freepages += isolated;
+		/*
+		 * Found a block suitable for isolating free pages from. Now
+		 * we disabled interrupts, double check things are ok and
+		 * isolate the pages. This is to minimise the time IRQs
+		 * are disabled
+		 */
+		isolated = 0;
+		spin_lock_irqsave(&zone->lock, flags);
+		if (suitable_migration_target(page)) {
+			isolated = isolate_freepages_block(zone, pfn, freelist);
+			nr_freepages += isolated;
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
 
 		/*
 		 * Record the highest PFN we isolated pages from. When next
@@ -182,7 +208,6 @@ static void isolate_freepages(struct zone *zone,
 		if (isolated)
 			high_pfn = max(high_pfn, pfn);
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
 
 	/* split_free_page does not map the pages */
 	list_for_each_entry(page, freelist, lru) {
@@ -226,14 +251,23 @@ static bool too_many_isolated(struct zone *zone)
 	return isolated > (inactive + active) / 2;
 }
 
+/* possible outcome of isolate_migratepages */
+typedef enum {
+	ISOLATE_ABORT,		/* Abort compaction now */
+	ISOLATE_NONE,		/* No pages isolated, continue scanning */
+	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
+} isolate_migrate_t;
+
 /*
  * Isolate all pages that can be migrated from the block pointed to by
  * the migrate scanner within compact_control.
  */
-static unsigned long isolate_migratepages(struct zone *zone,
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
 					struct compact_control *cc)
 {
 	unsigned long low_pfn, end_pfn;
+	unsigned long last_pageblock_nr = 0, pageblock_nr;
+	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 
 	/* Do not scan outside zone boundaries */
@@ -245,7 +279,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
 	/* Do not cross the free scanner or scan within a memory hole */
 	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
 		cc->migrate_pfn = end_pfn;
-		return 0;
+		return ISOLATE_NONE;
 	}
 
 	/*
@@ -254,33 +288,85 @@ static unsigned long isolate_migratepages(struct zone *zone,
 	 * delay for some time until fewer pages are isolated
 	 */
 	while (unlikely(too_many_isolated(zone))) {
+		/* async migration should just abort */
+		if (!cc->sync)
+			return ISOLATE_ABORT;
+
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		if (fatal_signal_pending(current))
-			return 0;
+			return ISOLATE_ABORT;
 	}
 
 	/* Time to isolate some pages for migration */
+	cond_resched();
 	spin_lock_irq(&zone->lru_lock);
 	for (; low_pfn < end_pfn; low_pfn++) {
 		struct page *page;
+		bool locked = true;
+
+		/* give a chance to irqs before checking need_resched() */
+		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
+			spin_unlock_irq(&zone->lru_lock);
+			locked = false;
+		}
+		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
+			if (locked)
+				spin_unlock_irq(&zone->lru_lock);
+			cond_resched();
+			spin_lock_irq(&zone->lru_lock);
+			if (fatal_signal_pending(current))
+				break;
+		} else if (!locked)
+			spin_lock_irq(&zone->lru_lock);
+
 		if (!pfn_valid_within(low_pfn))
 			continue;
+		nr_scanned++;
 
 		/* Get the page and skip if free */
 		page = pfn_to_page(low_pfn);
 		if (PageBuddy(page))
 			continue;
 
+		/*
+		 * For async migration, also only scan in MOVABLE blocks. Async
+		 * migration is optimistic to see if the minimum amount of work
+		 * satisfies the allocation
+		 */
+		pageblock_nr = low_pfn >> pageblock_order;
+		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
+				get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+			low_pfn += pageblock_nr_pages;
+			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
+			last_pageblock_nr = pageblock_nr;
+			continue;
+		}
+
+		if (!PageLRU(page))
+			continue;
+
+		/*
+		 * PageLRU is set, and lru_lock excludes isolation,
+		 * splitting and collapsing (collapsing has already
+		 * happened if PageLRU is set).
+		 */
+		if (PageTransHuge(page)) {
+			low_pfn += (1 << compound_order(page)) - 1;
+			continue;
+		}
+
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
 			continue;
 
+		VM_BUG_ON(PageTransCompound(page));
+
 		/* Successfully isolated */
 		del_page_from_lru_list(zone, page, page_lru(page));
 		list_add(&page->lru, migratelist);
-		mem_cgroup_del_lru(page);
 		cc->nr_migratepages++;
+		nr_isolated++;
 
 		/* Avoid isolating too much */
 		if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
@@ -292,7 +378,9 @@ static unsigned long isolate_migratepages(struct zone *zone,
 	spin_unlock_irq(&zone->lru_lock);
 	cc->migrate_pfn = low_pfn;
 
-	return cc->nr_migratepages;
+	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
+
+	return ISOLATE_SUCCESS;
 }
 
 /*
@@ -342,10 +430,10 @@ static void update_nr_listpages(struct compact_control *cc)
 }
 
 static int compact_finished(struct zone *zone,
 			    struct compact_control *cc)
 {
 	unsigned int order;
-	unsigned long watermark = low_wmark_pages(zone) + (1 << cc->order);
+	unsigned long watermark;
 
 	if (fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
@@ -354,11 +442,18 @@ static int compact_finished(struct zone *zone,
 	if (cc->free_pfn <= cc->migrate_pfn)
 		return COMPACT_COMPLETE;
 
-	/* Compaction run is not finished if the watermark is not met */
-	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
+	/*
+	 * order == -1 is expected when compacting via
+	 * /proc/sys/vm/compact_memory
+	 */
+	if (cc->order == -1)
 		return COMPACT_CONTINUE;
 
-	if (cc->order == -1)
+	/* Compaction run is not finished if the watermark is not met */
+	watermark = low_wmark_pages(zone);
+	watermark += (1 << cc->order);
+
+	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
 		return COMPACT_CONTINUE;
 
 	/* Direct compactor: Is a suitable page free? */
@@ -375,10 +470,71 @@ static int compact_finished(struct zone *zone,
 	return COMPACT_CONTINUE;
 }
 
+/*
+ * compaction_suitable: Is this suitable to run compaction on this zone now?
+ * Returns
+ *   COMPACT_SKIPPED  - If there are too few free pages for compaction
+ *   COMPACT_PARTIAL  - If the allocation would succeed without compaction
+ *   COMPACT_CONTINUE - If compaction should run now
+ */
+unsigned long compaction_suitable(struct zone *zone, int order)
+{
+	int fragindex;
+	unsigned long watermark;
+
+	/*
+	 * order == -1 is expected when compacting via
+	 * /proc/sys/vm/compact_memory
+	 */
+	if (order == -1)
+		return COMPACT_CONTINUE;
+
+	/*
+	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
+	 * This is because during migration, copies of pages need to be
+	 * allocated and for a short time, the footprint is higher
+	 */
+	watermark = low_wmark_pages(zone) + (2UL << order);
+	if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
+		return COMPACT_SKIPPED;
+
+	/*
+	 * fragmentation index determines if allocation failures are due to
+	 * low memory or external fragmentation
+	 *
+	 * index of -1000 implies allocations might succeed depending on
+	 * watermarks
+	 * index towards 0 implies failure is due to lack of memory
+	 * index towards 1000 implies failure is due to fragmentation
+	 *
+	 * Only compact if a failure would be due to fragmentation.
+	 */
+	fragindex = fragmentation_index(zone, order);
+	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
+		return COMPACT_SKIPPED;
+
+	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
+	    0, 0))
+		return COMPACT_PARTIAL;
+
+	return COMPACT_CONTINUE;
+}
+
 static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;
 
+	ret = compaction_suitable(zone, cc->order);
+	switch (ret) {
+	case COMPACT_PARTIAL:
+	case COMPACT_SKIPPED:
+		/* Compaction is likely to fail */
+		return ret;
+	case COMPACT_CONTINUE:
+		/* Fall through to compaction */
+		;
+	}
+
 	/* Setup to move all movable pages to the end of the zone */
 	cc->migrate_pfn = zone->zone_start_pfn;
 	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
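For orientation, not part of the patch itself: the order-0 headroom demanded by compaction_suitable() above is (2UL << order) pages on top of the low watermark, so an order-9 request (a 2MB huge page with 4KB base pages) needs about 1024 extra free pages, roughly 4MB, before compaction is attempted. Below is a minimal sketch of how a caller might act on the three return values; the wrapper name is invented for illustration and does not appear in this diff.

/* Illustrative only: restates the compaction_suitable() policy above. */
static bool example_should_compact(struct zone *zone, int order)
{
	switch (compaction_suitable(zone, order)) {
	case COMPACT_SKIPPED:	/* too few free pages to compact */
	case COMPACT_PARTIAL:	/* the allocation should already succeed */
		return false;
	default:		/* COMPACT_CONTINUE: failure is due to fragmentation */
		return true;
	}
}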
@@ -388,13 +544,22 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
 		unsigned long nr_migrate, nr_remaining;
+		int err;
 
-		if (!isolate_migratepages(zone, cc))
+		switch (isolate_migratepages(zone, cc)) {
+		case ISOLATE_ABORT:
+			ret = COMPACT_PARTIAL;
+			goto out;
+		case ISOLATE_NONE:
 			continue;
+		case ISOLATE_SUCCESS:
+			;
+		}
 
 		nr_migrate = cc->nr_migratepages;
-		migrate_pages(&cc->migratepages, compaction_alloc,
-				(unsigned long)cc, 0);
+		err = migrate_pages(&cc->migratepages, compaction_alloc,
+				(unsigned long)cc, false,
+				cc->sync);
 		update_nr_listpages(cc);
 		nr_remaining = cc->nr_migratepages;
 
@@ -402,15 +567,18 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		count_vm_events(COMPACTPAGES, nr_migrate - nr_remaining);
 		if (nr_remaining)
 			count_vm_events(COMPACTPAGEFAILED, nr_remaining);
+		trace_mm_compaction_migratepages(nr_migrate - nr_remaining,
+						nr_remaining);
 
 		/* Release LRU pages not migrated */
-		if (!list_empty(&cc->migratepages)) {
+		if (err) {
 			putback_lru_pages(&cc->migratepages);
 			cc->nr_migratepages = 0;
 		}
 
 	}
 
+out:
 	/* Release free pages and check accounting */
 	cc->nr_freepages -= release_freepages(&cc->freepages);
 	VM_BUG_ON(cc->nr_freepages != 0);
@@ -418,8 +586,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	return ret;
 }
 
-static unsigned long compact_zone_order(struct zone *zone,
-						int order, gfp_t gfp_mask)
+unsigned long compact_zone_order(struct zone *zone,
+				 int order, gfp_t gfp_mask,
+				 bool sync)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -427,6 +596,7 @@ static unsigned long compact_zone_order(struct zone *zone,
 		.order = order,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
+		.sync = sync,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
@@ -442,16 +612,17 @@ int sysctl_extfrag_threshold = 500;
  * @order: The order of the current allocation
  * @gfp_mask: The GFP mask of the current allocation
  * @nodemask: The allowed nodes to allocate from
+ * @sync: Whether migration is synchronous or not
  *
  * This is the main entry point for direct page compaction.
  */
 unsigned long try_to_compact_pages(struct zonelist *zonelist,
-			int order, gfp_t gfp_mask, nodemask_t *nodemask)
+			int order, gfp_t gfp_mask, nodemask_t *nodemask,
+			bool sync)
 {
 	enum zone_type high_zoneidx = gfp_zone(gfp_mask);
 	int may_enter_fs = gfp_mask & __GFP_FS;
 	int may_perform_io = gfp_mask & __GFP_IO;
-	unsigned long watermark;
 	struct zoneref *z;
 	struct zone *zone;
 	int rc = COMPACT_SKIPPED;
@@ -461,7 +632,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	 * made because an assumption is made that the page allocator can satisfy
 	 * the "cheaper" orders without taking special steps
 	 */
-	if (order <= PAGE_ALLOC_COSTLY_ORDER || !may_enter_fs || !may_perform_io)
+	if (!order || !may_enter_fs || !may_perform_io)
 		return rc;
 
 	count_vm_event(COMPACTSTALL);
@@ -469,43 +640,13 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 	/* Compact each zone in the list */
 	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
 								nodemask) {
-		int fragindex;
 		int status;
 
-		/*
-		 * Watermarks for order-0 must be met for compaction. Note
-		 * the 2UL. This is because during migration, copies of
-		 * pages need to be allocated and for a short time, the
-		 * footprint is higher
-		 */
-		watermark = low_wmark_pages(zone) + (2UL << order);
-		if (!zone_watermark_ok(zone, 0, watermark, 0, 0))
-			continue;
-
-		/*
-		 * fragmentation index determines if allocation failures are
-		 * due to low memory or external fragmentation
-		 *
-		 * index of -1 implies allocations might succeed depending
-		 * on watermarks
-		 * index towards 0 implies failure is due to lack of memory
-		 * index towards 1000 implies failure is due to fragmentation
-		 *
-		 * Only compact if a failure would be due to fragmentation.
-		 */
-		fragindex = fragmentation_index(zone, order);
-		if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
-			continue;
-
-		if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0)) {
-			rc = COMPACT_PARTIAL;
-			break;
-		}
-
-		status = compact_zone_order(zone, order, gfp_mask);
+		status = compact_zone_order(zone, order, gfp_mask, sync);
 		rc = max(status, rc);
 
-		if (zone_watermark_ok(zone, order, watermark, 0, 0))
+		/* If a normal allocation would succeed, stop compacting */
+		if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
 			break;
 	}
 
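For context, not taken from this patch: with the extra sync argument, a direct-compaction caller in the allocator slow path would look roughly like the sketch below. The wrapper name and surrounding logic are hypothetical; only the try_to_compact_pages() signature and the COMPACT_* values come from the code above.

/* Hypothetical caller: async compaction (sync == false) bails out early
 * instead of waiting on congestion or scanning non-MOVABLE pageblocks. */
static unsigned long example_direct_compact(struct zonelist *zonelist,
		int order, gfp_t gfp_mask, nodemask_t *nodemask,
		bool sync_migration)
{
	unsigned long rc;

	rc = try_to_compact_pages(zonelist, order, gfp_mask, nodemask,
				  sync_migration);
	if (rc == COMPACT_SKIPPED)
		return rc;	/* compaction was not attempted */

	/* On other return values the caller would retry the high-order
	 * allocation; that retry logic is omitted here. */
	return rc;
}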