Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c | 76
1 file changed, 54 insertions, 22 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 021a2960ef9e..6cc604bd5649 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -144,9 +144,20 @@ static void isolate_freepages(struct zone *zone,
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;
 
+	/*
+	 * Initialise the free scanner. The starting point is where we last
+	 * scanned from (or the end of the zone if starting). The low point
+	 * is the end of the pageblock the migration scanner is using.
+	 */
 	pfn = cc->free_pfn;
 	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
-	high_pfn = low_pfn;
+
+	/*
+	 * Take care that if the migration scanner is at the end of the zone
+	 * that the free scanner does not accidentally move to the next zone
+	 * in the next isolation cycle.
+	 */
+	high_pfn = min(low_pfn, pfn);
 
 	/*
 	 * Isolate free pages until enough are available to migrate the
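For illustration, the effect of the new min() clamp can be shown outside the kernel. The following is a minimal user-space sketch, not kernel code: the pageblock size and all PFN values are invented, and init_high_pfn() is a made-up stand-in for the initialisation above. It shows how, once the migration scanner reaches the zone end, low_pfn points one pageblock past the zone and only the clamp keeps the free scanner inside it.

#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL	/* illustrative value only */

/* Mirrors the new initialisation: high_pfn = min(low_pfn, pfn). */
unsigned long init_high_pfn(unsigned long free_pfn, unsigned long migrate_pfn)
{
	unsigned long low_pfn = migrate_pfn + PAGEBLOCK_NR_PAGES;

	return low_pfn < free_pfn ? low_pfn : free_pfn;
}

int main(void)
{
	/* Invented zone: the migration scanner has reached the zone end. */
	unsigned long zone_end_pfn = 0x40000;
	unsigned long migrate_pfn = zone_end_pfn;
	unsigned long free_pfn = zone_end_pfn - 4 * PAGEBLOCK_NR_PAGES;

	/* Without the min(), high_pfn would be 0x40200, one pageblock past the zone. */
	printf("high_pfn = %#lx, zone ends at %#lx\n",
	       init_high_pfn(free_pfn, migrate_pfn), zone_end_pfn);
	return 0;
}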
@@ -240,11 +251,18 @@ static bool too_many_isolated(struct zone *zone)
 	return isolated > (inactive + active) / 2;
 }
 
+/* possible outcome of isolate_migratepages */
+typedef enum {
+	ISOLATE_ABORT,		/* Abort compaction now */
+	ISOLATE_NONE,		/* No pages isolated, continue scanning */
+	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
+} isolate_migrate_t;
+
 /*
  * Isolate all pages that can be migrated from the block pointed to by
  * the migrate scanner within compact_control.
  */
-static unsigned long isolate_migratepages(struct zone *zone,
+static isolate_migrate_t isolate_migratepages(struct zone *zone,
 					struct compact_control *cc)
 {
 	unsigned long low_pfn, end_pfn;
@@ -261,7 +279,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
 	/* Do not cross the free scanner or scan within a memory hole */
 	if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
 		cc->migrate_pfn = end_pfn;
-		return 0;
+		return ISOLATE_NONE;
 	}
 
 	/*
@@ -270,10 +288,14 @@ static unsigned long isolate_migratepages(struct zone *zone,
 	 * delay for some time until fewer pages are isolated
 	 */
 	while (unlikely(too_many_isolated(zone))) {
+		/* async migration should just abort */
+		if (!cc->sync)
+			return ISOLATE_ABORT;
+
 		congestion_wait(BLK_RW_ASYNC, HZ/10);
 
 		if (fatal_signal_pending(current))
-			return 0;
+			return ISOLATE_ABORT;
 	}
 
 	/* Time to isolate some pages for migration */
@@ -358,7 +380,7 @@ static unsigned long isolate_migratepages(struct zone *zone,
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
-	return cc->nr_migratepages;
+	return ISOLATE_SUCCESS;
 }
 
 /*
@@ -420,13 +442,6 @@ static int compact_finished(struct zone *zone,
 	if (cc->free_pfn <= cc->migrate_pfn)
 		return COMPACT_COMPLETE;
 
-	/* Compaction run is not finished if the watermark is not met */
-	watermark = low_wmark_pages(zone);
-	watermark += (1 << cc->order);
-
-	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
-		return COMPACT_CONTINUE;
-
 	/*
 	 * order == -1 is expected when compacting via
 	 * /proc/sys/vm/compact_memory
@@ -434,6 +449,13 @@ static int compact_finished(struct zone *zone,
 	if (cc->order == -1)
 		return COMPACT_CONTINUE;
 
+	/* Compaction run is not finished if the watermark is not met */
+	watermark = low_wmark_pages(zone);
+	watermark += (1 << cc->order);
+
+	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
+		return COMPACT_CONTINUE;
+
 	/* Direct compactor: Is a suitable page free? */
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		/* Job done if page is free of the right migratetype */
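The two compact_finished() hunks above only move the existing watermark test below the order == -1 early return, so a full-zone request from /proc/sys/vm/compact_memory never reaches it (note that with order == -1 the expression 1 << cc->order would shift by a negative count). The sketch below is a simplified, user-space restatement of that ordering, not kernel code; the names are invented and the free-list scan that really follows is reduced to a single placeholder return.

/* Simplified sketch of the check order in compact_finished() after this patch. */
enum result { CONTINUE, COMPLETE, PARTIAL };

enum result finished_check(unsigned long free_pfn, unsigned long migrate_pfn,
			   int order, unsigned long low_wmark,
			   unsigned long free_pages)
{
	/* Scanners have met: the whole zone has been processed. */
	if (free_pfn <= migrate_pfn)
		return COMPLETE;

	/* Full compaction request: keep going, and skip the shift below. */
	if (order == -1)
		return CONTINUE;

	/* Not finished while "low watermark + 2^order" pages are not free. */
	if (free_pages < low_wmark + (1UL << order))
		return CONTINUE;

	return PARTIAL;		/* placeholder for the free-list scan that follows */
}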
@@ -461,6 +483,13 @@ unsigned long compaction_suitable(struct zone *zone, int order)
 	unsigned long watermark;
 
 	/*
+	 * order == -1 is expected when compacting via
+	 * /proc/sys/vm/compact_memory
+	 */
+	if (order == -1)
+		return COMPACT_CONTINUE;
+
+	/*
 	 * Watermarks for order-0 must be met for compaction. Note the 2UL.
 	 * This is because during migration, copies of pages need to be
 	 * allocated and for a short time, the footprint is higher
@@ -470,17 +499,11 @@ unsigned long compaction_suitable(struct zone *zone, int order)
 		return COMPACT_SKIPPED;
 
 	/*
-	 * order == -1 is expected when compacting via
-	 * /proc/sys/vm/compact_memory
-	 */
-	if (order == -1)
-		return COMPACT_CONTINUE;
-
-	/*
 	 * fragmentation index determines if allocation failures are due to
 	 * low memory or external fragmentation
 	 *
-	 * index of -1 implies allocations might succeed dependingon watermarks
+	 * index of -1000 implies allocations might succeed depending on
+	 * watermarks
 	 * index towards 0 implies failure is due to lack of memory
 	 * index towards 1000 implies failure is due to fragmentation
 	 *
@@ -490,7 +513,8 @@ unsigned long compaction_suitable(struct zone *zone, int order)
 	if (fragindex >= 0 && fragindex <= sysctl_extfrag_threshold)
 		return COMPACT_SKIPPED;
 
-	if (fragindex == -1 && zone_watermark_ok(zone, order, watermark, 0, 0))
+	if (fragindex == -1000 && zone_watermark_ok(zone, order, watermark,
+			0, 0))
 		return COMPACT_PARTIAL;
 
 	return COMPACT_CONTINUE;
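As a reading aid, here is a stand-alone sketch (not kernel code) of the decision the two compaction_suitable() hunks above implement, using the convention from the updated comment: an index of -1000 means the allocation might already succeed depending on watermarks, values towards 0 mean failures are due to lack of memory, and values towards 1000 mean they are due to fragmentation. The function, enum and parameter names below are invented; the real code uses fragmentation_index(), sysctl_extfrag_threshold and zone_watermark_ok().

/* Illustrative classification of a fragmentation index, mirroring the hunks above. */
enum suitability { SKIP_COMPACTION, ALLOCATE_NOW, RUN_COMPACTION };

enum suitability classify_fragindex(int fragindex, int extfrag_threshold,
				    int watermark_ok)
{
	/* Index near 0: failure is mostly lack of memory; compaction won't help. */
	if (fragindex >= 0 && fragindex <= extfrag_threshold)
		return SKIP_COMPACTION;

	/* -1000: allocation might already succeed, if the watermark is met. */
	if (fragindex == -1000 && watermark_ok)
		return ALLOCATE_NOW;

	/* Index towards 1000: external fragmentation; compaction is worth running. */
	return RUN_COMPACTION;
}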
@@ -522,8 +546,15 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		unsigned long nr_migrate, nr_remaining;
 		int err;
 
-		if (!isolate_migratepages(zone, cc))
+		switch (isolate_migratepages(zone, cc)) {
+		case ISOLATE_ABORT:
+			ret = COMPACT_PARTIAL;
+			goto out;
+		case ISOLATE_NONE:
 			continue;
+		case ISOLATE_SUCCESS:
+			;
+		}
 
 		nr_migrate = cc->nr_migratepages;
 		err = migrate_pages(&cc->migratepages, compaction_alloc,
@@ -547,6 +578,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 	}
 
+out:
 	/* Release free pages and check accounting */
 	cc->nr_freepages -= release_freepages(&cc->freepages);
 	VM_BUG_ON(cc->nr_freepages != 0);
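Taken together, isolate_migratepages() now reports a three-way outcome that the compact_zone() loop dispatches on, with ISOLATE_ABORT jumping to the new out: label. The condensed user-space sketch below shows only that control-flow shape; isolate_step(), the iteration bound and the integer return codes are invented placeholders, and the real migrate and cleanup steps are reduced to comments.

/* Condensed, user-space sketch of the new compact_zone() loop shape. */
typedef enum {
	ISOLATE_ABORT,		/* Abort compaction now */
	ISOLATE_NONE,		/* No pages isolated, continue scanning */
	ISOLATE_SUCCESS,	/* Pages isolated, migrate */
} isolate_migrate_t;

/* Invented stand-in for isolate_migratepages(); the real one scans PFN ranges. */
isolate_migrate_t isolate_step(int iteration)
{
	if (iteration == 0)
		return ISOLATE_NONE;
	if (iteration < 3)
		return ISOLATE_SUCCESS;
	return ISOLATE_ABORT;	/* e.g. an async caller seeing too many isolated pages */
}

int compact_loop(void)
{
	int ret = 0;			/* stand-in for COMPACT_CONTINUE */

	for (int i = 0; i < 16; i++) {
		switch (isolate_step(i)) {
		case ISOLATE_ABORT:
			ret = 1;	/* stand-in for COMPACT_PARTIAL */
			goto out;
		case ISOLATE_NONE:
			continue;	/* nothing isolated, scan the next block */
		case ISOLATE_SUCCESS:
			;		/* fall through to the migrate step */
		}
		/* migrate_pages(...) would run here */
	}
out:
	/* release_freepages() and the accounting checks run after the label */
	return ret;
}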