Diffstat (limited to 'mm/compaction.c')
 mm/compaction.c | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 71a58f67f481..d9ebebe1a2aa 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -313,12 +313,34 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		} else if (!locked)
 			spin_lock_irq(&zone->lru_lock);
 
+		/*
+		 * migrate_pfn does not necessarily start aligned to a
+		 * pageblock. Ensure that pfn_valid is called when moving
+		 * into a new MAX_ORDER_NR_PAGES range in case of large
+		 * memory holes within the zone
+		 */
+		if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
+			if (!pfn_valid(low_pfn)) {
+				low_pfn += MAX_ORDER_NR_PAGES - 1;
+				continue;
+			}
+		}
+
 		if (!pfn_valid_within(low_pfn))
 			continue;
 		nr_scanned++;
 
-		/* Get the page and skip if free */
+		/*
+		 * Get the page and ensure the page is within the same zone.
+		 * See the comment in isolate_freepages about overlapping
+		 * nodes. It is deliberate that the new zone lock is not taken
+		 * as memory compaction should not move pages between nodes.
+		 */
 		page = pfn_to_page(low_pfn);
+		if (page_zone(page) != zone)
+			continue;
+
+		/* Skip if free */
 		if (PageBuddy(page))
 			continue;
 
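The first hunk's boundary test relies on MAX_ORDER_NR_PAGES being a power of
two: low_pfn & (MAX_ORDER_NR_PAGES - 1) is zero exactly when the scanner
crosses into a new MAX_ORDER block, and advancing by MAX_ORDER_NR_PAGES - 1
(the loop increment supplies the final step) hops over an entire invalid block
at once. Below is a minimal userspace sketch of that arithmetic; the block
size, the fake pfn_valid() and the simulated memory hole are illustrative
stand-ins, not the kernel's definitions.

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-ins; the real values and helpers live in the kernel. */
	#define MAX_ORDER_NR_PAGES 1024UL	/* must be a power of two */

	/* Fake memmap: pretend the block covering pfns 2048-3071 is a hole. */
	static bool pfn_valid(unsigned long pfn)
	{
		return (pfn / MAX_ORDER_NR_PAGES) != 2;
	}

	int main(void)
	{
		unsigned long low_pfn = 1536;	/* deliberately not block-aligned */
		unsigned long end_pfn = 4096;

		for (; low_pfn < end_pfn; low_pfn++) {
			/* Low bits all zero <=> low_pfn enters a new MAX_ORDER block. */
			if ((low_pfn & (MAX_ORDER_NR_PAGES - 1)) == 0) {
				if (!pfn_valid(low_pfn)) {
					/* Skip the hole; the loop's ++ adds the last pfn. */
					low_pfn += MAX_ORDER_NR_PAGES - 1;
					continue;
				}
				printf("scanning valid block starting at pfn %lu\n",
				       low_pfn);
			}
			/* per-pfn isolation work would happen here */
		}
		return 0;
	}

Running the sketch, the unaligned start (pfn 1536) is scanned as-is, the hole
block at pfn 2048 is skipped in one step, and scanning resumes at pfn 3072.
The second hunk is a correctness check rather than an optimisation: with
overlapping nodes, a pfn inside this zone's range can yield a page belonging
to a different zone, and such pages are skipped. As the new comment notes, the
foreign zone's lock is deliberately not taken, because compaction never
migrates pages between nodes.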