Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	125
1 file changed, 108 insertions, 17 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index db76361a3117..d9dbb97e607b 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -50,6 +50,79 @@ static inline bool migrate_async_suitable(int migratetype)
 	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
 }
 
+#ifdef CONFIG_COMPACTION
+/* Returns true if the pageblock should be scanned for pages to isolate. */
+static inline bool isolation_suitable(struct compact_control *cc,
+					struct page *page)
+{
+	if (cc->ignore_skip_hint)
+		return true;
+
+	return !get_pageblock_skip(page);
+}
+
+/*
+ * This function is called to clear all cached information on pageblocks that
+ * should be skipped for page isolation when the migrate and free page scanner
+ * meet.
+ */
+static void reset_isolation_suitable(struct zone *zone)
+{
+	unsigned long start_pfn = zone->zone_start_pfn;
+	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long pfn;
+
+	/*
+	 * Do not reset more than once every five seconds. If allocations are
+	 * failing sufficiently quickly to allow this to happen then continually
+	 * scanning for compaction is not going to help. The choice of five
+	 * seconds is arbitrary but will mitigate excessive scanning.
+	 */
+	if (time_before(jiffies, zone->compact_blockskip_expire))
+		return;
+	zone->compact_blockskip_expire = jiffies + (HZ * 5);
+
+	/* Walk the zone and mark every pageblock as suitable for isolation */
+	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
+		struct page *page;
+
+		cond_resched();
+
+		if (!pfn_valid(pfn))
+			continue;
+
+		page = pfn_to_page(pfn);
+		if (zone != page_zone(page))
+			continue;
+
+		clear_pageblock_skip(page);
+	}
+}
+
+/*
+ * If no pages were isolated then mark this pageblock to be skipped in the
+ * future. The information is later cleared by reset_isolation_suitable().
+ */
+static void update_pageblock_skip(struct page *page, unsigned long nr_isolated)
+{
+	if (!page)
+		return;
+
+	if (!nr_isolated)
+		set_pageblock_skip(page);
+}
+#else
+static inline bool isolation_suitable(struct compact_control *cc,
+					struct page *page)
+{
+	return true;
+}
+
+static void update_pageblock_skip(struct page *page, unsigned long nr_isolated)
+{
+}
+#endif /* CONFIG_COMPACTION */
+
 static inline bool should_release_lock(spinlock_t *lock)
 {
 	return need_resched() || spin_is_contended(lock);
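
The three helpers above are the whole caching scheme: isolation_suitable() consults a per-pageblock skip bit, update_pageblock_skip() sets that bit for blocks that yielded no pages, and reset_isolation_suitable() clears every bit at most once per five seconds. A minimal userspace sketch of the mechanism, with a plain bitmap standing in for the kernel's pageblock flags (skip_bitmap, NR_PAGEBLOCKS and these helper names are illustrative, not kernel API):

/* Userspace sketch of the pageblock skip-hint mechanism; all names
 * here are illustrative stand-ins for the kernel's pageblock flag
 * accessors, not real kernel API. */
#include <stdbool.h>
#include <string.h>
#include <time.h>

#define NR_PAGEBLOCKS	1024

static unsigned char skip_bitmap[NR_PAGEBLOCKS / 8];
static time_t blockskip_expire;

static bool get_skip(unsigned int pb)
{
	return skip_bitmap[pb / 8] & (1u << (pb % 8));
}

static void set_skip(unsigned int pb)
{
	skip_bitmap[pb / 8] |= 1u << (pb % 8);
}

/* Mirrors isolation_suitable(): scan unless the hint says skip */
static bool suitable(unsigned int pb, bool ignore_skip_hint)
{
	if (ignore_skip_hint)
		return true;
	return !get_skip(pb);
}

/* Mirrors update_pageblock_skip(): remember blocks that yielded nothing */
static void update_skip(unsigned int pb, unsigned long nr_isolated)
{
	if (!nr_isolated)
		set_skip(pb);
}

/* Mirrors reset_isolation_suitable(): clear hints, at most every 5s */
static void reset_skip(void)
{
	time_t now = time(NULL);

	if (now < blockskip_expire)
		return;
	blockskip_expire = now + 5;
	memset(skip_bitmap, 0, sizeof(skip_bitmap));
}

int main(void)
{
	reset_skip();			/* first call always clears */
	update_skip(3, 0);		/* block 3 yielded nothing: mark it */

	return !(!suitable(3, false)	/* now skipped... */
		&& suitable(3, true));	/* ...unless hints are ignored */
}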
@@ -181,7 +254,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 				bool strict)
 {
 	int nr_scanned = 0, total_isolated = 0;
-	struct page *cursor;
+	struct page *cursor, *valid_page = NULL;
 	unsigned long nr_strict_required = end_pfn - blockpfn;
 	unsigned long flags;
 	bool locked = false;
@@ -196,6 +269,8 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		nr_scanned++;
 		if (!pfn_valid_within(blockpfn))
 			continue;
+		if (!valid_page)
+			valid_page = page;
 		if (!PageBuddy(page))
 			continue;
 
@@ -250,6 +325,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 	if (locked)
 		spin_unlock_irqrestore(&cc->zone->lock, flags);
 
+	/* Update the pageblock-skip if the whole pageblock was scanned */
+	if (blockpfn == end_pfn)
+		update_pageblock_skip(valid_page, total_isolated);
+
 	return total_isolated;
 }
 
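
Note the blockpfn == end_pfn guard above: isolate_freepages_block() can bail out of its scan early (strict mode and lock contention both abort the loop), and a partial scan proves nothing about the pages it never reached, so only a completed scan is allowed to set the skip hint. A toy, purely illustrative model of that rule, assuming a 512-page block:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in; not kernel API. */
#define BLOCK_PAGES 512UL

static unsigned long scan(unsigned long start, bool abort_early,
			  unsigned long *nr_isolated)
{
	unsigned long pfn;

	*nr_isolated = 0;
	for (pfn = start; pfn < start + BLOCK_PAGES; pfn++) {
		if (abort_early && pfn == start + 8)
			break;		/* e.g. lock contention */
		/* assume nothing isolatable was found */
	}
	return pfn;			/* where the scan stopped */
}

int main(void)
{
	unsigned long nr_isolated;
	unsigned long stop = scan(0, true, &nr_isolated);

	/* mirrors: if (blockpfn == end_pfn) update_pageblock_skip(...) */
	if (stop == BLOCK_PAGES && !nr_isolated)
		puts("full scan, no pages: set the skip bit");
	else
		puts("partial scan: leave the skip hint untouched");
	return 0;
}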
@@ -267,22 +346,14 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
  * a free page).
  */
 unsigned long
-isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
+isolate_freepages_range(struct compact_control *cc,
+			unsigned long start_pfn, unsigned long end_pfn)
 {
 	unsigned long isolated, pfn, block_end_pfn;
-	struct zone *zone = NULL;
 	LIST_HEAD(freelist);
 
-	/* cc needed for isolate_freepages_block to acquire zone->lock */
-	struct compact_control cc = {
-		.sync = true,
-	};
-
-	if (pfn_valid(start_pfn))
-		cc.zone = zone = page_zone(pfn_to_page(start_pfn));
-
 	for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
-		if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
+		if (!pfn_valid(pfn) || cc->zone != page_zone(pfn_to_page(pfn)))
 			break;
 
 		/*
@@ -292,7 +363,7 @@ isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
 		block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
 		block_end_pfn = min(block_end_pfn, end_pfn);
 
-		isolated = isolate_freepages_block(&cc, pfn, block_end_pfn,
+		isolated = isolate_freepages_block(cc, pfn, block_end_pfn,
 						   &freelist, true);
 
 		/*
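
With the local compact_control gone, every caller of isolate_freepages_range() must now supply its own. A hedged sketch of the minimum a caller has to set up, reusing the field values from the removed initializer (the wrapper function here is hypothetical and assumes kernel context, i.e. mm/internal.h for struct compact_control):

/* Hypothetical caller: provides the compact_control that
 * isolate_freepages_range() used to build for itself. */
static unsigned long isolate_range(unsigned long start_pfn,
				   unsigned long end_pfn)
{
	struct compact_control cc = {
		.sync = true,		/* matches the removed initializer */
	};

	if (!pfn_valid(start_pfn))
		return 0;
	cc.zone = page_zone(pfn_to_page(start_pfn));

	return isolate_freepages_range(&cc, start_pfn, end_pfn);
}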
@@ -387,6 +458,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	struct lruvec *lruvec;
 	unsigned long flags;
 	bool locked = false;
+	struct page *page = NULL, *valid_page = NULL;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -407,8 +479,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	/* Time to isolate some pages for migration */
 	cond_resched();
 	for (; low_pfn < end_pfn; low_pfn++) {
-		struct page *page;
-
 		/* give a chance to irqs before checking need_resched() */
 		if (locked && !((low_pfn+1) % SWAP_CLUSTER_MAX)) {
 			if (should_release_lock(&zone->lru_lock)) {
@@ -444,6 +514,14 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (page_zone(page) != zone)
 			continue;
 
+		if (!valid_page)
+			valid_page = page;
+
+		/* If isolation recently failed, do not retry */
+		pageblock_nr = low_pfn >> pageblock_order;
+		if (!isolation_suitable(cc, page))
+			goto next_pageblock;
+
 		/* Skip if free */
 		if (PageBuddy(page))
 			continue;
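
Hoisting pageblock_nr here works because it is just the pfn shifted down by pageblock_order: every pfn in a block shares one number, so the skip test fires once per block and next_pageblock can jump to the following one. A standalone demonstration of the arithmetic, assuming pageblock_order is 9 (2MB blocks of 4KB pages; the real value is configuration dependent):

#include <stdio.h>

#define PAGEBLOCK_ORDER		9	/* assumed: 2MB blocks, 4KB pages */
#define PAGEBLOCK_NR_PAGES	(1UL << PAGEBLOCK_ORDER)

int main(void)
{
	unsigned long low_pfn = 1234567;

	/* same computation as pageblock_nr = low_pfn >> pageblock_order */
	unsigned long block = low_pfn >> PAGEBLOCK_ORDER;

	/* first pfn of the next pageblock, where next_pageblock: resumes */
	unsigned long next = (block + 1) << PAGEBLOCK_ORDER;

	printf("pfn %lu is in pageblock %lu; next block starts at pfn %lu\n",
	       low_pfn, block, next);
	return 0;
}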
@@ -453,7 +531,6 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		 * migration is optimistic to see if the minimum amount of work
 		 * satisfies the allocation
 		 */
-		pageblock_nr = low_pfn >> pageblock_order;
 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
 		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			goto next_pageblock;
@@ -530,6 +607,10 @@ next_pageblock:
 	if (locked)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
+	/* Update the pageblock-skip if the whole pageblock was scanned */
+	if (low_pfn == end_pfn)
+		update_pageblock_skip(valid_page, nr_isolated);
+
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
 	return low_pfn;
@@ -593,6 +674,10 @@ static void isolate_freepages(struct zone *zone,
 		if (!suitable_migration_target(page))
 			continue;
 
+		/* If isolation recently failed, do not retry */
+		if (!isolation_suitable(cc, page))
+			continue;
+
 		/* Found a block suitable for isolating free pages from */
 		isolated = 0;
 		end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
@@ -709,8 +794,10 @@ static int compact_finished(struct zone *zone,
 		return COMPACT_PARTIAL;
 
 	/* Compaction run completes if the migrate and free scanner meet */
-	if (cc->free_pfn <= cc->migrate_pfn)
+	if (cc->free_pfn <= cc->migrate_pfn) {
+		reset_isolation_suitable(cc->zone);
 		return COMPACT_COMPLETE;
+	}
 
 	/*
 	 * order == -1 is expected when compacting via
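
The migrate scanner climbs from the zone start while the free scanner descends from the zone end, so free_pfn <= migrate_pfn means the whole zone has been covered and the cached skip bits have done their job, which is why the reset sits exactly here. A toy model of the convergence test (all names illustrative):

#include <stdbool.h>
#include <stdio.h>

struct cc_model {
	unsigned long migrate_pfn;	/* climbs from the zone start */
	unsigned long free_pfn;		/* descends from the zone end */
};

static bool finished(struct cc_model *cc)
{
	/* mirrors: if (cc->free_pfn <= cc->migrate_pfn) */
	return cc->free_pfn <= cc->migrate_pfn;
}

int main(void)
{
	struct cc_model cc = { .migrate_pfn = 0, .free_pfn = 4096 };

	while (!finished(&cc)) {
		cc.migrate_pfn += 512;	/* migrate scanner takes a block */
		cc.free_pfn -= 512;	/* free scanner gives one up */
	}
	/* here the kernel would call reset_isolation_suitable(zone) */
	printf("scanners met at pfn %lu\n", cc.migrate_pfn);
	return 0;
}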
@@ -818,6 +905,10 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	cc->free_pfn = cc->migrate_pfn + zone->spanned_pages;
 	cc->free_pfn &= ~(pageblock_nr_pages-1);
 
+	/* Clear pageblock skip if there are numerous alloc failures */
+	if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT)
+		reset_isolation_suitable(zone);
+
 	migrate_prep_local();
 
 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
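
compact_defer_shift is compaction's existing back-off counter: each failed run widens the window of allocation attempts that skip compaction entirely, saturating at COMPACT_MAX_DEFER_SHIFT. The hunk above treats that saturation as evidence that the skip hints themselves have gone stale. A userspace sketch modelled on the kernel's defer_compaction()/compaction_deferred() pair (the struct and the simplified bookkeeping are illustrative):

#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6	/* kernel cap: skip up to 64 tries */

struct zone_model {
	unsigned int compact_defer_shift;
	unsigned int compact_considered;
};

/* modelled on defer_compaction(): widen the back-off after a failure */
static void defer(struct zone_model *z)
{
	z->compact_considered = 0;
	if (++z->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
		z->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* modelled on compaction_deferred(): should this attempt be skipped? */
static bool deferred(struct zone_model *z)
{
	return ++z->compact_considered < (1U << z->compact_defer_shift);
}

int main(void)
{
	struct zone_model z = { 0, 0 };
	int i;

	for (i = 0; i < 7; i++)
		defer(&z);	/* repeated failures saturate the shift */

	/* mirrors the new check in compact_zone() */
	if (z.compact_defer_shift == COMPACT_MAX_DEFER_SHIFT)
		printf("reset skip hints: many allocation failures\n");

	(void)deferred(&z);
	return 0;
}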