Diffstat (limited to 'mm/compaction.c')
 mm/compaction.c | 65 +++++++++++++++++++++++++++++++++++++----------------------------
 1 file changed, 37 insertions(+), 28 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 8be430b812de..021a2960ef9e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -42,8 +42,6 @@ struct compact_control {
 	unsigned int order;		/* order a direct compactor needs */
 	int migratetype;		/* MOVABLE, RECLAIMABLE etc */
 	struct zone *zone;
-
-	int compact_mode;
 };
 
 static unsigned long release_freepages(struct list_head *freelist)
@@ -155,7 +153,6 @@ static void isolate_freepages(struct zone *zone,
 	 * pages on cc->migratepages. We stop searching if the migrate
 	 * and free page scanners meet or enough free pages are isolated.
 	 */
-	spin_lock_irqsave(&zone->lock, flags);
 	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
@@ -178,9 +175,19 @@ static void isolate_freepages(struct zone *zone,
 		if (!suitable_migration_target(page))
 			continue;
 
-		/* Found a block suitable for isolating free pages from */
-		isolated = isolate_freepages_block(zone, pfn, freelist);
-		nr_freepages += isolated;
+		/*
+		 * Found a block suitable for isolating free pages from.
+		 * Once interrupts are disabled, double check that the
+		 * block is still suitable and isolate the pages. This
+		 * minimises the time IRQs are disabled.
+		 */
+		isolated = 0;
+		spin_lock_irqsave(&zone->lock, flags);
+		if (suitable_migration_target(page)) {
+			isolated = isolate_freepages_block(zone, pfn, freelist);
+			nr_freepages += isolated;
+		}
+		spin_unlock_irqrestore(&zone->lock, flags);
 
 		/*
 		 * Record the highest PFN we isolated pages from. When next
@@ -190,7 +197,6 @@ static void isolate_freepages(struct zone *zone,
 		if (isolated)
 			high_pfn = max(high_pfn, pfn);
 	}
-	spin_unlock_irqrestore(&zone->lock, flags);
 
 	/* split_free_page does not map the pages */
 	list_for_each_entry(page, freelist, lru) {
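
The three hunks above narrow the scope of zone->lock in isolate_freepages(): rather than holding the lock, and hence keeping IRQs disabled, across the whole free-page scan, the lock is now taken per pageblock, and suitable_migration_target() is rechecked under the lock because the block may have changed while the lock was not held. A minimal sketch of the before/after pattern, with the loop bookkeeping elided:

	/* before: IRQs disabled for the entire scan */
	spin_lock_irqsave(&zone->lock, flags);
	for (; pfn > low_pfn; pfn -= pageblock_nr_pages)
		isolate_freepages_block(zone, pfn, freelist);
	spin_unlock_irqrestore(&zone->lock, flags);

	/* after: IRQs disabled only while a block is actually isolated */
	for (; pfn > low_pfn; pfn -= pageblock_nr_pages) {
		if (!suitable_migration_target(page))
			continue;
		spin_lock_irqsave(&zone->lock, flags);
		if (suitable_migration_target(page))	/* recheck under lock */
			isolate_freepages_block(zone, pfn, freelist);
		spin_unlock_irqrestore(&zone->lock, flags);
	}

The unlocked check is an optimistic fast path; the locked recheck keeps the result correct if the pageblock changed in the window between the two tests.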
@@ -271,9 +277,27 @@ static unsigned long isolate_migratepages(struct zone *zone,
 	}
 
 	/* Time to isolate some pages for migration */
+	cond_resched();
 	spin_lock_irq(&zone->lru_lock);
 	for (; low_pfn < end_pfn; low_pfn++) {
 		struct page *page;
+		bool locked = true;
+
+		/* give a chance to irqs before checking need_resched() */
+		if (!((low_pfn+1) % SWAP_CLUSTER_MAX)) {
+			spin_unlock_irq(&zone->lru_lock);
+			locked = false;
+		}
+		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
+			if (locked)
+				spin_unlock_irq(&zone->lru_lock);
+			cond_resched();
+			spin_lock_irq(&zone->lru_lock);
+			if (fatal_signal_pending(current))
+				break;
+		} else if (!locked)
+			spin_lock_irq(&zone->lru_lock);
+
 		if (!pfn_valid_within(low_pfn))
 			continue;
 		nr_scanned++;
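
This hunk applies a standard lock-break pattern to zone->lru_lock: drop the lock every SWAP_CLUSTER_MAX pages so pending IRQs can run, yield with cond_resched() when a reschedule is due or the lock is contended, and abort the scan on a fatal signal. Distilled to its skeleton, using the same names as above:

	spin_lock_irq(&zone->lru_lock);
	for (; low_pfn < end_pfn; low_pfn++) {
		bool locked = true;

		/* periodically open an IRQ window */
		if (!((low_pfn + 1) % SWAP_CLUSTER_MAX)) {
			spin_unlock_irq(&zone->lru_lock);
			locked = false;
		}
		if (need_resched() || spin_is_contended(&zone->lru_lock)) {
			if (locked)
				spin_unlock_irq(&zone->lru_lock);
			cond_resched();
			spin_lock_irq(&zone->lru_lock);
			if (fatal_signal_pending(current))
				break;	/* give up; the caller cleans up */
		} else if (!locked)
			spin_lock_irq(&zone->lru_lock);

		/* ... scan and isolate the page at low_pfn ... */
	}
	spin_unlock_irq(&zone->lru_lock);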
@@ -397,10 +421,7 @@ static int compact_finished(struct zone *zone,
 		return COMPACT_COMPLETE;
 
 	/* Compaction run is not finished if the watermark is not met */
-	if (cc->compact_mode != COMPACT_MODE_KSWAPD)
-		watermark = low_wmark_pages(zone);
-	else
-		watermark = high_wmark_pages(zone);
+	watermark = low_wmark_pages(zone);
 	watermark += (1 << cc->order);
 
 	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
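
With compact_mode gone, compact_finished() always tests against the low watermark plus the worst-case size of the pending allocation. For example, assuming a zone whose low_wmark_pages() is 2048 pages (an arbitrary figure for illustration) and an order-3 direct compaction:

	unsigned long watermark;

	watermark = low_wmark_pages(zone);	/* 2048 in this example */
	watermark += 1 << cc->order;		/* order 3: +8 -> 2056 */

	if (!zone_watermark_ok(zone, cc->order, watermark, 0, 0))
		return COMPACT_CONTINUE;	/* not enough free pages yet */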
@@ -413,15 +434,6 @@ static int compact_finished(struct zone *zone,
 	if (cc->order == -1)
 		return COMPACT_CONTINUE;
 
-	/*
-	 * Generating only one page of the right order is not enough
-	 * for kswapd, we must continue until we're above the high
-	 * watermark as a pool for high order GFP_ATOMIC allocations
-	 * too.
-	 */
-	if (cc->compact_mode == COMPACT_MODE_KSWAPD)
-		return COMPACT_CONTINUE;
-
 	/* Direct compactor: Is a suitable page free? */
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		/* Job done if page is free of the right migratetype */
@@ -508,12 +520,13 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 	while ((ret = compact_finished(zone, cc)) == COMPACT_CONTINUE) {
 		unsigned long nr_migrate, nr_remaining;
+		int err;
 
 		if (!isolate_migratepages(zone, cc))
 			continue;
 
 		nr_migrate = cc->nr_migratepages;
-		migrate_pages(&cc->migratepages, compaction_alloc,
+		err = migrate_pages(&cc->migratepages, compaction_alloc,
 				(unsigned long)cc, false,
 				cc->sync);
 		update_nr_listpages(cc);
@@ -527,7 +540,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 						nr_remaining);
 
 		/* Release LRU pages not migrated */
-		if (!list_empty(&cc->migratepages)) {
+		if (err) {
 			putback_lru_pages(&cc->migratepages);
 			cc->nr_migratepages = 0;
 		}
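
compact_zone() now keys the putback on the return value of migrate_pages() instead of on list_empty(): migrate_pages() returns 0 when every page was migrated, otherwise the number of pages that could not be migrated (or a negative error code), so any nonzero err means leftover pages must be returned to the LRU. Schematically:

	err = migrate_pages(&cc->migratepages, compaction_alloc,
			    (unsigned long)cc, false, cc->sync);
	if (err) {
		/* migration failed for some pages: put them back on the LRU */
		putback_lru_pages(&cc->migratepages);
		cc->nr_migratepages = 0;
	}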
@@ -543,8 +556,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 
 unsigned long compact_zone_order(struct zone *zone,
 				 int order, gfp_t gfp_mask,
-				 bool sync,
-				 int compact_mode)
+				 bool sync)
 {
 	struct compact_control cc = {
 		.nr_freepages = 0,
@@ -553,7 +565,6 @@ unsigned long compact_zone_order(struct zone *zone,
 		.migratetype = allocflags_to_migratetype(gfp_mask),
 		.zone = zone,
 		.sync = sync,
-		.compact_mode = compact_mode,
 	};
 	INIT_LIST_HEAD(&cc.freepages);
 	INIT_LIST_HEAD(&cc.migratepages);
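
After this change a compaction request is fully described by the remaining compact_control fields; callers no longer select a mode. A hypothetical order-2 request, mirroring the body of compact_zone_order() above (GFP_HIGHUSER_MOVABLE chosen arbitrarily for illustration):

	struct compact_control cc = {
		.nr_freepages = 0,
		.nr_migratepages = 0,
		.order = 2,
		.migratetype = allocflags_to_migratetype(GFP_HIGHUSER_MOVABLE),
		.zone = zone,
		.sync = false,		/* asynchronous migration */
	};
	INIT_LIST_HEAD(&cc.freepages);
	INIT_LIST_HEAD(&cc.migratepages);
	ret = compact_zone(zone, &cc);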
@@ -599,8 +610,7 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
 								nodemask) {
 		int status;
 
-		status = compact_zone_order(zone, order, gfp_mask, sync,
-					    COMPACT_MODE_DIRECT_RECLAIM);
+		status = compact_zone_order(zone, order, gfp_mask, sync);
 		rc = max(status, rc);
 
 		/* If a normal allocation would succeed, stop compacting */
@@ -631,7 +641,6 @@ static int compact_node(int nid)
 		.nr_freepages = 0,
 		.nr_migratepages = 0,
 		.order = -1,
-		.compact_mode = COMPACT_MODE_DIRECT_RECLAIM,
 	};
 
 	zone = &pgdat->node_zones[zoneid];