author     Rik van Riel <riel@redhat.com>                    2012-03-21 19:33:52 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2012-03-21 20:54:56 -0400
commit     aff622495c9a0b56148192e53bdec539f5e147f2
tree       78f6400d8b6bec3279483006a0e9543e47aa833e /mm
parent     7be62de99adcab4449d416977b4274985c5fe023
vmscan: only defer compaction for failed order and higher
Currently a failed order-9 (transparent hugepage) compaction can lead to
memory compaction being temporarily disabled for a memory zone, even if
we only need compaction for an order-2 allocation, e.g. for jumbo frame
networking.
The fix is relatively straightforward: keep track of the highest order at
which compaction is succeeding, and only defer compaction for orders at
which compaction is failing.
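The compaction_deferred() and defer_compaction() helpers live in
include/linux/compaction.h, which falls outside the mm/-limited diffstat
below. As a rough sketch rather than the verbatim header change, the
order-aware forms implied by the callers in this patch, built around the
new per-zone compact_order_failed field, would look something like:

	/* Sketch: defer only for orders at or above the lowest failing order. */
	static inline bool compaction_deferred(struct zone *zone, int order)
	{
		unsigned long defer_limit = 1UL << zone->compact_defer_shift;

		/* Lower orders have been succeeding; never defer them. */
		if (order < zone->compact_order_failed)
			return false;

		/* Cap the counter to avoid overflow. */
		if (++zone->compact_considered > defer_limit)
			zone->compact_considered = defer_limit;

		return zone->compact_considered < defer_limit;
	}

	/* Sketch: on failure, back off and remember the lowest failing order. */
	static inline void defer_compaction(struct zone *zone, int order)
	{
		zone->compact_considered = 0;
		zone->compact_defer_shift++;

		if (order < zone->compact_order_failed)
			zone->compact_order_failed = order;

		if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
			zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
	}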
Signed-off-by: Rik van Riel <riel@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Hillf Danton <dhillf@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
 mm/compaction.c | 12 +++++++++++-
 mm/page_alloc.c |  6 ++++--
 mm/vmscan.c     |  2 +-
 3 files changed, 16 insertions(+), 4 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 36f0f61f4a24..c4b344a95032 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -695,9 +695,19 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
 		INIT_LIST_HEAD(&cc->freepages);
 		INIT_LIST_HEAD(&cc->migratepages);
 
-		if (cc->order < 0 || !compaction_deferred(zone))
+		if (cc->order < 0 || !compaction_deferred(zone, cc->order))
 			compact_zone(zone, cc);
 
+		if (cc->order > 0) {
+			int ok = zone_watermark_ok(zone, cc->order,
+						low_wmark_pages(zone), 0, 0);
+			if (ok && cc->order > zone->compact_order_failed)
+				zone->compact_order_failed = cc->order + 1;
+			/* Currently async compaction is never deferred. */
+			else if (!ok && cc->sync)
+				defer_compaction(zone, cc->order);
+		}
+
 		VM_BUG_ON(!list_empty(&cc->freepages));
 		VM_BUG_ON(!list_empty(&cc->migratepages));
 	}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a13ded1938f0..572b93ea475c 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1990,7 +1990,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	if (!order)
 		return NULL;
 
-	if (compaction_deferred(preferred_zone)) {
+	if (compaction_deferred(preferred_zone, order)) {
 		*deferred_compaction = true;
 		return NULL;
 	}
@@ -2012,6 +2012,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	if (page) {
 		preferred_zone->compact_considered = 0;
 		preferred_zone->compact_defer_shift = 0;
+		if (order >= preferred_zone->compact_order_failed)
+			preferred_zone->compact_order_failed = order + 1;
 		count_vm_event(COMPACTSUCCESS);
 		return page;
 	}
@@ -2028,7 +2030,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 * defer if the failure was a sync compaction failure.
 	 */
 	if (sync_migration)
-		defer_compaction(preferred_zone);
+		defer_compaction(preferred_zone, order);
 
 	cond_resched();
 }
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b2b4c4a0ada2..87e4d6a6dc11 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2198,7 +2198,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
 	 * If compaction is deferred, reclaim up to a point where
 	 * compaction will have a chance of success when re-enabled
 	 */
-	if (compaction_deferred(zone))
+	if (compaction_deferred(zone, sc->order))
 		return watermark_ok;
 
 	/* If compaction is not ready to start, keep reclaiming */