author     Mel Gorman <mel@csn.ul.ie>                       2010-05-24 17:32:32 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2010-05-25 11:07:00 -0400
commit     4f92e2586b43a2402e116055d4edda704f911b5b
tree       6a765ebeba951c02a7878bcea52a4769ad2e45c2
parent     5e7719058079a1423ccce56148b0aaa56b2df821
mm: compaction: defer compaction using an exponential backoff when compaction fails
The fragmentation index may indicate that a failure is due to external
fragmentation but after a compaction run completes, it is still possible
for an allocation to fail. There are two obvious reasons why:
o Page migration cannot move all pages so fragmentation remains
o A suitable page may exist but watermarks are not met
When compaction is followed by an allocation failure, this patch defers
further compaction in the zone for the next (1 << compact_defer_shift)
allocation attempts. If the next compaction attempt also fails,
compact_defer_shift is increased, up to a maximum of 6. If compaction
succeeds, the defer counters are reset again.
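To make the backoff concrete, here is a minimal userspace sketch of the same bookkeeping the patch adds to struct zone. The backoff_state struct and the sim_* helpers are names invented for this illustration; this is not the kernel code, only a standalone model of it:

```c
/*
 * Illustrative userspace sketch (not kernel code) of the deferral
 * bookkeeping: one counter for attempts seen since the last failure,
 * one shift that defines the skip window.
 */
#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6      /* cap the skip window at 1 << 6 */

struct backoff_state {
        unsigned int considered;       /* attempts since last failure */
        unsigned int defer_shift;      /* skip window is 1 << defer_shift */
};

/* Called when compaction ran but the allocation still failed */
static void sim_defer(struct backoff_state *s)
{
        s->considered = 0;
        if (++s->defer_shift > COMPACT_MAX_DEFER_SHIFT)
                s->defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Called on each allocation attempt; true means "skip compaction" */
static bool sim_deferred(struct backoff_state *s)
{
        unsigned long limit = 1UL << s->defer_shift;

        if (++s->considered > limit)
                s->considered = limit; /* avoid counter overflow */
        return s->considered < limit;
}

int main(void)
{
        struct backoff_state s = { 0, 0 };
        int failure;

        for (failure = 1; failure <= 8; failure++) {
                unsigned int skipped = 0;

                sim_defer(&s);          /* compaction + allocation failed */
                while (sim_deferred(&s))
                        skipped++;      /* these attempts skip compaction */
                printf("after failure %d: skipped %u attempt(s) (shift=%u)\n",
                       failure, skipped, s.defer_shift);
        }
        return 0;
}
```

Compiled and run, the sketch shows the skip window growing 1, 3, 7, 15, 31 and saturating at 63 skipped attempts once compact_defer_shift reaches 6.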
The zone that is deferred is the first zone in the zonelist, i.e. the
preferred zone. To defer compaction in the other zones, the information
would need to be stored in the zonelist or implemented similarly to the
zonelist_cache. This would impact the fast paths and is not justified at
this time.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--  include/linux/compaction.h | 39
-rw-r--r--  include/linux/mmzone.h     |  9
-rw-r--r--  mm/page_alloc.c            |  5
3 files changed, 52 insertions(+), 1 deletion(-)
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 3719325c6091..5ac51552d908 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -22,6 +22,36 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write,
 extern int fragmentation_index(struct zone *zone, unsigned int order);
 extern unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *mask);
+
+/* Do not skip compaction more than 64 times */
+#define COMPACT_MAX_DEFER_SHIFT 6
+
+/*
+ * Compaction is deferred when compaction fails to result in a page
+ * allocation success. 1 << compact_defer_limit compactions are skipped up
+ * to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
+ */
+static inline void defer_compaction(struct zone *zone)
+{
+       zone->compact_considered = 0;
+       zone->compact_defer_shift++;
+
+       if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
+               zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
+}
+
+/* Returns true if compaction should be skipped this time */
+static inline bool compaction_deferred(struct zone *zone)
+{
+       unsigned long defer_limit = 1UL << zone->compact_defer_shift;
+
+       /* Avoid possible overflow */
+       if (++zone->compact_considered > defer_limit)
+               zone->compact_considered = defer_limit;
+
+       return zone->compact_considered < (1UL << zone->compact_defer_shift);
+}
+
 #else
 static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
                        int order, gfp_t gfp_mask, nodemask_t *nodemask)
@@ -29,6 +59,15 @@ static inline unsigned long try_to_compact_pages(struct zonelist *zonelist,
        return COMPACT_CONTINUE;
 }
 
+static inline void defer_compaction(struct zone *zone)
+{
+}
+
+static inline bool compaction_deferred(struct zone *zone)
+{
+       return 1;
+}
+
 #endif /* CONFIG_COMPACTION */
 
 #if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index cf9e458e96b0..fd55f725a09e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -321,6 +321,15 @@ struct zone {
        unsigned long           *pageblock_flags;
 #endif /* CONFIG_SPARSEMEM */
 
+#ifdef CONFIG_COMPACTION
+       /*
+        * On compaction failure, 1<<compact_defer_shift compactions
+        * are skipped before trying again. The number attempted since
+        * last failure is tracked with compact_considered.
+        */
+       unsigned int            compact_considered;
+       unsigned int            compact_defer_shift;
+#endif
 
        ZONE_PADDING(_pad1_)
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cd88a860f088..95ad42de5a87 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1769,7 +1769,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 {
        struct page *page;
 
-       if (!order)
+       if (!order || compaction_deferred(preferred_zone))
                return NULL;
 
        *did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
@@ -1785,6 +1785,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                                        alloc_flags, preferred_zone,
                                        migratetype);
                if (page) {
+                       preferred_zone->compact_considered = 0;
+                       preferred_zone->compact_defer_shift = 0;
                        count_vm_event(COMPACTSUCCESS);
                        return page;
                }
@@ -1795,6 +1797,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
                 * but not enough to satisfy watermarks.
                 */
                count_vm_event(COMPACTFAIL);
+               defer_compaction(preferred_zone);
 
                cond_resched();
        }
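For reference, here is a small standalone sketch of the control flow the page_alloc.c hunks establish: each direct-compaction attempt first checks whether compaction is deferred, a compaction run that still fails to produce a page widens the backoff window, and a successful allocation resets both counters. The zone_sim struct and fake_compact_and_alloc() are mock stand-ins invented for this example; the real logic lives in __alloc_pages_direct_compact():

```c
/* Standalone model of the direct-compaction path (hypothetical names). */
#include <stdbool.h>
#include <stdio.h>

#define COMPACT_MAX_DEFER_SHIFT 6

struct zone_sim {
        unsigned int compact_considered;
        unsigned int compact_defer_shift;
};

/* Mirrors the kernel helper: widen the skip window after a failure */
static void defer_compaction(struct zone_sim *zone)
{
        zone->compact_considered = 0;
        if (++zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
                zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
}

/* Mirrors the kernel helper: should this attempt skip compaction? */
static bool compaction_deferred(struct zone_sim *zone)
{
        unsigned long defer_limit = 1UL << zone->compact_defer_shift;

        if (++zone->compact_considered > defer_limit)
                zone->compact_considered = defer_limit;
        return zone->compact_considered < defer_limit;
}

/* Pretend compaction only frees a suitable page on its third run */
static bool fake_compact_and_alloc(int *compact_runs)
{
        return ++(*compact_runs) >= 3;
}

int main(void)
{
        struct zone_sim zone = { 0, 0 };
        int compact_runs = 0;
        int attempt;

        for (attempt = 1; attempt <= 12; attempt++) {
                if (compaction_deferred(&zone)) {
                        printf("attempt %2d: compaction deferred (shift=%u)\n",
                               attempt, zone.compact_defer_shift);
                        continue;
                }
                if (fake_compact_and_alloc(&compact_runs)) {
                        /* success: reset the backoff, as the patch does */
                        zone.compact_considered = 0;
                        zone.compact_defer_shift = 0;
                        printf("attempt %2d: compaction + allocation succeeded\n",
                               attempt);
                } else {
                        /* compaction ran but the allocation still failed */
                        defer_compaction(&zone);
                        printf("attempt %2d: allocation failed, deferring\n",
                               attempt);
                }
        }
        return 0;
}
```

In this mock run the first two compaction attempts fail, so progressively more attempts skip compaction; once the third run succeeds, the counters reset and later attempts compact immediately again.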