diff options
author | Mel Gorman <mel@csn.ul.ie> | 2011-01-13 18:45:57 -0500 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-01-13 20:32:34 -0500 |
commit | 77f1fe6b08b13a87391549c8a820ddc817b6f50e (patch) | |
tree | 720865bd0994da3787b6f37d33b2ee4c26a2de6c /include/linux/compaction.h | |
parent | 3e7d344970673c5334cf7b5bb27c8c0942b06126 (diff) |
mm: migration: allow migration to operate asynchronously and avoid synchronous compaction in the faster path
Migration synchronously waits for writeback if the initial pass fails.
Callers of memory compaction do not necessarily want this behaviour if the
caller is latency sensitive or expects that synchronous migration is not
going to have a significantly better success rate.
This patch adds a sync parameter to migrate_pages() allowing the caller to
indicate if wait_on_page_writeback() is allowed within migration or not.
For reclaim/compaction, try_to_compact_pages() is first called
asynchronously, direct reclaim runs and then try_to_compact_pages() is
called synchronously as there is a greater expectation that it'll succeed.
[akpm@linux-foundation.org: build/merge fix]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/compaction.h')
-rw-r--r-- | include/linux/compaction.h | 10 |
1 files changed, 6 insertions, 4 deletions
diff --git a/include/linux/compaction.h b/include/linux/compaction.h index 2592883d862d..72cba4034785 100644 --- a/include/linux/compaction.h +++ b/include/linux/compaction.h | |||
@@ -21,10 +21,11 @@ extern int sysctl_extfrag_handler(struct ctl_table *table, int write, | |||
21 | 21 | ||
22 | extern int fragmentation_index(struct zone *zone, unsigned int order); | 22 | extern int fragmentation_index(struct zone *zone, unsigned int order); |
23 | extern unsigned long try_to_compact_pages(struct zonelist *zonelist, | 23 | extern unsigned long try_to_compact_pages(struct zonelist *zonelist, |
24 | int order, gfp_t gfp_mask, nodemask_t *mask); | 24 | int order, gfp_t gfp_mask, nodemask_t *mask, |
25 | bool sync); | ||
25 | extern unsigned long compaction_suitable(struct zone *zone, int order); | 26 | extern unsigned long compaction_suitable(struct zone *zone, int order); |
26 | extern unsigned long compact_zone_order(struct zone *zone, int order, | 27 | extern unsigned long compact_zone_order(struct zone *zone, int order, |
27 | gfp_t gfp_mask); | 28 | gfp_t gfp_mask, bool sync); |
28 | 29 | ||
29 | /* Do not skip compaction more than 64 times */ | 30 | /* Do not skip compaction more than 64 times */ |
30 | #define COMPACT_MAX_DEFER_SHIFT 6 | 31 | #define COMPACT_MAX_DEFER_SHIFT 6 |
@@ -57,7 +58,8 @@ static inline bool compaction_deferred(struct zone *zone) | |||
57 | 58 | ||
58 | #else | 59 | #else |
59 | static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, | 60 | static inline unsigned long try_to_compact_pages(struct zonelist *zonelist, |
60 | int order, gfp_t gfp_mask, nodemask_t *nodemask) | 61 | int order, gfp_t gfp_mask, nodemask_t *nodemask, |
62 | bool sync) | ||
61 | { | 63 | { |
62 | return COMPACT_CONTINUE; | 64 | return COMPACT_CONTINUE; |
63 | } | 65 | } |
@@ -68,7 +70,7 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order) | |||
68 | } | 70 | } |
69 | 71 | ||
70 | static inline unsigned long compact_zone_order(struct zone *zone, int order, | 72 | static inline unsigned long compact_zone_order(struct zone *zone, int order, |
71 | gfp_t gfp_mask) | 73 | gfp_t gfp_mask, bool sync) |
72 | { | 74 | { |
73 | return 0; | 75 | return 0; |
74 | } | 76 | } |