path: root/include/linux/migrate.h
author	Mel Gorman <mel@csn.ul.ie>	2011-01-13 18:45:57 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-01-13 20:32:34 -0500
commit	77f1fe6b08b13a87391549c8a820ddc817b6f50e (patch)
tree	720865bd0994da3787b6f37d33b2ee4c26a2de6c /include/linux/migrate.h
parent	3e7d344970673c5334cf7b5bb27c8c0942b06126 (diff)
mm: migration: allow migration to operate asynchronously and avoid synchronous compaction in the faster path
Migration synchronously waits for writeback if the initial passes fail.
Callers of memory compaction do not necessarily want this behaviour if the
caller is latency sensitive or expects that synchronous migration is not
going to have a significantly better success rate.

This patch adds a sync parameter to migrate_pages() allowing the caller to
indicate if wait_on_page_writeback() is allowed within migration or not.
For reclaim/compaction, try_to_compact_pages() is first called
asynchronously, direct reclaim runs and then try_to_compact_pages() is
called synchronously as there is a greater expectation that it'll succeed.

[akpm@linux-foundation.org: build/merge fix]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
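A minimal sketch of the two-pass calling pattern the changelog describes. Only the migrate_pages() declaration with the new "bool sync" argument comes from this header change; the callback sketch_new_page(), the helper sketch_migrate_two_pass() and the list handling are hypothetical, for illustration only.

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/migrate.h>

/*
 * Hypothetical new_page_t callback for illustration; real callers
 * (compaction, memory hotplug, sys_move_pages) supply their own.
 */
static struct page *sketch_new_page(struct page *page, unsigned long private,
				    int **result)
{
	return alloc_page(GFP_KERNEL);
}

/*
 * Sketch of the pattern from the changelog: a first, asynchronous attempt
 * that must not block in wait_on_page_writeback(), then a synchronous
 * retry on whatever could not be migrated.  Pages still on the list
 * afterwards remain the caller's responsibility (putback_lru_pages()).
 */
static int sketch_migrate_two_pass(struct list_head *pagelist)
{
	int rc;

	/* Latency-sensitive fast path: sync == false. */
	rc = migrate_pages(pagelist, sketch_new_page, 0, 0, false);
	if (rc <= 0)		/* everything migrated, or a hard error */
		return rc;

	/* Retry allowing waits on writeback: sync == true. */
	return migrate_pages(pagelist, sketch_new_page, 0, 0, true);
}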
Diffstat (limited to 'include/linux/migrate.h')
-rw-r--r--	include/linux/migrate.h	12
1 file changed, 8 insertions(+), 4 deletions(-)
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 085527fb8261..fa31902803fd 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -13,9 +13,11 @@ extern void putback_lru_pages(struct list_head *l);
 extern int migrate_page(struct address_space *,
 			struct page *, struct page *);
 extern int migrate_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, int offlining,
+			bool sync);
 extern int migrate_huge_pages(struct list_head *l, new_page_t x,
-			unsigned long private, int offlining);
+			unsigned long private, int offlining,
+			bool sync);
 
 extern int fail_migrate_page(struct address_space *,
 			struct page *, struct page *);
@@ -33,9 +35,11 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
 
 static inline void putback_lru_pages(struct list_head *l) {}
 static inline int migrate_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, int offlining,
+		bool sync) { return -ENOSYS; }
 static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
-		unsigned long private, int offlining) { return -ENOSYS; }
+		unsigned long private, int offlining,
+		bool sync) { return -ENOSYS; }
 
 static inline int migrate_prep(void) { return -ENOSYS; }
 static inline int migrate_prep_local(void) { return -ENOSYS; }