author:    Mel Gorman <mel@csn.ul.ie>  2011-01-13 18:45:57 -0500
committer: Linus Torvalds <torvalds@linux-foundation.org>  2011-01-13 20:32:34 -0500
commit:    77f1fe6b08b13a87391549c8a820ddc817b6f50e
tree:      720865bd0994da3787b6f37d33b2ee4c26a2de6c /mm/page_alloc.c
parent:    3e7d344970673c5334cf7b5bb27c8c0942b06126
mm: migration: allow migration to operate asynchronously and avoid synchronous compaction in the faster path
Migration synchronously waits for writeback if the initial passes fail. Callers of memory compaction do not necessarily want this behaviour if they are latency sensitive or do not expect synchronous migration to have a significantly better success rate.

This patch adds a sync parameter to migrate_pages(), allowing the caller to indicate whether wait_on_page_writeback() is allowed within migration. For reclaim/compaction, try_to_compact_pages() is first called asynchronously, direct reclaim runs, and then try_to_compact_pages() is called synchronously, as there is a greater expectation that it will succeed.

[akpm@linux-foundation.org: build/merge fix]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
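To make the ordering concrete, the following is a minimal, self-contained C sketch of the "asynchronous first, synchronous after reclaim" flow this patch introduces. try_compact() and try_reclaim() are hypothetical stand-ins invented for this illustration, not the kernel's real functions, and their return values only simulate one plausible outcome.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for compaction: an async pass gives up on pages
 * under writeback, a sync pass may wait on them (slower, but more
 * likely to succeed). */
static bool try_compact(bool sync_migration)
{
	printf("compaction pass: %s\n", sync_migration ? "sync" : "async");
	return sync_migration;	/* pretend only the sync pass succeeds */
}

/* Hypothetical stand-in for direct reclaim. */
static bool try_reclaim(void)
{
	printf("direct reclaim\n");
	return false;		/* pretend reclaim alone is not enough */
}

int main(void)
{
	bool sync_migration = false;

	/* The first compaction attempt is asynchronous to keep latency
	 * low for callers that are sensitive to it. */
	if (try_compact(sync_migration))
		return 0;

	/* Once the cheap attempt has failed, every later attempt is
	 * allowed to wait on writeback. */
	sync_migration = true;

	if (try_reclaim())
		return 0;

	/* After direct reclaim has run, a synchronous pass is expected
	 * to have a better success rate, so the waiting is worth it. */
	if (try_compact(sync_migration))
		return 0;
	return 1;
}

A single boolean, flipped once after the first failed attempt, is the same shape the diff below gives __alloc_pages_slowpath() with its sync_migration flag.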
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  21
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 03a66a31bfcd..0fd486467b4b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1812,7 +1812,8 @@ static struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	struct page *page;
 	struct task_struct *tsk = current;
@@ -1822,7 +1823,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 
 	tsk->flags |= PF_MEMALLOC;
 	*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
-						nodemask);
+						nodemask, sync_migration);
 	tsk->flags &= ~PF_MEMALLOC;
 	if (*did_some_progress != COMPACT_SKIPPED) {
 
@@ -1859,7 +1860,8 @@ static inline struct page *
 __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+	int migratetype, unsigned long *did_some_progress,
+	bool sync_migration)
 {
 	return NULL;
 }
@@ -2001,6 +2003,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long pages_reclaimed = 0;
 	unsigned long did_some_progress;
 	struct task_struct *p = current;
+	bool sync_migration = false;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
@@ -2063,14 +2066,19 @@ rebalance:
 	if (test_thread_flag(TIF_MEMDIE) && !(gfp_mask & __GFP_NOFAIL))
 		goto nopage;
 
-	/* Try direct compaction */
+	/*
+	 * Try direct compaction. The first pass is asynchronous. Subsequent
+	 * attempts after direct reclaim are synchronous
+	 */
 	page = __alloc_pages_direct_compact(gfp_mask, order,
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					migratetype, &did_some_progress,
+					sync_migration);
 	if (page)
 		goto got_pg;
+	sync_migration = true;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
@@ -2134,7 +2142,8 @@ rebalance:
 					zonelist, high_zoneidx,
 					nodemask,
 					alloc_flags, preferred_zone,
-					migratetype, &did_some_progress);
+					migratetype, &did_some_progress,
+					sync_migration);
 	if (page)
 		goto got_pg;
 	}