aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorMarek Szyprowski <m.szyprowski@samsung.com>2012-01-25 06:09:52 -0500
committerMarek Szyprowski <m.szyprowski@samsung.com>2012-05-21 09:09:35 -0400
commitbba9071087108d3de70bea274e35064cc480487b (patch)
treeb11ff34b84a8515063104c461e3e81f69ea83f01 /mm
parentcfd3da1e49bb95c355c01c0f502d657deb3d34a4 (diff)
mm: extract reclaim code from __alloc_pages_direct_reclaim()
This patch extracts common reclaim code from the __alloc_pages_direct_reclaim() function into a separate function, __perform_reclaim(), which can later be used by alloc_contig_range().

Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
Diffstat (limited to 'mm')
-rw-r--r--mm/page_alloc.c30
1 file changed, 21 insertions(+), 9 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8be37bcda0b2..4615531dcf66 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2130,16 +2130,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 }
 #endif /* CONFIG_COMPACTION */
 
-/* The really slow allocator path where we enter direct reclaim */
-static inline struct page *
-__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
-	struct zonelist *zonelist, enum zone_type high_zoneidx,
-	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
-	int migratetype, unsigned long *did_some_progress)
+/* Perform direct synchronous page reclaim */
+static int
+__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
+	nodemask_t *nodemask)
 {
-	struct page *page = NULL;
 	struct reclaim_state reclaim_state;
-	bool drained = false;
+	int progress;
 
 	cond_resched();
 
@@ -2150,7 +2147,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 	reclaim_state.reclaimed_slab = 0;
 	current->reclaim_state = &reclaim_state;
 
-	*did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
+	progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
 
 	current->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
@@ -2158,6 +2155,21 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
 
 	cond_resched();
 
+	return progress;
+}
+
+/* The really slow allocator path where we enter direct reclaim */
+static inline struct page *
+__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
+	struct zonelist *zonelist, enum zone_type high_zoneidx,
+	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
+	int migratetype, unsigned long *did_some_progress)
+{
+	struct page *page = NULL;
+	bool drained = false;
+
+	*did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
+					nodemask);
 	if (unlikely(!(*did_some_progress)))
 		return NULL;
 