author	Ying Han <yinghan@google.com>	2011-05-24 20:12:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2011-05-25 11:39:25 -0400
commit	a09ed5e00084448453c8bada4dcd31e5fbfc2f21 (patch)
tree	493f5f2a93efb080cdcc28e793cbcfc7999e66eb /mm
parent	7b1de5868b124d8f399d8791ed30a9b679d64d4d (diff)
vmscan: change shrink_slab() interfaces by passing shrink_control
Consolidate the existing parameters to shrink_slab() into a new
shrink_control struct. This is needed later to pass the same struct to
shrinkers.

Signed-off-by: Ying Han <yinghan@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Pavel Emelyanov <xemul@openvz.org>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/memory-failure.c	7
-rw-r--r--	mm/vmscan.c	46
2 files changed, 39 insertions, 14 deletions
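
Note: the shrink_control definition itself lands outside this diffstat (the view is limited to 'mm'), so it does not appear below. A minimal sketch of the struct, inferred from the designated initializers in the hunks that follow; field order and comments are assumptions, not the actual definition:

/*
 * Inferred sketch of the new struct; the real definition is added
 * outside the 'mm' subtree shown in this diff.
 */
struct shrink_control {
	gfp_t gfp_mask;			/* allocation context of the reclaim */
	unsigned long nr_scanned;	/* pages scanned by page reclaim */
};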
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 369b80e81416..341341b2b47b 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -239,7 +239,12 @@ void shake_page(struct page *p, int access)
 	if (access) {
 		int nr;
 		do {
-			nr = shrink_slab(1000, GFP_KERNEL, 1000);
+			struct shrink_control shrink = {
+				.gfp_mask = GFP_KERNEL,
+				.nr_scanned = 1000,
+			};
+
+			nr = shrink_slab(&shrink, 1000);
 			if (page_count(p) == 1)
 				break;
 		} while (nr > 10);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 890f54184d9a..e4e245ed1a5b 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -222,11 +222,13 @@ EXPORT_SYMBOL(unregister_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
+unsigned long shrink_slab(struct shrink_control *shrink,
 			unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
 	unsigned long ret = 0;
+	unsigned long scanned = shrink->nr_scanned;
+	gfp_t gfp_mask = shrink->gfp_mask;
 
 	if (scanned == 0)
 		scanned = SWAP_CLUSTER_MAX;
@@ -2035,7 +2037,8 @@ static bool all_unreclaimable(struct zonelist *zonelist,
  * else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-					struct scan_control *sc)
+					struct scan_control *sc,
+					struct shrink_control *shrink)
 {
 	int priority;
 	unsigned long total_scanned = 0;
@@ -2069,7 +2072,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 			lru_pages += zone_reclaimable_pages(zone);
 		}
 
-		shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
+		shrink->nr_scanned = sc->nr_scanned;
+		shrink_slab(shrink, lru_pages);
 		if (reclaim_state) {
 			sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 			reclaim_state->reclaimed_slab = 0;
@@ -2141,12 +2145,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.mem_cgroup = NULL,
 		.nodemask = nodemask,
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 
 	trace_mm_vmscan_direct_reclaim_begin(order,
 				sc.may_writepage,
 				gfp_mask);
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
 	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
 
@@ -2206,17 +2213,20 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.order = 0,
 		.mem_cgroup = mem_cont,
 		.nodemask = NULL, /* we don't care the placement */
+		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
+	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
 	};
 
-	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
-			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
 
 	trace_mm_vmscan_memcg_reclaim_begin(0,
 					    sc.may_writepage,
 					    sc.gfp_mask);
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
@@ -2344,6 +2354,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 		.order = order,
 		.mem_cgroup = NULL,
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 loop_again:
 	total_scanned = 0;
 	sc.nr_reclaimed = 0;
@@ -2443,8 +2456,8 @@ loop_again:
 							end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
-			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-						lru_pages);
+			shrink.nr_scanned = sc.nr_scanned;
+			nr_slab = shrink_slab(&shrink, lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 
@@ -2796,7 +2809,10 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.swappiness = vm_swappiness,
 		.order = 0,
 	};
-	struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
+	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 	struct task_struct *p = current;
 	unsigned long nr_reclaimed;
 
@@ -2805,7 +2821,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
 	p->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
@@ -2980,6 +2996,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.swappiness = vm_swappiness,
 		.order = order,
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 	unsigned long nr_slab_pages0, nr_slab_pages1;
 
 	cond_resched();
@@ -3006,6 +3025,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	}
 
 	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
+	shrink.nr_scanned = sc.nr_scanned;
 	if (nr_slab_pages0 > zone->min_slab_pages) {
 		/*
 		 * shrink_slab() does not currently allow us to determine how
@@ -3021,7 +3041,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		unsigned long lru_pages = zone_reclaimable_pages(zone);
 
 		/* No reclaimable slab or very low memory pressure */
-		if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
+		if (!shrink_slab(&shrink, lru_pages))
 			break;
 
 		/* Freed enough memory */
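
Taken together, the hunks establish one calling convention for every shrink_slab() caller. An illustrative summary of that pattern; the variable names (sc, lru_pages, nr_slab) are assumed to be in scope and are not taken verbatim from any one hunk:

/* Pattern this patch establishes (illustrative): build the
 * shrink_control once beside the scan_control, then refresh
 * nr_scanned immediately before each shrink_slab() call. */
struct shrink_control shrink = {
	.gfp_mask = sc.gfp_mask,	/* reuse reclaim's allocation mask */
};

shrink.nr_scanned = sc.nr_scanned;	/* pages just scanned by reclaim */
nr_slab = shrink_slab(&shrink, lru_pages);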