Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	80
1 file changed, 56 insertions, 24 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c9177202c8ce..7e0116150dc7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -202,6 +202,14 @@ void unregister_shrinker(struct shrinker *shrinker)
 }
 EXPORT_SYMBOL(unregister_shrinker);
 
+static inline int do_shrinker_shrink(struct shrinker *shrinker,
+				     struct shrink_control *sc,
+				     unsigned long nr_to_scan)
+{
+	sc->nr_to_scan = nr_to_scan;
+	return (*shrinker->shrink)(shrinker, sc);
+}
+
 #define SHRINK_BATCH 128
 /*
  * Call the shrink functions to age shrinkable caches
@@ -222,25 +230,29 @@ EXPORT_SYMBOL(unregister_shrinker);
  *
  * Returns the number of slab objects which we shrunk.
  */
-unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
-			unsigned long lru_pages)
+unsigned long shrink_slab(struct shrink_control *shrink,
+			  unsigned long nr_pages_scanned,
+			  unsigned long lru_pages)
 {
 	struct shrinker *shrinker;
 	unsigned long ret = 0;
 
-	if (scanned == 0)
-		scanned = SWAP_CLUSTER_MAX;
+	if (nr_pages_scanned == 0)
+		nr_pages_scanned = SWAP_CLUSTER_MAX;
 
-	if (!down_read_trylock(&shrinker_rwsem))
-		return 1;	/* Assume we'll be able to shrink next time */
+	if (!down_read_trylock(&shrinker_rwsem)) {
+		/* Assume we'll be able to shrink next time */
+		ret = 1;
+		goto out;
+	}
 
 	list_for_each_entry(shrinker, &shrinker_list, list) {
 		unsigned long long delta;
 		unsigned long total_scan;
 		unsigned long max_pass;
 
-		max_pass = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-		delta = (4 * scanned) / shrinker->seeks;
+		max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+		delta = (4 * nr_pages_scanned) / shrinker->seeks;
 		delta *= max_pass;
 		do_div(delta, lru_pages + 1);
 		shrinker->nr += delta;
@@ -267,9 +279,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 			int shrink_ret;
 			int nr_before;
 
-			nr_before = (*shrinker->shrink)(shrinker, 0, gfp_mask);
-			shrink_ret = (*shrinker->shrink)(shrinker, this_scan,
-								gfp_mask);
+			nr_before = do_shrinker_shrink(shrinker, shrink, 0);
+			shrink_ret = do_shrinker_shrink(shrinker, shrink,
+							this_scan);
 			if (shrink_ret == -1)
 				break;
 			if (shrink_ret < nr_before)
@@ -283,6 +295,8 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 		shrinker->nr += total_scan;
 	}
 	up_read(&shrinker_rwsem);
+out:
+	cond_resched();
 	return ret;
 }
 
@@ -1202,13 +1216,16 @@ int isolate_lru_page(struct page *page)
 {
 	int ret = -EBUSY;
 
+	VM_BUG_ON(!page_count(page));
+
 	if (PageLRU(page)) {
 		struct zone *zone = page_zone(page);
 
 		spin_lock_irq(&zone->lru_lock);
-		if (PageLRU(page) && get_page_unless_zero(page)) {
+		if (PageLRU(page)) {
 			int lru = page_lru(page);
 			ret = 0;
+			get_page(page);
 			ClearPageLRU(page);
 
 			del_page_from_lru_list(zone, page, lru);
@@ -2027,7 +2044,8 @@ static bool all_unreclaimable(struct zonelist *zonelist,
  * else, the number of pages reclaimed
  */
 static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
-					struct scan_control *sc)
+					  struct scan_control *sc,
+					  struct shrink_control *shrink)
 {
 	int priority;
 	unsigned long total_scanned = 0;
@@ -2061,7 +2079,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		lru_pages += zone_reclaimable_pages(zone);
 	}
 
-	shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
+	shrink_slab(shrink, sc->nr_scanned, lru_pages);
 	if (reclaim_state) {
 		sc->nr_reclaimed += reclaim_state->reclaimed_slab;
 		reclaim_state->reclaimed_slab = 0;
@@ -2133,12 +2151,15 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.mem_cgroup = NULL,
 		.nodemask = nodemask,
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 
 	trace_mm_vmscan_direct_reclaim_begin(order,
 				sc.may_writepage,
 				gfp_mask);
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
 	trace_mm_vmscan_direct_reclaim_end(nr_reclaimed);
 
@@ -2198,17 +2219,20 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.order = 0,
 		.mem_cgroup = mem_cont,
 		.nodemask = NULL, /* we don't care the placement */
+		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
+				(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK),
+	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
 	};
 
-	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
-			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
 
 	trace_mm_vmscan_memcg_reclaim_begin(0,
 					    sc.may_writepage,
 					    sc.gfp_mask);
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
 	trace_mm_vmscan_memcg_reclaim_end(nr_reclaimed);
 
@@ -2287,7 +2311,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining,
 	 * must be balanced
 	 */
 	if (order)
-		return pgdat_balanced(pgdat, balanced, classzone_idx);
+		return !pgdat_balanced(pgdat, balanced, classzone_idx);
 	else
 		return !all_zones_ok;
 }
@@ -2336,6 +2360,9 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 		.order = order,
 		.mem_cgroup = NULL,
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 loop_again:
 	total_scanned = 0;
 	sc.nr_reclaimed = 0;
@@ -2435,8 +2462,7 @@ loop_again:
 							end_zone, 0))
 				shrink_zone(priority, zone, &sc);
 			reclaim_state->reclaimed_slab = 0;
-			nr_slab = shrink_slab(sc.nr_scanned, GFP_KERNEL,
-						lru_pages);
+			nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
 			sc.nr_reclaimed += reclaim_state->reclaimed_slab;
 			total_scanned += sc.nr_scanned;
 
@@ -2788,7 +2814,10 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.swappiness = vm_swappiness,
 		.order = 0,
 	};
-	struct zonelist * zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
+	struct zonelist *zonelist = node_zonelist(numa_node_id(), sc.gfp_mask);
 	struct task_struct *p = current;
 	unsigned long nr_reclaimed;
 
@@ -2797,7 +2826,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 	reclaim_state.reclaimed_slab = 0;
 	p->reclaim_state = &reclaim_state;
 
-	nr_reclaimed = do_try_to_free_pages(zonelist, &sc);
+	nr_reclaimed = do_try_to_free_pages(zonelist, &sc, &shrink);
 
 	p->reclaim_state = NULL;
 	lockdep_clear_current_reclaim_state();
@@ -2972,6 +3001,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.swappiness = vm_swappiness,
 		.order = order,
 	};
+	struct shrink_control shrink = {
+		.gfp_mask = sc.gfp_mask,
+	};
 	unsigned long nr_slab_pages0, nr_slab_pages1;
 
 	cond_resched();
@@ -3013,7 +3045,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		unsigned long lru_pages = zone_reclaimable_pages(zone);
 
 		/* No reclaimable slab or very low memory pressure */
-		if (!shrink_slab(sc.nr_scanned, gfp_mask, lru_pages))
+			break;
 
 		/* Freed enough memory */