diff options
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r-- | mm/vmscan.c | 27 |
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 44df66a98f2a..a6c5d0b28321 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -88,9 +88,6 @@ struct scan_control { | |||
88 | /* Can pages be swapped as part of reclaim? */ | 88 | /* Can pages be swapped as part of reclaim? */ |
89 | unsigned int may_swap:1; | 89 | unsigned int may_swap:1; |
90 | 90 | ||
91 | /* e.g. boosted watermark reclaim leaves slabs alone */ | ||
92 | unsigned int may_shrinkslab:1; | ||
93 | |||
94 | /* | 91 | /* |
95 | * Cgroups are not reclaimed below their configured memory.low, | 92 | * Cgroups are not reclaimed below their configured memory.low, |
96 | * unless we threaten to OOM. If any cgroups are skipped due to | 93 | * unless we threaten to OOM. If any cgroups are skipped due to |
@@ -699,7 +696,14 @@ static unsigned long shrink_slab(gfp_t gfp_mask, int nid, | |||
699 | unsigned long ret, freed = 0; | 696 | unsigned long ret, freed = 0; |
700 | struct shrinker *shrinker; | 697 | struct shrinker *shrinker; |
701 | 698 | ||
702 | if (!mem_cgroup_is_root(memcg)) | 699 | /* |
700 | * The root memcg might be allocated even though memcg is disabled | ||
701 | * via "cgroup_disable=memory" boot parameter. This could make | ||
702 | * mem_cgroup_is_root() return false, then just run memcg slab | ||
703 | * shrink, but skip global shrink. This may result in premature | ||
704 | * oom. | ||
705 | */ | ||
706 | if (!mem_cgroup_disabled() && !mem_cgroup_is_root(memcg)) | ||
703 | return shrink_slab_memcg(gfp_mask, nid, memcg, priority); | 707 | return shrink_slab_memcg(gfp_mask, nid, memcg, priority); |
704 | 708 | ||
705 | if (!down_read_trylock(&shrinker_rwsem)) | 709 | if (!down_read_trylock(&shrinker_rwsem)) |
@@ -2707,10 +2711,8 @@ static bool shrink_node(pg_data_t *pgdat, struct scan_control *sc) | |||
2707 | shrink_node_memcg(pgdat, memcg, sc, &lru_pages); | 2711 | shrink_node_memcg(pgdat, memcg, sc, &lru_pages); |
2708 | node_lru_pages += lru_pages; | 2712 | node_lru_pages += lru_pages; |
2709 | 2713 | ||
2710 | if (sc->may_shrinkslab) { | 2714 | shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, |
2711 | shrink_slab(sc->gfp_mask, pgdat->node_id, | 2715 | sc->priority); |
2712 | memcg, sc->priority); | ||
2713 | } | ||
2714 | 2716 | ||
2715 | /* Record the group's reclaim efficiency */ | 2717 | /* Record the group's reclaim efficiency */ |
2716 | vmpressure(sc->gfp_mask, memcg, false, | 2718 | vmpressure(sc->gfp_mask, memcg, false, |
@@ -3187,7 +3189,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | |||
3187 | .may_writepage = !laptop_mode, | 3189 | .may_writepage = !laptop_mode, |
3188 | .may_unmap = 1, | 3190 | .may_unmap = 1, |
3189 | .may_swap = 1, | 3191 | .may_swap = 1, |
3190 | .may_shrinkslab = 1, | ||
3191 | }; | 3192 | }; |
3192 | 3193 | ||
3193 | /* | 3194 | /* |
@@ -3219,6 +3220,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | |||
3219 | 3220 | ||
3220 | #ifdef CONFIG_MEMCG | 3221 | #ifdef CONFIG_MEMCG |
3221 | 3222 | ||
3223 | /* Only used by soft limit reclaim. Do not reuse for anything else. */ | ||
3222 | unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, | 3224 | unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, |
3223 | gfp_t gfp_mask, bool noswap, | 3225 | gfp_t gfp_mask, bool noswap, |
3224 | pg_data_t *pgdat, | 3226 | pg_data_t *pgdat, |
@@ -3231,11 +3233,11 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, | |||
3231 | .may_unmap = 1, | 3233 | .may_unmap = 1, |
3232 | .reclaim_idx = MAX_NR_ZONES - 1, | 3234 | .reclaim_idx = MAX_NR_ZONES - 1, |
3233 | .may_swap = !noswap, | 3235 | .may_swap = !noswap, |
3234 | .may_shrinkslab = 1, | ||
3235 | }; | 3236 | }; |
3236 | unsigned long lru_pages; | 3237 | unsigned long lru_pages; |
3237 | 3238 | ||
3238 | set_task_reclaim_state(current, &sc.reclaim_state); | 3239 | WARN_ON_ONCE(!current->reclaim_state); |
3240 | |||
3239 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | | 3241 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | |
3240 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); | 3242 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); |
3241 | 3243 | ||
@@ -3253,7 +3255,6 @@ unsigned long mem_cgroup_shrink_node(struct mem_cgroup *memcg, | |||
3253 | 3255 | ||
3254 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); | 3256 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); |
3255 | 3257 | ||
3256 | set_task_reclaim_state(current, NULL); | ||
3257 | *nr_scanned = sc.nr_scanned; | 3258 | *nr_scanned = sc.nr_scanned; |
3258 | 3259 | ||
3259 | return sc.nr_reclaimed; | 3260 | return sc.nr_reclaimed; |
@@ -3279,7 +3280,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg, | |||
3279 | .may_writepage = !laptop_mode, | 3280 | .may_writepage = !laptop_mode, |
3280 | .may_unmap = 1, | 3281 | .may_unmap = 1, |
3281 | .may_swap = may_swap, | 3282 | .may_swap = may_swap, |
3282 | .may_shrinkslab = 1, | ||
3283 | }; | 3283 | }; |
3284 | 3284 | ||
3285 | set_task_reclaim_state(current, &sc.reclaim_state); | 3285 | set_task_reclaim_state(current, &sc.reclaim_state); |
@@ -3591,7 +3591,6 @@ restart: | |||
3591 | */ | 3591 | */ |
3592 | sc.may_writepage = !laptop_mode && !nr_boost_reclaim; | 3592 | sc.may_writepage = !laptop_mode && !nr_boost_reclaim; |
3593 | sc.may_swap = !nr_boost_reclaim; | 3593 | sc.may_swap = !nr_boost_reclaim; |
3594 | sc.may_shrinkslab = !nr_boost_reclaim; | ||
3595 | 3594 | ||
3596 | /* | 3595 | /* |
3597 | * Do some background aging of the anon list, to give | 3596 | * Do some background aging of the anon list, to give |