path: root/mm
author	Vladimir Davydov <vdavydov@virtuozzo.com>	2016-01-20 18:02:59 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-01-20 20:09:18 -0500
commit	3337767850b490eec5ca822f871241c981664737 (patch)
tree	a61677b1857166a205866166daf9fcf1c6eeb8b1 /mm
parent	37e84351198be087335ad2b2253b35c7cc76a5ad (diff)
mm: vmscan: pass memcg to get_scan_count()
memcg will come in handy in get_scan_count().  It can already be used for
getting swappiness immediately in get_scan_count() instead of passing it
around.  The following patches will add more memcg-related values, which
will be used there.

Signed-off-by: Vladimir Davydov <vdavydov@virtuozzo.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
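In outline, the callers stop computing swappiness (and, for the renamed shrink_zone_memcg(), the lruvec) themselves and instead hand the memcg down so the callee can derive what it needs. A simplified sketch of the resulting call shape, with function bodies elided, based on the hunks below:

static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
			   struct scan_control *sc, unsigned long *nr,
			   unsigned long *lru_pages)
{
	/* swappiness is now derived from the memcg inside the callee */
	int swappiness = mem_cgroup_swappiness(memcg);

	/* ... balance anon vs. file scanning using swappiness ... */
}

static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
			      struct scan_control *sc, unsigned long *lru_pages)
{
	/* the lruvec, too, is looked up here rather than by the caller */
	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
	unsigned long nr[NR_LRU_LISTS];

	get_scan_count(lruvec, memcg, sc, nr, lru_pages);
	/* ... shrink each LRU list according to nr[] ... */
}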
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	20
1 file changed, 8 insertions(+), 12 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 05dd182f04fd..014ff89a4aa5 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1966,10 +1966,11 @@ enum scan_balance {
  * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan
  * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan
  */
-static void get_scan_count(struct lruvec *lruvec, int swappiness,
+static void get_scan_count(struct lruvec *lruvec, struct mem_cgroup *memcg,
 			   struct scan_control *sc, unsigned long *nr,
 			   unsigned long *lru_pages)
 {
+	int swappiness = mem_cgroup_swappiness(memcg);
 	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
 	u64 fraction[2];
 	u64 denominator = 0;	/* gcc */
@@ -2193,9 +2194,10 @@ static inline void init_tlb_ubc(void)
 /*
  * This is a basic per-zone page freer.  Used by both kswapd and direct reclaim.
  */
-static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
+static void shrink_zone_memcg(struct zone *zone, struct mem_cgroup *memcg,
 			  struct scan_control *sc, unsigned long *lru_pages)
 {
+	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
 	unsigned long nr[NR_LRU_LISTS];
 	unsigned long targets[NR_LRU_LISTS];
 	unsigned long nr_to_scan;
@@ -2205,7 +2207,7 @@ static void shrink_lruvec(struct lruvec *lruvec, int swappiness,
 	struct blk_plug plug;
 	bool scan_adjusted;
 
-	get_scan_count(lruvec, swappiness, sc, nr, lru_pages);
+	get_scan_count(lruvec, memcg, sc, nr, lru_pages);
 
 	/* Record the original scan target for proportional adjustments later */
 	memcpy(targets, nr, sizeof(nr));
@@ -2409,8 +2411,6 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
 			unsigned long lru_pages;
 			unsigned long reclaimed;
 			unsigned long scanned;
-			struct lruvec *lruvec;
-			int swappiness;
 
 			if (mem_cgroup_low(root, memcg)) {
 				if (!sc->may_thrash)
@@ -2418,12 +2418,10 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
 				mem_cgroup_events(memcg, MEMCG_LOW, 1);
 			}
 
-			lruvec = mem_cgroup_zone_lruvec(zone, memcg);
-			swappiness = mem_cgroup_swappiness(memcg);
 			reclaimed = sc->nr_reclaimed;
 			scanned = sc->nr_scanned;
 
-			shrink_lruvec(lruvec, swappiness, sc, &lru_pages);
+			shrink_zone_memcg(zone, memcg, sc, &lru_pages);
 			zone_lru_pages += lru_pages;
 
 			if (memcg && is_classzone)
@@ -2893,8 +2891,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 		.may_unmap = 1,
 		.may_swap = !noswap,
 	};
-	struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
-	int swappiness = mem_cgroup_swappiness(memcg);
 	unsigned long lru_pages;
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2911,7 +2907,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 	 * will pick up pages from other mem cgroup's as well. We hack
 	 * the priority and make it zero.
 	 */
-	shrink_lruvec(lruvec, swappiness, &sc, &lru_pages);
+	shrink_zone_memcg(zone, memcg, &sc, &lru_pages);
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 