diff options
author | Johannes Weiner <hannes@cmpxchg.org> | 2014-08-06 19:06:17 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-08-06 21:01:18 -0400 |
commit | 02695175c79b9163c798cc1cb78c628d011c07a6 (patch) | |
tree | d0981d7146ea7b55ca0e615c9e5ff1a23c97a393 /mm | |
parent | 2344d7e44b870f9df67e505ee4e633217de752ba (diff) |
mm: vmscan: move swappiness out of scan_control
Swappiness is determined for each scanned memcg individually in
shrink_zone() and is not a parameter that applies throughout the reclaim
scan. Move it out of struct scan_control to prevent accidental use of a
stale value.
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/vmscan.c | 27 |
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c index 74a9e0ae09b0..c28b8981e56a 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -89,9 +89,6 @@ struct scan_control { | |||
89 | /* Scan (total_size >> priority) pages at once */ | 89 | /* Scan (total_size >> priority) pages at once */ |
90 | int priority; | 90 | int priority; |
91 | 91 | ||
92 | /* anon vs. file LRUs scanning "ratio" */ | ||
93 | int swappiness; | ||
94 | |||
95 | /* | 92 | /* |
96 | * The memory cgroup that hit its limit and as a result is the | 93 | * The memory cgroup that hit its limit and as a result is the |
97 | * primary target of this reclaim invocation. | 94 | * primary target of this reclaim invocation. |
@@ -1868,8 +1865,8 @@ enum scan_balance { | |||
1868 | * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan | 1865 | * nr[0] = anon inactive pages to scan; nr[1] = anon active pages to scan |
1869 | * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan | 1866 | * nr[2] = file inactive pages to scan; nr[3] = file active pages to scan |
1870 | */ | 1867 | */ |
1871 | static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, | 1868 | static void get_scan_count(struct lruvec *lruvec, int swappiness, |
1872 | unsigned long *nr) | 1869 | struct scan_control *sc, unsigned long *nr) |
1873 | { | 1870 | { |
1874 | struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; | 1871 | struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat; |
1875 | u64 fraction[2]; | 1872 | u64 fraction[2]; |
@@ -1912,7 +1909,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, | |||
1912 | * using the memory controller's swap limit feature would be | 1909 | * using the memory controller's swap limit feature would be |
1913 | * too expensive. | 1910 | * too expensive. |
1914 | */ | 1911 | */ |
1915 | if (!global_reclaim(sc) && !sc->swappiness) { | 1912 | if (!global_reclaim(sc) && !swappiness) { |
1916 | scan_balance = SCAN_FILE; | 1913 | scan_balance = SCAN_FILE; |
1917 | goto out; | 1914 | goto out; |
1918 | } | 1915 | } |
@@ -1922,7 +1919,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, | |||
1922 | * system is close to OOM, scan both anon and file equally | 1919 | * system is close to OOM, scan both anon and file equally |
1923 | * (unless the swappiness setting disagrees with swapping). | 1920 | * (unless the swappiness setting disagrees with swapping). |
1924 | */ | 1921 | */ |
1925 | if (!sc->priority && sc->swappiness) { | 1922 | if (!sc->priority && swappiness) { |
1926 | scan_balance = SCAN_EQUAL; | 1923 | scan_balance = SCAN_EQUAL; |
1927 | goto out; | 1924 | goto out; |
1928 | } | 1925 | } |
@@ -1965,7 +1962,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc, | |||
1965 | * With swappiness at 100, anonymous and file have the same priority. | 1962 | * With swappiness at 100, anonymous and file have the same priority. |
1966 | * This scanning priority is essentially the inverse of IO cost. | 1963 | * This scanning priority is essentially the inverse of IO cost. |
1967 | */ | 1964 | */ |
1968 | anon_prio = sc->swappiness; | 1965 | anon_prio = swappiness; |
1969 | file_prio = 200 - anon_prio; | 1966 | file_prio = 200 - anon_prio; |
1970 | 1967 | ||
1971 | /* | 1968 | /* |
@@ -2055,7 +2052,8 @@ out: | |||
2055 | /* | 2052 | /* |
2056 | * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. | 2053 | * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. |
2057 | */ | 2054 | */ |
2058 | static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) | 2055 | static void shrink_lruvec(struct lruvec *lruvec, int swappiness, |
2056 | struct scan_control *sc) | ||
2059 | { | 2057 | { |
2060 | unsigned long nr[NR_LRU_LISTS]; | 2058 | unsigned long nr[NR_LRU_LISTS]; |
2061 | unsigned long targets[NR_LRU_LISTS]; | 2059 | unsigned long targets[NR_LRU_LISTS]; |
@@ -2066,7 +2064,7 @@ static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc) | |||
2066 | struct blk_plug plug; | 2064 | struct blk_plug plug; |
2067 | bool scan_adjusted; | 2065 | bool scan_adjusted; |
2068 | 2066 | ||
2069 | get_scan_count(lruvec, sc, nr); | 2067 | get_scan_count(lruvec, swappiness, sc, nr); |
2070 | 2068 | ||
2071 | /* Record the original scan target for proportional adjustments later */ | 2069 | /* Record the original scan target for proportional adjustments later */ |
2072 | memcpy(targets, nr, sizeof(nr)); | 2070 | memcpy(targets, nr, sizeof(nr)); |
@@ -2263,11 +2261,12 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc) | |||
2263 | memcg = mem_cgroup_iter(root, NULL, &reclaim); | 2261 | memcg = mem_cgroup_iter(root, NULL, &reclaim); |
2264 | do { | 2262 | do { |
2265 | struct lruvec *lruvec; | 2263 | struct lruvec *lruvec; |
2264 | int swappiness; | ||
2266 | 2265 | ||
2267 | lruvec = mem_cgroup_zone_lruvec(zone, memcg); | 2266 | lruvec = mem_cgroup_zone_lruvec(zone, memcg); |
2267 | swappiness = mem_cgroup_swappiness(memcg); | ||
2268 | 2268 | ||
2269 | sc->swappiness = mem_cgroup_swappiness(memcg); | 2269 | shrink_lruvec(lruvec, swappiness, sc); |
2270 | shrink_lruvec(lruvec, sc); | ||
2271 | 2270 | ||
2272 | /* | 2271 | /* |
2273 | * Direct reclaim and kswapd have to scan all memory | 2272 | * Direct reclaim and kswapd have to scan all memory |
@@ -2714,10 +2713,10 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, | |||
2714 | .may_swap = !noswap, | 2713 | .may_swap = !noswap, |
2715 | .order = 0, | 2714 | .order = 0, |
2716 | .priority = 0, | 2715 | .priority = 0, |
2717 | .swappiness = mem_cgroup_swappiness(memcg), | ||
2718 | .target_mem_cgroup = memcg, | 2716 | .target_mem_cgroup = memcg, |
2719 | }; | 2717 | }; |
2720 | struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); | 2718 | struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg); |
2719 | int swappiness = mem_cgroup_swappiness(memcg); | ||
2721 | 2720 | ||
2722 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | | 2721 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | |
2723 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); | 2722 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); |
@@ -2733,7 +2732,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg, | |||
2733 | * will pick up pages from other mem cgroup's as well. We hack | 2732 | * will pick up pages from other mem cgroup's as well. We hack |
2734 | * the priority and make it zero. | 2733 | * the priority and make it zero. |
2735 | */ | 2734 | */ |
2736 | shrink_lruvec(lruvec, &sc); | 2735 | shrink_lruvec(lruvec, swappiness, &sc); |
2737 | 2736 | ||
2738 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); | 2737 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); |
2739 | 2738 | ||