author      Jerome Marchand <jmarchan@redhat.com>          2014-08-06 19:08:03 -0400
committer   Linus Torvalds <torvalds@linux-foundation.org> 2014-08-06 21:01:22 -0400
commit      2ab051e11bfa3cbb7b24177f3d6aaed10a0d743e (patch)
tree        5262105b4933396587db86a36cbd28d5db37ceac /mm
parent      7c0db9e917f77e6de2a524b33b5436491850dc79 (diff)
memcg, vmscan: Fix forced scan of anonymous pages
When memory cgroups are enabled, the code that decides to force a scan of anonymous pages in get_scan_count() compares global values (free, high_watermark) to a value that is restricted to a single memory cgroup (file). This makes the code over-eager to force an anon scan. For instance, it will force an anon scan when scanning a memcg that is mainly populated by anonymous pages, even when there are plenty of file pages to get rid of in other memcgs, and even when swappiness == 0. It breaks the user's expectation about swappiness and hurts performance.

This patch makes sure that a forced anon scan only happens when there are not enough file pages for the whole zone, not just in one random memcg.

[hannes@cmpxchg.org: cleanups]
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Rik van Riel <riel@redhat.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
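To illustrate the problem, here is a minimal, self-contained userspace sketch (not kernel code) of the check this patch corrects. The struct, helper names, and page counts below are hypothetical; only the shape of the comparison mirrors get_scan_count(): the old check compared the memcg-local file count against zone-wide free pages and the zone's high watermark, while the new check uses the zone-wide file count.

/*
 * Hypothetical userspace model of the forced-anon-scan decision.
 * None of these names exist in the kernel; the numbers are made up.
 */
#include <stdbool.h>
#include <stdio.h>

struct zone_state {
	unsigned long free_pages;   /* zone-wide NR_FREE_PAGES */
	unsigned long file_pages;   /* zone-wide file LRU pages */
	unsigned long high_wmark;   /* zone high watermark */
};

/* Old behaviour: memcg-local file count compared against zone values. */
static bool force_anon_scan_old(const struct zone_state *z,
				unsigned long memcg_file_pages)
{
	return memcg_file_pages + z->free_pages <= z->high_wmark;
}

/* New behaviour: zone-wide file count, as the patch does. */
static bool force_anon_scan_new(const struct zone_state *z)
{
	return z->file_pages + z->free_pages <= z->high_wmark;
}

int main(void)
{
	/* A zone with plenty of reclaimable file cache overall ... */
	struct zone_state z = {
		.free_pages = 1000,
		.file_pages = 500000,
		.high_wmark = 4000,
	};
	/* ... but the memcg being scanned holds almost no file pages. */
	unsigned long memcg_file = 100;

	printf("old check forces anon scan: %d\n",
	       force_anon_scan_old(&z, memcg_file)); /* 1: over-eager */
	printf("new check forces anon scan: %d\n",
	       force_anon_scan_new(&z));             /* 0: file cache left */
	return 0;
}

With the numbers above, the old check forces an anon scan even though the zone still has plenty of file cache in other memcgs; the new check does not.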
Diffstat (limited to 'mm')
-rw-r--r--   mm/vmscan.c   20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 88ab53c9949a..d2f65c856350 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1930,11 +1930,6 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
 		goto out;
 	}
 
-	anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
-		get_lru_size(lruvec, LRU_INACTIVE_ANON);
-	file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
-		get_lru_size(lruvec, LRU_INACTIVE_FILE);
-
 	/*
 	 * Prevent the reclaimer from falling into the cache trap: as
 	 * cache pages start out inactive, every cache fault will tip
@@ -1945,9 +1940,14 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
 	 * anon pages. Try to detect this based on file LRU size.
 	 */
 	if (global_reclaim(sc)) {
-		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
+		unsigned long zonefile;
+		unsigned long zonefree;
+
+		zonefree = zone_page_state(zone, NR_FREE_PAGES);
+		zonefile = zone_page_state(zone, NR_ACTIVE_FILE) +
+			   zone_page_state(zone, NR_INACTIVE_FILE);
 
-		if (unlikely(file + free <= high_wmark_pages(zone))) {
+		if (unlikely(zonefile + zonefree <= high_wmark_pages(zone))) {
 			scan_balance = SCAN_ANON;
 			goto out;
 		}
@@ -1982,6 +1982,12 @@ static void get_scan_count(struct lruvec *lruvec, int swappiness,
 	 *
 	 * anon in [0], file in [1]
 	 */
+
+	anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
+		get_lru_size(lruvec, LRU_INACTIVE_ANON);
+	file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
+		get_lru_size(lruvec, LRU_INACTIVE_FILE);
+
 	spin_lock_irq(&zone->lru_lock);
 	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
 		reclaim_stat->recent_scanned[0] /= 2;