Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 18 ++++++++++++++++++
 1 file changed, 18 insertions(+), 0 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 3f56c8deb3c0..32c661d66a45 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1916,6 +1916,24 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 		get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
 	/*
+	 * Prevent the reclaimer from falling into the cache trap: as
+	 * cache pages start out inactive, every cache fault will tip
+	 * the scan balance towards the file LRU.  And as the file LRU
+	 * shrinks, so does the window for rotation from references.
+	 * This means we have a runaway feedback loop where a tiny
+	 * thrashing file LRU becomes infinitely more attractive than
+	 * anon pages.  Try to detect this based on file LRU size.
+	 */
+	if (global_reclaim(sc)) {
+		unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
+
+		if (unlikely(file + free <= high_wmark_pages(zone))) {
+			scan_balance = SCAN_ANON;
+			goto out;
+		}
+	}
+
+	/*
 	 * There is enough inactive page cache, do not reclaim
 	 * anything from the anonymous working set right now.
 	 */
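To see the added check in isolation: reclaim treats the file LRU as a safe target only while file pages plus free pages still exceed the zone's high watermark; below that point, any reclaimed cache page is immediately refaulted, so the patch forces anon scanning instead. Below is a minimal user-space C sketch of that decision, where pick_scan_target() and the page counts are hypothetical stand-ins for the kernel's zone_page_state(zone, NR_FREE_PAGES) and high_wmark_pages(zone) accessors, not kernel API.

#include <stdio.h>

/* Stand-ins for the two scan_balance outcomes this hunk cares about. */
enum scan_balance { SCAN_EQUAL, SCAN_ANON };

/*
 * Hypothetical mirror of the added check, all values in pages: once
 * the file LRU plus free memory can no longer cover the zone's high
 * watermark, every file page reclaimed is refaulted right back, so
 * the only way to make forward progress is to scan anon pages.
 */
static enum scan_balance pick_scan_target(unsigned long file_lru,
					  unsigned long free,
					  unsigned long high_wmark)
{
	if (file_lru + free <= high_wmark)
		return SCAN_ANON;	/* cache trap: go after anon */
	return SCAN_EQUAL;		/* normal file/anon balancing */
}

int main(void)
{
	/* hypothetical zone with a 512-page high watermark */
	printf("%d\n", pick_scan_target(4096, 1024, 512));	/* 0: ample cache */
	printf("%d\n", pick_scan_target(300, 100, 512));	/* 1: thrashing, scan anon */
	return 0;
}

One design point worth noting from the patch itself: the check is gated on global_reclaim(sc), since zone-wide free-page counts are only meaningful against zone watermarks when reclaiming globally rather than on behalf of a memory cgroup.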