aboutsummaryrefslogtreecommitdiffstats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--mm/vmscan.c20
1 file changed, 19 insertions, 1 deletion
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 9b6497eda806..32c661d66a45 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1158,7 +1158,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
1158 TTU_UNMAP|TTU_IGNORE_ACCESS, 1158 TTU_UNMAP|TTU_IGNORE_ACCESS,
1159 &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true); 1159 &dummy1, &dummy2, &dummy3, &dummy4, &dummy5, true);
1160 list_splice(&clean_pages, page_list); 1160 list_splice(&clean_pages, page_list);
1161 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret); 1161 mod_zone_page_state(zone, NR_ISOLATED_FILE, -ret);
1162 return ret; 1162 return ret;
1163} 1163}
1164 1164
@@ -1916,6 +1916,24 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
1916 get_lru_size(lruvec, LRU_INACTIVE_FILE); 1916 get_lru_size(lruvec, LRU_INACTIVE_FILE);
1917 1917
1918 /* 1918 /*
1919 * Prevent the reclaimer from falling into the cache trap: as
1920 * cache pages start out inactive, every cache fault will tip
1921 * the scan balance towards the file LRU. And as the file LRU
1922 * shrinks, so does the window for rotation from references.
1923 * This means we have a runaway feedback loop where a tiny
1924 * thrashing file LRU becomes infinitely more attractive than
1925 * anon pages. Try to detect this based on file LRU size.
1926 */
1927 if (global_reclaim(sc)) {
1928 unsigned long free = zone_page_state(zone, NR_FREE_PAGES);
1929
1930 if (unlikely(file + free <= high_wmark_pages(zone))) {
1931 scan_balance = SCAN_ANON;
1932 goto out;
1933 }
1934 }
1935
1936 /*
1919 * There is enough inactive page cache, do not reclaim 1937 * There is enough inactive page cache, do not reclaim
1920 * anything from the anonymous working set right now. 1938 * anything from the anonymous working set right now.
1921 */ 1939 */