 mm/vmscan.c | 16 +---------------
 1 file changed, 1 insertion(+), 15 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 06879ead7380..9b6497eda806 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1862,7 +1862,7 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 	struct zone *zone = lruvec_zone(lruvec);
 	unsigned long anon_prio, file_prio;
 	enum scan_balance scan_balance;
-	unsigned long anon, file, free;
+	unsigned long anon, file;
 	bool force_scan = false;
 	unsigned long ap, fp;
 	enum lru_list lru;
@@ -1916,20 +1916,6 @@ static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
 		get_lru_size(lruvec, LRU_INACTIVE_FILE);
 
 	/*
-	 * If it's foreseeable that reclaiming the file cache won't be
-	 * enough to get the zone back into a desirable shape, we have
-	 * to swap.  Better start now and leave the - probably heavily
-	 * thrashing - remaining file pages alone.
-	 */
-	if (global_reclaim(sc)) {
-		free = zone_page_state(zone, NR_FREE_PAGES);
-		if (unlikely(file + free <= high_wmark_pages(zone))) {
-			scan_balance = SCAN_ANON;
-			goto out;
-		}
-	}
-
-	/*
 	 * There is enough inactive page cache, do not reclaim
 	 * anything from the anonymous working set right now.
 	 */
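
Taken on its own, the check removed above is a small arithmetic heuristic: under global reclaim, if the zone's file pages plus free pages together cannot reach the high watermark, then even reclaiming every file page would leave the zone short, so the scan was forced to anonymous pages (SCAN_ANON). Below is a minimal standalone sketch of just that arithmetic. The helper name and plain-integer parameters are stand-ins for illustration only; in the kernel these values come from zone_page_state(zone, NR_FREE_PAGES) and high_wmark_pages(zone) inside get_scan_count().

/*
 * Sketch of the removed heuristic, reduced to plain integers.
 * file_reclaim_hopeless() is a hypothetical name, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

/* True when even reclaiming every file page could not lift free
 * pages back above the zone's high watermark -- the case in which
 * the removed code set scan_balance = SCAN_ANON and jumped to out. */
static bool file_reclaim_hopeless(unsigned long file,
				  unsigned long free,
				  unsigned long high_wmark)
{
	return file + free <= high_wmark;
}

int main(void)
{
	/* Hypothetical zone with a high watermark of 1000 pages. */
	printf("%d\n", file_reclaim_hopeless(600, 300, 1000)); /* 1: would force anon scan */
	printf("%d\n", file_reclaim_hopeless(900, 300, 1000)); /* 0: file reclaim may suffice */
	return 0;
}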