about summary refs log tree commit diff stats
path: root/mm/vmscan.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  40
1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 47a50962ce81..6771ea70bfe7 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -41,7 +41,6 @@
41#include <linux/memcontrol.h> 41#include <linux/memcontrol.h>
42#include <linux/delayacct.h> 42#include <linux/delayacct.h>
43#include <linux/sysctl.h> 43#include <linux/sysctl.h>
44#include <linux/compaction.h>
45 44
46#include <asm/tlbflush.h> 45#include <asm/tlbflush.h>
47#include <asm/div64.h> 46#include <asm/div64.h>
@@ -1842,16 +1841,28 @@ static inline bool should_continue_reclaim(struct zone *zone,
1842 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) 1841 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION))
1843 return false; 1842 return false;
1844 1843
1845 /* 1844 /* Consider stopping depending on scan and reclaim activity */
1846 * If we failed to reclaim and have scanned the full list, stop. 1845 if (sc->gfp_mask & __GFP_REPEAT) {
1847 * NOTE: Checking just nr_reclaimed would exit reclaim/compaction far 1846 /*
1848 * faster but obviously would be less likely to succeed 1847 * For __GFP_REPEAT allocations, stop reclaiming if the
1849 * allocation. If this is desirable, use GFP_REPEAT to decide 1848 * full LRU list has been scanned and we are still failing
1850 * if both reclaimed and scanned should be checked or just 1849 * to reclaim pages. This full LRU scan is potentially
1851 * reclaimed 1850 * expensive but a __GFP_REPEAT caller really wants to succeed
1852 */ 1851 */
1853 if (!nr_reclaimed && !nr_scanned) 1852 if (!nr_reclaimed && !nr_scanned)
1854 return false; 1853 return false;
1854 } else {
1855 /*
1856 * For non-__GFP_REPEAT allocations which can presumably
1857 * fail without consequence, stop if we failed to reclaim
1858 * any pages from the last SWAP_CLUSTER_MAX number of
1859 * pages that were scanned. This will return to the
1860 * caller faster at the risk reclaim/compaction and
1861 * the resulting allocation attempt fails
1862 */
1863 if (!nr_reclaimed)
1864 return false;
1865 }
1855 1866
1856 /* 1867 /*
1857 * If we have not reclaimed enough pages for compaction and the 1868 * If we have not reclaimed enough pages for compaction and the
@@ -1883,12 +1894,12 @@ static void shrink_zone(int priority, struct zone *zone,
1883 unsigned long nr[NR_LRU_LISTS]; 1894 unsigned long nr[NR_LRU_LISTS];
1884 unsigned long nr_to_scan; 1895 unsigned long nr_to_scan;
1885 enum lru_list l; 1896 enum lru_list l;
1886 unsigned long nr_reclaimed; 1897 unsigned long nr_reclaimed, nr_scanned;
1887 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 1898 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1888 unsigned long nr_scanned = sc->nr_scanned;
1889 1899
1890restart: 1900restart:
1891 nr_reclaimed = 0; 1901 nr_reclaimed = 0;
1902 nr_scanned = sc->nr_scanned;
1892 get_scan_count(zone, sc, nr, priority); 1903 get_scan_count(zone, sc, nr, priority);
1893 1904
1894 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1905 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2084,7 +2095,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2084 struct zone *preferred_zone; 2095 struct zone *preferred_zone;
2085 2096
2086 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask), 2097 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
2087 NULL, &preferred_zone); 2098 &cpuset_current_mems_allowed,
2099 &preferred_zone);
2088 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10); 2100 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
2089 } 2101 }
2090 } 2102 }