author     Jiri Kosina <jkosina@suse.cz>    2011-09-15 09:08:05 -0400
committer  Jiri Kosina <jkosina@suse.cz>    2011-09-15 09:08:18 -0400
commit     e060c38434b2caa78efe7cedaff4191040b65a15 (patch)
tree       407361230bf6733f63d8e788e4b5e6566ee04818 /mm/vmscan.c
parent     10e4ac572eeffe5317019bd7330b6058a400dfc2 (diff)
parent     cc39c6a9bbdebfcf1a7dee64d83bf302bc38d941 (diff)
Merge branch 'master' into for-next
Fast-forward merge with Linus to be able to merge patches based on a more recent version of the tree.
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  74
1 file changed, 39 insertions(+), 35 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 8e32698fab66..9fdfce7ba403 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -95,8 +95,6 @@ struct scan_control {
 	/* Can pages be swapped as part of reclaim? */
 	int may_swap;
 
-	int swappiness;
-
 	int order;
 
 	/*
@@ -173,7 +171,8 @@ static unsigned long zone_nr_lru_pages(struct zone *zone,
 				struct scan_control *sc, enum lru_list lru)
 {
 	if (!scanning_global_lru(sc))
-		return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru);
+		return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup,
+				zone_to_nid(zone), zone_idx(zone), BIT(lru));
 
 	return zone_page_state(zone, NR_LRU_BASE + lru);
 }
@@ -1770,6 +1769,13 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 	return shrink_inactive_list(nr_to_scan, zone, sc, priority, file);
 }
 
+static int vmscan_swappiness(struct scan_control *sc)
+{
+	if (scanning_global_lru(sc))
+		return vm_swappiness;
+	return mem_cgroup_swappiness(sc->mem_cgroup);
+}
+
 /*
  * Determine how aggressively the anon and file LRU lists should be
  * scanned. The relative value of each set of LRU lists is determined
@@ -1788,22 +1794,15 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	u64 fraction[2], denominator;
 	enum lru_list l;
 	int noswap = 0;
-	int force_scan = 0;
+	bool force_scan = false;
+	unsigned long nr_force_scan[2];
 
-
-	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
-	if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
-		/* kswapd does zone balancing and need to scan this zone */
-		if (scanning_global_lru(sc) && current_is_kswapd())
-			force_scan = 1;
-		/* memcg may have small limit and need to avoid priority drop */
-		if (!scanning_global_lru(sc))
-			force_scan = 1;
-	}
+	/* kswapd does zone balancing and needs to scan this zone */
+	if (scanning_global_lru(sc) && current_is_kswapd())
+		force_scan = true;
+	/* memcg may have small limit and need to avoid priority drop */
+	if (!scanning_global_lru(sc))
+		force_scan = true;
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1811,9 +1810,16 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 		fraction[0] = 0;
 		fraction[1] = 1;
 		denominator = 1;
+		nr_force_scan[0] = 0;
+		nr_force_scan[1] = SWAP_CLUSTER_MAX;
 		goto out;
 	}
 
+	anon  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+	file  = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
 	if (scanning_global_lru(sc)) {
 		free  = zone_page_state(zone, NR_FREE_PAGES);
 		/* If we have very few page cache pages,
@@ -1822,6 +1828,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 			fraction[0] = 1;
 			fraction[1] = 0;
 			denominator = 1;
+			nr_force_scan[0] = SWAP_CLUSTER_MAX;
+			nr_force_scan[1] = 0;
 			goto out;
 		}
 	}
@@ -1830,8 +1838,8 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	 * With swappiness at 100, anonymous and file have the same priority.
 	 * This scanning priority is essentially the inverse of IO cost.
 	 */
-	anon_prio = sc->swappiness;
-	file_prio = 200 - sc->swappiness;
+	anon_prio = vmscan_swappiness(sc);
+	file_prio = 200 - vmscan_swappiness(sc);
 
 	/*
 	 * OK, so we have swap space and a fair amount of page cache
@@ -1870,6 +1878,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	fraction[0] = ap;
 	fraction[1] = fp;
 	denominator = ap + fp + 1;
+	if (force_scan) {
+		unsigned long scan = SWAP_CLUSTER_MAX;
+		nr_force_scan[0] = div64_u64(scan * ap, denominator);
+		nr_force_scan[1] = div64_u64(scan * fp, denominator);
+	}
 out:
 	for_each_evictable_lru(l) {
 		int file = is_file_lru(l);
@@ -1890,12 +1903,8 @@ out:
 		 * memcg, priority drop can cause big latency. So, it's better
 		 * to scan small amount. See may_noscan above.
 		 */
-		if (!scan && force_scan) {
-			if (file)
-				scan = SWAP_CLUSTER_MAX;
-			else if (!noswap)
-				scan = SWAP_CLUSTER_MAX;
-		}
+		if (!scan && force_scan)
+			scan = nr_force_scan[file];
 		nr[l] = scan;
 	}
 }
@@ -2220,7 +2229,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.may_unmap = 1,
 		.may_swap = 1,
-		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
 		.nodemask = nodemask,
@@ -2244,7 +2252,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 
 unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 						gfp_t gfp_mask, bool noswap,
-						unsigned int swappiness,
 						struct zone *zone,
 						unsigned long *nr_scanned)
 {
@@ -2254,7 +2261,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
 		.may_swap = !noswap,
-		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem,
 	};
@@ -2283,8 +2289,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
 
 unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 					   gfp_t gfp_mask,
-					   bool noswap,
-					   unsigned int swappiness)
+					   bool noswap)
 {
 	struct zonelist *zonelist;
 	unsigned long nr_reclaimed;
@@ -2294,7 +2299,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 		.may_unmap = 1,
 		.may_swap = !noswap,
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
-		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem_cont,
 		.nodemask = NULL, /* we don't care the placement */
@@ -2445,7 +2449,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 		 * we want to put equal scanning pressure on each zone.
 		 */
 		.nr_to_reclaim = ULONG_MAX,
-		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
 	};
@@ -2494,6 +2497,9 @@ loop_again:
 					high_wmark_pages(zone), 0, 0)) {
 				end_zone = i;
 				break;
+			} else {
+				/* If balanced, clear the congested flag */
+				zone_clear_flag(zone, ZONE_CONGESTED);
 			}
 		}
 		if (i < 0)
@@ -2915,7 +2921,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.may_writepage = 1,
 		.nr_to_reclaim = nr_to_reclaim,
 		.hibernation_mode = 1,
-		.swappiness = vm_swappiness,
 		.order = 0,
 	};
 	struct shrink_control shrink = {
@@ -3102,7 +3107,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		.nr_to_reclaim = max_t(unsigned long, nr_pages,
 				       SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
-		.swappiness = vm_swappiness,
 		.order = order,
 	};
 	struct shrink_control shrink = {