author		Konstantin Khlebnikov <khlebnikov@openvz.org>	2012-05-29 18:06:57 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-29 19:22:26 -0400
commit		9e3b2f8cd340e13353a44c9a34caef2848131ed7 (patch)
tree		cda727e0e4f87992826075ea75c8519bf8e78034 /mm/vmscan.c
parent		3d58ab5c97fa2d145050242137ac39ca7d3bc2fc (diff)
mm/vmscan: store "priority" in struct scan_control
In memory reclaim some functions have too many arguments - "priority" is
one of them.  It can be stored in struct scan_control, which is constructed
at the same level.  Instead of an open-coded loop we set the initial
sc.priority, and do_try_to_free_pages() decreases it down to zero.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
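To make the new control flow concrete, here is a minimal, self-contained C
sketch of the pattern the patch introduces - not the kernel code itself:
shrink_zones() is reduced to a stub and the reclaimed page counts are made
up; only the scan_control.priority field and the do/while driver loop that
decrements it mirror the patch.

#include <stdio.h>

#define DEF_PRIORITY 12		/* same value the kernel uses */

struct scan_control {
	unsigned long nr_reclaimed;
	unsigned long nr_to_reclaim;
	/* Scan (total_size >> priority) pages at once */
	int priority;
};

/* Stub standing in for the real per-zone scanning work. */
static void shrink_zones(struct scan_control *sc)
{
	sc->nr_reclaimed += 8;	/* pretend each pass frees a few pages */
	printf("pass at priority %d, reclaimed %lu\n",
	       sc->priority, sc->nr_reclaimed);
}

/* Priority now lives in *sc; the loop just decrements it toward zero. */
static void do_try_to_free_pages(struct scan_control *sc)
{
	do {
		shrink_zones(sc);
		if (sc->nr_reclaimed >= sc->nr_to_reclaim)
			break;
	} while (--sc->priority >= 0);
}

int main(void)
{
	struct scan_control sc = {
		.nr_to_reclaim = 32,
		.priority = DEF_PRIORITY,	/* callers seed the field */
	};
	do_try_to_free_pages(&sc);
	return 0;
}

Callers that previously passed "priority" down the stack now only seed
sc.priority (DEF_PRIORITY for full reclaim, 0 or ZONE_RECLAIM_PRIORITY for
the special cases in the diff below), and every callee reads sc->priority.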
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--	mm/vmscan.c	117
1 file changed, 61 insertions(+), 56 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 0e2131deb2d3..77905eb3d8ad 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -78,6 +78,9 @@ struct scan_control {
 
 	int order;
 
+	/* Scan (total_size >> priority) pages at once */
+	int priority;
+
 	/*
 	 * The memory cgroup that hit its limit and as a result is the
 	 * primary target of this reclaim invocation.
@@ -687,7 +690,6 @@ static enum page_references page_check_references(struct page *page,
 static unsigned long shrink_page_list(struct list_head *page_list,
 				      struct mem_cgroup_zone *mz,
 				      struct scan_control *sc,
-				      int priority,
 				      unsigned long *ret_nr_dirty,
 				      unsigned long *ret_nr_writeback)
 {
@@ -790,7 +792,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		 * unless under significant pressure.
 		 */
 		if (page_is_file_cache(page) &&
-				(!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) {
+				(!current_is_kswapd() ||
+				 sc->priority >= DEF_PRIORITY - 2)) {
 			/*
 			 * Immediately reclaim when written back.
 			 * Similar in principal to deactivate_page()
@@ -1257,7 +1260,7 @@ update_isolated_counts(struct mem_cgroup_zone *mz,
  */
 static noinline_for_stack unsigned long
 shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
-		     struct scan_control *sc, int priority, enum lru_list lru)
+		     struct scan_control *sc, enum lru_list lru)
 {
 	LIST_HEAD(page_list);
 	unsigned long nr_scanned;
@@ -1307,7 +1310,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 
 	update_isolated_counts(mz, &page_list, &nr_anon, &nr_file);
 
-	nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
+	nr_reclaimed = shrink_page_list(&page_list, mz, sc,
 						&nr_dirty, &nr_writeback);
 
 	spin_lock_irq(&zone->lru_lock);
@@ -1356,13 +1359,14 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
 	 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
 	 * isolated page is PageWriteback
 	 */
-	if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority)))
+	if (nr_writeback && nr_writeback >=
+			(nr_taken >> (DEF_PRIORITY - sc->priority)))
 		wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
 
 	trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
 		zone_idx(zone),
 		nr_scanned, nr_reclaimed,
-		priority,
+		sc->priority,
 		trace_shrink_flags(file));
 	return nr_reclaimed;
 }
@@ -1426,7 +1430,7 @@ static void move_active_pages_to_lru(struct zone *zone,
 static void shrink_active_list(unsigned long nr_to_scan,
 			       struct mem_cgroup_zone *mz,
 			       struct scan_control *sc,
-			       int priority, enum lru_list lru)
+			       enum lru_list lru)
 {
 	unsigned long nr_taken;
 	unsigned long nr_scanned;
@@ -1609,17 +1613,17 @@ static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file)
 
 static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
 				 struct mem_cgroup_zone *mz,
-				 struct scan_control *sc, int priority)
+				 struct scan_control *sc)
 {
 	int file = is_file_lru(lru);
 
 	if (is_active_lru(lru)) {
 		if (inactive_list_is_low(mz, file))
-			shrink_active_list(nr_to_scan, mz, sc, priority, lru);
+			shrink_active_list(nr_to_scan, mz, sc, lru);
 		return 0;
 	}
 
-	return shrink_inactive_list(nr_to_scan, mz, sc, priority, lru);
+	return shrink_inactive_list(nr_to_scan, mz, sc, lru);
 }
 
 static int vmscan_swappiness(struct scan_control *sc)
@@ -1638,7 +1642,7 @@ static int vmscan_swappiness(struct scan_control *sc)
  * nr[0] = anon pages to scan; nr[1] = file pages to scan
  */
 static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
-			   unsigned long *nr, int priority)
+			   unsigned long *nr)
 {
 	unsigned long anon, file, free;
 	unsigned long anon_prio, file_prio;
@@ -1740,8 +1744,8 @@ out:
 		unsigned long scan;
 
 		scan = zone_nr_lru_pages(mz, lru);
-		if (priority || noswap || !vmscan_swappiness(sc)) {
-			scan >>= priority;
+		if (sc->priority || noswap || !vmscan_swappiness(sc)) {
+			scan >>= sc->priority;
 			if (!scan && force_scan)
 				scan = SWAP_CLUSTER_MAX;
 			scan = div64_u64(scan * fraction[file], denominator);
@@ -1751,11 +1755,11 @@ out:
 }
 
 /* Use reclaim/compaction for costly allocs or under memory pressure */
-static bool in_reclaim_compaction(int priority, struct scan_control *sc)
+static bool in_reclaim_compaction(struct scan_control *sc)
 {
 	if (COMPACTION_BUILD && sc->order &&
 			(sc->order > PAGE_ALLOC_COSTLY_ORDER ||
-			 priority < DEF_PRIORITY - 2))
+			 sc->priority < DEF_PRIORITY - 2))
 		return true;
 
 	return false;
@@ -1771,14 +1775,13 @@ static bool in_reclaim_compaction(int priority, struct scan_control *sc)
 static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 					   unsigned long nr_reclaimed,
 					   unsigned long nr_scanned,
-					   int priority,
 					   struct scan_control *sc)
 {
 	unsigned long pages_for_compaction;
 	unsigned long inactive_lru_pages;
 
 	/* If not in reclaim/compaction mode, stop */
-	if (!in_reclaim_compaction(priority, sc))
+	if (!in_reclaim_compaction(sc))
 		return false;
 
 	/* Consider stopping depending on scan and reclaim activity */
@@ -1829,7 +1832,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
 /*
  * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
  */
-static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
+static void shrink_mem_cgroup_zone(struct mem_cgroup_zone *mz,
 				   struct scan_control *sc)
 {
 	unsigned long nr[NR_LRU_LISTS];
@@ -1842,7 +1845,7 @@ static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
 restart:
 	nr_reclaimed = 0;
 	nr_scanned = sc->nr_scanned;
-	get_scan_count(mz, sc, nr, priority);
+	get_scan_count(mz, sc, nr);
 
 	blk_start_plug(&plug);
 	while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -1854,7 +1857,7 @@ restart:
 			nr[lru] -= nr_to_scan;
 
 			nr_reclaimed += shrink_list(lru, nr_to_scan,
-						    mz, sc, priority);
+						    mz, sc);
 		}
 	}
 	/*
@@ -1865,7 +1868,8 @@ restart:
 	 * with multiple processes reclaiming pages, the total
 	 * freeing target can get unreasonably large.
 	 */
-	if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY)
+	if (nr_reclaimed >= nr_to_reclaim &&
+			sc->priority < DEF_PRIORITY)
 		break;
 	}
 	blk_finish_plug(&plug);
@@ -1877,24 +1881,22 @@ restart:
 	 */
 	if (inactive_anon_is_low(mz))
 		shrink_active_list(SWAP_CLUSTER_MAX, mz,
-				   sc, priority, LRU_ACTIVE_ANON);
+				   sc, LRU_ACTIVE_ANON);
 
 	/* reclaim/compaction might need reclaim to continue */
 	if (should_continue_reclaim(mz, nr_reclaimed,
-					sc->nr_scanned - nr_scanned,
-					priority, sc))
+					sc->nr_scanned - nr_scanned, sc))
 		goto restart;
 
 	throttle_vm_writeout(sc->gfp_mask);
 }
 
-static void shrink_zone(int priority, struct zone *zone,
-			struct scan_control *sc)
+static void shrink_zone(struct zone *zone, struct scan_control *sc)
 {
 	struct mem_cgroup *root = sc->target_mem_cgroup;
 	struct mem_cgroup_reclaim_cookie reclaim = {
 		.zone = zone,
-		.priority = priority,
+		.priority = sc->priority,
 	};
 	struct mem_cgroup *memcg;
 
@@ -1905,7 +1907,7 @@ static void shrink_zone(int priority, struct zone *zone,
 			.zone = zone,
 		};
 
-		shrink_mem_cgroup_zone(priority, &mz, sc);
+		shrink_mem_cgroup_zone(&mz, sc);
 		/*
 		 * Limit reclaim has historically picked one memcg and
 		 * scanned it with decreasing priority levels until
@@ -1981,8 +1983,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
  * the caller that it should consider retrying the allocation instead of
  * further reclaim.
  */
-static bool shrink_zones(int priority, struct zonelist *zonelist,
-			 struct scan_control *sc)
+static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
 {
 	struct zoneref *z;
 	struct zone *zone;
@@ -2009,7 +2010,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
 		if (global_reclaim(sc)) {
 			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
 				continue;
-			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable &&
+					sc->priority != DEF_PRIORITY)
 				continue;	/* Let kswapd poll it */
 			if (COMPACTION_BUILD) {
 				/*
@@ -2041,7 +2043,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
 			/* need some check for avoid more shrink_zone() */
 		}
 
-		shrink_zone(priority, zone, sc);
+		shrink_zone(zone, sc);
 	}
 
 	return aborted_reclaim;
@@ -2092,7 +2094,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 					struct scan_control *sc,
 					struct shrink_control *shrink)
 {
-	int priority;
 	unsigned long total_scanned = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct zoneref *z;
@@ -2105,9 +2106,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	if (global_reclaim(sc))
 		count_vm_event(ALLOCSTALL);
 
-	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+	do {
 		sc->nr_scanned = 0;
-		aborted_reclaim = shrink_zones(priority, zonelist, sc);
+		aborted_reclaim = shrink_zones(zonelist, sc);
 
 		/*
 		 * Don't shrink slabs when reclaiming memory from
@@ -2149,7 +2150,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
 		/* Take a nap, wait for some writeback to complete */
 		if (!sc->hibernation_mode && sc->nr_scanned &&
-		    priority < DEF_PRIORITY - 2) {
+		    sc->priority < DEF_PRIORITY - 2) {
 			struct zone *preferred_zone;
 
 			first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
@@ -2157,7 +2158,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 						&preferred_zone);
 			wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
 		}
-	}
+	} while (--sc->priority >= 0);
 
 out:
 	delayacct_freepages_end();
@@ -2195,6 +2196,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.may_unmap = 1,
 		.may_swap = 1,
 		.order = order,
+		.priority = DEF_PRIORITY,
 		.target_mem_cgroup = NULL,
 		.nodemask = nodemask,
 	};
@@ -2227,6 +2229,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 		.may_unmap = 1,
 		.may_swap = !noswap,
 		.order = 0,
+		.priority = 0,
 		.target_mem_cgroup = memcg,
 	};
 	struct mem_cgroup_zone mz = {
@@ -2237,7 +2240,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 
-	trace_mm_vmscan_memcg_softlimit_reclaim_begin(0,
+	trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
 						      sc.may_writepage,
 						      sc.gfp_mask);
 
@@ -2248,7 +2251,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
 	 * will pick up pages from other mem cgroup's as well. We hack
 	 * the priority and make it zero.
 	 */
-	shrink_mem_cgroup_zone(0, &mz, &sc);
+	shrink_mem_cgroup_zone(&mz, &sc);
 
 	trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
 
@@ -2269,6 +2272,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 		.may_swap = !noswap,
 		.nr_to_reclaim = SWAP_CLUSTER_MAX,
 		.order = 0,
+		.priority = DEF_PRIORITY,
 		.target_mem_cgroup = memcg,
 		.nodemask = NULL, /* we don't care the placement */
 		.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2299,8 +2303,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
 }
 #endif
 
-static void age_active_anon(struct zone *zone, struct scan_control *sc,
-			    int priority)
+static void age_active_anon(struct zone *zone, struct scan_control *sc)
 {
 	struct mem_cgroup *memcg;
 
@@ -2316,7 +2319,7 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc,
 
 		if (inactive_anon_is_low(&mz))
 			shrink_active_list(SWAP_CLUSTER_MAX, &mz,
-					   sc, priority, LRU_ACTIVE_ANON);
+					   sc, LRU_ACTIVE_ANON);
 
 		memcg = mem_cgroup_iter(NULL, memcg, NULL);
 	} while (memcg);
@@ -2425,7 +2428,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 {
 	int all_zones_ok;
 	unsigned long balanced;
-	int priority;
 	int i;
 	int end_zone = 0;	/* Inclusive. 0 = ZONE_DMA */
 	unsigned long total_scanned;
@@ -2449,11 +2451,12 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
 	};
 loop_again:
 	total_scanned = 0;
+	sc.priority = DEF_PRIORITY;
 	sc.nr_reclaimed = 0;
 	sc.may_writepage = !laptop_mode;
 	count_vm_event(PAGEOUTRUN);
 
-	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
+	do {
 		unsigned long lru_pages = 0;
 		int has_under_min_watermark_zone = 0;
 
@@ -2470,14 +2473,15 @@ loop_again:
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable &&
+			    sc.priority != DEF_PRIORITY)
 				continue;
 
 			/*
 			 * Do some background aging of the anon list, to give
 			 * pages a chance to be referenced before reclaiming.
 			 */
-			age_active_anon(zone, &sc, priority);
+			age_active_anon(zone, &sc);
 
 			/*
 			 * If the number of buffer_heads in the machine
@@ -2525,7 +2529,8 @@ loop_again:
 			if (!populated_zone(zone))
 				continue;
 
-			if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+			if (zone->all_unreclaimable &&
+			    sc.priority != DEF_PRIORITY)
 				continue;
 
 			sc.nr_scanned = 0;
@@ -2569,7 +2574,7 @@ loop_again:
 			    !zone_watermark_ok_safe(zone, testorder,
 					high_wmark_pages(zone) + balance_gap,
 					end_zone, 0)) {
-				shrink_zone(priority, zone, &sc);
+				shrink_zone(zone, &sc);
 
 				reclaim_state->reclaimed_slab = 0;
 				nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
@@ -2626,7 +2631,7 @@ loop_again:
 		 * OK, kswapd is getting into trouble. Take a nap, then take
 		 * another pass across the zones.
 		 */
-		if (total_scanned && (priority < DEF_PRIORITY - 2)) {
+		if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
 			if (has_under_min_watermark_zone)
 				count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
 			else
@@ -2641,7 +2646,7 @@ loop_again:
 		 */
 		if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
 			break;
-	}
+	} while (--sc.priority >= 0);
 out:
 
 	/*
@@ -2691,7 +2696,8 @@ out:
 		if (!populated_zone(zone))
 			continue;
 
-		if (zone->all_unreclaimable && priority != DEF_PRIORITY)
+		if (zone->all_unreclaimable &&
+		    sc.priority != DEF_PRIORITY)
 			continue;
 
 		/* Would compaction fail due to lack of free memory? */
@@ -2958,6 +2964,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
 		.nr_to_reclaim = nr_to_reclaim,
 		.hibernation_mode = 1,
 		.order = 0,
+		.priority = DEF_PRIORITY,
 	};
 	struct shrink_control shrink = {
 		.gfp_mask = sc.gfp_mask,
@@ -3135,7 +3142,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	const unsigned long nr_pages = 1 << order;
 	struct task_struct *p = current;
 	struct reclaim_state reclaim_state;
-	int priority;
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -3144,6 +3150,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 				       SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
 		.order = order,
+		.priority = ZONE_RECLAIM_PRIORITY,
 	};
 	struct shrink_control shrink = {
 		.gfp_mask = sc.gfp_mask,
@@ -3166,11 +3173,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 		 * Free memory by calling shrink zone with increasing
 		 * priorities until we have enough memory freed.
 		 */
-		priority = ZONE_RECLAIM_PRIORITY;
 		do {
-			shrink_zone(priority, zone, &sc);
-			priority--;
-		} while (priority >= 0 && sc.nr_reclaimed < nr_pages);
+			shrink_zone(zone, &sc);
+		} while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
 	}
 
 	nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);