Diffstat (limited to 'mm/vmscan.c')

-rw-r--r--  mm/vmscan.c  101

1 file changed, 53 insertions(+), 48 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 479e4671939..06e72693b45 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -60,8 +60,8 @@ struct scan_control {
 
 	int may_writepage;
 
-	/* Can pages be swapped as part of reclaim? */
-	int may_swap;
+	/* Can mapped pages be reclaimed? */
+	int may_unmap;
 
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
@@ -78,6 +78,12 @@ struct scan_control {
 	/* Which cgroup do we reclaim from */
 	struct mem_cgroup *mem_cgroup;
 
+	/*
+	 * Nodemask of nodes allowed by the caller. If NULL, all nodes
+	 * are scanned.
+	 */
+	nodemask_t *nodemask;
+
 	/* Pluggable isolate pages callback */
 	unsigned long (*isolate_pages)(unsigned long nr, struct list_head *dst,
 			unsigned long *scanned, int order, int mode,
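The new field gives direct-reclaim callers a way to restrict scanning to the nodes their allocation is allowed to use. A minimal sketch of populating it with the standard nodemask helpers (the variables here are illustrative, not part of this patch):

	nodemask_t allowed = NODE_MASK_NONE;

	node_set(nid, allowed);		/* permit a single node */
	sc.nodemask = &allowed;		/* NULL instead means: scan all nodes */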
@@ -214,8 +220,9 @@ unsigned long shrink_slab(unsigned long scanned, gfp_t gfp_mask,
 		do_div(delta, lru_pages + 1);
 		shrinker->nr += delta;
 		if (shrinker->nr < 0) {
-			printk(KERN_ERR "%s: nr=%ld\n",
-					__func__, shrinker->nr);
+			printk(KERN_ERR "shrink_slab: %pF negative objects to "
+			       "delete nr=%ld\n",
+			       shrinker->shrink, shrinker->nr);
 			shrinker->nr = max_pass;
 		}
 
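The reworked message relies on printk()'s %pF pointer extension, which resolves a function pointer to its symbol name. The log therefore names the misbehaving shrinker callback itself rather than just shrink_slab() via __func__. The output looks roughly like this (symbol and count are made up for illustration):

	shrink_slab: shrink_icache_memory+0x0/0xe0 negative objects to delete nr=-128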
@@ -606,7 +613,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 		if (unlikely(!page_evictable(page, NULL)))
 			goto cull_mlocked;
 
-		if (!sc->may_swap && page_mapped(page))
+		if (!sc->may_unmap && page_mapped(page))
 			goto keep_locked;
 
 		/* Double the slab pressure for mapped and swapcache pages */
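This test is the reason for the rename: it skips every page with page_mapped() true, whether the page is anonymous (reclaiming it would need swap space) or file-backed (reclaiming it only needs the mappings torn down). "may_swap" understated what the flag gates; "may_unmap" matches the actual behavior.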
@@ -1298,17 +1305,11 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	}
 	__mod_zone_page_state(zone, NR_LRU_BASE + lru, pgmoved);
 	pgdeactivate += pgmoved;
-	if (buffer_heads_over_limit) {
-		spin_unlock_irq(&zone->lru_lock);
-		pagevec_strip(&pvec);
-		spin_lock_irq(&zone->lru_lock);
-	}
 	__count_zone_vm_events(PGREFILL, zone, pgscanned);
 	__count_vm_events(PGDEACTIVATE, pgdeactivate);
 	spin_unlock_irq(&zone->lru_lock);
-	if (vm_swap_full())
-		pagevec_swap_free(&pvec);
-
+	if (buffer_heads_over_limit)
+		pagevec_strip(&pvec);
 	pagevec_release(&pvec);
 }
 
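Two changes are folded together here: pagevec_strip() no longer bounces zone->lru_lock (buffer heads are stripped once, after the lock is dropped for the last time), and the vm_swap_full()/pagevec_swap_free() step is removed outright. The resulting shape of the tail of shrink_active_list(), details elided:

	spin_lock_irq(&zone->lru_lock);
	/* ... move pages between lists, update LRU counters and events ... */
	spin_unlock_irq(&zone->lru_lock);
	if (buffer_heads_over_limit)
		pagevec_strip(&pvec);	/* done without the spinlock held */
	pagevec_release(&pvec);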
@@ -1543,7 +1544,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
 	struct zone *zone;
 
 	sc->all_unreclaimable = 1;
-	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+	for_each_zone_zonelist_nodemask(zone, z, zonelist, high_zoneidx,
+					sc->nodemask) {
 		if (!populated_zone(zone))
 			continue;
 		/*
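for_each_zone_zonelist_nodemask() walks the same zonelist but additionally filters zones by the caller's nodemask; passing a NULL mask disables the filter. Conceptually it behaves like this hand-expanded sketch (not the real macro body):

	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (sc->nodemask &&
		    !node_isset(zone_to_nid(zone), *sc->nodemask))
			continue;	/* zone lives on a disallowed node */
		/* ... */
	}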
@@ -1688,17 +1690,18 @@ out:
 }
 
 unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
-								gfp_t gfp_mask)
+				gfp_t gfp_mask, nodemask_t *nodemask)
 {
 	struct scan_control sc = {
 		.gfp_mask = gfp_mask,
 		.may_writepage = !laptop_mode,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
-		.may_swap = 1,
+		.may_unmap = 1,
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
 		.isolate_pages = isolate_pages_global,
+		.nodemask = nodemask,
 	};
 
 	return do_try_to_free_pages(zonelist, &sc);
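With the extra parameter, the page allocator can hand the allocation's nodemask straight to direct reclaim. A hedged sketch of a call site (the allocator's real plumbing carries a few more arguments around):

	/* in the allocator's direct-reclaim slow path */
	did_some_progress = try_to_free_pages(zonelist, order,
					      gfp_mask, nodemask);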
@@ -1713,17 +1716,18 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 {
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
-		.may_swap = 1,
+		.may_unmap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = swappiness,
 		.order = 0,
 		.mem_cgroup = mem_cont,
 		.isolate_pages = mem_cgroup_isolate_pages,
+		.nodemask = NULL,	/* we don't care about placement */
 	};
 	struct zonelist *zonelist;
 
 	if (noswap)
-		sc.may_swap = 0;
+		sc.may_unmap = 0;
 
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
@@ -1762,7 +1766,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 	struct reclaim_state *reclaim_state = current->reclaim_state;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
-		.may_swap = 1,
+		.may_unmap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = vm_swappiness,
 		.order = order,
@@ -2050,22 +2054,19 @@ unsigned long global_lru_pages(void)
 #ifdef CONFIG_PM
 /*
  * Helper function for shrink_all_memory(). Tries to reclaim 'nr_pages' pages
- * from LRU lists system-wide, for given pass and priority, and returns the
- * number of reclaimed pages
+ * from LRU lists system-wide, for given pass and priority.
  *
  * For pass > 3 we also try to shrink the LRU lists that contain a few pages
 */
-static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
+static void shrink_all_zones(unsigned long nr_pages, int prio,
				      int pass, struct scan_control *sc)
 {
	struct zone *zone;
-	unsigned long ret = 0;
+	unsigned long nr_reclaimed = 0;
 
-	for_each_zone(zone) {
+	for_each_populated_zone(zone) {
		enum lru_list l;
 
-		if (!populated_zone(zone))
-			continue;
		if (zone_is_all_unreclaimable(zone) && prio != DEF_PRIORITY)
			continue;
 
@@ -2084,14 +2085,16 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 
 				zone->lru[l].nr_scan = 0;
 				nr_to_scan = min(nr_pages, lru_pages);
-				ret += shrink_list(l, nr_to_scan, zone,
+				nr_reclaimed += shrink_list(l, nr_to_scan, zone,
 								sc, prio);
-				if (ret >= nr_pages)
-					return ret;
+				if (nr_reclaimed >= nr_pages) {
+					sc->nr_reclaimed = nr_reclaimed;
+					return;
+				}
 			}
 		}
 	}
-	return ret;
+	sc->nr_reclaimed = nr_reclaimed;
 }
 
 /*
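shrink_all_zones() now reports progress through the shared scan_control rather than a return value, matching how the rest of vmscan accumulates into sc->nr_reclaimed. Reduced to its shape (function name hypothetical):

	static void shrink_something(struct scan_control *sc)
	{
		unsigned long nr_reclaimed = 0;

		/* ... reclaim work, adding to nr_reclaimed ... */

		sc->nr_reclaimed = nr_reclaimed;	/* stored on every exit path */
	}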
@@ -2105,13 +2108,11 @@ static unsigned long shrink_all_zones(unsigned long nr_pages, int prio,
 unsigned long shrink_all_memory(unsigned long nr_pages)
 {
 	unsigned long lru_pages, nr_slab;
-	unsigned long ret = 0;
 	int pass;
 	struct reclaim_state reclaim_state;
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
-		.may_swap = 0,
-		.swap_cluster_max = nr_pages,
+		.may_unmap = 0,
 		.may_writepage = 1,
 		.isolate_pages = isolate_pages_global,
 	};
@@ -2127,8 +2128,8 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 		if (!reclaim_state.reclaimed_slab)
 			break;
 
-		ret += reclaim_state.reclaimed_slab;
-		if (ret >= nr_pages)
+		sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+		if (sc.nr_reclaimed >= nr_pages)
 			goto out;
 
 		nr_slab -= reclaim_state.reclaimed_slab;
@@ -2147,21 +2148,22 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 
 		/* Force reclaiming mapped pages in the passes #3 and #4 */
 		if (pass > 2)
-			sc.may_swap = 1;
+			sc.may_unmap = 1;
 
 		for (prio = DEF_PRIORITY; prio >= 0; prio--) {
-			unsigned long nr_to_scan = nr_pages - ret;
+			unsigned long nr_to_scan = nr_pages - sc.nr_reclaimed;
 
 			sc.nr_scanned = 0;
-			ret += shrink_all_zones(nr_to_scan, prio, pass, &sc);
-			if (ret >= nr_pages)
+			sc.swap_cluster_max = nr_to_scan;
+			shrink_all_zones(nr_to_scan, prio, pass, &sc);
+			if (sc.nr_reclaimed >= nr_pages)
 				goto out;
 
 			reclaim_state.reclaimed_slab = 0;
 			shrink_slab(sc.nr_scanned, sc.gfp_mask,
 					global_lru_pages());
-			ret += reclaim_state.reclaimed_slab;
-			if (ret >= nr_pages)
+			sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+			if (sc.nr_reclaimed >= nr_pages)
 				goto out;
 
 			if (sc.nr_scanned && prio < DEF_PRIORITY - 2)
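Assigning sc.swap_cluster_max on each iteration replaces the fixed '.swap_cluster_max = nr_pages' initializer that the earlier shrink_all_memory() hunk removed: every priority level now requests only the still-outstanding part of the target. For example, with nr_pages = 1024 and sc.nr_reclaimed already at 900, the next pass asks for just 124 pages instead of another 1024.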
@@ -2170,21 +2172,23 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 	}
 
 	/*
-	 * If ret = 0, we could not shrink LRUs, but there may be something
-	 * in slab caches
+	 * If sc.nr_reclaimed = 0, we could not shrink LRUs, but there may be
+	 * something in slab caches
 	 */
-	if (!ret) {
+	if (!sc.nr_reclaimed) {
 		do {
 			reclaim_state.reclaimed_slab = 0;
 			shrink_slab(nr_pages, sc.gfp_mask, global_lru_pages());
-			ret += reclaim_state.reclaimed_slab;
-		} while (ret < nr_pages && reclaim_state.reclaimed_slab > 0);
+			sc.nr_reclaimed += reclaim_state.reclaimed_slab;
+		} while (sc.nr_reclaimed < nr_pages &&
+			 reclaim_state.reclaimed_slab > 0);
 	}
 
+
 out:
 	current->reclaim_state = NULL;
 
-	return ret;
+	return sc.nr_reclaimed;
 }
 #endif
 
@@ -2290,11 +2294,12 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	int priority;
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
-		.may_swap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
 		.swappiness = vm_swappiness,
+		.order = order,
 		.isolate_pages = isolate_pages_global,
 	};
 	unsigned long slab_reclaimable;
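The added '.order = order' line means zone reclaim finally honors the order of the allocation that triggered it; sc.order was previously left zero-initialized here, so __zone_reclaim() never attempted order-aware reclaim even for higher-order allocations. Illustrative effect at the call site:

	/* the allocation order now reaches the reclaim heuristics */
	__zone_reclaim(zone, gfp_mask, order);	/* a nonzero order lets lumpy reclaim engage at low priorities */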