Diffstat (limited to 'mm/vmscan.c')
 mm/vmscan.c | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 39fdfb14eeaa..5fa3eda1f03f 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -63,6 +63,9 @@ struct scan_control {
 	/* Can mapped pages be reclaimed? */
 	int may_unmap;
 
+	/* Can pages be swapped as part of reclaim? */
+	int may_swap;
+
 	/* This context's SWAP_CLUSTER_MAX. If freeing memory for
 	 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
 	 * In this context, it doesn't matter that we scan the
@@ -1380,7 +1383,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	/* If we have no swap space, do not bother scanning anon pages. */
-	if (nr_swap_pages <= 0) {
+	if (!sc->may_swap || (nr_swap_pages <= 0)) {
 		percent[0] = 0;
 		percent[1] = 100;
 		return;
@@ -1468,7 +1471,7 @@ static void shrink_zone(int priority, struct zone *zone,
 
 	for_each_evictable_lru(l) {
 		int file = is_file_lru(l);
-		int scan;
+		unsigned long scan;
 
 		scan = zone_nr_pages(zone, sc, l);
 		if (priority) {
@@ -1697,6 +1700,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
 		.may_writepage = !laptop_mode,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.may_unmap = 1,
+		.may_swap = 1,
 		.swappiness = vm_swappiness,
 		.order = order,
 		.mem_cgroup = NULL,
@@ -1717,6 +1721,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	struct scan_control sc = {
 		.may_writepage = !laptop_mode,
 		.may_unmap = 1,
+		.may_swap = !noswap,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = swappiness,
 		.order = 0,
@@ -1726,9 +1731,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
 	};
 	struct zonelist *zonelist;
 
-	if (noswap)
-		sc.may_unmap = 0;
-
 	sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
 			(GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
 	zonelist = NODE_DATA(numa_node_id())->node_zonelists;
@@ -1767,6 +1769,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
 		.may_unmap = 1,
+		.may_swap = 1,
 		.swap_cluster_max = SWAP_CLUSTER_MAX,
 		.swappiness = vm_swappiness,
 		.order = order,
@@ -2088,13 +2091,13 @@ static void shrink_all_zones(unsigned long nr_pages, int prio,
 				nr_reclaimed += shrink_list(l, nr_to_scan, zone,
 								sc, prio);
 				if (nr_reclaimed >= nr_pages) {
-					sc->nr_reclaimed = nr_reclaimed;
+					sc->nr_reclaimed += nr_reclaimed;
 					return;
 				}
 			}
 		}
 	}
-	sc->nr_reclaimed = nr_reclaimed;
+	sc->nr_reclaimed += nr_reclaimed;
 }
 
 /*
@@ -2115,6 +2118,7 @@ unsigned long shrink_all_memory(unsigned long nr_pages)
 		.may_unmap = 0,
 		.may_writepage = 1,
 		.isolate_pages = isolate_pages_global,
+		.nr_reclaimed = 0,
 	};
 
 	current->reclaim_state = &reclaim_state;
@@ -2297,6 +2301,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
 	struct scan_control sc = {
 		.may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
 		.may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
+		.may_swap = 1,
 		.swap_cluster_max = max_t(unsigned long, nr_pages,
 					SWAP_CLUSTER_MAX),
 		.gfp_mask = gfp_mask,
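
The behavioral core of this change is the new test in get_scan_ratio(): when a reclaim context sets may_swap = 0 (e.g. a noswap memcg reclaim), all scan pressure shifts to the file LRU lists regardless of available swap. Below is a minimal, standalone C sketch of that gating logic; struct scan_control and nr_swap_pages here are simplified stand-ins for the kernel structures, and the 50/50 fallback is a placeholder for the real swappiness-based balance.

	/* Illustrative sketch, not kernel code. */
	#include <stdio.h>

	struct scan_control { int may_swap; };

	static long nr_swap_pages;	/* stand-in for the kernel global */

	static void get_scan_ratio(struct scan_control *sc, unsigned long percent[2])
	{
		/* If swap is disallowed for this pass, or there is no swap
		 * space, put all scan pressure on the file LRU lists. */
		if (!sc->may_swap || nr_swap_pages <= 0) {
			percent[0] = 0;		/* anon */
			percent[1] = 100;	/* file */
			return;
		}
		percent[0] = 50;	/* placeholder balance */
		percent[1] = 50;
	}

	int main(void)
	{
		struct scan_control sc = { .may_swap = 0 };	/* noswap reclaim */
		unsigned long percent[2];

		get_scan_ratio(&sc, percent);
		printf("anon %lu%%, file %lu%%\n", percent[0], percent[1]);
		return 0;
	}

With may_swap = 0 the sketch prints "anon 0%, file 100%", which is why the old workaround of clearing sc.may_unmap for noswap memcg reclaim (removed in the hunk at line 1726 above) is no longer needed: unmapping and swapping are now controlled independently.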