aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorKOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>2009-12-14 20:59:14 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2009-12-15 11:53:18 -0500
commitece74b2e7acfb71453f3f39948cc667434550dbb (patch)
tree08749bc2d0a6169d1c1525c502a2faf13e84b244 /mm
parent4f0ddfdffc8bef3a5eb9154734d68a6053194948 (diff)
vmscan: kill sc.swap_cluster_max
Now, all callers of reclaim use swap_cluster_max as SWAP_CLUSTER_MAX, so we can remove it entirely. Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Reviewed-by: Rik van Riel <riel@redhat.com> Reviewed-by: Minchan Kim <minchan.kim@gmail.com> Cc: Mel Gorman <mel@csn.ul.ie> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/vmscan.c26
1 file changed, 6 insertions, 20 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d55d106ad179..2b1c74817a1e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -71,12 +71,6 @@ struct scan_control {
71 /* Can pages be swapped as part of reclaim? */ 71 /* Can pages be swapped as part of reclaim? */
72 int may_swap; 72 int may_swap;
73 73
74 /* This context's SWAP_CLUSTER_MAX. If freeing memory for
75 * suspend, we effectively ignore SWAP_CLUSTER_MAX.
76 * In this context, it doesn't matter that we scan the
77 * whole list at once. */
78 int swap_cluster_max;
79
80 int swappiness; 74 int swappiness;
81 75
82 int all_unreclaimable; 76 int all_unreclaimable;
@@ -1137,7 +1131,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
1137 unsigned long nr_anon; 1131 unsigned long nr_anon;
1138 unsigned long nr_file; 1132 unsigned long nr_file;
1139 1133
1140 nr_taken = sc->isolate_pages(sc->swap_cluster_max, 1134 nr_taken = sc->isolate_pages(SWAP_CLUSTER_MAX,
1141 &page_list, &nr_scan, sc->order, mode, 1135 &page_list, &nr_scan, sc->order, mode,
1142 zone, sc->mem_cgroup, 0, file); 1136 zone, sc->mem_cgroup, 0, file);
1143 1137
@@ -1572,15 +1566,14 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
1572 * until we collected @swap_cluster_max pages to scan. 1566 * until we collected @swap_cluster_max pages to scan.
1573 */ 1567 */
1574static unsigned long nr_scan_try_batch(unsigned long nr_to_scan, 1568static unsigned long nr_scan_try_batch(unsigned long nr_to_scan,
1575 unsigned long *nr_saved_scan, 1569 unsigned long *nr_saved_scan)
1576 unsigned long swap_cluster_max)
1577{ 1570{
1578 unsigned long nr; 1571 unsigned long nr;
1579 1572
1580 *nr_saved_scan += nr_to_scan; 1573 *nr_saved_scan += nr_to_scan;
1581 nr = *nr_saved_scan; 1574 nr = *nr_saved_scan;
1582 1575
1583 if (nr >= swap_cluster_max) 1576 if (nr >= SWAP_CLUSTER_MAX)
1584 *nr_saved_scan = 0; 1577 *nr_saved_scan = 0;
1585 else 1578 else
1586 nr = 0; 1579 nr = 0;
@@ -1599,7 +1592,6 @@ static void shrink_zone(int priority, struct zone *zone,
1599 unsigned long percent[2]; /* anon @ 0; file @ 1 */ 1592 unsigned long percent[2]; /* anon @ 0; file @ 1 */
1600 enum lru_list l; 1593 enum lru_list l;
1601 unsigned long nr_reclaimed = sc->nr_reclaimed; 1594 unsigned long nr_reclaimed = sc->nr_reclaimed;
1602 unsigned long swap_cluster_max = sc->swap_cluster_max;
1603 unsigned long nr_to_reclaim = sc->nr_to_reclaim; 1595 unsigned long nr_to_reclaim = sc->nr_to_reclaim;
1604 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc); 1596 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
1605 int noswap = 0; 1597 int noswap = 0;
@@ -1622,15 +1614,15 @@ static void shrink_zone(int priority, struct zone *zone,
1622 scan = (scan * percent[file]) / 100; 1614 scan = (scan * percent[file]) / 100;
1623 } 1615 }
1624 nr[l] = nr_scan_try_batch(scan, 1616 nr[l] = nr_scan_try_batch(scan,
1625 &reclaim_stat->nr_saved_scan[l], 1617 &reclaim_stat->nr_saved_scan[l]);
1626 swap_cluster_max);
1627 } 1618 }
1628 1619
1629 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1620 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
1630 nr[LRU_INACTIVE_FILE]) { 1621 nr[LRU_INACTIVE_FILE]) {
1631 for_each_evictable_lru(l) { 1622 for_each_evictable_lru(l) {
1632 if (nr[l]) { 1623 if (nr[l]) {
1633 nr_to_scan = min(nr[l], swap_cluster_max); 1624 nr_to_scan = min_t(unsigned long,
1625 nr[l], SWAP_CLUSTER_MAX);
1634 nr[l] -= nr_to_scan; 1626 nr[l] -= nr_to_scan;
1635 1627
1636 nr_reclaimed += shrink_list(l, nr_to_scan, 1628 nr_reclaimed += shrink_list(l, nr_to_scan,
@@ -1838,7 +1830,6 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
1838 struct scan_control sc = { 1830 struct scan_control sc = {
1839 .gfp_mask = gfp_mask, 1831 .gfp_mask = gfp_mask,
1840 .may_writepage = !laptop_mode, 1832 .may_writepage = !laptop_mode,
1841 .swap_cluster_max = SWAP_CLUSTER_MAX,
1842 .nr_to_reclaim = SWAP_CLUSTER_MAX, 1833 .nr_to_reclaim = SWAP_CLUSTER_MAX,
1843 .may_unmap = 1, 1834 .may_unmap = 1,
1844 .may_swap = 1, 1835 .may_swap = 1,
@@ -1863,7 +1854,6 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
1863 .may_writepage = !laptop_mode, 1854 .may_writepage = !laptop_mode,
1864 .may_unmap = 1, 1855 .may_unmap = 1,
1865 .may_swap = !noswap, 1856 .may_swap = !noswap,
1866 .swap_cluster_max = SWAP_CLUSTER_MAX,
1867 .swappiness = swappiness, 1857 .swappiness = swappiness,
1868 .order = 0, 1858 .order = 0,
1869 .mem_cgroup = mem, 1859 .mem_cgroup = mem,
@@ -1897,7 +1887,6 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
1897 .may_writepage = !laptop_mode, 1887 .may_writepage = !laptop_mode,
1898 .may_unmap = 1, 1888 .may_unmap = 1,
1899 .may_swap = !noswap, 1889 .may_swap = !noswap,
1900 .swap_cluster_max = SWAP_CLUSTER_MAX,
1901 .nr_to_reclaim = SWAP_CLUSTER_MAX, 1890 .nr_to_reclaim = SWAP_CLUSTER_MAX,
1902 .swappiness = swappiness, 1891 .swappiness = swappiness,
1903 .order = 0, 1892 .order = 0,
@@ -1969,7 +1958,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
1969 .gfp_mask = GFP_KERNEL, 1958 .gfp_mask = GFP_KERNEL,
1970 .may_unmap = 1, 1959 .may_unmap = 1,
1971 .may_swap = 1, 1960 .may_swap = 1,
1972 .swap_cluster_max = SWAP_CLUSTER_MAX,
1973 /* 1961 /*
1974 * kswapd doesn't want to be bailed out while reclaim. because 1962 * kswapd doesn't want to be bailed out while reclaim. because
1975 * we want to put equal scanning pressure on each zone. 1963 * we want to put equal scanning pressure on each zone.
@@ -2354,7 +2342,6 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
2354 .may_swap = 1, 2342 .may_swap = 1,
2355 .may_unmap = 1, 2343 .may_unmap = 1,
2356 .may_writepage = 1, 2344 .may_writepage = 1,
2357 .swap_cluster_max = SWAP_CLUSTER_MAX,
2358 .nr_to_reclaim = nr_to_reclaim, 2345 .nr_to_reclaim = nr_to_reclaim,
2359 .hibernation_mode = 1, 2346 .hibernation_mode = 1,
2360 .swappiness = vm_swappiness, 2347 .swappiness = vm_swappiness,
@@ -2539,7 +2526,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
2539 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 2526 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
2540 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), 2527 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
2541 .may_swap = 1, 2528 .may_swap = 1,
2542 .swap_cluster_max = SWAP_CLUSTER_MAX,
2543 .nr_to_reclaim = max_t(unsigned long, nr_pages, 2529 .nr_to_reclaim = max_t(unsigned long, nr_pages,
2544 SWAP_CLUSTER_MAX), 2530 SWAP_CLUSTER_MAX),
2545 .gfp_mask = gfp_mask, 2531 .gfp_mask = gfp_mask,