author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>   2009-12-14 20:59:10 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>     2009-12-15 11:53:18 -0500
commit     22fba33545b731408deab6e96b6e231ee05fd10b
tree       a522c19e19c2ce82f53ab781ec7901e3bdb8b200 /mm/vmscan.c
parent     cba5dd7fa535b7684cba68e17ac8be5b0083dc3d
vmscan: separate sc.swap_cluster_max and sc.nr_to_reclaim
Currently, sc.swap_cluster_max has a double meaning:

 1) the reclaim batch size passed to isolate_lru_pages()
 2) the reclaim bail-out threshold

The two meanings are essentially unrelated, so let's separate them into
sc.swap_cluster_max and sc.nr_to_reclaim.

This patch doesn't change any behavior.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
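
To make the split concrete, below is a minimal userspace sketch, not kernel code: only the field names mirror the patch, struct scan_control is trimmed to three members, and shrink_batch() is a made-up stand-in for one reclaim pass. After the split, swap_cluster_max stays the per-pass isolation batch size while nr_to_reclaim is the bail-out target.

/*
 * Illustration only: a trimmed-down model of the two separated fields.
 * Build with any C compiler, e.g. cc -Wall -o sketch sketch.c
 */
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

struct scan_control {
        unsigned long nr_reclaimed;      /* pages freed so far */
        unsigned long nr_to_reclaim;     /* bail-out target for the reclaim loop */
        unsigned long swap_cluster_max;  /* batch size handed to page isolation */
};

/* Hypothetical stand-in: pretend one pass frees exactly one batch. */
static unsigned long shrink_batch(unsigned long batch)
{
        return batch;
}

int main(void)
{
        struct scan_control sc = {
                .nr_to_reclaim    = SWAP_CLUSTER_MAX,
                .swap_cluster_max = SWAP_CLUSTER_MAX,
        };

        /* Batch size and bail-out threshold are now independent knobs. */
        while (sc.nr_reclaimed < sc.nr_to_reclaim)
                sc.nr_reclaimed += shrink_batch(sc.swap_cluster_max);

        printf("reclaimed %lu pages (target %lu)\n",
               sc.nr_reclaimed, sc.nr_to_reclaim);
        return 0;
}

The direct-reclaim paths in the diff below initialize both fields to SWAP_CLUSTER_MAX, so the bail-out still triggers after roughly one batch and the old behavior is preserved.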
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--   mm/vmscan.c | 25 +++++++++++++++++++------
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index cb69f717799f..7b0d5c784c7e 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -55,6 +55,9 @@ struct scan_control {
         /* Number of pages freed so far during a call to shrink_zones() */
         unsigned long nr_reclaimed;
 
+        /* How many pages shrink_list() should reclaim */
+        unsigned long nr_to_reclaim;
+
         /* This context's GFP mask */
         gfp_t gfp_mask;
 
@@ -1595,6 +1598,7 @@ static void shrink_zone(int priority, struct zone *zone,
         enum lru_list l;
         unsigned long nr_reclaimed = sc->nr_reclaimed;
         unsigned long swap_cluster_max = sc->swap_cluster_max;
+        unsigned long nr_to_reclaim = sc->nr_to_reclaim;
         struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
         int noswap = 0;
 
@@ -1639,8 +1643,7 @@ static void shrink_zone(int priority, struct zone *zone,
                  * with multiple processes reclaiming pages, the total
                  * freeing target can get unreasonably large.
                  */
-                if (nr_reclaimed > swap_cluster_max &&
-                        priority < DEF_PRIORITY && !current_is_kswapd())
+                if (nr_reclaimed > nr_to_reclaim && priority < DEF_PRIORITY)
                         break;
         }
 
@@ -1738,6 +1741,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
         struct zoneref *z;
         struct zone *zone;
         enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
+        unsigned long writeback_threshold;
 
         delayacct_freepages_start();
 
@@ -1773,7 +1777,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                         }
                 }
                 total_scanned += sc->nr_scanned;
-                if (sc->nr_reclaimed >= sc->swap_cluster_max) {
+                if (sc->nr_reclaimed >= sc->nr_to_reclaim) {
                         ret = sc->nr_reclaimed;
                         goto out;
                 }
@@ -1785,8 +1789,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
                  * that's undesirable in laptop mode, where we *want* lumpy
                  * writeout. So in laptop mode, write out the whole world.
                  */
-                if (total_scanned > sc->swap_cluster_max +
-                                        sc->swap_cluster_max / 2) {
+                writeback_threshold = sc->nr_to_reclaim + sc->nr_to_reclaim / 2;
+                if (total_scanned > writeback_threshold) {
                         wakeup_flusher_threads(laptop_mode ? 0 : total_scanned);
                         sc->may_writepage = 1;
                 }
@@ -1832,6 +1836,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
                 .gfp_mask = gfp_mask,
                 .may_writepage = !laptop_mode,
                 .swap_cluster_max = SWAP_CLUSTER_MAX,
+                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                 .may_unmap = 1,
                 .may_swap = 1,
                 .swappiness = vm_swappiness,
@@ -1890,6 +1895,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont,
                 .may_unmap = 1,
                 .may_swap = !noswap,
                 .swap_cluster_max = SWAP_CLUSTER_MAX,
+                .nr_to_reclaim = SWAP_CLUSTER_MAX,
                 .swappiness = swappiness,
                 .order = 0,
                 .mem_cgroup = mem_cont,
@@ -1961,6 +1967,11 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order)
                 .may_unmap = 1,
                 .may_swap = 1,
                 .swap_cluster_max = SWAP_CLUSTER_MAX,
+                /*
+                 * kswapd doesn't want to be bailed out while reclaim. because
+                 * we want to put equal scanning pressure on each zone.
+                 */
+                .nr_to_reclaim = ULONG_MAX,
                 .swappiness = vm_swappiness,
                 .order = order,
                 .mem_cgroup = NULL,
@@ -2630,7 +2641,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
                 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
                 .may_swap = 1,
                 .swap_cluster_max = max_t(unsigned long, nr_pages,
                                        SWAP_CLUSTER_MAX),
+                .nr_to_reclaim = max_t(unsigned long, nr_pages,
+                                       SWAP_CLUSTER_MAX),
                 .gfp_mask = gfp_mask,
                 .swappiness = vm_swappiness,
                 .order = order,
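
As a quick sanity check of the new writeback_threshold arithmetic in do_try_to_free_pages(), here is a standalone userspace sketch (not kernel code; it only assumes the kernel's default SWAP_CLUSTER_MAX of 32): the flusher threads are woken once total_scanned exceeds nr_to_reclaim plus half of it, i.e. 1.5x the reclaim target, or 48 pages for the default target.

/*
 * Standalone check of: writeback_threshold = nr_to_reclaim + nr_to_reclaim / 2.
 * With a target of 32 pages the threshold works out to 48 scanned pages.
 */
#include <assert.h>
#include <stdio.h>

#define SWAP_CLUSTER_MAX 32UL

/* Mirrors the "if (total_scanned > writeback_threshold)" test in the patch. */
static const char *action(unsigned long total_scanned, unsigned long threshold)
{
        return total_scanned > threshold ?
                "wake flusher threads, allow writepage" : "keep scanning";
}

int main(void)
{
        unsigned long nr_to_reclaim = SWAP_CLUSTER_MAX;
        unsigned long writeback_threshold = nr_to_reclaim + nr_to_reclaim / 2;

        assert(writeback_threshold == 48);
        printf("scanned 40: %s\n", action(40, writeback_threshold));
        printf("scanned 64: %s\n", action(64, writeback_threshold));
        return 0;
}

kswapd, by contrast, sets nr_to_reclaim to ULONG_MAX, so the bail-out in shrink_zone() never fires for it and every zone keeps receiving equal scanning pressure, as the comment added in balance_pgdat()'s scan_control notes.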