author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2010-08-09 20:19:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-08-09 23:44:59 -0400
commit		c6a8a8c589b53f90854a07db3b5806ce111e826b
tree		c806b2f0d8f6f5b315f94daf864999b273d9530f /mm
parent		b00d3ea7cfe44e177ad5cd8141209d46478a7a51
vmscan: recalculate lru_pages on each priority
shrink_zones() needs a relatively long time, and lru_pages can change
dramatically while it runs.  So lru_pages should be recalculated for
each priority.
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
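
To make the rationale above concrete, here is a minimal userspace C sketch
(not kernel code; the actual patch follows below).  It models a reclaim-style
loop in which the total of reclaimable pages shrinks on every pass, and
compares a ratio computed from a stale one-time snapshot against one computed
from a per-pass total.  All names (fake_zone, reclaimable_pages, reclaim_pass)
are hypothetical, and the exact scaling used by shrink_slab() is deliberately
not reproduced.

/*
 * Sketch: why a snapshot of "reclaimable pages" taken before the loop
 * goes stale, and why recomputing it per pass keeps ratios meaningful.
 */
#include <stdio.h>

#define NR_ZONES 3

struct fake_zone {
	unsigned long reclaimable;	/* stand-in for zone_reclaimable_pages() */
};

static struct fake_zone zones[NR_ZONES] = {
	{ .reclaimable = 4000 },
	{ .reclaimable = 2500 },
	{ .reclaimable = 1500 },
};

/* Sum reclaimable pages over all zones, like the lru_pages accumulation. */
static unsigned long reclaimable_pages(void)
{
	unsigned long total = 0;
	int i;

	for (i = 0; i < NR_ZONES; i++)
		total += zones[i].reclaimable;
	return total;
}

/* Pretend each pass frees a quarter of every zone, shrinking the totals. */
static unsigned long reclaim_pass(void)
{
	unsigned long scanned = 0;
	int i;

	for (i = 0; i < NR_ZONES; i++) {
		unsigned long freed = zones[i].reclaimable / 4;

		zones[i].reclaimable -= freed;
		scanned += freed;
	}
	return scanned;
}

int main(void)
{
	unsigned long stale = reclaimable_pages();	/* one-time snapshot, as before the patch */
	int priority;

	for (priority = 4; priority >= 0; priority--) {
		unsigned long scanned = reclaim_pass();
		unsigned long fresh = reclaimable_pages();	/* recomputed each pass, as after the patch */

		printf("prio %d: scanned=%lu stale_ratio=%lu%% fresh_ratio=%lu%%\n",
		       priority, scanned,
		       100 * scanned / (stale + 1),
		       100 * scanned / (fresh + 1));
	}
	return 0;
}

Running it shows the stale ratio decaying toward zero as the LRUs shrink,
while the per-pass ratio stays roughly constant; keeping that ratio honest is
why the patch moves the lru_pages calculation inside the priority loop.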
Diffstat (limited to 'mm')
-rw-r--r--	mm/vmscan.c	21
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 5b594d62ca01..6dafa45d79e4 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1787,7 +1787,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 	bool all_unreclaimable;
 	unsigned long total_scanned = 0;
 	struct reclaim_state *reclaim_state = current->reclaim_state;
-	unsigned long lru_pages = 0;
 	struct zoneref *z;
 	struct zone *zone;
 	enum zone_type high_zoneidx = gfp_zone(sc->gfp_mask);
@@ -1798,18 +1797,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 
 	if (scanning_global_lru(sc))
 		count_vm_event(ALLOCSTALL);
-	/*
-	 * mem_cgroup will not do shrink_slab.
-	 */
-	if (scanning_global_lru(sc)) {
-		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
-
-			if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
-				continue;
-
-			lru_pages += zone_reclaimable_pages(zone);
-		}
-	}
 
 	for (priority = DEF_PRIORITY; priority >= 0; priority--) {
 		sc->nr_scanned = 0;
@@ -1821,6 +1808,14 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
 		 * over limit cgroups
 		 */
 		if (scanning_global_lru(sc)) {
+			unsigned long lru_pages = 0;
+			for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+				if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
+					continue;
+
+				lru_pages += zone_reclaimable_pages(zone);
+			}
+
 			shrink_slab(sc->nr_scanned, sc->gfp_mask, lru_pages);
 			if (reclaim_state) {
 				sc->nr_reclaimed += reclaim_state->reclaimed_slab;