author     Johannes Weiner <jweiner@redhat.com>    2011-09-14 19:21:52 -0400
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2012-08-01 15:26:55 -0400
commit     33c17eafdeefb08fbb6ded946abcf024f76c9615 (patch)
tree       d79cdb7090a67ca6a5b02b3df5e1b56daf8d0f0f
parent     71a07f4cf29615d30369760c022972d4875758b3 (diff)
mm: vmscan: fix force-scanning small targets without swap
commit a4d3e9e76337059406fcf3ead288c0df22a790e9 upstream.

Stable note: Not tracked in Bugzilla. This patch augments an earlier commit
that avoids scanning priority being artificially raised. The older fix was
particularly important for small memcgs to avoid calling
wait_iff_congested() unnecessarily.

Without swap, anonymous pages are not scanned. As such, they should not
count when considering force-scanning a small target if there is no swap.

Otherwise, targets are not force-scanned even when their effective scan
number is zero and the other conditions--kswapd/memcg--apply.

This fixes 246e87a93934 ("memcg: fix get_scan_count() for small targets").

[akpm@linux-foundation.org: fix comment]
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Ying Han <yinghan@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
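To illustrate the problem being fixed, here is a small standalone sketch
(made-up page counts and a simplified version of the pre-patch check, not the
kernel code): with swap disabled, a large anonymous LRU keeps the old
small-target test from triggering even though the file scan goal at the
current priority has already rounded down to zero, so the target is never
force-scanned.

    /*
     * Simplified sketch of the pre-patch small-target check; the values,
     * priority, and the check itself are illustrative only.
     */
    #include <stdbool.h>
    #include <stdio.h>

    #define SWAP_CLUSTER_MAX 32UL

    int main(void)
    {
            unsigned long anon = 200000;    /* never scanned when there is no swap */
            unsigned long file = 100;       /* the only pages reclaim can actually use */
            int priority = 10;              /* scan goal is lru size >> priority */

            /*
             * Pre-patch: anon inflates the target size, so it does not look
             * "small" and force_scan stays false ...
             */
            bool force_scan = ((anon + file) >> priority) < SWAP_CLUSTER_MAX;

            /* ... even though the effective file scan goal is already zero. */
            unsigned long file_scan = file >> priority;

            printf("force_scan=%d, file scan goal=%lu\n", force_scan, file_scan);
            return 0;
    }

The hunks below address this by no longer summing anon and file for the
force-scan decision and by computing those totals only after the no-swap
bailout.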
 mm/vmscan.c | 27 ++++++++++++---------------
 1 file changed, 12 insertions(+), 15 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 130fa32441c..347bb4478f3 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1747,23 +1747,15 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	u64 fraction[2], denominator;
 	enum lru_list l;
 	int noswap = 0;
-	int force_scan = 0;
+	bool force_scan = false;
 	unsigned long nr_force_scan[2];
 
-
-	anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
-		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
-	file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
-		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
-	if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
-		/* kswapd does zone balancing and need to scan this zone */
-		if (scanning_global_lru(sc) && current_is_kswapd())
-			force_scan = 1;
-		/* memcg may have small limit and need to avoid priority drop */
-		if (!scanning_global_lru(sc))
-			force_scan = 1;
-	}
+	/* kswapd does zone balancing and needs to scan this zone */
+	if (scanning_global_lru(sc) && current_is_kswapd())
+		force_scan = true;
+	/* memcg may have small limit and need to avoid priority drop */
+	if (!scanning_global_lru(sc))
+		force_scan = true;
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1776,6 +1768,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 		goto out;
 	}
 
+	anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+	file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+		zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
 	if (scanning_global_lru(sc)) {
 		free = zone_page_state(zone, NR_FREE_PAGES);
 		/* If we have very few page cache pages,