author    Johannes Weiner <jweiner@redhat.com>  2011-10-31 20:07:27 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2011-10-31 20:30:46 -0400
commit    f11c0ca501af89fc07b0d9f17531ba3b68a4ef39 (patch)
tree      c66a24b4ca2778b940c01a2af78eca6abc0b3421 /mm/vmscan.c
parent    4f31888c104687078f8d88c2f11eca1080c88464 (diff)
mm: vmscan: drop nr_force_scan[] from get_scan_count
The nr_force_scan[] tuple holds the effective scan numbers for anon and
file pages in case the situation called for a forced scan and the
regularly calculated scan numbers turned out zero.

However, the effective scan number can always be assumed to be
SWAP_CLUSTER_MAX right before the division into anon and file.  The
numerators and denominator are properly set up for all cases, be it
force scan for just file, just anon, or both, to do the right thing.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Cc: Ying Han <yinghan@google.com>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
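A quick arithmetic check of the equivalence the message relies on. This is a
standalone userspace sketch, not kernel code: the ap/fp inputs are made up,
and div64_u64() is stood in for by plain 64-bit division.

#include <stdio.h>
#include <stdint.h>

#define SWAP_CLUSTER_MAX 32ULL

int main(void)
{
	uint64_t ap = 130, fp = 70;          /* hypothetical anon/file pressure values */
	uint64_t fraction[2] = { ap, fp };   /* fraction[0] = anon, fraction[1] = file */
	uint64_t denominator = ap + fp + 1;

	for (int file = 0; file <= 1; file++) {
		/* old scheme: nr_force_scan[] precomputed at fraction setup time */
		uint64_t precomputed = SWAP_CLUSTER_MAX * fraction[file] / denominator;

		/* new scheme: substitute SWAP_CLUSTER_MAX right before the division */
		uint64_t scan = 0;               /* the regular calculation came out zero */
		if (!scan)                       /* i.e. the force_scan case */
			scan = SWAP_CLUSTER_MAX;
		scan = scan * fraction[file] / denominator;

		printf("%s: precomputed=%llu on-the-fly=%llu\n",
		       file ? "file" : "anon",
		       (unsigned long long)precomputed,
		       (unsigned long long)scan);
	}
	return 0;
}

Both columns print the same value for anon and file, which is why the
precomputed nr_force_scan[] tuple can be dropped.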
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c  36
1 file changed, 12 insertions, 24 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index b1520b077858..d29b2bdb9e03 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1817,12 +1817,19 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	enum lru_list l;
 	int noswap = 0;
 	bool force_scan = false;
-	unsigned long nr_force_scan[2];
 
-	/* kswapd does zone balancing and needs to scan this zone */
+	/*
+	 * If the zone or memcg is small, nr[l] can be 0.  This
+	 * results in no scanning on this priority and a potential
+	 * priority drop.  Global direct reclaim can go to the next
+	 * zone and tends to have no problems.  Global kswapd is for
+	 * zone balancing and it needs to scan a minimum amount. When
+	 * reclaiming for a memcg, a priority drop can cause high
+	 * latencies, so it's better to scan a minimum amount there as
+	 * well.
+	 */
 	if (scanning_global_lru(sc) && current_is_kswapd())
 		force_scan = true;
-	/* memcg may have small limit and need to avoid priority drop */
 	if (!scanning_global_lru(sc))
 		force_scan = true;
 
@@ -1832,8 +1839,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 		fraction[0] = 0;
 		fraction[1] = 1;
 		denominator = 1;
-		nr_force_scan[0] = 0;
-		nr_force_scan[1] = SWAP_CLUSTER_MAX;
 		goto out;
 	}
 
@@ -1850,8 +1855,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 			fraction[0] = 1;
 			fraction[1] = 0;
 			denominator = 1;
-			nr_force_scan[0] = SWAP_CLUSTER_MAX;
-			nr_force_scan[1] = 0;
 			goto out;
 		}
 	}
@@ -1900,11 +1903,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
 	fraction[0] = ap;
 	fraction[1] = fp;
 	denominator = ap + fp + 1;
-	if (force_scan) {
-		unsigned long scan = SWAP_CLUSTER_MAX;
-		nr_force_scan[0] = div64_u64(scan * ap, denominator);
-		nr_force_scan[1] = div64_u64(scan * fp, denominator);
-	}
 out:
 	for_each_evictable_lru(l) {
 		int file = is_file_lru(l);
@@ -1913,20 +1911,10 @@ out:
 		scan = zone_nr_lru_pages(zone, sc, l);
 		if (priority || noswap) {
 			scan >>= priority;
+			if (!scan && force_scan)
+				scan = SWAP_CLUSTER_MAX;
 			scan = div64_u64(scan * fraction[file], denominator);
 		}
-
-		/*
-		 * If zone is small or memcg is small, nr[l] can be 0.
-		 * This results no-scan on this priority and priority drop down.
-		 * For global direct reclaim, it can visit next zone and tend
-		 * not to have problems. For global kswapd, it's for zone
-		 * balancing and it need to scan a small amounts. When using
-		 * memcg, priority drop can cause big latency. So, it's better
-		 * to scan small amount. See may_noscan above.
-		 */
-		if (!scan && force_scan)
-			scan = nr_force_scan[file];
 		nr[l] = scan;
 	}
 }