author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>    2009-01-07 21:08:20 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>      2009-01-08 11:31:08 -0500
commit     3e2f41f1f64744f7942980d93cc93dd3e5924560 (patch)
tree       7b605c407b7470877fd9c5c853407f75edcbeb49 /mm/vmscan.c
parent     a3d8e0549d913e30968fa02e505dfe02c0a23e0d (diff)
memcg: add zone_reclaim_stat
Introduce the mem_cgroup_per_zone::reclaim_stat member and its statistics-collecting function. Now get_scan_ratio() can calculate correct values during memcg reclaim.

[hugh@veritas.com: avoid reclaim_stat oops when disabled]

Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
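For context, the memcg side of the change (in the memcg headers and mm/memcontrol.c, which are not part of this vmscan.c diff) can be pictured roughly as below. Only mem_cgroup_get_reclaim_stat() and the reclaim_stat member are named by this commit; the field comments and the mem_cgroup_zoneinfo()-based lookup are illustrative assumptions, not a quote of the patch:

/*
 * Illustrative sketch only: struct zone_reclaim_stat already exists for the
 * global LRU (see &zone->reclaim_stat in the diff below); this commit gives
 * each mem_cgroup_per_zone its own copy plus an accessor.
 */
struct zone_reclaim_stat {
        unsigned long recent_rotated[2];        /* [0] anon, [1] file */
        unsigned long recent_scanned[2];        /* [0] anon, [1] file */
};

struct mem_cgroup_per_zone {
        /* ... existing per-zone LRU lists and counters ... */
        struct zone_reclaim_stat reclaim_stat;  /* new member */
};

struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone)
{
        /* assumed lookup path via the per-node, per-zone memcg info */
        int nid = zone->zone_pgdat->node_id;
        int zid = zone_idx(zone);
        struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);

        return &mz->reclaim_stat;
}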
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 27 +++++++++++++--------------
1 file changed, 13 insertions(+), 14 deletions(-)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index d958d624d3ae..56fc7abe4d23 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -133,6 +133,9 @@ static DECLARE_RWSEM(shrinker_rwsem);
 static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
                                                   struct scan_control *sc)
 {
+        if (!scan_global_lru(sc))
+                return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
+
         return &zone->reclaim_stat;
 }
 
@@ -1087,17 +1090,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
                                       -count[LRU_INACTIVE_ANON]);
 
-                if (scan_global_lru(sc)) {
+                if (scan_global_lru(sc))
                         zone->pages_scanned += nr_scan;
-                        reclaim_stat->recent_scanned[0] +=
-                                                count[LRU_INACTIVE_ANON];
-                        reclaim_stat->recent_scanned[0] +=
-                                                count[LRU_ACTIVE_ANON];
-                        reclaim_stat->recent_scanned[1] +=
-                                                count[LRU_INACTIVE_FILE];
-                        reclaim_stat->recent_scanned[1] +=
-                                                count[LRU_ACTIVE_FILE];
-                }
+
+                reclaim_stat->recent_scanned[0] += count[LRU_INACTIVE_ANON];
+                reclaim_stat->recent_scanned[0] += count[LRU_ACTIVE_ANON];
+                reclaim_stat->recent_scanned[1] += count[LRU_INACTIVE_FILE];
+                reclaim_stat->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+
                 spin_unlock_irq(&zone->lru_lock);
 
                 nr_scanned += nr_scan;
@@ -1155,7 +1155,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
                         SetPageLRU(page);
                         lru = page_lru(page);
                         add_page_to_lru_list(zone, page, lru);
-                        if (PageActive(page) && scan_global_lru(sc)) {
+                        if (PageActive(page)) {
                                 int file = !!page_is_file_cache(page);
                                 reclaim_stat->recent_rotated[file]++;
                         }
@@ -1230,8 +1230,8 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
          */
         if (scan_global_lru(sc)) {
                 zone->pages_scanned += pgscanned;
-                reclaim_stat->recent_scanned[!!file] += pgmoved;
         }
+        reclaim_stat->recent_scanned[!!file] += pgmoved;
 
         if (file)
                 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -pgmoved);
@@ -1272,8 +1272,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
          * This helps balance scan pressure between file and anonymous
          * pages in get_scan_ratio.
          */
-        if (scan_global_lru(sc))
-                reclaim_stat->recent_rotated[!!file] += pgmoved;
+        reclaim_stat->recent_rotated[!!file] += pgmoved;
 
         while (!list_empty(&l_inactive)) {
                 page = lru_to_page(&l_inactive);
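
As a closing illustration of why the scan_global_lru() guards above could be dropped: get_reclaim_stat() now resolves to either the global per-zone statistics or the memcg's own, so a get_scan_ratio()-style consumer reads the same counters on both reclaim paths. The helper below is hypothetical and its arithmetic is a simplification of the real ratio logic in get_scan_ratio():

/*
 * Hypothetical helper, sketching the consumer side: for each LRU class
 * (0 = anon, 1 = file), a higher scanned-to-rotated ratio means fewer of
 * the scanned pages were re-referenced, so that class is cheaper to
 * reclaim and can take more scan pressure.
 */
static void scan_pressure_sketch(struct zone *zone, struct scan_control *sc,
                                 unsigned long pressure[2])
{
        struct zone_reclaim_stat *rs = get_reclaim_stat(zone, sc);
        int lru;

        for (lru = 0; lru < 2; lru++)
                pressure[lru] = (rs->recent_scanned[lru] + 1) /
                                (rs->recent_rotated[lru] + 1);
}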