author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>    2009-01-07 21:08:15 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>     2009-01-08 11:31:07 -0500
commit    6e9015716ae9b59e9635d692fddfcfb9582c146c (patch)
tree      e1876d3822c46a20e1c35b41580f5ef6b2f6e053 /mm/vmscan.c
parent    f89eb90e33fd4e4e0cc1a6d20afd63c5a561885a (diff)
mm: introduce zone_reclaim struct
Add a zone_reclaim_stat struct for later enhancement; a later patch uses it.
This patch doesn't introduce any behavior change (yet).

Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
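The struct definition itself lives outside mm/vmscan.c (this page's diffstat is limited to that file), so it is not shown here. A minimal sketch of the layout that get_reclaim_stat() returns, inferred only from the fields this diff dereferences (recent_scanned[] and recent_rotated[], anon in [0] and file in [1]):

    /* Sketch inferred from the uses below, not the full patch. */
    struct zone_reclaim_stat {
            /*
             * Anon stats live in [0], file stats in [1].  The higher
             * the rotated/scanned ratio, the more valuable that cache
             * is judged to be by get_scan_ratio().
             */
            unsigned long recent_rotated[2];
            unsigned long recent_scanned[2];
    };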
Diffstat (limited to 'mm/vmscan.c')
-rw-r--r--  mm/vmscan.c | 47
1 file changed, 30 insertions, 17 deletions
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f75d924cb4f4..03ca923c6656 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -130,6 +130,12 @@ static DECLARE_RWSEM(shrinker_rwsem);
 #define scan_global_lru(sc)	(1)
 #endif
 
+static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
+						  struct scan_control *sc)
+{
+	return &zone->reclaim_stat;
+}
+
 /*
  * Add a shrinker callback to be called from the vm
  */
@@ -1029,6 +1035,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	struct pagevec pvec;
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	pagevec_init(&pvec, 1);
 
@@ -1072,10 +1079,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 
 		if (scan_global_lru(sc)) {
 			zone->pages_scanned += nr_scan;
-			zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
-			zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
-			zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
-			zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+			reclaim_stat->recent_scanned[0] +=
+						count[LRU_INACTIVE_ANON];
+			reclaim_stat->recent_scanned[0] +=
+						count[LRU_ACTIVE_ANON];
+			reclaim_stat->recent_scanned[1] +=
+						count[LRU_INACTIVE_FILE];
+			reclaim_stat->recent_scanned[1] +=
+						count[LRU_ACTIVE_FILE];
 		}
 		spin_unlock_irq(&zone->lru_lock);
 
@@ -1136,7 +1147,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		add_page_to_lru_list(zone, page, lru);
 		if (PageActive(page) && scan_global_lru(sc)) {
 			int file = !!page_is_file_cache(page);
-			zone->recent_rotated[file]++;
+			reclaim_stat->recent_rotated[file]++;
 		}
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1196,6 +1207,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	struct page *page;
 	struct pagevec pvec;
 	enum lru_list lru;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
@@ -1208,7 +1220,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 */
 	if (scan_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
-		zone->recent_scanned[!!file] += pgmoved;
+		reclaim_stat->recent_scanned[!!file] += pgmoved;
 	}
 
 	if (file)
@@ -1251,7 +1263,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * pages in get_scan_ratio.
 	 */
 	if (scan_global_lru(sc))
-		zone->recent_rotated[!!file] += pgmoved;
+		reclaim_stat->recent_rotated[!!file] += pgmoved;
 
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
@@ -1344,6 +1356,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	unsigned long anon, file, free;
 	unsigned long anon_prio, file_prio;
 	unsigned long ap, fp;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (nr_swap_pages <= 0) {
@@ -1376,17 +1389,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	 *
 	 * anon in [0], file in [1]
 	 */
-	if (unlikely(zone->recent_scanned[0] > anon / 4)) {
+	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
 		spin_lock_irq(&zone->lru_lock);
-		zone->recent_scanned[0] /= 2;
-		zone->recent_rotated[0] /= 2;
+		reclaim_stat->recent_scanned[0] /= 2;
+		reclaim_stat->recent_rotated[0] /= 2;
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	if (unlikely(zone->recent_scanned[1] > file / 4)) {
+	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
 		spin_lock_irq(&zone->lru_lock);
-		zone->recent_scanned[1] /= 2;
-		zone->recent_rotated[1] /= 2;
+		reclaim_stat->recent_scanned[1] /= 2;
+		reclaim_stat->recent_rotated[1] /= 2;
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
@@ -1402,11 +1415,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	 * proportional to the fraction of recently scanned pages on
	 * each list that were recently referenced and in active use.
 	 */
-	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
-	ap /= zone->recent_rotated[0] + 1;
+	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+	ap /= reclaim_stat->recent_rotated[0] + 1;
 
-	fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
-	fp /= zone->recent_rotated[1] + 1;
+	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+	fp /= reclaim_stat->recent_rotated[1] + 1;
 
 	/* Normalize to percentages */
 	percent[0] = 100 * ap / (ap + fp + 1);
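For intuition, a small standalone sketch of the arithmetic in the final hunk. The input values are invented for illustration, and anon_prio/file_prio are treated as plain parameters here (this diff does not show where the kernel derives them):

    #include <stdio.h>

    int main(void)
    {
            /* Hypothetical sample values, not from the patch. */
            unsigned long anon_prio = 60, file_prio = 140;
            unsigned long recent_scanned[2] = { 1000, 4000 }; /* anon, file */
            unsigned long recent_rotated[2] = {  800,  400 };

            /* Same formula as above: the more of a list's scanned pages
             * were rotated (re-referenced), the smaller its share of
             * future scanning pressure. */
            unsigned long ap = (anon_prio + 1) * (recent_scanned[0] + 1);
            ap /= recent_rotated[0] + 1;
            unsigned long fp = (file_prio + 1) * (recent_scanned[1] + 1);
            fp /= recent_rotated[1] + 1;

            /* Normalize to percentages, as percent[0]/percent[1] above. */
            printf("anon %lu%%, file %lu%%\n",
                   100 * ap / (ap + fp + 1), 100 * fp / (ap + fp + 1));
            return 0;
    }

With these numbers almost all recently scanned anon pages were re-referenced while most file pages were not, so the anon list ends up with only a few percent of the scanning pressure.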