| author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2009-01-07 21:08:15 -0500 |
| --- | --- | --- |
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-08 11:31:07 -0500 |
| commit | 6e9015716ae9b59e9635d692fddfcfb9582c146c (patch) | |
| tree | e1876d3822c46a20e1c35b41580f5ef6b2f6e053 /mm | |
| parent | f89eb90e33fd4e4e0cc1a6d20afd63c5a561885a (diff) | |
mm: introduce zone_reclaim_stat struct
Add a zone_reclaim_stat struct for later enhancement.
A later patch uses this. This patch doesn't introduce any behavior change (yet).
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
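The diffstat below is limited to 'mm', so the include/linux/mmzone.h side of the patch (the struct definition itself and its embedding in struct zone) is not shown. Judging only from the accesses in the diff, the new type plausibly looks like the following sketch; the field comments are inferred from usage, not quoted from the header:

```c
/* Sketch inferred from the accesses below; not the verbatim mmzone.h hunk. */
struct zone_reclaim_stat {
	/*
	 * Anon LRU statistics live in [0], file LRU statistics in [1]
	 * (the diff indexes these arrays with !!file / is_file_lru(lru)).
	 */
	unsigned long	recent_rotated[2];	/* pages returned to the active list */
	unsigned long	recent_scanned[2];	/* pages scanned off the LRU lists */
};
```

struct zone then presumably carries one instance as zone->reclaim_stat, which free_area_init_core() zeroes below.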
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	 8
-rw-r--r--	mm/swap.c	12
-rw-r--r--	mm/vmscan.c	47
3 files changed, 42 insertions, 25 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7bf22e045318..5675b3073854 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3523,10 +3523,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		INIT_LIST_HEAD(&zone->lru[l].list);
 		zone->lru[l].nr_scan = 0;
 	}
-	zone->recent_rotated[0] = 0;
-	zone->recent_rotated[1] = 0;
-	zone->recent_scanned[0] = 0;
-	zone->recent_scanned[1] = 0;
+	zone->reclaim_stat.recent_rotated[0] = 0;
+	zone->reclaim_stat.recent_rotated[1] = 0;
+	zone->reclaim_stat.recent_scanned[0] = 0;
+	zone->reclaim_stat.recent_scanned[1] = 0;
 	zap_zone_vm_stats(zone);
 	zone->flags = 0;
 	if (!size)
diff --git a/mm/swap.c b/mm/swap.c
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -157,6 +157,7 @@ void rotate_reclaimable_page(struct page *page)
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
+	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
 
 	spin_lock_irq(&zone->lru_lock);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -169,8 +170,8 @@ void activate_page(struct page *page)
 		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
 
-		zone->recent_rotated[!!file]++;
-		zone->recent_scanned[!!file]++;
+		reclaim_stat->recent_rotated[!!file]++;
+		reclaim_stat->recent_scanned[!!file]++;
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
@@ -385,6 +386,8 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
 	int i;
 	struct zone *zone = NULL;
+	struct zone_reclaim_stat *reclaim_stat = NULL;
+
 	VM_BUG_ON(is_unevictable_lru(lru));
 
 	for (i = 0; i < pagevec_count(pvec); i++) {
@@ -396,6 +399,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 			if (zone)
 				spin_unlock_irq(&zone->lru_lock);
 			zone = pagezone;
+			reclaim_stat = &zone->reclaim_stat;
 			spin_lock_irq(&zone->lru_lock);
 		}
 		VM_BUG_ON(PageActive(page));
@@ -403,10 +407,10 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
 		file = is_file_lru(lru);
-		zone->recent_scanned[file]++;
+		reclaim_stat->recent_scanned[file]++;
 		if (is_active_lru(lru)) {
 			SetPageActive(page);
-			zone->recent_rotated[file]++;
+			reclaim_stat->recent_rotated[file]++;
 		}
 		add_page_to_lru_list(zone, page, lru);
 	}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index f75d924cb4f4..03ca923c6656 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -130,6 +130,12 @@ static DECLARE_RWSEM(shrinker_rwsem);
 #define scan_global_lru(sc) (1)
 #endif
 
+static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
+						  struct scan_control *sc)
+{
+	return &zone->reclaim_stat;
+}
+
 /*
  * Add a shrinker callback to be called from the vm
  */
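get_reclaim_stat() takes a struct scan_control argument it does not use yet; presumably the follow-up patch mentioned in the changelog branches on scan_global_lru(sc) so that memcg reclaim can return per-cgroup statistics instead of the zone-wide ones. A hedged sketch of that direction, where mem_cgroup_get_reclaim_stat() and sc->mem_cgroup are assumed names, not part of this patch:

```c
/* Hypothetical follow-up shape -- an assumption, not part of this commit. */
static struct zone_reclaim_stat *get_reclaim_stat(struct zone *zone,
						  struct scan_control *sc)
{
	if (scan_global_lru(sc))
		return &zone->reclaim_stat;	/* global reclaim: zone-wide stats */

	/* memcg reclaim: assumed helper returning per-cgroup stats */
	return mem_cgroup_get_reclaim_stat(sc->mem_cgroup, zone);
}
```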
@@ -1029,6 +1035,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 	struct pagevec pvec;
 	unsigned long nr_scanned = 0;
 	unsigned long nr_reclaimed = 0;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	pagevec_init(&pvec, 1);
 
@@ -1072,10 +1079,14 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 
 		if (scan_global_lru(sc)) {
 			zone->pages_scanned += nr_scan;
-			zone->recent_scanned[0] += count[LRU_INACTIVE_ANON];
-			zone->recent_scanned[0] += count[LRU_ACTIVE_ANON];
-			zone->recent_scanned[1] += count[LRU_INACTIVE_FILE];
-			zone->recent_scanned[1] += count[LRU_ACTIVE_FILE];
+			reclaim_stat->recent_scanned[0] +=
+							count[LRU_INACTIVE_ANON];
+			reclaim_stat->recent_scanned[0] +=
+							count[LRU_ACTIVE_ANON];
+			reclaim_stat->recent_scanned[1] +=
+							count[LRU_INACTIVE_FILE];
+			reclaim_stat->recent_scanned[1] +=
+							count[LRU_ACTIVE_FILE];
 		}
 		spin_unlock_irq(&zone->lru_lock);
 
@@ -1136,7 +1147,7 @@ static unsigned long shrink_inactive_list(unsigned long max_scan,
 		add_page_to_lru_list(zone, page, lru);
 		if (PageActive(page) && scan_global_lru(sc)) {
 			int file = !!page_is_file_cache(page);
-			zone->recent_rotated[file]++;
+			reclaim_stat->recent_rotated[file]++;
 		}
 		if (!pagevec_add(&pvec, page)) {
 			spin_unlock_irq(&zone->lru_lock);
@@ -1196,6 +1207,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	struct page *page;
 	struct pagevec pvec;
 	enum lru_list lru;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	lru_add_drain();
 	spin_lock_irq(&zone->lru_lock);
@@ -1208,7 +1220,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 */
 	if (scan_global_lru(sc)) {
 		zone->pages_scanned += pgscanned;
-		zone->recent_scanned[!!file] += pgmoved;
+		reclaim_stat->recent_scanned[!!file] += pgmoved;
 	}
 
 	if (file)
@@ -1251,7 +1263,7 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
 	 * pages in get_scan_ratio.
 	 */
 	if (scan_global_lru(sc))
-		zone->recent_rotated[!!file] += pgmoved;
+		reclaim_stat->recent_rotated[!!file] += pgmoved;
 
 	while (!list_empty(&l_inactive)) {
 		page = lru_to_page(&l_inactive);
@@ -1344,6 +1356,7 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	unsigned long anon, file, free;
 	unsigned long anon_prio, file_prio;
 	unsigned long ap, fp;
+	struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
 
 	/* If we have no swap space, do not bother scanning anon pages. */
 	if (nr_swap_pages <= 0) {
@@ -1376,17 +1389,17 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	 *
 	 * anon in [0], file in [1]
 	 */
-	if (unlikely(zone->recent_scanned[0] > anon / 4)) {
+	if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
 		spin_lock_irq(&zone->lru_lock);
-		zone->recent_scanned[0] /= 2;
-		zone->recent_rotated[0] /= 2;
+		reclaim_stat->recent_scanned[0] /= 2;
+		reclaim_stat->recent_rotated[0] /= 2;
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
-	if (unlikely(zone->recent_scanned[1] > file / 4)) {
+	if (unlikely(reclaim_stat->recent_scanned[1] > file / 4)) {
 		spin_lock_irq(&zone->lru_lock);
-		zone->recent_scanned[1] /= 2;
-		zone->recent_rotated[1] /= 2;
+		reclaim_stat->recent_scanned[1] /= 2;
+		reclaim_stat->recent_rotated[1] /= 2;
 		spin_unlock_irq(&zone->lru_lock);
 	}
 
@@ -1402,11 +1415,11 @@ static void get_scan_ratio(struct zone *zone, struct scan_control *sc,
 	 * proportional to the fraction of recently scanned pages on
 	 * each list that were recently referenced and in active use.
 	 */
-	ap = (anon_prio + 1) * (zone->recent_scanned[0] + 1);
-	ap /= zone->recent_rotated[0] + 1;
+	ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1);
+	ap /= reclaim_stat->recent_rotated[0] + 1;
 
-	fp = (file_prio + 1) * (zone->recent_scanned[1] + 1);
-	fp /= zone->recent_rotated[1] + 1;
+	fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1);
+	fp /= reclaim_stat->recent_rotated[1] + 1;
 
 	/* Normalize to percentages */
 	percent[0] = 100 * ap / (ap + fp + 1);
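To make the rebalancing concrete, here is a standalone worked example of the ap/fp computation with made-up numbers. anon_prio and file_prio are derived from swappiness earlier in get_scan_ratio() (outside this hunk), and the complement rule for percent[1] is likewise assumed from context:

```c
#include <stdio.h>

/* Standalone demo of the percent[] math above, with illustrative inputs. */
int main(void)
{
	unsigned long anon_prio = 60, file_prio = 140;	  /* assumed swappiness=60 */
	unsigned long recent_scanned[2] = { 1000, 1000 }; /* [0]=anon, [1]=file */
	unsigned long recent_rotated[2] = { 500, 100 };	  /* anon rotates 5x more */
	unsigned long ap, fp, percent[2];

	ap = (anon_prio + 1) * (recent_scanned[0] + 1);
	ap /= recent_rotated[0] + 1;		/* 61061 / 501  = 121 */

	fp = (file_prio + 1) * (recent_scanned[1] + 1);
	fp /= recent_rotated[1] + 1;		/* 141141 / 101 = 1397 */

	percent[0] = 100 * ap / (ap + fp + 1);	/* 12100 / 1519 = 7 */
	percent[1] = 100 - percent[0];		/* 93 (assumed complement) */

	printf("scan anon %lu%%, file %lu%%\n", percent[0], percent[1]);
	return 0;
}
```

Half of the anon scans rotated back to the active list versus a tenth of the file scans, so the anon working set looks hot and reclaim pressure shifts almost entirely to the file LRU.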