| author | KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> | 2009-01-07 21:08:15 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2009-01-08 11:31:07 -0500 |
| commit | 6e9015716ae9b59e9635d692fddfcfb9582c146c (patch) | |
| tree | e1876d3822c46a20e1c35b41580f5ef6b2f6e053 /mm/swap.c | |
| parent | f89eb90e33fd4e4e0cc1a6d20afd63c5a561885a (diff) | |
mm: introduce zone_reclaim struct
Add the zone_reclaim_stat struct for a later enhancement.
A later patch uses this. This patch doesn't make any behavior change (yet).
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/swap.c')
-rw-r--r-- | mm/swap.c | 12 |
1 files changed, 8 insertions, 4 deletions
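
The diffstat above is limited to 'mm/swap.c'; the zone_reclaim_stat structure itself is added elsewhere in the same patch (presumably in include/linux/mmzone.h, where struct zone is defined, since the hunks below obtain it from &zone->reclaim_stat). As a minimal sketch of the shape the diff relies on, with comments paraphrased rather than quoted from the patch:

```c
/*
 * Minimal sketch of the per-zone reclaim statistics this commit introduces.
 * The two counter pairs are exactly the fields the mm/swap.c hunks below
 * switch over to; their placement as a reclaim_stat member of struct zone
 * is inferred from the &zone->reclaim_stat expressions in the diff.
 * Index 0 holds the anon LRU stats, index 1 the file LRU stats, matching
 * the [!!file] / [file] indexing in the hunks.
 */
struct zone_reclaim_stat {
        unsigned long recent_rotated[2];  /* pages recently rotated back (re-referenced) */
        unsigned long recent_scanned[2];  /* pages recently scanned off the LRU */
};
```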
```diff
@@ -157,6 +157,7 @@ void rotate_reclaimable_page(struct page *page)
 void activate_page(struct page *page)
 {
         struct zone *zone = page_zone(page);
+        struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
 
         spin_lock_irq(&zone->lru_lock);
         if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -169,8 +170,8 @@ void activate_page(struct page *page)
                 add_page_to_lru_list(zone, page, lru);
                 __count_vm_event(PGACTIVATE);
 
-                zone->recent_rotated[!!file]++;
-                zone->recent_scanned[!!file]++;
+                reclaim_stat->recent_rotated[!!file]++;
+                reclaim_stat->recent_scanned[!!file]++;
         }
         spin_unlock_irq(&zone->lru_lock);
 }
@@ -385,6 +386,8 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
         int i;
         struct zone *zone = NULL;
+        struct zone_reclaim_stat *reclaim_stat = NULL;
+
         VM_BUG_ON(is_unevictable_lru(lru));
 
         for (i = 0; i < pagevec_count(pvec); i++) {
@@ -396,6 +399,7 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
                         if (zone)
                                 spin_unlock_irq(&zone->lru_lock);
                         zone = pagezone;
+                        reclaim_stat = &zone->reclaim_stat;
                         spin_lock_irq(&zone->lru_lock);
                 }
                 VM_BUG_ON(PageActive(page));
@@ -403,10 +407,10 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
                 VM_BUG_ON(PageLRU(page));
                 SetPageLRU(page);
                 file = is_file_lru(lru);
-                zone->recent_scanned[file]++;
+                reclaim_stat->recent_scanned[file]++;
                 if (is_active_lru(lru)) {
                         SetPageActive(page);
-                        zone->recent_rotated[file]++;
+                        reclaim_stat->recent_rotated[file]++;
                 }
                 add_page_to_lru_list(zone, page, lru);
         }
```
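
Nothing in this patch changes how the counters are consumed; the commit message only points at a later enhancement. As a hypothetical illustration of why the two arrays are grouped into one struct, a reclaim heuristic would typically compare them per LRU type as a rotated/scanned ratio (the helper name and scaling below are invented for illustration, not taken from the kernel):

```c
/*
 * Hypothetical helper, not part of this patch: shows how the counters
 * grouped in zone_reclaim_stat lend themselves to a per-LRU-type ratio.
 * A high ratio means recently scanned pages of this type were mostly
 * re-referenced, i.e. that LRU is valuable and should be reclaimed less.
 */
static unsigned long rotated_percent(struct zone_reclaim_stat *rs, int file)
{
        /* Guard against dividing by zero before anything has been scanned. */
        if (!rs->recent_scanned[file])
                return 0;

        return rs->recent_rotated[file] * 100 / rs->recent_scanned[file];
}
```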