author     KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>   2009-01-07 21:08:20 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>     2009-01-08 11:31:08 -0500
commit     3e2f41f1f64744f7942980d93cc93dd3e5924560
tree       7b605c407b7470877fd9c5c853407f75edcbeb49  /mm/swap.c
parent     a3d8e0549d913e30968fa02e505dfe02c0a23e0d
memcg: add zone_reclaim_stat
Introduce the mem_cgroup_per_zone::reclaim_stat member and its
statistics-collecting function.

Now get_scan_ratio() can calculate the correct value during memcg reclaim
(see the sketch below the tags).
[hugh@veritas.com: avoid reclaim_stat oops when disabled]
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Balbir Singh <balbir@in.ibm.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Hugh Dickins <hugh@veritas.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
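
For context: zone_reclaim_stat keeps two counters per LRU type (index 0 for the
anon LRUs, index 1 for the file LRUs), and get_scan_ratio() weighs anon against
file scanning by how often each list's pages are rotated (re-referenced)
relative to how many were scanned. The sketch below only illustrates that idea:
the structure is paraphrased from the per-zone version this patch mirrors into
each memcg, and scan_pressure() is a hypothetical helper, not the actual
get_scan_ratio() code in mm/vmscan.c.

/*
 * Paraphrased sketch of the statistics this patch duplicates per memcg.
 * Index 0 tracks the anon LRUs, index 1 the file LRUs, matching the
 * 'file' argument of update_page_reclaim_stat() in the diff below.
 */
struct zone_reclaim_stat {
	unsigned long recent_rotated[2];	/* pages re-referenced and kept */
	unsigned long recent_scanned[2];	/* pages scanned by reclaim */
};

/*
 * Illustrative only (hypothetical helper): reclaim leans harder on the
 * LRU whose pages were re-referenced least often while being scanned.
 */
static unsigned long scan_pressure(const struct zone_reclaim_stat *stat, int file)
{
	/* the +1 terms avoid division by zero on fresh statistics */
	return (stat->recent_scanned[file] + 1) * 100 /
	       (stat->recent_rotated[file] + 1);
}

Before this patch, reclaim driven by a memory cgroup limit had only the
zone-wide counters to consult, which reflect global activity rather than the
cgroup's own; duplicating the counters per memcg is what lets get_scan_ratio()
produce a correct balance during memcg reclaim.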
Diffstat (limited to 'mm/swap.c')
-rw-r--r--  mm/swap.c  34
1 file changed, 25 insertions(+), 9 deletions(-)
@@ -151,13 +151,32 @@ void rotate_reclaimable_page(struct page *page)
 	}
 }
 
+static void update_page_reclaim_stat(struct zone *zone, struct page *page,
+				     int file, int rotated)
+{
+	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
+	struct zone_reclaim_stat *memcg_reclaim_stat;
+
+	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
+
+	reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		reclaim_stat->recent_rotated[file]++;
+
+	if (!memcg_reclaim_stat)
+		return;
+
+	memcg_reclaim_stat->recent_scanned[file]++;
+	if (rotated)
+		memcg_reclaim_stat->recent_rotated[file]++;
+}
+
 /*
  * FIXME: speed this up?
  */
 void activate_page(struct page *page)
 {
 	struct zone *zone = page_zone(page);
-	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
 
 	spin_lock_irq(&zone->lru_lock);
 	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
@@ -170,8 +189,7 @@ void activate_page(struct page *page)
 		add_page_to_lru_list(zone, page, lru);
 		__count_vm_event(PGACTIVATE);
 
-		reclaim_stat->recent_rotated[!!file]++;
-		reclaim_stat->recent_scanned[!!file]++;
+		update_page_reclaim_stat(zone, page, !!file, 1);
 	}
 	spin_unlock_irq(&zone->lru_lock);
 }
@@ -386,7 +404,6 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 {
 	int i;
 	struct zone *zone = NULL;
-	struct zone_reclaim_stat *reclaim_stat = NULL;
 
 	VM_BUG_ON(is_unevictable_lru(lru));
 
@@ -394,24 +411,23 @@ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
 		struct page *page = pvec->pages[i];
 		struct zone *pagezone = page_zone(page);
 		int file;
+		int active;
 
 		if (pagezone != zone) {
 			if (zone)
 				spin_unlock_irq(&zone->lru_lock);
 			zone = pagezone;
-			reclaim_stat = &zone->reclaim_stat;
 			spin_lock_irq(&zone->lru_lock);
 		}
 		VM_BUG_ON(PageActive(page));
 		VM_BUG_ON(PageUnevictable(page));
 		VM_BUG_ON(PageLRU(page));
 		SetPageLRU(page);
+		active = is_active_lru(lru);
 		file = is_file_lru(lru);
-		reclaim_stat->recent_scanned[file]++;
-		if (is_active_lru(lru)) {
+		if (active)
 			SetPageActive(page);
-			reclaim_stat->recent_rotated[file]++;
-		}
+		update_page_reclaim_stat(zone, page, file, active);
 		add_page_to_lru_list(zone, page, lru);
 	}
 	if (zone)
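
mem_cgroup_get_reclaim_stat_from_page(), called from the new
update_page_reclaim_stat(), is defined on the memcontrol side of this patch and
is not visible in this view, which is limited to mm/swap.c. Judging only from
how mm/swap.c uses it, its shape is roughly the following; the internal helper
names and checks here are assumptions for illustration, not the real
mm/memcontrol.c code.

/*
 * Hedged sketch, not the actual implementation: return NULL when memcg is
 * disabled or the page is not charged to a cgroup, otherwise a pointer to
 * the reclaim_stat embedded in the page's per-(memcg, zone) data.
 */
struct zone_reclaim_stat *
mem_cgroup_get_reclaim_stat_from_page(struct page *page)
{
	struct page_cgroup *pc;		/* assumed per-page cgroup descriptor */
	struct mem_cgroup_per_zone *mz;	/* assumed holder of the new reclaim_stat */

	if (mem_cgroup_disabled())
		return NULL;		/* caller then updates only zone stats */

	pc = lookup_page_cgroup(page);	/* assumed helper */
	if (!pc || !PageCgroupUsed(pc))
		return NULL;		/* page not charged to any cgroup */

	mz = page_cgroup_zoneinfo(pc);	/* assumed helper */
	if (!mz)
		return NULL;

	return &mz->reclaim_stat;	/* member introduced by this patch */
}

A NULL return here is consistent with the "avoid reclaim_stat oops when
disabled" note in the changelog: update_page_reclaim_stat() always bumps the
zone-wide counters and touches the memcg counters only when a non-NULL pointer
comes back.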