author     KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>    2008-02-07 03:14:11 -0500
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-07 11:42:20 -0500
commit     ff7283fa3a66823933991ad55a558a3a01d5ab27 (patch)
tree       e183c588497ce9f18fea1589fa66b6a000e2cbb8 /mm
parent     ae41be374293e70e1ed441d986afcc6e744ef9d9 (diff)
bugfix for memory cgroup controller: avoid !PageLRU page in mem_cgroup_isolate_pages
This patch makes mem_cgroup_isolate_pages():
- ignore !PageLRU pages, and
- fix the bug that isolation makes no progress once a page with
  page_zone(page) != zone is found (scan is just incremented in that
  case and the walk moves on).
kswapd and memory migration remove a page from the LRU list while
handling it for reclaim/migration.
Because __isolate_lru_page() does not move !PageLRU pages, it is safe
to avoid touching a !PageLRU page and its page_cgroup.
Signed-off-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Pavel Emelianov <xemul@openvz.org>
Cc: Paul Menage <menage@google.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Kirill Korotaev <dev@sw.ru>
Cc: Herbert Poetzl <herbert@13thfloor.at>
Cc: David Rientjes <rientjes@google.com>
Cc: Vaidyanathan Srinivasan <svaidy@linux.vnet.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--   mm/memcontrol.c | 13
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 128f45c16fa6..e8493fb2d69e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -250,7 +250,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
        unsigned long scan;
        LIST_HEAD(pc_list);
        struct list_head *src;
-       struct page_cgroup *pc;
+       struct page_cgroup *pc, *tmp;
 
        if (active)
                src = &mem_cont->active_list;
@@ -258,11 +258,18 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
                src = &mem_cont->inactive_list;
 
        spin_lock(&mem_cont->lru_lock);
-       for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
-               pc = list_entry(src->prev, struct page_cgroup, lru);
+       scan = 0;
+       list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
+               if (scan++ > nr_to_scan)
+                       break;
                page = pc->page;
                VM_BUG_ON(!pc);
 
+               if (unlikely(!PageLRU(page))) {
+                       scan--;
+                       continue;
+               }
+
                if (PageActive(page) && !active) {
                        __mem_cgroup_move_lists(pc, true);
                        scan--;
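
For readers less familiar with the kernel list helpers, the sketch below mirrors, in plain userspace C, the loop structure the patch switches to: a removal-safe reverse walk (analogous to the kernel's list_for_each_entry_safe_reverse()) that skips entries whose page is no longer on the LRU without charging them to the scan budget. The list implementation, the page_on_lru flag and the entry ids are simplified stand-ins for illustration only, not the kernel's actual types or API.

/*
 * Minimal userspace sketch (illustration only) of the loop structure the
 * patch adopts: a removal-safe reverse walk over a doubly linked list that
 * skips entries no longer "on the LRU" without counting them against the
 * scan budget.  Types and names here are simplified stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct list_head { struct list_head *prev, *next; };

#define LIST_HEAD_INIT(name) { &(name), &(name) }
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

/* add 'new' right after 'head' (front of the list) */
static void list_add(struct list_head *new, struct list_head *head)
{
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
}

struct page_cgroup {            /* simplified stand-in */
        struct list_head lru;
        int page_on_lru;        /* stand-in for PageLRU(pc->page) */
        int id;
};

/* reverse walk that stays valid if the current entry is removed/skipped,
 * analogous to list_for_each_entry_safe_reverse() */
#define for_each_pc_safe_reverse(pc, tmp, head)                              \
        for (pc = container_of((head)->prev, struct page_cgroup, lru),       \
             tmp = container_of(pc->lru.prev, struct page_cgroup, lru);      \
             &pc->lru != (head);                                             \
             pc = tmp, tmp = container_of(tmp->lru.prev, struct page_cgroup, lru))

int main(void)
{
        struct list_head src = LIST_HEAD_INIT(src);
        struct page_cgroup pcs[6];
        struct page_cgroup *pc, *tmp;
        unsigned long nr_to_scan = 4, scan = 0;

        for (int i = 0; i < 6; i++) {
                pcs[i].id = i;
                pcs[i].page_on_lru = (i != 2 && i != 4); /* 2 and 4 left the LRU */
                list_add(&pcs[i].lru, &src);             /* reverse walk sees 0..5 */
        }

        for_each_pc_safe_reverse(pc, tmp, &src) {
                if (scan++ > nr_to_scan)
                        break;
                if (!pc->page_on_lru) {  /* !PageLRU(): skip, don't charge scan */
                        scan--;
                        continue;
                }
                printf("isolating entry %d (scan=%lu)\n", pc->id, scan);
        }
        return 0;
}

Built with any C99 compiler, this prints entries 0, 1, 3 and 5: the two off-LRU entries are stepped over without consuming the scan budget, and the walk keeps making forward progress, which is the behaviour the scan-- / continue in the patch preserves.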