commit fa9add641b1b1c564db916accac1db346e7a2759 (patch)
Author:    Hugh Dickins <hughd@google.com>            2012-05-29 18:07:09 -0400
Committer: Linus Torvalds <torvalds@linux-foundation.org>  2012-05-29 19:22:28 -0400
Tree:      875e74ec4d7fed0018fdbc134ad899949c5e3384 /mm/compaction.c
Parent:    75b00af77ed5b5a3d55549f9e0c33f3969b9330c (diff)
mm/memcg: apply add/del_page to lruvec
Take lruvec further: pass it instead of zone to add_page_to_lru_list() and
del_page_from_lru_list(); and have pagevec_lru_move_fn() pass lruvec down to
its target functions.
This cleanup eliminates a swathe of cruft in memcontrol.c, including
mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and
mem_cgroup_lru_move_lists() - which never actually touched the lists.
In their place, mem_cgroup_page_lruvec() to decide the lruvec, previously
a side-effect of add, and mem_cgroup_update_lru_size() to maintain the
lru_size stats.
Whilst these are simplifications in their own right, the goal is to bring
the evaluation of lruvec next to the spin_locking of the lrus, in
preparation for a future patch.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
 mm/compaction.c | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 74e1b3803839..4ac338af5120 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -227,6 +227,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	unsigned long nr_scanned = 0, nr_isolated = 0;
 	struct list_head *migratelist = &cc->migratepages;
 	isolate_mode_t mode = 0;
+	struct lruvec *lruvec;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -328,6 +329,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (cc->mode != COMPACT_SYNC)
 			mode |= ISOLATE_ASYNC_MIGRATE;
 
+		lruvec = mem_cgroup_page_lruvec(page, zone);
+
 		/* Try isolate the page */
 		if (__isolate_lru_page(page, mode) != 0)
 			continue;
@@ -335,7 +338,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		VM_BUG_ON(PageTransCompound(page));
 
 		/* Successfully isolated */
-		del_page_from_lru_list(zone, page, page_lru(page));
+		del_page_from_lru_list(page, lruvec, page_lru(page));
 		list_add(&page->lru, migratelist);
 		cc->nr_migratepages++;
 		nr_isolated++;