author    | Hugh Dickins <hughd@google.com>                | 2012-05-29 18:07:09 -0400
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-05-29 19:22:28 -0400
commit    | fa9add641b1b1c564db916accac1db346e7a2759 (patch)
tree      | 875e74ec4d7fed0018fdbc134ad899949c5e3384 /include/linux/mm_inline.h
parent    | 75b00af77ed5b5a3d55549f9e0c33f3969b9330c (diff)
mm/memcg: apply add/del_page to lruvec
Take lruvec further: pass it instead of zone to add_page_to_lru_list() and
del_page_from_lru_list(), and have pagevec_lru_move_fn() pass lruvec down to
its target functions.
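To illustrate the new calling convention, a target function handed to
pagevec_lru_move_fn() now receives the lruvec directly and passes it straight
to the reworked helpers. This is only a sketch: example_move_fn is an invented
name, while add_page_to_lru_list(), page_lru() and SetPageLRU() are the real
helpers used here.

#include <linux/mm_inline.h>	/* add_page_to_lru_list(), page_lru() */
#include <linux/page-flags.h>	/* SetPageLRU() */

/* Hypothetical pagevec_lru_move_fn() target: lruvec arrives from the caller. */
static void example_move_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	enum lru_list lru = page_lru(page);

	SetPageLRU(page);
	/* new signature: lruvec instead of zone */
	add_page_to_lru_list(page, lruvec, lru);
}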
This cleanup eliminates a swathe of cruft in memcontrol.c, including
mem_cgroup_lru_add_list(), mem_cgroup_lru_del_list() and
mem_cgroup_lru_move_lists() - which never actually touched the lists.
In their place are mem_cgroup_page_lruvec(), which decides the lruvec
(previously a side-effect of add), and mem_cgroup_update_lru_size(), which
maintains the lru_size stats.
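As a sketch of the resulting caller pattern (example_del_from_lru is an
invented wrapper; mem_cgroup_page_lruvec(), del_page_from_lru_list() and
zone->lru_lock are as they stand after this patch):

#include <linux/mm_inline.h>	/* del_page_from_lru_list(), page_lru() */
#include <linux/memcontrol.h>	/* mem_cgroup_page_lruvec() */

static void example_del_from_lru(struct page *page, struct zone *zone)
{
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	/* the lruvec is looked up explicitly, no longer a side-effect of add */
	lruvec = mem_cgroup_page_lruvec(page, zone);
	del_page_from_lru_list(page, lruvec, page_lru(page));
	spin_unlock_irq(&zone->lru_lock);
}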
Whilst these are simplifications in their own right, the goal is to bring
the evaluation of lruvec next to the spin_locking of the lrus, in
preparation for a future patch.
Signed-off-by: Hugh Dickins <hughd@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/mm_inline.h')
-rw-r--r-- | include/linux/mm_inline.h | 20
1 file changed, 10 insertions, 10 deletions
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 16d45d9c31a4..1397ccf81e91 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
 	return !PageSwapBacked(page);
 }
 
-static __always_inline void
-add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void add_page_to_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	struct lruvec *lruvec;
-
-	lruvec = mem_cgroup_lru_add_list(zone, page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
 	list_add(&page->lru, &lruvec->lists[lru]);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
 }
 
-static __always_inline void
-del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru)
+static __always_inline void del_page_from_lru_list(struct page *page,
+				struct lruvec *lruvec, enum lru_list lru)
 {
-	mem_cgroup_lru_del_list(page, lru);
+	int nr_pages = hpage_nr_pages(page);
+	mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
 	list_del(&page->lru);
-	__mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page));
+	__mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
 }
 
 /**