path: root/include/linux/mm_inline.h
author	Johannes Weiner <jweiner@redhat.com>	2012-01-12 20:18:15 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:13:05 -0500
commit	925b7673cce39116ce61e7a06683a4a0dad1e72a (patch)
tree	66c134db836e531e196ee3dfc23c124ff74ac827 /include/linux/mm_inline.h
parent	6290df545814990ca2663baf6e894669132d5f73 (diff)
mm: make per-memcg LRU lists exclusive
Now that all code that operated on global per-zone LRU lists is
converted to operate on per-memory cgroup LRU lists instead, there is
no reason to keep the double-LRU scheme around any longer.

The pc->lru member is removed and page->lru is linked directly to the
per-memory cgroup LRU lists, which removes two pointers from a
descriptor that exists for every page frame in the system.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Ying Han <yinghan@google.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
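To make the structural effect concrete, a minimal sketch of the
before/after linkage follows. The structs are abridged to the fields
relevant here (everything else is omitted); the pc->lru member is the
one this patch removes, and struct lruvec is the container introduced
by the parent commit (6290df5458):

/*
 * Before: a page on an LRU was linked twice -- through page->lru on
 * the global per-zone list, and through pc->lru on the memcg list.
 */
struct page_cgroup {
	struct list_head lru;	/* per-memcg LRU link; removed here */
	/* ... other members unchanged ... */
};

/*
 * After: page->lru links directly into a single lruvec -- the one
 * owned by the page's memcg, or the zone's own lruvec otherwise --
 * so the extra list_head (two pointers per page frame) disappears.
 */
struct lruvec {
	struct list_head lists[NR_LRU_LISTS];
};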
Diffstat (limited to 'include/linux/mm_inline.h')
-rw-r--r--	include/linux/mm_inline.h | 21 ++++++++-------------
1 file changed, 8 insertions(+), 13 deletions(-)
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index e6a7ffe16d31..4e3478e71926 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -22,26 +22,21 @@ static inline int page_is_file_cache(struct page *page)
 }
 
 static inline void
-__add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l,
-		       struct list_head *head)
-{
-	list_add(&page->lru, head);
-	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
-	mem_cgroup_add_lru_list(page, l);
-}
-
-static inline void
 add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
-	__add_page_to_lru_list(zone, page, l, &zone->lruvec.lists[l]);
+	struct lruvec *lruvec;
+
+	lruvec = mem_cgroup_lru_add_list(zone, page, l);
+	list_add(&page->lru, &lruvec->lists[l]);
+	__mod_zone_page_state(zone, NR_LRU_BASE + l, hpage_nr_pages(page));
 }
 
 static inline void
 del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list l)
 {
+	mem_cgroup_lru_del_list(page, l);
 	list_del(&page->lru);
 	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
 }
 
 /**
@@ -64,7 +59,6 @@ del_page_from_lru(struct zone *zone, struct page *page)
 {
 	enum lru_list l;
 
-	list_del(&page->lru);
 	if (PageUnevictable(page)) {
 		__ClearPageUnevictable(page);
 		l = LRU_UNEVICTABLE;
@@ -75,8 +69,9 @@ del_page_from_lru(struct zone *zone, struct page *page)
 			l += LRU_ACTIVE;
 		}
 	}
+	mem_cgroup_lru_del_list(page, l);
+	list_del(&page->lru);
 	__mod_zone_page_state(zone, NR_LRU_BASE + l, -hpage_nr_pages(page));
-	mem_cgroup_del_lru_list(page, l);
 }
 
 /**
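For context beyond the hunks above, a hedged usage sketch follows.
example_activate() is a hypothetical caller, loosely modeled on the
page-activation path in mm/swap.c of this era and not part of this
patch; it shows that callers still serialize on zone->lru_lock while
the helpers now resolve the list head through the page's own lruvec:

#include <linux/mm.h>
#include <linux/mm_inline.h>

/* Hypothetical caller -- illustrative only, not from this patch. */
static void example_activate(struct zone *zone, struct page *page)
{
	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);

		/* unlinks page->lru from its current per-memcg list */
		del_page_from_lru_list(zone, page, lru);
		SetPageActive(page);
		/* relinks it on the active list of the same lruvec */
		add_page_to_lru_list(zone, page, lru + LRU_ACTIVE);
	}
	spin_unlock_irq(&zone->lru_lock);
}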