author     Johannes Weiner <jweiner@redhat.com>            2012-01-12 20:18:10 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-01-12 23:13:05 -0500
commit     6290df545814990ca2663baf6e894669132d5f73 (patch)
tree       c62472270ba81a7146bed0854be74e2e2338c629 /mm/memcontrol.c
parent     b95a2f2d486d0d768a92879c023a03757b9c7e58 (diff)
mm: collect LRU list heads into struct lruvec
Having a unified structure with an LRU list set for both global zones and
per-memcg zones allows keeping that code simple which deals with LRU lists
and does not care about the container itself.

Once the per-memcg LRU lists directly link struct pages, the isolation
function and all other list manipulations are shared between the memcg case
and the global LRU case.

Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Michal Hocko <mhocko@suse.cz>
Reviewed-by: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: Ying Han <yinghan@google.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Michel Lespinasse <walken@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
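For reference, a minimal sketch of the shared structure this change builds on
(struct lruvec is introduced by this series in include/linux/mmzone.h; the
fields and the trailing comment in mem_cgroup_per_zone are simplified here for
illustration and may not match the tree exactly):

    /* One set of LRU list heads, embeddable in both the global
     * struct zone and the per-memcg per-zone structure. */
    struct lruvec {
            struct list_head lists[NR_LRU_LISTS];
    };

    /* The per-memcg per-zone structure now embeds the same lruvec
     * that struct zone uses, instead of its own lists[] array. */
    struct mem_cgroup_per_zone {
            struct lruvec           lruvec;
            unsigned long           count[NR_LRU_LISTS];
            /* ... reclaim iterators, soft-limit tree linkage, ... */
    };

Because both containers expose the same struct lruvec, code that only walks or
splices LRU lists can operate on a lruvec pointer without knowing whether it
came from a memcg or from the global zone.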
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 17 +++++++----------
1 file changed, 7 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ad7f36f676ff..6e7f849a1a9e 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -134,10 +134,7 @@ struct mem_cgroup_reclaim_iter {
  * per-zone information in memory controller.
  */
 struct mem_cgroup_per_zone {
-	/*
-	 * spin_lock to protect the per cgroup LRU
-	 */
-	struct list_head	lists[NR_LRU_LISTS];
+	struct lruvec		lruvec;
 	unsigned long		count[NR_LRU_LISTS];
 
 	struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
@@ -1061,7 +1058,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move_tail(&pc->lru, &mz->lists[lru]);
+	list_move_tail(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
@@ -1079,7 +1076,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
 	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
-	list_move(&pc->lru, &mz->lists[lru]);
+	list_move(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
@@ -1109,7 +1106,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
-	list_add(&pc->lru, &mz->lists[lru]);
+	list_add(&pc->lru, &mz->lruvec.lists[lru]);
 }
 
 /*
@@ -1307,7 +1304,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
 
 	BUG_ON(!mem_cont);
 	mz = mem_cgroup_zoneinfo(mem_cont, nid, zid);
-	src = &mz->lists[lru];
+	src = &mz->lruvec.lists[lru];
 
 	scan = 0;
 	list_for_each_entry_safe_reverse(pc, tmp, src, lru) {
@@ -3738,7 +3735,7 @@ static int mem_cgroup_force_empty_list(struct mem_cgroup *memcg,
 
 	zone = &NODE_DATA(node)->node_zones[zid];
 	mz = mem_cgroup_zoneinfo(memcg, node, zid);
-	list = &mz->lists[lru];
+	list = &mz->lruvec.lists[lru];
 
 	loop = MEM_CGROUP_ZSTAT(mz, lru);
 	/* give some margin against EBUSY etc...*/
@@ -4864,7 +4861,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
 	for (zone = 0; zone < MAX_NR_ZONES; zone++) {
 		mz = &pn->zoneinfo[zone];
 		for_each_lru(l)
-			INIT_LIST_HEAD(&mz->lists[l]);
+			INIT_LIST_HEAD(&mz->lruvec.lists[l]);
 		mz->usage_in_excess = 0;
 		mz->on_tree = false;
 		mz->mem = memcg;