author     Johannes Weiner <hannes@cmpxchg.org>            2011-03-23 19:42:27 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2011-03-23 22:46:26 -0400
commit     97a6c37b34f46feed2544bd40891ee6dd0fd1554 (patch)
tree       6135189ba1f589d7a3b10c5e24da9a42ea9036f5 /mm
parent     ad324e94475a04cfcdfdb11ad20f8ea81268e411 (diff)
memcg: change page_cgroup_zoneinfo signature
Instead of passing a whole struct page_cgroup to this function, let it take only what it really needs from it: the struct mem_cgroup and the page.

This has the advantage that reading pc->mem_cgroup is now done at the same place where the ordering rules for this pointer are enforced and explained.

It is also in preparation for removing the pc->page backpointer.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
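For orientation, a condensed sketch of the helper as it reads after this patch, assembled from the first hunk below; the caller comment summarizes the remaining hunks and is illustrative only:

static struct mem_cgroup_per_zone *
page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
{
	/* node and zone now come from the page itself, not from pc */
	int nid = page_to_nid(page);
	int zid = page_zonenum(page);

	return mem_cgroup_zoneinfo(mem, nid, zid);
}

/*
 * Callers now read pc->mem_cgroup themselves, after the smp_rmb() that
 * orders it against PCG_USED, and pass it in together with the page:
 *
 *	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 */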
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  19
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5f7b0e1d789c..2881c9ef969a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -364,11 +364,10 @@ struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *mem)
 }
 
 static struct mem_cgroup_per_zone *
-page_cgroup_zoneinfo(struct page_cgroup *pc)
+page_cgroup_zoneinfo(struct mem_cgroup *mem, struct page *page)
 {
-	struct mem_cgroup *mem = pc->mem_cgroup;
-	int nid = page_cgroup_nid(pc);
-	int zid = page_cgroup_zid(pc);
+	int nid = page_to_nid(page);
+	int zid = page_zonenum(page);
 
 	return mem_cgroup_zoneinfo(mem, nid, zid);
 }
@@ -800,7 +799,7 @@ void mem_cgroup_del_lru_list(struct page *page, enum lru_list lru)
 	 * We don't check PCG_USED bit. It's cleared when the "page" is finally
 	 * removed from global LRU.
 	 */
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) -= 1 << compound_order(page);
 	if (mem_cgroup_is_root(pc->mem_cgroup))
@@ -836,7 +835,7 @@ void mem_cgroup_rotate_reclaimable_page(struct page *page)
 	smp_rmb();
 	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	list_move_tail(&pc->lru, &mz->lists[lru]);
 }
 
@@ -856,7 +855,7 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
 	smp_rmb();
 	if (mem_cgroup_is_root(pc->mem_cgroup))
 		return;
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	list_move(&pc->lru, &mz->lists[lru]);
 }
 
@@ -873,7 +872,7 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
 		return;
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	/* huge page split is done under lru_lock. so, we have no races. */
 	MEM_CGROUP_ZSTAT(mz, lru) += 1 << compound_order(page);
 	SetPageCgroupAcctLRU(pc);
@@ -1043,7 +1042,7 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
 		return NULL;
 	/* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
 	smp_rmb();
-	mz = page_cgroup_zoneinfo(pc);
+	mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
 	if (!mz)
 		return NULL;
 
@@ -2192,7 +2191,7 @@ void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail)
 		 * We hold lru_lock, then, reduce counter directly.
 		 */
 		lru = page_lru(head);
-		mz = page_cgroup_zoneinfo(head_pc);
+		mz = page_cgroup_zoneinfo(head_pc->mem_cgroup, head);
 		MEM_CGROUP_ZSTAT(mz, lru) -= 1;
 	}
 	tail_pc->flags = head_pc->flags & ~PCGF_NOCOPY_AT_SPLIT;