path: root/mm
author    Greg Thelen <gthelen@google.com>    2011-01-13 18:47:37 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-01-13 20:32:50 -0500
commit    2a7106f2cb0768d00fe8c1eb42a754a7d8518f08 (patch)
tree      730bef06e752c1edcb2d475fd193f94bea00bf6a /mm
parent    ece72400c2a27a3d726cb0854449f991d9fcd2da (diff)
memcg: create extensible page stat update routines
Replace usage of the mem_cgroup_update_file_mapped() memcg statistic update
routine with two new routines:
* mem_cgroup_inc_page_stat()
* mem_cgroup_dec_page_stat()

As before, only the file_mapped statistic is managed.  However, these more
general interfaces allow for new statistics to be more easily added.  New
statistics are added with memcg dirty page accounting.

Signed-off-by: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrea Righi <arighi@develer.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp>
Cc: Balbir Singh <balbir@linux.vnet.ibm.com>
Cc: Minchan Kim <minchan.kim@gmail.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
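Note: the diff below is limited to 'mm', so the header side of this change is not shown. Judging from the callers converted in mm/rmap.c, the new mem_cgroup_inc_page_stat()/mem_cgroup_dec_page_stat() routines are presumably thin static inline wrappers (in include/linux/memcontrol.h) around the now-exported mem_cgroup_update_page_stat(), along these lines (a sketch, not the exact header hunk):

static inline void mem_cgroup_inc_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	/* bump the per-memcg counter selected by idx for this page */
	mem_cgroup_update_page_stat(page, idx, 1);
}

static inline void mem_cgroup_dec_page_stat(struct page *page,
					    enum mem_cgroup_page_stat_item idx)
{
	/* drop the per-memcg counter selected by idx for this page */
	mem_cgroup_update_page_stat(page, idx, -1);
}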
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  16
-rw-r--r--  mm/rmap.c         4
2 files changed, 9 insertions, 11 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 741206ffdace..3d8a0c79dece 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1600,7 +1600,8 @@ bool mem_cgroup_handle_oom(struct mem_cgroup *mem, gfp_t mask)
  * possibility of race condition. If there is, we take a lock.
  */
 
-static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
+void mem_cgroup_update_page_stat(struct page *page,
+				 enum mem_cgroup_page_stat_item idx, int val)
 {
 	struct mem_cgroup *mem;
 	struct page_cgroup *pc = lookup_page_cgroup(page);
@@ -1623,30 +1624,27 @@ static void mem_cgroup_update_file_stat(struct page *page, int idx, int val)
 		goto out;
 	}
 
-	this_cpu_add(mem->stat->count[idx], val);
-
 	switch (idx) {
-	case MEM_CGROUP_STAT_FILE_MAPPED:
+	case MEMCG_NR_FILE_MAPPED:
 		if (val > 0)
 			SetPageCgroupFileMapped(pc);
 		else if (!page_mapped(page))
 			ClearPageCgroupFileMapped(pc);
+		idx = MEM_CGROUP_STAT_FILE_MAPPED;
 		break;
 	default:
 		BUG();
 	}
 
+	this_cpu_add(mem->stat->count[idx], val);
+
 out:
 	if (unlikely(need_unlock))
 		unlock_page_cgroup(pc);
 	rcu_read_unlock();
 	return;
 }
-
-void mem_cgroup_update_file_mapped(struct page *page, int val)
-{
-	mem_cgroup_update_file_stat(page, MEM_CGROUP_STAT_FILE_MAPPED, val);
-}
+EXPORT_SYMBOL(mem_cgroup_update_page_stat);
 
 /*
  * size of first charge trial. "32" comes from vmscan.c's magic value.
diff --git a/mm/rmap.c b/mm/rmap.c
index c30f33854f97..f21f4a1d6a1c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -937,7 +937,7 @@ void page_add_file_rmap(struct page *page)
 {
 	if (atomic_inc_and_test(&page->_mapcount)) {
 		__inc_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, 1);
+		mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 }
 
@@ -979,7 +979,7 @@ void page_remove_rmap(struct page *page)
 					      NR_ANON_TRANSPARENT_HUGEPAGES);
 	} else {
 		__dec_zone_page_state(page, NR_FILE_MAPPED);
-		mem_cgroup_update_file_mapped(page, -1);
+		mem_cgroup_dec_page_stat(page, MEMCG_NR_FILE_MAPPED);
 	}
 	/*
 	 * It would be tidy to reset the PageAnon mapping here,
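The commit message says the more general interface is meant to make new statistics (such as the memcg dirty page accounting that follows in this series) easier to add. As an illustration only, with hypothetical names (MEMCG_NR_FILE_DIRTY and MEM_CGROUP_STAT_FILE_DIRTY are not defined by this patch), a new statistic would need just one more case in the mem_cgroup_update_page_stat() switch, with callers using the inc/dec helpers:

	switch (idx) {
	case MEMCG_NR_FILE_MAPPED:
		if (val > 0)
			SetPageCgroupFileMapped(pc);
		else if (!page_mapped(page))
			ClearPageCgroupFileMapped(pc);
		idx = MEM_CGROUP_STAT_FILE_MAPPED;
		break;
	case MEMCG_NR_FILE_DIRTY:			/* hypothetical new item */
		/* any per-page flag bookkeeping for the new statistic goes
		 * here; then map the public item to the internal counter
		 * index consumed by the common this_cpu_add() below */
		idx = MEM_CGROUP_STAT_FILE_DIRTY;	/* hypothetical counter */
		break;
	default:
		BUG();
	}

	this_cpu_add(mem->stat->count[idx], val);

A dirty-page caller would then invoke mem_cgroup_inc_page_stat(page, MEMCG_NR_FILE_DIRTY) alongside the existing zone and bdi accounting, mirroring what mm/rmap.c does above for MEMCG_NR_FILE_MAPPED.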