author    Hugh Dickins <hugh@veritas.com>    2008-03-04 17:29:08 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2008-03-04 19:35:15 -0500
commit    8289546e573d5ff681cdf0fc7a1184cca66fdb55 (patch)
tree      9cf874b55eb9a6c97233d137278c7b7c89a5f4a1 /mm/memcontrol.c
parent    7e924aafa4b03ff71de34af8553d9a1ebc86c071 (diff)
memcg: remove mem_cgroup_uncharge
Nothing uses mem_cgroup_uncharge apart from mem_cgroup_uncharge_page (a
trivial wrapper around it) and mem_cgroup_end_migration (which does the
same as mem_cgroup_uncharge_page).  And it often ends up having to lock
just to let its caller unlock.  Remove it (but leave the silly locking
until a later patch).

Moved mem_cgroup_cache_charge next to mem_cgroup_charge in memcontrol.h.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Cc: David Rientjes <rientjes@google.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Hirokazu Takahashi <taka@valinux.co.jp>
Cc: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Cc: Paul Menage <menage@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
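As a quick illustration of the calling convention this patch leaves behind, here is a minimal userspace sketch (not kernel code; every symbol below is an illustrative stand-in for the memcontrol.c ones, with a pthread mutex in place of lock_page_cgroup): the lock and the page_cgroup lookup now live inside the uncharge routine itself, so a caller such as end_migration collapses to a single call.

#include <stdio.h>
#include <pthread.h>

struct page_cgroup { int ref_cnt; };

struct page {
	pthread_mutex_t cgroup_lock;   /* stand-in for lock_page_cgroup()/unlock_page_cgroup() */
	struct page_cgroup *pc;        /* stand-in for page_get_page_cgroup() */
};

/* Lock, look up and drop the charge in one place, as the patched
 * mem_cgroup_uncharge_page() does; callers no longer touch the lock. */
static void uncharge_page(struct page *page)
{
	pthread_mutex_lock(&page->cgroup_lock);
	struct page_cgroup *pc = page->pc;
	if (pc && --pc->ref_cnt == 0) {
		page->pc = NULL;
		printf("page %p: last reference dropped, uncharged\n", (void *)page);
	}
	pthread_mutex_unlock(&page->cgroup_lock);  /* single exit, like the new "unlock:" label */
}

/* After the patch, end_migration is just a trivial wrapper around the uncharge path. */
static void end_migration(struct page *page)
{
	uncharge_page(page);
}

int main(void)
{
	struct page_cgroup pc = { .ref_cnt = 1 };
	struct page page = { .cgroup_lock = PTHREAD_MUTEX_INITIALIZER, .pc = &pc };

	end_migration(&page);
	return 0;
}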
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--  mm/memcontrol.c | 23 ++++++++---------------
1 file changed, 8 insertions(+), 15 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 83ba13ad31e1..1333d25163bb 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -697,20 +697,22 @@ int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 
 /*
  * Uncharging is always a welcome operation, we never complain, simply
- * uncharge. This routine should be called with lock_page_cgroup held
+ * uncharge.
  */
-void mem_cgroup_uncharge(struct page_cgroup *pc)
+void mem_cgroup_uncharge_page(struct page *page)
 {
+	struct page_cgroup *pc;
 	struct mem_cgroup *mem;
 	struct mem_cgroup_per_zone *mz;
-	struct page *page;
 	unsigned long flags;
 
 	/*
 	 * Check if our page_cgroup is valid
 	 */
+	lock_page_cgroup(page);
+	pc = page_get_page_cgroup(page);
 	if (!pc)
-		return;
+		goto unlock;
 
 	if (atomic_dec_and_test(&pc->ref_cnt)) {
 		page = pc->page;
@@ -731,12 +733,8 @@ void mem_cgroup_uncharge(struct page_cgroup *pc)
 		}
 		lock_page_cgroup(page);
 	}
-}
 
-void mem_cgroup_uncharge_page(struct page *page)
-{
-	lock_page_cgroup(page);
-	mem_cgroup_uncharge(page_get_page_cgroup(page));
+unlock:
 	unlock_page_cgroup(page);
 }
 
@@ -759,12 +757,7 @@ int mem_cgroup_prepare_migration(struct page *page)
 
 void mem_cgroup_end_migration(struct page *page)
 {
-	struct page_cgroup *pc;
-
-	lock_page_cgroup(page);
-	pc = page_get_page_cgroup(page);
-	mem_cgroup_uncharge(pc);
-	unlock_page_cgroup(page);
+	mem_cgroup_uncharge_page(page);
 }
 /*
  * We know both *page* and *newpage* are now not-on-LRU and Pg_locked.