author     Li Zefan <lizf@cn.fujitsu.com>                      2008-07-25 04:47:18 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>      2008-07-25 13:53:37 -0400
commit     cede86acd8bd5d2205dec28db8ac86410a3a19e8
tree       67ad2d42cadee0b6f9a4de21e06602faae8d8604
parent     accf163e6ab729f1fc5fffaa0310e498270bf4e7
memcg: clean up checking of the disabled flag
Those checks are unnecessary: when the subsystem is disabled it cannot be
mounted, so those functions will never be called. The check is only needed
in functions that can be called from places other than the cgroup core.

[hugh@veritas.com: further checking of disabled flag]
Signed-off-by: Li Zefan <lizf@cn.fujitsu.com>
Acked-by: Balbir Singh <balbir@linux.vnet.ibm.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
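To make that rule concrete, below is a minimal userspace sketch of the pattern this patch applies, in plain C with the kernel types stubbed out; none of it is the actual kernel code, only the control flow is the point. Entry points reachable from the core VM keep the disabled check and degrade to successful no-ops, while cgroup callbacks can drop it, because a disabled subsystem can never be mounted and so never has its callbacks invoked.

	/*
	 * Sketch only: struct layout, function bodies and printf output
	 * are stand-ins, not the kernel implementation.
	 */
	#include <stdbool.h>
	#include <stdio.h>

	struct cgroup_subsys {
		bool disabled;		/* set at boot, e.g. cgroup_disable=memory */
	};

	static struct cgroup_subsys mem_cgroup_subsys = { .disabled = true };

	/* Reachable from the core VM on fault/page-cache paths: must check. */
	static int mem_cgroup_charge(void *page)
	{
		if (mem_cgroup_subsys.disabled)
			return 0;	/* succeed as a no-op */
		printf("charging page %p\n", page);
		return 0;
	}

	/*
	 * cgroup callback: only invoked after the hierarchy is mounted,
	 * which cannot happen while disabled, so no check is needed here.
	 */
	static int mem_cgroup_populate(void)
	{
		printf("creating control files\n");
		return 0;
	}

	int main(void)
	{
		int page;

		mem_cgroup_charge(&page);  /* silent no-op: subsystem disabled */
		mem_cgroup_populate();     /* in the kernel, unreachable when disabled */
		return 0;
	}

The same split explains the shape of the diff below: the check is added to mem_cgroup_move_lists(), mem_cgroup_charge(), mem_cgroup_cache_charge() and mem_cgroup_shrink_usage(), and removed from mem_cgroup_charge_common(), mem_cgroup_force_empty(), mem_cgroup_populate() and mem_cgroup_move_task().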
-rw-r--r--  mm/memcontrol.c  23
1 file changed, 12 insertions(+), 11 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 5b3759bd5494..0c035647d36a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -354,6 +354,9 @@ void mem_cgroup_move_lists(struct page *page, bool active)
 	struct mem_cgroup_per_zone *mz;
 	unsigned long flags;
 
+	if (mem_cgroup_subsys.disabled)
+		return;
+
 	/*
 	 * We cannot lock_page_cgroup while holding zone's lru_lock,
 	 * because other holders of lock_page_cgroup can be interrupted
@@ -533,9 +536,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	unsigned long nr_retries = MEM_CGROUP_RECLAIM_RETRIES;
 	struct mem_cgroup_per_zone *mz;
 
-	if (mem_cgroup_subsys.disabled)
-		return 0;
-
 	pc = kmem_cache_alloc(page_cgroup_cache, gfp_mask);
 	if (unlikely(pc == NULL))
 		goto err;
@@ -620,6 +620,9 @@ err:
 
 int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 {
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
 	/*
 	 * If already mapped, we don't have to account.
 	 * If page cache, page->mapping has address_space.
@@ -638,6 +641,9 @@ int mem_cgroup_charge(struct page *page, struct mm_struct *mm, gfp_t gfp_mask)
 int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
 				gfp_t gfp_mask)
 {
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
 	/*
 	 * Corner case handling. This is called from add_to_page_cache()
 	 * in usual. But some FS (shmem) precharges this page before calling it
@@ -788,6 +794,9 @@ int mem_cgroup_shrink_usage(struct mm_struct *mm, gfp_t gfp_mask)
 	int progress = 0;
 	int retry = MEM_CGROUP_RECLAIM_RETRIES;
 
+	if (mem_cgroup_subsys.disabled)
+		return 0;
+
 	rcu_read_lock();
 	mem = mem_cgroup_from_task(rcu_dereference(mm->owner));
 	css_get(&mem->css);
@@ -857,9 +866,6 @@ static int mem_cgroup_force_empty(struct mem_cgroup *mem)
 	int ret = -EBUSY;
 	int node, zid;
 
-	if (mem_cgroup_subsys.disabled)
-		return 0;
-
 	css_get(&mem->css);
 	/*
 	 * page reclaim code (kswapd etc..) will move pages between
@@ -1103,8 +1109,6 @@ static void mem_cgroup_destroy(struct cgroup_subsys *ss,
 static int mem_cgroup_populate(struct cgroup_subsys *ss,
 				struct cgroup *cont)
 {
-	if (mem_cgroup_subsys.disabled)
-		return 0;
 	return cgroup_add_files(cont, ss, mem_cgroup_files,
 				ARRAY_SIZE(mem_cgroup_files));
 }
@@ -1117,9 +1121,6 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss,
 	struct mm_struct *mm;
 	struct mem_cgroup *mem, *old_mem;
 
-	if (mem_cgroup_subsys.disabled)
-		return;
-
 	mm = get_task_mm(p);
 	if (mm == NULL)
 		return;
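The disabled flag tested throughout this patch is set when the controller is switched off on the kernel command line (booting with cgroup_disable=memory). Booted that way, the VM-facing entry points above return immediately as no-ops, while the cgroup callbacks that lost their checks are simply never reached, because a disabled memory controller cannot be mounted.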