author     Johannes Weiner <jweiner@redhat.com>	2012-01-12 20:18:38 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>	2012-01-12 23:13:06 -0500
commit     cfa449461e67b60df986170eecb089831fa9e49a
tree       ff40cff591e27250b91b2f1fb4409875f397b048 /mm
parent     0e574a932d2cab8eb3b02d21feb59f2c09154738
mm: memcg: lookup_page_cgroup (almost) never returns NULL
Pages have their corresponding page_cgroup descriptors set up before
they are used in userspace, and thus managed by a memory cgroup.

The only time where lookup_page_cgroup() can return NULL is in the
CONFIG_DEBUG_VM-only page sanity checking code that executes while
feeding pages into the page allocator for the first time.

Remove the NULL checks against lookup_page_cgroup() results from all
callsites where we know that corresponding page_cgroup descriptors must
be allocated, and add a comment to the callsite that actually does have
to check the return value.

[hughd@google.com: stop oops in mem_cgroup_update_page_stat()]
Signed-off-by: Johannes Weiner <jweiner@redhat.com>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Balbir Singh <bsingharora@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
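For illustration, the invariant the patch relies on can be sketched in
plain C. This is a hypothetical userspace model, not the kernel code:
the types, the pc_section_ready flag, and the helpers uncharge() and
lookup_used() are stand-ins mimicking how lookup_page_cgroup() returns
NULL only before a page's descriptor section exists, while callers on
the charge/uncharge paths may rely on a non-NULL result.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel's types; illustrative only. */
struct page { unsigned int index; };
struct page_cgroup { bool used; };

static struct page_cgroup pc_table[16];
static bool pc_section_ready;	/* false only while boot pages are fed in */

/* Models lookup_page_cgroup(): NULL only before the descriptor
 * section backing this page is set up (boot / memory hotplug). */
static struct page_cgroup *lookup_page_cgroup(struct page *page)
{
	if (!pc_section_ready)
		return NULL;
	return &pc_table[page->index];
}

/* Models an uncharge-path caller: any page visible to userspace
 * already has its descriptor, so no NULL check is needed here. */
static void uncharge(struct page *page)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);

	if (!pc->used)
		return;
	pc->used = false;
}

/* Models lookup_page_cgroup_used(): the one lookup that can see
 * pages still being initialized keeps its NULL check. */
static struct page_cgroup *lookup_used(struct page *page)
{
	struct page_cgroup *pc = lookup_page_cgroup(page);

	if (pc && pc->used)
		return pc;
	return NULL;
}

int main(void)
{
	struct page p = { .index = 3 };

	pc_section_ready = true;
	pc_table[p.index].used = true;
	printf("used before uncharge: %d\n", lookup_used(&p) != NULL);
	uncharge(&p);
	printf("used after uncharge:  %d\n", lookup_used(&p) != NULL);
	return 0;
}

The division in the sketch is the point of the patch: callers that can
only run after descriptors exist drop the check, and the single caller
that may run earlier documents and keeps it.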
Diffstat (limited to 'mm')
-rw-r--r--  mm/memcontrol.c  11
1 file changed, 7 insertions(+), 4 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 93cb16d2b96a..a63ad141083c 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1960,7 +1960,7 @@ void mem_cgroup_update_page_stat(struct page *page,
 	bool need_unlock = false;
 	unsigned long uninitialized_var(flags);
 
-	if (unlikely(!pc))
+	if (mem_cgroup_disabled())
 		return;
 
 	rcu_read_lock();
@@ -2735,8 +2735,6 @@ static int mem_cgroup_charge_common(struct page *page, struct mm_struct *mm,
 	}
 
 	pc = lookup_page_cgroup(page);
-	BUG_ON(!pc); /* XXX: remove this and move pc lookup into commit */
-
 	ret = __mem_cgroup_try_charge(mm, gfp_mask, nr_pages, &memcg, oom);
 	if (ret || !memcg)
 		return ret;
@@ -3008,7 +3006,7 @@ __mem_cgroup_uncharge_common(struct page *page, enum charge_type ctype)
 	 * Check if our page_cgroup is valid
 	 */
 	pc = lookup_page_cgroup(page);
-	if (unlikely(!pc || !PageCgroupUsed(pc)))
+	if (unlikely(!PageCgroupUsed(pc)))
 		return NULL;
 
 	lock_page_cgroup(pc);
@@ -3436,6 +3434,11 @@ static struct page_cgroup *lookup_page_cgroup_used(struct page *page)
 	struct page_cgroup *pc;
 
 	pc = lookup_page_cgroup(page);
+	/*
+	 * Can be NULL while feeding pages into the page allocator for
+	 * the first time, i.e. during boot or memory hotplug;
+	 * or when mem_cgroup_disabled().
+	 */
 	if (likely(pc) && PageCgroupUsed(pc))
 		return pc;
 	return NULL;