about summary refs log tree commit diff stats
path: root/mm/memcontrol.c
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2014-04-07 18:37:43 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-04-07 19:35:56 -0400
commitdf381975463996178d685f6ef7d3555c5f887201 (patch)
tree22fd4f923fc2cc1ed1d7c4aa94039452ce6123e5 /mm/memcontrol.c
parent03583f1a631c0511dfd2f16e716d5b40f675de5a (diff)
memcg: get_mem_cgroup_from_mm()
Instead of returning NULL from try_get_mem_cgroup_from_mm() when the mm owner is exiting, just return root_mem_cgroup. This makes sense for all callsites and gets rid of some of them having to fall back manually.

[fengguang.wu@intel.com: fix warnings]
Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Fengguang Wu <fengguang.wu@intel.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--mm/memcontrol.c18
1 file changed, 4 insertions, 14 deletions
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index c3b674f9774f..87c3ec37dd26 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1071,7 +1071,7 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p)
1071 return mem_cgroup_from_css(task_css(p, memory_cgrp_id)); 1071 return mem_cgroup_from_css(task_css(p, memory_cgrp_id));
1072} 1072}
1073 1073
1074struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm) 1074static struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
1075{ 1075{
1076 struct mem_cgroup *memcg = NULL; 1076 struct mem_cgroup *memcg = NULL;
1077 1077
@@ -1079,7 +1079,7 @@ struct mem_cgroup *try_get_mem_cgroup_from_mm(struct mm_struct *mm)
1079 do { 1079 do {
1080 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner)); 1080 memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
1081 if (unlikely(!memcg)) 1081 if (unlikely(!memcg))
1082 break; 1082 memcg = root_mem_cgroup;
1083 } while (!css_tryget(&memcg->css)); 1083 } while (!css_tryget(&memcg->css));
1084 rcu_read_unlock(); 1084 rcu_read_unlock();
1085 return memcg; 1085 return memcg;
@@ -1475,7 +1475,7 @@ bool task_in_mem_cgroup(struct task_struct *task,
1475 1475
1476 p = find_lock_task_mm(task); 1476 p = find_lock_task_mm(task);
1477 if (p) { 1477 if (p) {
1478 curr = try_get_mem_cgroup_from_mm(p->mm); 1478 curr = get_mem_cgroup_from_mm(p->mm);
1479 task_unlock(p); 1479 task_unlock(p);
1480 } else { 1480 } else {
1481 /* 1481 /*
@@ -1489,8 +1489,6 @@ bool task_in_mem_cgroup(struct task_struct *task,
1489 css_get(&curr->css); 1489 css_get(&curr->css);
1490 rcu_read_unlock(); 1490 rcu_read_unlock();
1491 } 1491 }
1492 if (!curr)
1493 return false;
1494 /* 1492 /*
1495 * We should check use_hierarchy of "memcg" not "curr". Because checking 1493 * We should check use_hierarchy of "memcg" not "curr". Because checking
1496 * use_hierarchy of "curr" here make this function true if hierarchy is 1494 * use_hierarchy of "curr" here make this function true if hierarchy is
@@ -3617,15 +3615,7 @@ __memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **_memcg, int order)
3617 if (!current->mm || current->memcg_kmem_skip_account) 3615 if (!current->mm || current->memcg_kmem_skip_account)
3618 return true; 3616 return true;
3619 3617
3620 memcg = try_get_mem_cgroup_from_mm(current->mm); 3618 memcg = get_mem_cgroup_from_mm(current->mm);
3621
3622 /*
3623 * very rare case described in mem_cgroup_from_task. Unfortunately there
3624 * isn't much we can do without complicating this too much, and it would
3625 * be gfp-dependent anyway. Just let it go
3626 */
3627 if (unlikely(!memcg))
3628 return true;
3629 3619
3630 if (!memcg_can_account_kmem(memcg)) { 3620 if (!memcg_can_account_kmem(memcg)) {
3631 css_put(&memcg->css); 3621 css_put(&memcg->css);