author     Glauber Costa <glommer@parallels.com>          2013-02-22 19:34:55 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org> 2013-02-23 20:50:18 -0500
commit     0999821b1d08f69e3879eb8fa0d28c4aba82ab5e
tree       5a985ef3b378ede5c7008ecfaceca9d1d3169ea8
parent     b5f99b537d047072bd50ff5c51000613eb537be8
memcg: replace cgroup_lock with memcg specific memcg_lock
After the preparation work done in earlier patches, the cgroup_lock can be trivially replaced with a memcg-specific lock. This is an automatic translation at every site where the values involved were queried.

The sites where values are written, however, used to be naturally called under cgroup_lock. This is the case for instance in the css_online callback. For those, we now need to explicitly add the memcg lock.

With this, all the calls to cgroup_lock outside cgroup core are gone.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hiroyuki Kamezawa <kamezawa.hiroyuki@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
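To make the locking pattern concrete, here is a minimal userspace C sketch of the idea (an illustration only, not kernel code; the names create_mutex, nr_children, add_child and set_flag_if_childless are invented for the example). The point is that the creation path and every "are there children yet?" check take the same mutex, so check-then-update sequences like the use_hierarchy and kmem-limit writes in this patch cannot race with a new child appearing.

/*
 * Illustrative userspace analogue of the memcg_create_mutex pattern.
 * Hypothetical names; not kernel code. Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t create_mutex = PTHREAD_MUTEX_INITIALIZER;
static int nr_children;		/* protected by create_mutex */

/* Creation path: analogue of css_online taking memcg_create_mutex. */
static void add_child(void)
{
	pthread_mutex_lock(&create_mutex);
	nr_children++;		/* a new child becomes visible only here */
	pthread_mutex_unlock(&create_mutex);
}

/*
 * Write path: analogue of mem_cgroup_hierarchy_write() or
 * memcg_update_kmem_limit(). The "no children yet?" check and the update
 * it guards happen under the same mutex, so no child can appear in between.
 */
static bool set_flag_if_childless(bool *flag, bool val)
{
	bool ok;

	pthread_mutex_lock(&create_mutex);
	ok = (nr_children == 0);
	if (ok)
		*flag = val;
	pthread_mutex_unlock(&create_mutex);
	return ok;
}

int main(void)
{
	bool use_hierarchy = false;

	if (set_flag_if_childless(&use_hierarchy, true))
		printf("flag set: no children existed\n");
	add_child();
	if (!set_flag_if_childless(&use_hierarchy, false))
		printf("refused: a child already exists\n");
	return 0;
}

The benefit, as the subject line suggests, is that these writers now serialize only against memcg cgroup creation rather than against everything the global cgroup_lock covers.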
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--   mm/memcontrol.c   39
1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2bc3fbe93154..46cdaef78b01 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -488,6 +488,13 @@ enum res_type {
 #define MEM_CGROUP_RECLAIM_SHRINK_BIT	0x1
 #define MEM_CGROUP_RECLAIM_SHRINK	(1 << MEM_CGROUP_RECLAIM_SHRINK_BIT)
 
+/*
+ * The memcg_create_mutex will be held whenever a new cgroup is created.
+ * As a consequence, any change that needs to protect against new child cgroups
+ * appearing has to hold it as well.
+ */
+static DEFINE_MUTEX(memcg_create_mutex);
+
 static void mem_cgroup_get(struct mem_cgroup *memcg);
 static void mem_cgroup_put(struct mem_cgroup *memcg);
 
@@ -4778,8 +4785,8 @@ static inline bool __memcg_has_children(struct mem_cgroup *memcg)
 }
 
 /*
- * Must be called with cgroup_lock held, unless the cgroup is guaranteed to be
- * already dead (in mem_cgroup_force_empty(), for instance). This is different
+ * Must be called with memcg_create_mutex held, unless the cgroup is guaranteed
+ * to be already dead (as in mem_cgroup_force_empty, for instance). This is
  * from mem_cgroup_count_children(), in the sense that we don't really care how
  * many children we have; we only need to know if we have any. It also counts
  * any memcg without hierarchy as infertile.
@@ -4859,7 +4866,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
 	if (parent)
 		parent_memcg = mem_cgroup_from_cont(parent);
 
-	cgroup_lock();
+	mutex_lock(&memcg_create_mutex);
 
 	if (memcg->use_hierarchy == val)
 		goto out;
@@ -4882,7 +4889,7 @@ static int mem_cgroup_hierarchy_write(struct cgroup *cont, struct cftype *cft,
 		retval = -EINVAL;
 
 out:
-	cgroup_unlock();
+	mutex_unlock(&memcg_create_mutex);
 
 	return retval;
 }
@@ -4981,14 +4988,8 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 	 *
 	 * After it first became limited, changes in the value of the limit are
 	 * of course permitted.
-	 *
-	 * Taking the cgroup_lock is really offensive, but it is so far the only
-	 * way to guarantee that no children will appear. There are plenty of
-	 * other offenders, and they should all go away. Fine grained locking
-	 * is probably the way to go here. When we are fully hierarchical, we
-	 * can also get rid of the use_hierarchy check.
 	 */
-	cgroup_lock();
+	mutex_lock(&memcg_create_mutex);
 	mutex_lock(&set_limit_mutex);
 	if (!memcg->kmem_account_flags && val != RESOURCE_MAX) {
 		if (cgroup_task_count(cont) || memcg_has_children(memcg)) {
@@ -5015,7 +5016,7 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 		ret = res_counter_set_limit(&memcg->kmem, val);
 out:
 	mutex_unlock(&set_limit_mutex);
-	cgroup_unlock();
+	mutex_unlock(&memcg_create_mutex);
 
 	/*
 	 * We are by now familiar with the fact that we can't inc the static
@@ -5396,17 +5397,17 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
 
 	parent = mem_cgroup_from_cont(cgrp->parent);
 
-	cgroup_lock();
+	mutex_lock(&memcg_create_mutex);
 
 	/* If under hierarchy, only empty-root can set this value */
 	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
-		cgroup_unlock();
+		mutex_unlock(&memcg_create_mutex);
 		return -EINVAL;
 	}
 
 	memcg->swappiness = val;
 
-	cgroup_unlock();
+	mutex_unlock(&memcg_create_mutex);
 
 	return 0;
 }
@@ -5732,16 +5733,16 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
 
 	parent = mem_cgroup_from_cont(cgrp->parent);
 
-	cgroup_lock();
+	mutex_lock(&memcg_create_mutex);
 	/* oom-kill-disable is a flag for subhierarchy. */
 	if ((parent->use_hierarchy) || memcg_has_children(memcg)) {
-		cgroup_unlock();
+		mutex_unlock(&memcg_create_mutex);
 		return -EINVAL;
 	}
 	memcg->oom_kill_disable = val;
 	if (!val)
 		memcg_oom_recover(memcg);
-	cgroup_unlock();
+	mutex_unlock(&memcg_create_mutex);
 	return 0;
 }
 
@@ -6170,6 +6171,7 @@ mem_cgroup_css_online(struct cgroup *cont)
 	if (!cont->parent)
 		return 0;
 
+	mutex_lock(&memcg_create_mutex);
 	memcg = mem_cgroup_from_cont(cont);
 	parent = mem_cgroup_from_cont(cont->parent);
 
@@ -6203,6 +6205,7 @@ mem_cgroup_css_online(struct cgroup *cont)
 	}
 
 	error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
+	mutex_unlock(&memcg_create_mutex);
 	if (error) {
 		/*
 		 * We call put now because our (and parent's) refcnts