about summary refs log tree commit diff stats
path: root/mm
diff options
context:
space:
mode:
authorGlauber Costa <glommer@parallels.com>2013-02-22 19:34:56 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2013-02-23 20:50:18 -0500
commit692e89abd154b04d212dce0c18a449bda15aac04 (patch)
tree91073f62049a9ffe49686cd10d4fd5cb09b26ac7 /mm
parent0999821b1d08f69e3879eb8fa0d28c4aba82ab5e (diff)
memcg: increment static branch right after limit set
We were deferring the kmemcg static branch increment to a later time, due to a nasty dependency between the cpu_hotplug lock, taken by the jump label update, and the cgroup_lock. Now we no longer take the cgroup lock, and we can save ourselves the trouble.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Cc: Tejun Heo <tj@kernel.org>
Cc: Hiroyuki Kamezawa <kamezawa.hiroyuki@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/memcontrol.c31
1 file changed, 7 insertions(+), 24 deletions(-)
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 46cdaef78b01..f4f41c36e703 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -4974,8 +4974,6 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
4974{ 4974{
4975 int ret = -EINVAL; 4975 int ret = -EINVAL;
4976#ifdef CONFIG_MEMCG_KMEM 4976#ifdef CONFIG_MEMCG_KMEM
4977 bool must_inc_static_branch = false;
4978
4979 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 4977 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4980 /* 4978 /*
4981 * For simplicity, we won't allow this to be disabled. It also can't 4979 * For simplicity, we won't allow this to be disabled. It also can't
@@ -5004,7 +5002,13 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
5004 res_counter_set_limit(&memcg->kmem, RESOURCE_MAX); 5002 res_counter_set_limit(&memcg->kmem, RESOURCE_MAX);
5005 goto out; 5003 goto out;
5006 } 5004 }
5007 must_inc_static_branch = true; 5005 static_key_slow_inc(&memcg_kmem_enabled_key);
5006 /*
5007 * setting the active bit after the inc will guarantee no one
5008 * starts accounting before all call sites are patched
5009 */
5010 memcg_kmem_set_active(memcg);
5011
5008 /* 5012 /*
5009 * kmem charges can outlive the cgroup. In the case of slab 5013 * kmem charges can outlive the cgroup. In the case of slab
5010 * pages, for instance, a page contain objects from various 5014 * pages, for instance, a page contain objects from various
@@ -5017,27 +5021,6 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
5017out: 5021out:
5018 mutex_unlock(&set_limit_mutex); 5022 mutex_unlock(&set_limit_mutex);
5019 mutex_unlock(&memcg_create_mutex); 5023 mutex_unlock(&memcg_create_mutex);
5020
5021 /*
5022 * We are by now familiar with the fact that we can't inc the static
5023 * branch inside cgroup_lock. See disarm functions for details. A
5024 * worker here is overkill, but also wrong: After the limit is set, we
5025 * must start accounting right away. Since this operation can't fail,
5026 * we can safely defer it to here - no rollback will be needed.
5027 *
5028 * The boolean used to control this is also safe, because
5029 * KMEM_ACCOUNTED_ACTIVATED guarantees that only one process will be
5030 * able to set it to true;
5031 */
5032 if (must_inc_static_branch) {
5033 static_key_slow_inc(&memcg_kmem_enabled_key);
5034 /*
5035 * setting the active bit after the inc will guarantee no one
5036 * starts accounting before all call sites are patched
5037 */
5038 memcg_kmem_set_active(memcg);
5039 }
5040
5041#endif 5024#endif
5042 return ret; 5025 return ret;
5043} 5026}