author		Glauber Costa <glommer@parallels.com>	2012-12-18 17:22:09 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-18 18:02:13 -0500
commit		a8964b9b84f99c0b1b5d7c09520f89f0700e742e
tree		468c57a6cd3bd03c91152ede7fdbcacc238341d2 /mm/memcontrol.c
parent		7de37682bec35bbe0cd69b8112ef257bc5fb1c3e
memcg: use static branches when code not in use
We can use static branches to patch the code in or out when not used.

Because the _ACTIVE bit on kmem_accounted is only set after the increment is
done, we guarantee that the root memcg will always be selected for kmem
charges until all call sites are patched (see memcg_kmem_enabled). This
guarantees that no mischarges are applied.

The static branch decrement happens when the last reference count from the
kmem accounting in memcg dies. This will only happen when the charges drop
down to 0.

When that happens, we need to disable the static branch only on those memcgs
that enabled it. To achieve this, we would be forced to complicate the code
by keeping track of which memcgs were the ones that actually enabled limits,
and which ones got it from their parents. It is a lot simpler just to do
static_key_slow_inc() on every child that is accounted.

Signed-off-by: Glauber Costa <glommer@parallels.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Acked-by: Kamezawa Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Frederic Weisbecker <fweisbec@redhat.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: JoonSoo Kim <js1304@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/memcontrol.c')
-rw-r--r--	mm/memcontrol.c	79
1 file changed, 75 insertions(+), 4 deletions(-)
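The call sites being "patched" here are the memcg_kmem_enabled() tests on the
charge paths. That helper lives in include/linux/memcontrol.h and is therefore
outside this file-limited view; as a rough sketch (the exact header-side shape
may differ), the idiom for the static_key API of this era looks like:

extern struct static_key memcg_kmem_enabled_key;

/*
 * static_key_false() compiles to a no-op jump while the key's count is
 * zero; static_key_slow_inc() rewrites every such call site to take the
 * branch. Until that patching completes, this test still returns false,
 * so charges keep falling through to the root memcg.
 */
static inline bool memcg_kmem_enabled(void)
{
	return static_key_false(&memcg_kmem_enabled_key);
}

The inc/dec pair behaves like a reference count on the patched state: the
branch stays enabled as long as at least one accounted memcg holds the key,
which is why every accounted child below takes its own static_key_slow_inc().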
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 9a62ac3ea881..bc70254558fa 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -346,10 +346,13 @@ struct mem_cgroup {
 /* internal only representation about the status of kmem accounting. */
 enum {
 	KMEM_ACCOUNTED_ACTIVE = 0, /* accounted by this cgroup itself */
+	KMEM_ACCOUNTED_ACTIVATED, /* static key enabled. */
 	KMEM_ACCOUNTED_DEAD, /* dead memcg with pending kmem charges */
 };
 
-#define KMEM_ACCOUNTED_MASK (1 << KMEM_ACCOUNTED_ACTIVE)
+/* We account when limit is on, but only after call sites are patched */
+#define KMEM_ACCOUNTED_MASK \
+		((1 << KMEM_ACCOUNTED_ACTIVE) | (1 << KMEM_ACCOUNTED_ACTIVATED))
 
 #ifdef CONFIG_MEMCG_KMEM
 static inline void memcg_kmem_set_active(struct mem_cgroup *memcg)
@@ -362,6 +365,11 @@ static bool memcg_kmem_is_active(struct mem_cgroup *memcg)
 	return test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags);
 }
 
+static void memcg_kmem_set_activated(struct mem_cgroup *memcg)
+{
+	set_bit(KMEM_ACCOUNTED_ACTIVATED, &memcg->kmem_account_flags);
+}
+
 static void memcg_kmem_mark_dead(struct mem_cgroup *memcg)
 {
 	if (test_bit(KMEM_ACCOUNTED_ACTIVE, &memcg->kmem_account_flags))
@@ -532,6 +540,26 @@ static void disarm_sock_keys(struct mem_cgroup *memcg)
 }
 #endif
 
+#ifdef CONFIG_MEMCG_KMEM
+struct static_key memcg_kmem_enabled_key;
+
+static void disarm_kmem_keys(struct mem_cgroup *memcg)
+{
+	if (memcg_kmem_is_active(memcg))
+		static_key_slow_dec(&memcg_kmem_enabled_key);
+}
+#else
+static void disarm_kmem_keys(struct mem_cgroup *memcg)
+{
+}
+#endif /* CONFIG_MEMCG_KMEM */
+
+static void disarm_static_keys(struct mem_cgroup *memcg)
+{
+	disarm_sock_keys(memcg);
+	disarm_kmem_keys(memcg);
+}
+
 static void drain_all_stock_async(struct mem_cgroup *memcg);
 
 static struct mem_cgroup_per_zone *
@@ -4204,6 +4232,8 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 {
 	int ret = -EINVAL;
 #ifdef CONFIG_MEMCG_KMEM
+	bool must_inc_static_branch = false;
+
 	struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
 	/*
 	 * For simplicity, we won't allow this to be disabled. It also can't
@@ -4234,7 +4264,15 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 		ret = res_counter_set_limit(&memcg->kmem, val);
 		VM_BUG_ON(ret);
 
-		memcg_kmem_set_active(memcg);
+		/*
+		 * After this point, kmem_accounted (that we test atomically in
+		 * the beginning of this conditional), is no longer 0. This
+		 * guarantees only one process will set the following boolean
+		 * to true. We don't need test_and_set because we're protected
+		 * by the set_limit_mutex anyway.
+		 */
+		memcg_kmem_set_activated(memcg);
+		must_inc_static_branch = true;
 		/*
 		 * kmem charges can outlive the cgroup. In the case of slab
 		 * pages, for instance, a page contain objects from various
@@ -4247,6 +4285,27 @@ static int memcg_update_kmem_limit(struct cgroup *cont, u64 val)
 out:
 	mutex_unlock(&set_limit_mutex);
 	cgroup_unlock();
+
+	/*
+	 * We are by now familiar with the fact that we can't inc the static
+	 * branch inside cgroup_lock. See disarm functions for details. A
+	 * worker here is overkill, but also wrong: After the limit is set, we
+	 * must start accounting right away. Since this operation can't fail,
+	 * we can safely defer it to here - no rollback will be needed.
+	 *
+	 * The boolean used to control this is also safe, because
+	 * KMEM_ACCOUNTED_ACTIVATED guarantees that only one process will be
+	 * able to set it to true;
+	 */
+	if (must_inc_static_branch) {
+		static_key_slow_inc(&memcg_kmem_enabled_key);
+		/*
+		 * setting the active bit after the inc will guarantee no one
+		 * starts accounting before all call sites are patched
+		 */
+		memcg_kmem_set_active(memcg);
+	}
+
 #endif
 	return ret;
 }
@@ -4258,8 +4317,20 @@ static void memcg_propagate_kmem(struct mem_cgroup *memcg)
 		return;
 	memcg->kmem_account_flags = parent->kmem_account_flags;
 #ifdef CONFIG_MEMCG_KMEM
-	if (memcg_kmem_is_active(memcg))
+	/*
+	 * When that happen, we need to disable the static branch only on those
+	 * memcgs that enabled it. To achieve this, we would be forced to
+	 * complicate the code by keeping track of which memcgs were the ones
+	 * that actually enabled limits, and which ones got it from its
+	 * parents.
+	 *
+	 * It is a lot simpler just to do static_key_slow_inc() on every child
+	 * that is accounted.
+	 */
+	if (memcg_kmem_is_active(memcg)) {
 		mem_cgroup_get(memcg);
+		static_key_slow_inc(&memcg_kmem_enabled_key);
+	}
 #endif
 }
 
@@ -5184,7 +5255,7 @@ static void free_work(struct work_struct *work)
 	 * to move this code around, and make sure it is outside
 	 * the cgroup_lock.
 	 */
-	disarm_sock_keys(memcg);
+	disarm_static_keys(memcg);
 	if (size < PAGE_SIZE)
 		kfree(memcg);
 	else
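To see why the patch sets the _ACTIVE bit only after the increment, consider
the shape of a charging call site. The sketch below is a hypothetical
simplification (kmem_charge_sketch stands in for the real wrappers in
include/linux/memcontrol.h, which carry more conditions); it keeps only the
two checks relevant to the ordering argument:

/*
 * Hypothetical, simplified charge-path wrapper; not verbatim from the
 * kernel. Returns true when the allocation may proceed unaccounted.
 */
static inline bool kmem_charge_sketch(struct mem_cgroup *memcg)
{
	/*
	 * Static branch: a no-op until static_key_slow_inc() has patched
	 * every call site, so a task racing with the limit write simply
	 * skips accounting.
	 */
	if (!memcg_kmem_enabled())
		return true;

	/*
	 * Per-memcg flag: raised only after the increment above has
	 * completed. A task that gets past the static branch can still
	 * only charge fully activated memcgs; anything else falls
	 * through to the root memcg.
	 */
	if (!memcg_kmem_is_active(memcg))
		return true;

	/* The real code would charge memcg->kmem via res_counter here. */
	return true;
}

Because the flag is raised only once every call site is live, there is no
window in which a charge can land on a memcg whose accounting is half
enabled; this is exactly the "no mischarges" guarantee from the commit
message.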