author     Shakeel Butt <shakeelb@google.com>              2018-10-26 18:07:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-10-26 19:26:33 -0400
commit     85cfb245060e45640fa3447f8b0bad5e8bd3bdaf (patch)
tree       b11584da1ced1278c9aa8201f2549f340dcf9033
parent     86b27beae59685a42f81bcda9d502b5aebddfab8 (diff)
memcg: remove memcg_kmem_skip_account
The flag memcg_kmem_skip_account was added during the era of opt-out kmem
accounting.  There is no need for such a flag in the opt-in world, as there
aren't any __GFP_ACCOUNT allocations within memcg_create_cache_enqueue().

Link: http://lkml.kernel.org/r/20180919004501.178023-1-shakeelb@google.com
Signed-off-by: Shakeel Butt <shakeelb@google.com>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Vladimir Davydov <vdavydov.dev@gmail.com>
Cc: Greg Thelen <gthelen@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
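For context, a minimal sketch of the opt-in gate the message relies on: with opt-in accounting, the slab path only diverts an allocation to a per-memcg cache when the caller passes __GFP_ACCOUNT (or the cache was created with SLAB_ACCOUNT). The internal allocations made while scheduling per-memcg cache creation never pass that flag, so they cannot recurse into memcg_kmem_get_cache(). The helper below is hypothetical, loosely modeled on the slab pre-alloc hook of this era; it is not part of this patch.

/*
 * Illustrative sketch only -- maybe_account_alloc() is a hypothetical
 * helper, not kernel code introduced or touched by this patch.
 */
static inline struct kmem_cache *
maybe_account_alloc(struct kmem_cache *s, gfp_t flags)
{
	/* Opt-in: only __GFP_ACCOUNT callers (or SLAB_ACCOUNT caches) are charged. */
	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	/* Plain allocations stay unaccounted, so no recursion is possible. */
	return s;
}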
-rw-r--r--  include/linux/sched.h   3
-rw-r--r--  mm/memcontrol.c        24
2 files changed, 1 insertion(+), 26 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b8fcc6b3080c..8f8a5418b627 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -724,9 +724,6 @@ struct task_struct {
 #endif
 #ifdef CONFIG_MEMCG
 	unsigned			in_user_fault:1;
-#ifdef CONFIG_MEMCG_KMEM
-	unsigned			memcg_kmem_skip_account:1;
-#endif
 #endif
 #ifdef CONFIG_COMPAT_BRK
 	unsigned			brk_randomized:1;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 0e9ede617b89..645ede7ad1b2 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -2460,7 +2460,7 @@ static void memcg_kmem_cache_create_func(struct work_struct *w)
 /*
  * Enqueue the creation of a per-memcg kmem_cache.
  */
-static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
+static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
 					       struct kmem_cache *cachep)
 {
 	struct memcg_kmem_cache_create_work *cw;
@@ -2478,25 +2478,6 @@ static void __memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
 	queue_work(memcg_kmem_cache_wq, &cw->work);
 }
 
-static void memcg_schedule_kmem_cache_create(struct mem_cgroup *memcg,
-					     struct kmem_cache *cachep)
-{
-	/*
-	 * We need to stop accounting when we kmalloc, because if the
-	 * corresponding kmalloc cache is not yet created, the first allocation
-	 * in __memcg_schedule_kmem_cache_create will recurse.
-	 *
-	 * However, it is better to enclose the whole function. Depending on
-	 * the debugging options enabled, INIT_WORK(), for instance, can
-	 * trigger an allocation. This too, will make us recurse. Because at
-	 * this point we can't allow ourselves back into memcg_kmem_get_cache,
-	 * the safest choice is to do it like this, wrapping the whole function.
-	 */
-	current->memcg_kmem_skip_account = 1;
-	__memcg_schedule_kmem_cache_create(memcg, cachep);
-	current->memcg_kmem_skip_account = 0;
-}
-
 static inline bool memcg_kmem_bypass(void)
 {
 	if (in_interrupt() || !current->mm || (current->flags & PF_KTHREAD))
@@ -2531,9 +2512,6 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep)
 	if (memcg_kmem_bypass())
 		return cachep;
 
-	if (current->memcg_kmem_skip_account)
-		return cachep;
-
 	memcg = get_mem_cgroup_from_current();
 	kmemcg_id = READ_ONCE(memcg->kmemcg_id);
 	if (kmemcg_id < 0)