summaryrefslogtreecommitdiffstats
path: root/include/linux/memcontrol.h
diff options
context:
space:
mode:
authorRoman Gushchin <guro@fb.com>2018-10-26 18:03:19 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-10-26 19:25:19 -0400
commit9b6f7e163cd0f468d1b9696b785659d3c27c8667 (patch)
treee92c50c153ad34cfe632761d782ffa872d99f91f /include/linux/memcontrol.h
parentc5fd3ca06b4699e251b4a1fb808c2d5124494101 (diff)
mm: rework memcg kernel stack accounting
If CONFIG_VMAP_STACK is set, kernel stacks are allocated using __vmalloc_node_range() with __GFP_ACCOUNT. So kernel stack pages are charged against corresponding memory cgroups on allocation and uncharged on releasing them. The problem is that we do cache kernel stacks in small per-cpu caches and do reuse them for new tasks, which can belong to different memory cgroups. Each stack page still holds a reference to the original cgroup, so the cgroup can't be released until the vmap area is released. To make this happen we need more than two subsequent exits without forks in between on the current cpu, which makes it very unlikely to happen. As a result, I saw a significant number of dying cgroups (in theory, up to 2 * number_of_cpu + number_of_tasks), which can't be released even by significant memory pressure. As a cgroup structure can take a significant amount of memory (first of all, per-cpu data like memcg statistics), it leads to a noticeable waste of memory. Link: http://lkml.kernel.org/r/20180827162621.30187-1-guro@fb.com Fixes: ac496bf48d97 ("fork: Optimize task creation by caching two thread stacks per CPU if CONFIG_VMAP_STACK=y") Signed-off-by: Roman Gushchin <guro@fb.com> Reviewed-by: Shakeel Butt <shakeelb@google.com> Acked-by: Michal Hocko <mhocko@kernel.org> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Andy Lutomirski <luto@kernel.org> Cc: Konstantin Khlebnikov <koct9i@gmail.com> Cc: Tejun Heo <tj@kernel.org> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'include/linux/memcontrol.h')
-rw-r--r--include/linux/memcontrol.h13
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 652f602167df..4399cc3f00e4 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -1268,10 +1268,11 @@ struct kmem_cache *memcg_kmem_get_cache(struct kmem_cache *cachep);
 void memcg_kmem_put_cache(struct kmem_cache *cachep);
 int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
 			    struct mem_cgroup *memcg);
+
+#ifdef CONFIG_MEMCG_KMEM
 int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
 void memcg_kmem_uncharge(struct page *page, int order);
 
-#ifdef CONFIG_MEMCG_KMEM
 extern struct static_key_false memcg_kmem_enabled_key;
 extern struct workqueue_struct *memcg_kmem_cache_wq;
 
@@ -1307,6 +1308,16 @@ extern int memcg_expand_shrinker_maps(int new_id);
 extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
 				   int nid, int shrinker_id);
 #else
+
+static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
+{
+	return 0;
+}
+
+static inline void memcg_kmem_uncharge(struct page *page, int order)
+{
+}
+
 #define for_each_memcg_cache_index(_idx)	\
 	for (; NULL; )
 