summaryrefslogtreecommitdiffstats
path: root/mm/slab.h
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2017-02-22 18:41:24 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-02-22 19:41:27 -0500
commit510ded33e075c2bd662b1efab0110f4240325fc9 (patch)
tree9199fa1031aac4fcf633ae89a01233a8988e23fc /mm/slab.h
parentbc2791f857e1984b7548d2a2de2ffb1a913dee62 (diff)
slab: implement slab_root_caches list
With kmem cgroup support enabled, kmem_caches can be created and destroyed frequently and a great number of near empty kmem_caches can accumulate if there are a lot of transient cgroups and the system is not under memory pressure. When memory reclaim starts under such conditions, it can lead to consecutive deactivation and destruction of many kmem_caches, easily hundreds of thousands on moderately large systems, exposing scalability issues in the current slab management code. This is one of the patches to address the issue. slab_caches currently lists all caches including root and memcg ones. This is the only data structure which lists the root caches and iterating root caches can only be done by walking the list while skipping over memcg caches. As there can be a huge number of memcg caches, this can become very expensive. This also can make /proc/slabinfo behave very badly. seq_file processes reads in 4k chunks and seeks to the previous Nth position on slab_caches list to resume after each chunk. With a lot of memcg cache churns on the list, reading /proc/slabinfo can become very slow and its content often ends up with duplicate and/or missing entries. This patch adds a new list slab_root_caches which lists only the root caches. When memcg is not enabled, it becomes just an alias of slab_caches. memcg specific list operations are collected into memcg_[un]link_cache(). Link: http://lkml.kernel.org/r/20170117235411.9408-7-tj@kernel.org Signed-off-by: Tejun Heo <tj@kernel.org> Reported-by: Jay Vana <jsvana@fb.com> Acked-by: Vladimir Davydov <vdavydov@tarantool.org> Cc: Christoph Lameter <cl@linux.com> Cc: Pekka Enberg <penberg@kernel.org> Cc: David Rientjes <rientjes@google.com> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/slab.h')
-rw-r--r--   mm/slab.h | 15 +++++++++++++++
1 file changed, 15 insertions(+), 0 deletions(-)
diff --git a/mm/slab.h b/mm/slab.h
index a08f01016a3f..9631bb27c772 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -201,6 +201,11 @@ void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
 int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
 
 #if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+
+/* List of all root caches. */
+extern struct list_head		slab_root_caches;
+#define root_caches_node	memcg_params.__root_caches_node
+
 /*
  * Iterate over all memcg caches of the given root cache. The caller must hold
  * slab_mutex.
@@ -300,9 +305,14 @@ static __always_inline void memcg_uncharge_slab(struct page *page, int order,
 }
 
 extern void slab_init_memcg_params(struct kmem_cache *);
+extern void memcg_link_cache(struct kmem_cache *s);
 
 #else /* CONFIG_MEMCG && !CONFIG_SLOB */
 
+/* If !memcg, all caches are root. */
+#define slab_root_caches	slab_caches
+#define root_caches_node	list
+
 #define for_each_memcg_cache(iter, root) \
 	for ((void)(iter), (void)(root); 0; )
 
@@ -347,6 +357,11 @@ static inline void memcg_uncharge_slab(struct page *page, int order,
 static inline void slab_init_memcg_params(struct kmem_cache *s)
 {
 }
+
+static inline void memcg_link_cache(struct kmem_cache *s)
+{
+}
+
 #endif /* CONFIG_MEMCG && !CONFIG_SLOB */
 
 static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)