Diffstat (limited to 'mm/slab_common.c')
-rw-r--r--	mm/slab_common.c	59
1 file changed, 46 insertions(+), 13 deletions(-)
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 0d7fe71ff5e4..c8cb36774ba1 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -44,7 +44,7 @@ static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
 		SLAB_FAILSLAB | SLAB_KASAN)
 
 #define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
-			 SLAB_NOTRACK | SLAB_ACCOUNT)
+			 SLAB_ACCOUNT)
 
 /*
  * Merge control. If this is set then no merging of slab caches will occur.
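SLAB_MERGE_SAME is the mask of flags that must match exactly before two caches may be merged; with SLAB_NOTRACK removed from the kernel it drops out of the mask. An abbreviated sketch of how the mask is applied when scanning for a merge candidate (the loop lives in find_mergeable() in this file; surrounding checks elided):

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		/* Both caches must agree on every flag in SLAB_MERGE_SAME. */
		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;

		/* ... remaining size and alignment checks ... */
	}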
@@ -291,7 +291,7 @@ int slab_unmergeable(struct kmem_cache *s)
 }
 
 struct kmem_cache *find_mergeable(size_t size, size_t align,
-		unsigned long flags, const char *name, void (*ctor)(void *))
+		slab_flags_t flags, const char *name, void (*ctor)(void *))
 {
 	struct kmem_cache *s;
 
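The slab_flags_t type itself comes from the companion change to include/linux/slab.h, which is not part of this file's diff. A sketch of its intended shape, assuming the typedef is a __bitwise unsigned int so that sparse can catch mixups between slab flags and gfp_t:

/* include/linux/slab.h (companion change, sketch only) */
typedef unsigned int __bitwise slab_flags_t;

#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/* ... the remaining SLAB_* flags gain the same __force cast ... */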
@@ -341,7 +341,7 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
  * Figure out what the alignment of the objects will be given a set of
  * flags, a user specified alignment and the size of the objects.
  */
-unsigned long calculate_alignment(unsigned long flags,
+unsigned long calculate_alignment(slab_flags_t flags,
 		unsigned long align, unsigned long size)
 {
 	/*
@@ -366,7 +366,7 @@ unsigned long calculate_alignment(unsigned long flags,
 
 static struct kmem_cache *create_cache(const char *name,
 		size_t object_size, size_t size, size_t align,
-		unsigned long flags, void (*ctor)(void *),
+		slab_flags_t flags, void (*ctor)(void *),
 		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
 {
 	struct kmem_cache *s;
@@ -431,7 +431,7 @@ out_free_cache:
  */
 struct kmem_cache *
 kmem_cache_create(const char *name, size_t size, size_t align,
-		unsigned long flags, void (*ctor)(void *))
+		slab_flags_t flags, void (*ctor)(void *))
 {
 	struct kmem_cache *s = NULL;
 	const char *cache_name;
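Existing callers of kmem_cache_create() need no source changes: the SLAB_* constants simply carry the new type. A hypothetical caller, for illustration only (struct foo and foo_cache_init are invented names, not from this diff):

#include <linux/slab.h>

struct foo {
	int id;
	char name[16];
};

static struct kmem_cache *foo_cachep;

static int __init foo_cache_init(void)
{
	/* Flags are now slab_flags_t values; the call site is unchanged. */
	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo), 0,
				       SLAB_HWCACHE_ALIGN, NULL);
	if (!foo_cachep)
		return -ENOMEM;
	return 0;
}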
@@ -879,7 +879,7 @@ bool slab_is_available(void)
 #ifndef CONFIG_SLOB
 /* Create a cache during boot when no slab services are available yet */
 void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
-		unsigned long flags)
+		slab_flags_t flags)
 {
 	int err;
 
@@ -899,7 +899,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
 }
 
 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
-		unsigned long flags)
+		slab_flags_t flags)
 {
 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
 
@@ -1057,7 +1057,7 @@ void __init setup_kmalloc_cache_index_table(void)
 	}
 }
 
-static void __init new_kmalloc_cache(int idx, unsigned long flags)
+static void __init new_kmalloc_cache(int idx, slab_flags_t flags)
 {
 	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
 					kmalloc_info[idx].size, flags);
@@ -1068,7 +1068,7 @@ static void __init new_kmalloc_cache(int idx, unsigned long flags)
  * may already have been created because they were needed to
  * enable allocations for slab creation.
  */
-void __init create_kmalloc_caches(unsigned long flags)
+void __init create_kmalloc_caches(slab_flags_t flags)
 {
 	int i;
 
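The prototypes for these helpers are declared in mm/slab.h and include/linux/slab.h and are converted in the same series. A sketch of the matching declarations, assuming the rest of the series keeps the signatures otherwise unchanged (not shown in this file's diff):

/* mm/slab.h (companion change, sketch only) */
unsigned long calculate_alignment(slab_flags_t flags,
		unsigned long align, unsigned long size);

struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
		slab_flags_t flags);
void create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		slab_flags_t flags);

struct kmem_cache *find_mergeable(size_t size, size_t align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));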
@@ -1184,8 +1184,7 @@ void cache_random_seq_destroy(struct kmem_cache *cachep)
 }
 #endif /* CONFIG_SLAB_FREELIST_RANDOM */
 
-#ifdef CONFIG_SLABINFO
-
+#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
 #ifdef CONFIG_SLAB
 #define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
 #else
@@ -1281,7 +1280,41 @@ static int slab_show(struct seq_file *m, void *p)
 	return 0;
 }
 
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+void dump_unreclaimable_slab(void)
+{
+	struct kmem_cache *s, *s2;
+	struct slabinfo sinfo;
+
+	/*
+	 * Acquiring slab_mutex here is risky since we don't want to sleep
+	 * in the oom path. But without holding the mutex, walking the list
+	 * could race with cache destruction and crash.
+	 * Use mutex_trylock to protect the list traversal and dump nothing
+	 * if the mutex cannot be acquired.
+	 */
+	if (!mutex_trylock(&slab_mutex)) {
+		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
+		return;
+	}
+
+	pr_info("Unreclaimable slab info:\n");
+	pr_info("Name                      Used          Total\n");
+
+	list_for_each_entry_safe(s, s2, &slab_caches, list) {
+		if (!is_root_cache(s) || (s->flags & SLAB_RECLAIM_ACCOUNT))
+			continue;
+
+		get_slabinfo(s, &sinfo);
+
+		if (sinfo.num_objs > 0)
+			pr_info("%-17s %10luKB %10luKB\n", cache_name(s),
+				(sinfo.active_objs * s->size) / 1024,
+				(sinfo.num_objs * s->size) / 1024);
+	}
+	mutex_unlock(&slab_mutex);
+}
+
+#if defined(CONFIG_MEMCG)
 void *memcg_slab_start(struct seq_file *m, loff_t *pos)
 {
 	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));
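The new dump_unreclaimable_slab() is meant to be invoked from the out-of-memory report path when unreclaimable slab usage looks excessive; the actual call site is expected in a companion patch to the OOM killer rather than in this file. A hedged sketch of how such a caller might gate the dump (function name and threshold are illustrative, not taken from this diff):

/* Hypothetical caller in the OOM report path, for illustration only. */
static void dump_oom_header(void)
{
	/* ... usual OOM header output ... */

	/*
	 * Only dump slab stats when unreclaimable slab memory exceeds
	 * the memory sitting on the LRU lists (illustrative check).
	 */
	if (global_node_page_state(NR_SLAB_UNRECLAIMABLE) >
	    global_node_page_state(NR_ACTIVE_ANON) +
	    global_node_page_state(NR_INACTIVE_ANON) +
	    global_node_page_state(NR_ACTIVE_FILE) +
	    global_node_page_state(NR_INACTIVE_FILE))
		dump_unreclaimable_slab();
}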
@@ -1355,7 +1388,7 @@ static int __init slab_proc_init(void)
 	return 0;
 }
 module_init(slab_proc_init);
-#endif /* CONFIG_SLABINFO */
+#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */
 
 static __always_inline void *__do_krealloc(const void *p, size_t new_size,
 						gfp_t flags)