Diffstat (limited to 'mm')
-rw-r--r--  mm/slab.c  32
1 files changed, 32 insertions, 0 deletions
diff --git a/mm/slab.c b/mm/slab.c
index fd1e4c4c1397..5a57cda7490d 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -674,6 +674,37 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Slab sometimes uses the kmalloc slabs to store the slab headers
+ * for other slabs "off slab".
+ * The locking for this is tricky in that it nests within the locks
+ * of all other slabs in a few places; to deal with this special
+ * locking we put on-slab caches into a separate lock-class.
+ */
+static struct lock_class_key on_slab_key;
+
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+	int q;
+
+	for (q = 0; q < MAX_NUMNODES; q++) {
+		if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
+			continue;
+		lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
+				  &on_slab_key);
+	}
+}
+
+#else
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+}
+#endif
+
+
+
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
@@ -1391,6 +1422,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
+		init_lock_keys(sizes);
 
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
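
Below is a minimal sketch, not part of the patch, of the lockdep re-keying pattern that init_lock_keys() applies above: a lock that is legitimately taken while another lock of the same type is held can be moved into its own class with a static struct lock_class_key and lockdep_set_class(), so lockdep does not report the nesting as a potential recursive deadlock. The demo_node structure, demo_nested_key and demo_init_node() names are hypothetical and assume ordinary kernel code; the patch itself makes the same call on the nodelist list_lock of each on-slab kmalloc cache.

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Hypothetical key: every lock keyed to it shares one lockdep class. */
static struct lock_class_key demo_nested_key;

/* Hypothetical structure standing in for the per-node slab lists. */
struct demo_node {
	spinlock_t list_lock;
};

static void demo_init_node(struct demo_node *n)
{
	spin_lock_init(&n->list_lock);
	/*
	 * Re-key the lock into its own class, mirroring what
	 * init_lock_keys() does for the on-slab caches in the hunk above.
	 */
	lockdep_set_class(&n->list_lock, &demo_nested_key);
}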