Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c | 78
1 file changed, 44 insertions(+), 34 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 85c2e03098a7..21ba06035700 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -674,6 +674,37 @@ static struct kmem_cache cache_cache = {
 #endif
 };
 
+#ifdef CONFIG_LOCKDEP
+
+/*
+ * Slab sometimes uses the kmalloc slabs to store the slab headers
+ * for other slabs "off slab".
+ * The locking for this is tricky in that it nests within the locks
+ * of all other slabs in a few places; to deal with this special
+ * locking we put on-slab caches into a separate lock-class.
+ */
+static struct lock_class_key on_slab_key;
+
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+	int q;
+
+	for (q = 0; q < MAX_NUMNODES; q++) {
+		if (!s->cs_cachep->nodelists[q] || OFF_SLAB(s->cs_cachep))
+			continue;
+		lockdep_set_class(&s->cs_cachep->nodelists[q]->list_lock,
+				  &on_slab_key);
+	}
+}
+
+#else
+static inline void init_lock_keys(struct cache_sizes *s)
+{
+}
+#endif
+
+
+
 /* Guard access to the cache-chain. */
 static DEFINE_MUTEX(cache_chain_mutex);
 static struct list_head cache_chain;
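
The hunk above carries the whole idea of the patch: instead of annotating individual lock acquisitions with nesting arguments, every on-slab kmalloc cache has its per-node list_lock re-keyed into one dedicated lockdep class, on_slab_key. The per-node list_locks are normally all initialized from the same helper and therefore share a single class, so taking a kmalloc cache's list_lock while already holding another cache's list_lock (to store an off-slab slab header) looked like recursive locking to lockdep. After the re-keying, the same nesting crosses two classes and is recorded as an ordinary dependency. A minimal sketch of the mechanism, using hypothetical locks and helpers that are not part of this patch:

#include <linux/spinlock.h>
#include <linux/lockdep.h>

/* Plays the role of on_slab_key: a separate class for the "inner" lock. */
static struct lock_class_key inner_key;

static spinlock_t outer_lock;
static spinlock_t inner_lock;

/* One init call site, so both locks start out in the same lockdep class. */
static void init_one(spinlock_t *lock)
{
	spin_lock_init(lock);
}

static void example_init(void)
{
	init_one(&outer_lock);
	init_one(&inner_lock);
	/* Move the inner lock into its own class, as init_lock_keys() does. */
	lockdep_set_class(&inner_lock, &inner_key);
}

static void example_nest(void)
{
	spin_lock(&outer_lock);
	spin_lock(&inner_lock);		/* different class: no recursion report */
	spin_unlock(&inner_lock);
	spin_unlock(&outer_lock);
}
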
@@ -1021,8 +1052,7 @@ static void drain_alien_cache(struct kmem_cache *cachep,
 	}
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	struct slab *slabp = virt_to_slab(objp);
 	int nodeid = slabp->nodeid;
@@ -1040,7 +1070,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
 		STATS_INC_NODEFREES(cachep);
 		if (l3->alien && l3->alien[nodeid]) {
 			alien = l3->alien[nodeid];
-			spin_lock_nested(&alien->lock, nesting);
+			spin_lock(&alien->lock);
 			if (unlikely(alien->avail == alien->limit)) {
 				STATS_INC_ACOVERFLOW(cachep);
 				__drain_alien_cache(cachep, alien, nodeid);
@@ -1069,15 +1099,14 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
-				   int nesting)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
 {
 	return 0;
 }
 
 #endif
 
-static int __devinit cpuup_callback(struct notifier_block *nfb,
+static int __cpuinit cpuup_callback(struct notifier_block *nfb,
 				    unsigned long action, void *hcpu)
 {
 	long cpu = (long)hcpu;
@@ -1393,6 +1422,7 @@ void __init kmem_cache_init(void)
 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
 					NULL, NULL);
 		}
+		init_lock_keys(sizes);
 
 		sizes->cs_dmacachep = kmem_cache_create(names->name_dma,
 					sizes->cs_size,
@@ -1760,8 +1790,6 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 }
 #endif
 
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting);
-
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
@@ -1785,17 +1813,8 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 		call_rcu(&slab_rcu->head, kmem_rcu_free);
 	} else {
 		kmem_freepages(cachep, addr);
-		if (OFF_SLAB(cachep)) {
-			unsigned long flags;
-
-			/*
-			 * lockdep: we may nest inside an already held
-			 * ac->lock, so pass in a nesting flag:
-			 */
-			local_irq_save(flags);
-			__cache_free(cachep->slabp_cache, slabp, 1);
-			local_irq_restore(flags);
-		}
+		if (OFF_SLAB(cachep))
+			kmem_cache_free(cachep->slabp_cache, slabp);
 	}
 }
 
@@ -3100,16 +3119,7 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
 		if (slabp->inuse == 0) {
 			if (l3->free_objects > l3->free_limit) {
 				l3->free_objects -= cachep->num;
-				/*
-				 * It is safe to drop the lock. The slab is
-				 * no longer linked to the cache. cachep
-				 * cannot disappear - we are using it and
-				 * all destruction of caches must be
-				 * serialized properly by the user.
-				 */
-				spin_unlock(&l3->list_lock);
 				slab_destroy(cachep, slabp);
-				spin_lock(&l3->list_lock);
 			} else {
 				list_add(&slabp->list, &l3->slabs_free);
 			}
@@ -3135,7 +3145,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 #endif
 	check_irq_off();
 	l3 = cachep->nodelists[node];
-	spin_lock_nested(&l3->list_lock, SINGLE_DEPTH_NESTING);
+	spin_lock(&l3->list_lock);
 	if (l3->shared) {
 		struct array_cache *shared_array = l3->shared;
 		int max = shared_array->limit - shared_array->avail;
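
These two hunks are the payoff of the class annotation on the free path: free_block() may now call slab_destroy() without dropping l3->list_lock, and cache_flusharray() takes the same lock with a plain spin_lock() instead of the SINGLE_DEPTH_NESTING annotation. The nesting that made those workarounds necessary is the off-slab case, where slab_destroy() hands the slab header back to a kmalloc cache whose list_lock now sits in the on_slab_key class. Roughly, as a sketch of the call chain for an off-slab cache (illustrative, not literal code from this patch):

	cache_flusharray(cachep, ac)
	  spin_lock(&l3->list_lock)		/* cachep's lock, default class */
	    free_block(cachep, ...)
	      slab_destroy(cachep, slabp)
	        kmem_cache_free(cachep->slabp_cache, slabp)
	          __cache_free(cachep->slabp_cache, slabp)
	            ... may eventually take slabp_cache's list_lock, on_slab_key class

Because the inner and outer list_locks belong to different lockdep classes, this nesting is recorded as an ordinary lock-order dependency rather than apparent recursion.
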
@@ -3178,14 +3188,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released. Called with disabled ints.
  */
-static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting)
+static inline void __cache_free(struct kmem_cache *cachep, void *objp)
 {
 	struct array_cache *ac = cpu_cache_get(cachep);
 
 	check_irq_off();
 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-	if (cache_free_alien(cachep, objp, nesting))
+	if (cache_free_alien(cachep, objp))
 		return;
 
 	if (likely(ac->avail < ac->limit)) {
@@ -3214,7 +3224,7 @@ void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
 EXPORT_SYMBOL(kmem_cache_alloc);
 
 /**
- * kmem_cache_alloc - Allocate an object. The memory is set to zero.
+ * kmem_cache_zalloc - Allocate an object. The memory is set to zero.
  * @cache: The cache to allocate from.
  * @flags: See kmalloc().
  *
@@ -3424,7 +3434,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
 	BUG_ON(virt_to_cache(objp) != cachep);
 
 	local_irq_save(flags);
-	__cache_free(cachep, objp, 0);
+	__cache_free(cachep, objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3449,7 +3459,7 @@ void kfree(const void *objp)
 	kfree_debugcheck(objp);
 	c = virt_to_cache(objp);
 	debug_check_no_locks_freed(objp, obj_size(c));
-	__cache_free(c, (void *)objp, 0);
+	__cache_free(c, (void *)objp);
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
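
The exported slab API is untouched by any of this: the nesting argument was internal to mm/slab.c, so callers allocate and free exactly as before. A trivial, hypothetical caller for illustration (my_cachep and my_obj are placeholders, not from this patch):

	/* my_cachep: a cache created elsewhere with kmem_cache_create() */
	struct my_obj *p = kmem_cache_alloc(my_cachep, GFP_KERNEL);
	if (p) {
		/* ... use the object ... */
		kmem_cache_free(my_cachep, p);
	}
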