Diffstat (limited to 'mm/slab.c')
 -rw-r--r--   mm/slab.c   59
 1 file changed, 48 insertions(+), 11 deletions(-)
@@ -1021,7 +1021,8 @@ static void drain_alien_cache(struct kmem_cache *cachep,
         }
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
+                                   int nesting)
 {
         struct slab *slabp = virt_to_slab(objp);
         int nodeid = slabp->nodeid;
@@ -1039,7 +1040,7 @@ static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
         STATS_INC_NODEFREES(cachep);
         if (l3->alien && l3->alien[nodeid]) {
                 alien = l3->alien[nodeid];
-                spin_lock(&alien->lock);
+                spin_lock_nested(&alien->lock, nesting);
                 if (unlikely(alien->avail == alien->limit)) {
                         STATS_INC_ACOVERFLOW(cachep);
                         __drain_alien_cache(cachep, alien, nodeid);
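Note: spin_lock_nested() is the lockdep annotation for acquiring a lock whose lock class may already be held; the second argument is the subclass (nesting level) that distinguishes the two acquisitions. A minimal sketch of the pattern, using hypothetical names rather than slab.c's own types:

    /* Sketch only: annotate a same-class nested acquisition for lockdep. */
    #include <linux/spinlock.h>

    struct obj_pool {
            spinlock_t lock;
    };

    static void transfer(struct obj_pool *dst, struct obj_pool *src)
    {
            spin_lock(&src->lock);
            /*
             * dst->lock belongs to the same lock class as src->lock;
             * without the subclass annotation lockdep would flag this
             * as a potential self-deadlock. A real caller must still
             * guarantee a stable locking order between the two pools.
             */
            spin_lock_nested(&dst->lock, SINGLE_DEPTH_NESTING);
            /* ... move objects from src to dst ... */
            spin_unlock(&dst->lock);
            spin_unlock(&src->lock);
    }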
@@ -1068,7 +1069,8 @@ static inline void free_alien_cache(struct array_cache **ac_ptr)
 {
 }
 
-static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
+static inline int cache_free_alien(struct kmem_cache *cachep, void *objp,
+                                   int nesting)
 {
         return 0;
 }
@@ -1272,6 +1274,11 @@ static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
 
         local_irq_disable();
         memcpy(ptr, list, sizeof(struct kmem_list3));
+        /*
+         * Do not assume that spinlocks can be initialized via memcpy:
+         */
+        spin_lock_init(&ptr->list_lock);
+
         MAKE_ALL_LISTS(cachep, ptr, nodeid);
         cachep->nodelists[nodeid] = ptr;
         local_irq_enable();
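Note: the new spin_lock_init() call is needed because the memcpy() above copies the raw bytes of the embedded spinlock, including its lockdep class and debug state, which does not yield a validly initialized lock. The same pattern, sketched with hypothetical names:

    /* Sketch only: re-initialize an embedded spinlock after memcpy(). */
    #include <linux/spinlock.h>
    #include <linux/string.h>

    struct list_state {
            spinlock_t list_lock;
            unsigned long free_objects;
    };

    static void clone_list_state(struct list_state *dst,
                                 const struct list_state *src)
    {
            memcpy(dst, src, sizeof(*dst));
            /* The copied lock bytes are not a usable lock; start fresh. */
            spin_lock_init(&dst->list_lock);
    }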
@@ -1398,7 +1405,7 @@ void __init kmem_cache_init(void)
         }
         /* 4) Replace the bootstrap head arrays */
         {
-                void *ptr;
+                struct array_cache *ptr;
 
                 ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
 
@@ -1406,6 +1413,11 @@ void __init kmem_cache_init(void)
                 BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
                 memcpy(ptr, cpu_cache_get(&cache_cache),
                        sizeof(struct arraycache_init));
+                /*
+                 * Do not assume that spinlocks can be initialized via memcpy:
+                 */
+                spin_lock_init(&ptr->lock);
+
                 cache_cache.array[smp_processor_id()] = ptr;
                 local_irq_enable();
 
@@ -1416,6 +1428,11 @@ void __init kmem_cache_init(void)
                        != &initarray_generic.cache);
                 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
                        sizeof(struct arraycache_init));
+                /*
+                 * Do not assume that spinlocks can be initialized via memcpy:
+                 */
+                spin_lock_init(&ptr->lock);
+
                 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
                     ptr;
                 local_irq_enable();
@@ -1743,6 +1760,8 @@ static void slab_destroy_objs(struct kmem_cache *cachep, struct slab *slabp)
 }
 #endif
 
+static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting);
+
 /**
  * slab_destroy - destroy and release all objects in a slab
  * @cachep: cache pointer being destroyed
@@ -1766,8 +1785,17 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
                 call_rcu(&slab_rcu->head, kmem_rcu_free);
         } else {
                 kmem_freepages(cachep, addr);
-                if (OFF_SLAB(cachep))
-                        kmem_cache_free(cachep->slabp_cache, slabp);
+                if (OFF_SLAB(cachep)) {
+                        unsigned long flags;
+
+                        /*
+                         * lockdep: we may nest inside an already held
+                         * ac->lock, so pass in a nesting flag:
+                         */
+                        local_irq_save(flags);
+                        __cache_free(cachep->slabp_cache, slabp, 1);
+                        local_irq_restore(flags);
+                }
         }
 }
 
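Note: two things change in this hunk. The off-slab slab descriptor is now freed via __cache_free() directly instead of kmem_cache_free(), so that a nesting level of 1 can be passed down to the locks taken on the free path; and because __cache_free() must run with interrupts disabled (see its header comment further down), the call is bracketed by local_irq_save()/local_irq_restore().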
@@ -3072,7 +3100,16 @@ static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
                 if (slabp->inuse == 0) {
                         if (l3->free_objects > l3->free_limit) {
                                 l3->free_objects -= cachep->num;
+                                /*
+                                 * It is safe to drop the lock. The slab is
+                                 * no longer linked to the cache. cachep
+                                 * cannot disappear - we are using it and
+                                 * all destruction of caches must be
+                                 * serialized properly by the user.
+                                 */
+                                spin_unlock(&l3->list_lock);
                                 slab_destroy(cachep, slabp);
+                                spin_lock(&l3->list_lock);
                         } else {
                                 list_add(&slabp->list, &l3->slabs_free);
                         }
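Note: the unlock/relock pair is the standard pattern of unlinking an object while holding a lock, then dropping that lock before a teardown step that may itself acquire locks of the same class (here slab_destroy() can recurse into __cache_free() for an off-slab descriptor). A self-contained sketch with hypothetical names:

    /* Sketch only: drop a list lock around teardown that may re-lock. */
    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct item {
            struct list_head link;
    };

    struct pool {
            spinlock_t lock;
            struct list_head free_list;
    };

    static void reap(struct pool *p, void (*destroy)(struct item *))
    {
            spin_lock(&p->lock);
            while (!list_empty(&p->free_list)) {
                    struct item *it = list_first_entry(&p->free_list,
                                                       struct item, link);

                    list_del(&it->link);    /* now invisible to others */
                    spin_unlock(&p->lock);  /* destroy() may take locks
                                               of the same class */
                    destroy(it);
                    spin_lock(&p->lock);
            }
            spin_unlock(&p->lock);
    }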
@@ -3098,7 +3135,7 @@ static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
 #endif
         check_irq_off();
         l3 = cachep->nodelists[node];
-        spin_lock(&l3->list_lock);
+        spin_lock_nested(&l3->list_lock, SINGLE_DEPTH_NESTING);
         if (l3->shared) {
                 struct array_cache *shared_array = l3->shared;
                 int max = shared_array->limit - shared_array->avail;
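Note: the SINGLE_DEPTH_NESTING here appears to cover the recursive entry into cache_flusharray() on the off-slab free path (free_block() -> slab_destroy() -> __cache_free() -> cache_flusharray()): the l3->list_lock being taken may share a lock class with one involved higher up in that chain, so the acquisition is annotated as one level deep.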
@@ -3141,14 +3178,14 @@ free_done:
  * Release an obj back to its cache. If the obj has a constructed state, it must
  * be in this state _before_ it is released. Called with disabled ints.
  */
-static inline void __cache_free(struct kmem_cache *cachep, void *objp)
+static void __cache_free(struct kmem_cache *cachep, void *objp, int nesting)
 {
         struct array_cache *ac = cpu_cache_get(cachep);
 
         check_irq_off();
         objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
 
-        if (cache_free_alien(cachep, objp))
+        if (cache_free_alien(cachep, objp, nesting))
                 return;
 
         if (likely(ac->avail < ac->limit)) {
@@ -3387,7 +3424,7 @@ void kmem_cache_free(struct kmem_cache *cachep, void *objp)
         BUG_ON(virt_to_cache(objp) != cachep);
 
         local_irq_save(flags);
-        __cache_free(cachep, objp);
+        __cache_free(cachep, objp, 0);
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kmem_cache_free);
@@ -3412,7 +3449,7 @@ void kfree(const void *objp)
         kfree_debugcheck(objp);
         c = virt_to_cache(objp);
         debug_check_no_locks_freed(objp, obj_size(c));
-        __cache_free(c, (void *)objp);
+        __cache_free(c, (void *)objp, 0);
         local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kfree);
