aboutsummaryrefslogtreecommitdiffstats
path: root/mm/slub.c
diff options
context:
space:
mode:
authorChristoph Lameter <cl@linux.com>2012-09-04 19:18:33 -0400
committerPekka Enberg <penberg@kernel.org>2012-09-05 05:00:35 -0400
commit945cf2b6199be70ff03102b9e642c3bb05d01de9 (patch)
treeb0deef56b1d79af1054f0cf1bd91c6fb00ce31a5 /mm/slub.c
parent7c9adf5a5471647f392169ef19d3e81dcfa76045 (diff)
mm/sl[aou]b: Extract a common function for kmem_cache_destroy
kmem_cache_destroy does basically the same in all allocators. Extract common code which is easy since we already have common mutex handling.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slub.c')
-rw-r--r--mm/slub.c36
1 file changed, 11 insertions(+), 25 deletions(-)
diff --git a/mm/slub.c b/mm/slub.c
index 24aa362edef7..724adea34384 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -624,7 +624,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
624 print_trailer(s, page, object); 624 print_trailer(s, page, object);
625} 625}
626 626
627static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...) 627static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
628{ 628{
629 va_list args; 629 va_list args;
630 char buf[100]; 630 char buf[100];
@@ -3146,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
3146 sizeof(long), GFP_ATOMIC); 3146 sizeof(long), GFP_ATOMIC);
3147 if (!map) 3147 if (!map)
3148 return; 3148 return;
3149 slab_err(s, page, "%s", text); 3149 slab_err(s, page, text, s->name);
3150 slab_lock(page); 3150 slab_lock(page);
3151 3151
3152 get_map(s, page, map); 3152 get_map(s, page, map);
@@ -3178,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3178 discard_slab(s, page); 3178 discard_slab(s, page);
3179 } else { 3179 } else {
3180 list_slab_objects(s, page, 3180 list_slab_objects(s, page,
3181 "Objects remaining on kmem_cache_close()"); 3181 "Objects remaining in %s on kmem_cache_close()");
3182 } 3182 }
3183 } 3183 }
3184} 3184}
@@ -3191,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
3191 int node; 3191 int node;
3192 3192
3193 flush_all(s); 3193 flush_all(s);
3194 free_percpu(s->cpu_slab);
3195 /* Attempt to free all objects */ 3194 /* Attempt to free all objects */
3196 for_each_node_state(node, N_NORMAL_MEMORY) { 3195 for_each_node_state(node, N_NORMAL_MEMORY) {
3197 struct kmem_cache_node *n = get_node(s, node); 3196 struct kmem_cache_node *n = get_node(s, node);
@@ -3200,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
3200 if (n->nr_partial || slabs_node(s, node)) 3199 if (n->nr_partial || slabs_node(s, node))
3201 return 1; 3200 return 1;
3202 } 3201 }
3202 free_percpu(s->cpu_slab);
3203 free_kmem_cache_nodes(s); 3203 free_kmem_cache_nodes(s);
3204 return 0; 3204 return 0;
3205} 3205}
3206 3206
3207/* 3207int __kmem_cache_shutdown(struct kmem_cache *s)
3208 * Close a cache and release the kmem_cache structure
3209 * (must be used for caches created using kmem_cache_create)
3210 */
3211void kmem_cache_destroy(struct kmem_cache *s)
3212{ 3208{
3213 mutex_lock(&slab_mutex); 3209 return kmem_cache_close(s);
3214 s->refcount--; 3210}
3215 if (!s->refcount) { 3211
3216 list_del(&s->list); 3212void __kmem_cache_destroy(struct kmem_cache *s)
3217 mutex_unlock(&slab_mutex); 3213{
3218 if (kmem_cache_close(s)) { 3214 sysfs_slab_remove(s);
3219 printk(KERN_ERR "SLUB %s: %s called for cache that "
3220 "still has objects.\n", s->name, __func__);
3221 dump_stack();
3222 }
3223 if (s->flags & SLAB_DESTROY_BY_RCU)
3224 rcu_barrier();
3225 sysfs_slab_remove(s);
3226 } else
3227 mutex_unlock(&slab_mutex);
3228} 3215}
3229EXPORT_SYMBOL(kmem_cache_destroy);
3230 3216
3231/******************************************************************** 3217/********************************************************************
3232 * Kmalloc subsystem 3218 * Kmalloc subsystem