author		Christoph Lameter <cl@linux.com>	2012-09-04 19:18:33 -0400
committer	Pekka Enberg <penberg@kernel.org>	2012-09-05 05:00:35 -0400
commit		945cf2b6199be70ff03102b9e642c3bb05d01de9 (patch)
tree		b0deef56b1d79af1054f0cf1bd91c6fb00ce31a5
parent		7c9adf5a5471647f392169ef19d3e81dcfa76045 (diff)
mm/sl[aou]b: Extract a common function for kmem_cache_destroy
kmem_cache_destroy does basically the same thing in all allocators.

Extract the common code, which is easy since we already have common
mutex handling.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
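For orientation: the locking, refcount, and list handling move into a single
kmem_cache_destroy() in mm/slab_common.c, and each allocator now supplies only
__kmem_cache_shutdown() (which fails if objects remain) and
__kmem_cache_destroy() (which releases the cache's resources). A minimal sketch
of the resulting common path, mirroring the mm/slab_common.c hunk below (error
reporting elided):

	void kmem_cache_destroy(struct kmem_cache *s)
	{
		get_online_cpus();
		mutex_lock(&slab_mutex);
		s->refcount--;
		if (!s->refcount) {
			list_del(&s->list);
			if (!__kmem_cache_shutdown(s)) {
				/* allocator-specific shutdown succeeded */
				if (s->flags & SLAB_DESTROY_BY_RCU)
					rcu_barrier();
				__kmem_cache_destroy(s);	/* allocator-specific release */
			} else {
				/* cache still has objects: put it back and warn */
				list_add(&s->list, &slab_caches);
			}
		}
		mutex_unlock(&slab_mutex);
		put_online_cpus();
	}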
-rw-r--r--	mm/slab.c	45
-rw-r--r--	mm/slab.h	3
-rw-r--r--	mm/slab_common.c	25
-rw-r--r--	mm/slob.c	15
-rw-r--r--	mm/slub.c	36
5 files changed, 49 insertions, 75 deletions
diff --git a/mm/slab.c b/mm/slab.c
index a69903168497..49a74b349e39 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2206,7 +2206,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-static void __kmem_cache_destroy(struct kmem_cache *cachep)
+void __kmem_cache_destroy(struct kmem_cache *cachep)
 {
 	int i;
 	struct kmem_list3 *l3;
@@ -2763,49 +2763,10 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
 }
 EXPORT_SYMBOL(kmem_cache_shrink);
 
-/**
- * kmem_cache_destroy - delete a cache
- * @cachep: the cache to destroy
- *
- * Remove a &struct kmem_cache object from the slab cache.
- *
- * It is expected this function will be called by a module when it is
- * unloaded. This will remove the cache completely, and avoid a duplicate
- * cache being allocated each time a module is loaded and unloaded, if the
- * module doesn't have persistent in-kernel storage across loads and unloads.
- *
- * The cache must be empty before calling this function.
- *
- * The caller must guarantee that no one will allocate memory from the cache
- * during the kmem_cache_destroy().
- */
-void kmem_cache_destroy(struct kmem_cache *cachep)
+int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	BUG_ON(!cachep || in_interrupt());
-
-	/* Find the cache in the chain of caches. */
-	get_online_cpus();
-	mutex_lock(&slab_mutex);
-	/*
-	 * the chain is never empty, cache_cache is never destroyed
-	 */
-	list_del(&cachep->list);
-	if (__cache_shrink(cachep)) {
-		slab_error(cachep, "Can't free all objects");
-		list_add(&cachep->list, &slab_caches);
-		mutex_unlock(&slab_mutex);
-		put_online_cpus();
-		return;
-	}
-
-	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
-		rcu_barrier();
-
-	__kmem_cache_destroy(cachep);
-	mutex_unlock(&slab_mutex);
-	put_online_cpus();
+	return __cache_shrink(cachep);
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /*
  * Get the memory for a slab management obj.
diff --git a/mm/slab.h b/mm/slab.h
index db7848caaa25..07a537ed5da3 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -30,4 +30,7 @@ extern struct list_head slab_caches;
 struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
 
+int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_destroy(struct kmem_cache *);
+
 #endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 5190a7cd02bd..a1c4f0b5aaed 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -140,6 +140,31 @@ out_locked:
 }
 EXPORT_SYMBOL(kmem_cache_create);
 
+void kmem_cache_destroy(struct kmem_cache *s)
+{
+	get_online_cpus();
+	mutex_lock(&slab_mutex);
+	s->refcount--;
+	if (!s->refcount) {
+		list_del(&s->list);
+
+		if (!__kmem_cache_shutdown(s)) {
+			if (s->flags & SLAB_DESTROY_BY_RCU)
+				rcu_barrier();
+
+			__kmem_cache_destroy(s);
+		} else {
+			list_add(&s->list, &slab_caches);
+			printk(KERN_ERR "kmem_cache_destroy %s: Slab cache still has objects\n",
+				s->name);
+			dump_stack();
+		}
+	}
+	mutex_unlock(&slab_mutex);
+	put_online_cpus();
+}
+EXPORT_SYMBOL(kmem_cache_destroy);
+
 int slab_is_available(void)
 {
 	return slab_state >= UP;
diff --git a/mm/slob.c b/mm/slob.c
index 5225d28f2694..289be4f4681a 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -538,18 +538,11 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	return c;
 }
 
-void kmem_cache_destroy(struct kmem_cache *c)
+void __kmem_cache_destroy(struct kmem_cache *c)
 {
-	mutex_lock(&slab_mutex);
-	list_del(&c->list);
-	mutex_unlock(&slab_mutex);
-
 	kmemleak_free(c);
-	if (c->flags & SLAB_DESTROY_BY_RCU)
-		rcu_barrier();
 	slob_free(c, sizeof(struct kmem_cache));
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
@@ -617,6 +610,12 @@ unsigned int kmem_cache_size(struct kmem_cache *c)
 }
 EXPORT_SYMBOL(kmem_cache_size);
 
+int __kmem_cache_shutdown(struct kmem_cache *c)
+{
+	/* No way to check for remaining objects */
+	return 0;
+}
+
 int kmem_cache_shrink(struct kmem_cache *d)
 {
 	return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 24aa362edef7..724adea34384 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -624,7 +624,7 @@ static void object_err(struct kmem_cache *s, struct page *page,
 	print_trailer(s, page, object);
 }
 
-static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
+static void slab_err(struct kmem_cache *s, struct page *page, const char *fmt, ...)
 {
 	va_list args;
 	char buf[100];
@@ -3146,7 +3146,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 						sizeof(long), GFP_ATOMIC);
 	if (!map)
 		return;
-	slab_err(s, page, "%s", text);
+	slab_err(s, page, text, s->name);
 	slab_lock(page);
 
 	get_map(s, page, map);
@@ -3178,7 +3178,7 @@ static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
 			discard_slab(s, page);
 		} else {
 			list_slab_objects(s, page,
-				"Objects remaining on kmem_cache_close()");
+			"Objects remaining in %s on kmem_cache_close()");
 		}
 	}
 }
@@ -3191,7 +3191,6 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 	int node;
 
 	flush_all(s);
-	free_percpu(s->cpu_slab);
 	/* Attempt to free all objects */
 	for_each_node_state(node, N_NORMAL_MEMORY) {
 		struct kmem_cache_node *n = get_node(s, node);
@@ -3200,33 +3199,20 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 		if (n->nr_partial || slabs_node(s, node))
 			return 1;
 	}
+	free_percpu(s->cpu_slab);
 	free_kmem_cache_nodes(s);
 	return 0;
 }
 
-/*
- * Close a cache and release the kmem_cache structure
- * (must be used for caches created using kmem_cache_create)
- */
-void kmem_cache_destroy(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-	mutex_lock(&slab_mutex);
-	s->refcount--;
-	if (!s->refcount) {
-		list_del(&s->list);
-		mutex_unlock(&slab_mutex);
-		if (kmem_cache_close(s)) {
-			printk(KERN_ERR "SLUB %s: %s called for cache that "
-				"still has objects.\n", s->name, __func__);
-			dump_stack();
-		}
-		if (s->flags & SLAB_DESTROY_BY_RCU)
-			rcu_barrier();
-		sysfs_slab_remove(s);
-	} else
-		mutex_unlock(&slab_mutex);
+	return kmem_cache_close(s);
+}
+
+void __kmem_cache_destroy(struct kmem_cache *s)
+{
+	sysfs_slab_remove(s);
 }
-EXPORT_SYMBOL(kmem_cache_destroy);
 
 /********************************************************************
  *			Kmalloc subsystem