about summary refs log tree commit diff stats
path: root/mm/slab.c
diff options
context:
space:
mode:
author: Christoph Lameter <cl@linux.com> 2012-09-04 19:18:33 -0400
committer: Pekka Enberg <penberg@kernel.org> 2012-09-05 05:00:35 -0400
commit945cf2b6199be70ff03102b9e642c3bb05d01de9 (patch)
treeb0deef56b1d79af1054f0cf1bd91c6fb00ce31a5 /mm/slab.c
parent7c9adf5a5471647f392169ef19d3e81dcfa76045 (diff)
mm/sl[aou]b: Extract a common function for kmem_cache_destroy
kmem_cache_destroy does basically the same in all allocators. Extract common code which is easy since we already have common mutex handling.

Reviewed-by: Glauber Costa <glommer@parallels.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
Diffstat (limited to 'mm/slab.c')
-rw-r--r--  mm/slab.c  45
1 file changed, 3 insertions, 42 deletions
diff --git a/mm/slab.c b/mm/slab.c
index a69903168497..49a74b349e39 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2206,7 +2206,7 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
2206 } 2206 }
2207} 2207}
2208 2208
2209static void __kmem_cache_destroy(struct kmem_cache *cachep) 2209void __kmem_cache_destroy(struct kmem_cache *cachep)
2210{ 2210{
2211 int i; 2211 int i;
2212 struct kmem_list3 *l3; 2212 struct kmem_list3 *l3;
@@ -2763,49 +2763,10 @@ int kmem_cache_shrink(struct kmem_cache *cachep)
2763} 2763}
2764EXPORT_SYMBOL(kmem_cache_shrink); 2764EXPORT_SYMBOL(kmem_cache_shrink);
2765 2765
2766/** 2766int __kmem_cache_shutdown(struct kmem_cache *cachep)
2767 * kmem_cache_destroy - delete a cache
2768 * @cachep: the cache to destroy
2769 *
2770 * Remove a &struct kmem_cache object from the slab cache.
2771 *
2772 * It is expected this function will be called by a module when it is
2773 * unloaded. This will remove the cache completely, and avoid a duplicate
2774 * cache being allocated each time a module is loaded and unloaded, if the
2775 * module doesn't have persistent in-kernel storage across loads and unloads.
2776 *
2777 * The cache must be empty before calling this function.
2778 *
2779 * The caller must guarantee that no one will allocate memory from the cache
2780 * during the kmem_cache_destroy().
2781 */
2782void kmem_cache_destroy(struct kmem_cache *cachep)
2783{ 2767{
2784 BUG_ON(!cachep || in_interrupt()); 2768 return __cache_shrink(cachep);
2785
2786 /* Find the cache in the chain of caches. */
2787 get_online_cpus();
2788 mutex_lock(&slab_mutex);
2789 /*
2790 * the chain is never empty, cache_cache is never destroyed
2791 */
2792 list_del(&cachep->list);
2793 if (__cache_shrink(cachep)) {
2794 slab_error(cachep, "Can't free all objects");
2795 list_add(&cachep->list, &slab_caches);
2796 mutex_unlock(&slab_mutex);
2797 put_online_cpus();
2798 return;
2799 }
2800
2801 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2802 rcu_barrier();
2803
2804 __kmem_cache_destroy(cachep);
2805 mutex_unlock(&slab_mutex);
2806 put_online_cpus();
2807} 2769}
2808EXPORT_SYMBOL(kmem_cache_destroy);
2809 2770
2810/* 2771/*
2811 * Get the memory for a slab management obj. 2772 * Get the memory for a slab management obj.