aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author    Christoph Lameter <cl@linux.com>      2012-09-04 19:38:33 -0400
committer Pekka Enberg <penberg@kernel.org>     2012-09-05 05:00:36 -0400
commit    12c3667fb780e20360ad0bde32dfb3591ef609ad (patch)
tree      dbced65da22c85212bf632ff6953c0a94252c3ef
parent    8f4c765c22deee766319ae9a1db68325f14816e6 (diff)

mm/sl[aou]b: Get rid of __kmem_cache_destroy
What is done there can be done in __kmem_cache_shutdown.

This affects RCU handling somewhat. On rcu free all slab allocators do not
refer to other management structures than the kmem_cache structure. Therefore
these other structures can be freed before the rcu deferred free to the page
allocator occurs.

Reviewed-by: Joonsoo Kim <js1304@gmail.com>
Signed-off-by: Christoph Lameter <cl@linux.com>
Signed-off-by: Pekka Enberg <penberg@kernel.org>
 mm/slab.c        | 46 +++++++++++++++++++++-------------------------
 mm/slab.h        |  1 -
 mm/slab_common.c |  1 -
 mm/slob.c        |  4 ----
 mm/slub.c        | 10 +++++-----
 5 files changed, 26 insertions(+), 36 deletions(-)
diff --git a/mm/slab.c b/mm/slab.c
index 8ca6ec6301fa..de961b48a6a4 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2208,26 +2208,6 @@ static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
 	}
 }
 
-void __kmem_cache_destroy(struct kmem_cache *cachep)
-{
-	int i;
-	struct kmem_list3 *l3;
-
-	for_each_online_cpu(i)
-	    kfree(cachep->array[i]);
-
-	/* NUMA: free the list3 structures */
-	for_each_online_node(i) {
-		l3 = cachep->nodelists[i];
-		if (l3) {
-			kfree(l3->shared);
-			free_alien_cache(l3->alien);
-			kfree(l3);
-		}
-	}
-}
-
-
 /**
  * calculate_slab_order - calculate size (page order) of slabs
  * @cachep: pointer to the cache that is being created
@@ -2364,9 +2344,6 @@ static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
  * Cannot be called within a int, but can be interrupted.
  * The @ctor is run when new pages are allocated by the cache.
  *
- * @name must be valid until the cache is destroyed. This implies that
- * the module calling this has to destroy the cache before getting unloaded.
- *
  * The flags are
  *
  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
@@ -2591,7 +2568,7 @@ __kmem_cache_create (const char *name, size_t size, size_t align,
 	cachep->refcount = 1;
 
 	if (setup_cpu_cache(cachep, gfp)) {
-		__kmem_cache_destroy(cachep);
+		__kmem_cache_shutdown(cachep);
 		return NULL;
 	}
 
@@ -2766,7 +2743,26 @@ EXPORT_SYMBOL(kmem_cache_shrink);
 
 int __kmem_cache_shutdown(struct kmem_cache *cachep)
 {
-	return __cache_shrink(cachep);
+	int i;
+	struct kmem_list3 *l3;
+	int rc = __cache_shrink(cachep);
+
+	if (rc)
+		return rc;
+
+	for_each_online_cpu(i)
+	    kfree(cachep->array[i]);
+
+	/* NUMA: free the list3 structures */
+	for_each_online_node(i) {
+		l3 = cachep->nodelists[i];
+		if (l3) {
+			kfree(l3->shared);
+			free_alien_cache(l3->alien);
+			kfree(l3);
+		}
+	}
+	return 0;
 }
 
 /*
diff --git a/mm/slab.h b/mm/slab.h
index 6724aa6f662f..c4f9a361bd18 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -37,6 +37,5 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	size_t align, unsigned long flags, void (*ctor)(void *));
 
 int __kmem_cache_shutdown(struct kmem_cache *);
-void __kmem_cache_destroy(struct kmem_cache *);
 
 #endif
diff --git a/mm/slab_common.c b/mm/slab_common.c
index d6deae9108cd..7df814e8fbea 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -153,7 +153,6 @@ void kmem_cache_destroy(struct kmem_cache *s)
153 if (s->flags & SLAB_DESTROY_BY_RCU) 153 if (s->flags & SLAB_DESTROY_BY_RCU)
154 rcu_barrier(); 154 rcu_barrier();
155 155
156 __kmem_cache_destroy(s);
157 kmem_cache_free(kmem_cache, s); 156 kmem_cache_free(kmem_cache, s);
158 } else { 157 } else {
159 list_add(&s->list, &slab_caches); 158 list_add(&s->list, &slab_caches);
diff --git a/mm/slob.c b/mm/slob.c
index cb4ab9675293..50f605322700 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -538,10 +538,6 @@ struct kmem_cache *__kmem_cache_create(const char *name, size_t size,
 	return c;
 }
 
-void __kmem_cache_destroy(struct kmem_cache *c)
-{
-}
-
 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
 {
 	void *b;
diff --git a/mm/slub.c b/mm/slub.c
index 6f932f7a8219..e5e09873f5ec 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -3205,12 +3205,12 @@ static inline int kmem_cache_close(struct kmem_cache *s)
 
 int __kmem_cache_shutdown(struct kmem_cache *s)
 {
-	return kmem_cache_close(s);
-}
+	int rc = kmem_cache_close(s);
 
-void __kmem_cache_destroy(struct kmem_cache *s)
-{
-	sysfs_slab_remove(s);
+	if (!rc)
+		sysfs_slab_remove(s);
+
+	return rc;
 }
 
 /********************************************************************